galaxy-dev
Threads by month
- ----- 2026 -----
- April
- March
- February
- January
- ----- 2025 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2009 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2008 -----
- December
- November
- October
- September
- August
- 10009 discussions
30 Apr '09
details: http://www.bx.psu.edu/hg/galaxy/rev/656f1df80b46
changeset: 2383:656f1df80b46
user: James Taylor <james(a)jamestaylor.org>
date: Wed Apr 29 14:47:18 2009 -0400
description:
Accidentally committed a testing change, removed
1 file(s) affected in this change:
tools/maf/interval2maf.xml
diffs (11 lines):
diff -r 89541ac4983f -r 656f1df80b46 tools/maf/interval2maf.xml
--- a/tools/maf/interval2maf.xml Wed Apr 29 13:56:59 2009 -0400
+++ b/tools/maf/interval2maf.xml Wed Apr 29 14:47:18 2009 -0400
@@ -6,7 +6,6 @@
#end if
</command>
<inputs>
- <param type="text" name="TEST" label="Test" />
<param format="interval" name="input1" type="data" label="Choose intervals">
<validator type="unspecified_build" />
</param>
1
0
30 Apr '09
details: http://www.bx.psu.edu/hg/galaxy/rev/22b08d47f7ba
changeset: 2380:22b08d47f7ba
user: guru
date: Wed Apr 29 11:53:22 2009 -0400
description:
Replacing \"qual\" datatype with 3 new datatypes: qualsolid, qual454 and qualsolexa.
10 file(s) affected in this change:
datatypes_conf.xml.sample
lib/galaxy/datatypes/converters/fastqsolexa_to_qual_converter.xml
lib/galaxy/datatypes/qualityscore.py
lib/galaxy/datatypes/registry.py
lib/galaxy/model/migrate/versions/0006_change_qual_datatype.py
tools/metag_tools/fastqsolexa_to_fasta_qual.xml
tools/metag_tools/rmapq_wrapper.xml
tools/metag_tools/short_reads_figure_high_quality_length.xml
tools/metag_tools/short_reads_figure_score.xml
tools/metag_tools/short_reads_trim_seq.xml
diffs (409 lines):
diff -r 5a4aac327bad -r 22b08d47f7ba datatypes_conf.xml.sample
--- a/datatypes_conf.xml.sample Wed Apr 29 10:27:57 2009 -0400
+++ b/datatypes_conf.xml.sample Wed Apr 29 11:53:22 2009 -0400
@@ -19,7 +19,7 @@
</datatype>
<datatype extension="fastqsolexa" type="galaxy.datatypes.sequence:FastqSolexa" display_in_upload="true">
<converter file="fastqsolexa_to_fasta_converter.xml" target_datatype="fasta"/>
- <converter file="fastqsolexa_to_qual_converter.xml" target_datatype="qual"/>
+ <converter file="fastqsolexa_to_qual_converter.xml" target_datatype="qualsolexa"/>
</datatype>
<datatype extension="genetrack" type="galaxy.datatypes.tracks:GeneTrack"/>
<datatype extension="gff" type="galaxy.datatypes.interval:Gff" display_in_upload="true">
@@ -42,7 +42,9 @@
</datatype>
<datatype extension="pdf" type="galaxy.datatypes.images:Image" mimetype="application/pdf"/>
<datatype extension="png" type="galaxy.datatypes.images:Image" mimetype="image/png"/>
- <datatype extension="qual" type="galaxy.datatypes.qualityscore:QualityScore" display_in_upload="true"/>
+ <datatype extension="qualsolexa" type="galaxy.datatypes.qualityscore:QualityScoreSolexa" display_in_upload="true"/>
+ <datatype extension="qualsolid" type="galaxy.datatypes.qualityscore:QualityScoreSOLiD" display_in_upload="true"/>
+ <datatype extension="qual454" type="galaxy.datatypes.qualityscore:QualityScore454" display_in_upload="true"/>
<datatype extension="scf" type="galaxy.datatypes.images:Scf" mimetype="application/octet-stream" display_in_upload="true"/>
<datatype extension="taxonomy" type="galaxy.datatypes.tabular:Taxonomy" display_in_upload="true"/>
<datatype extension="tabular" type="galaxy.datatypes.tabular:Tabular" display_in_upload="true"/>
@@ -186,7 +188,9 @@
<sniffer type="galaxy.datatypes.sequence:Maf"/>
<sniffer type="galaxy.datatypes.sequence:Lav"/>
<sniffer type="galaxy.datatypes.sequence:csFasta"/>
- <sniffer type="galaxy.datatypes.qualityscore:QualityScore"/>
+ <sniffer type="galaxy.datatypes.qualityscore:QualityScoreSolexa"/>
+ <sniffer type="galaxy.datatypes.qualityscore:QualityScoreSOLiD"/>
+ <sniffer type="galaxy.datatypes.qualityscore:QualityScore454"/>
<sniffer type="galaxy.datatypes.sequence:Fasta"/>
<sniffer type="galaxy.datatypes.sequence:FastqSolexa"/>
<sniffer type="galaxy.datatypes.interval:Wiggle"/>
diff -r 5a4aac327bad -r 22b08d47f7ba lib/galaxy/datatypes/converters/fastqsolexa_to_qual_converter.xml
--- a/lib/galaxy/datatypes/converters/fastqsolexa_to_qual_converter.xml Wed Apr 29 10:27:57 2009 -0400
+++ b/lib/galaxy/datatypes/converters/fastqsolexa_to_qual_converter.xml Wed Apr 29 11:53:22 2009 -0400
@@ -4,7 +4,7 @@
<param format="fastqsolexa" name="input1" type="data" label="Choose Fastqsolexa file"/>
</inputs>
<outputs>
- <data format="qual" name="output1" />
+ <data format="qualsolexa" name="output1" />
</outputs>
<help>
</help>
diff -r 5a4aac327bad -r 22b08d47f7ba lib/galaxy/datatypes/qualityscore.py
--- a/lib/galaxy/datatypes/qualityscore.py Wed Apr 29 10:27:57 2009 -0400
+++ b/lib/galaxy/datatypes/qualityscore.py Wed Apr 29 11:53:22 2009 -0400
@@ -9,11 +9,11 @@
log = logging.getLogger(__name__)
-class QualityScore ( data.Text ):
+class QualityScoreSOLiD ( data.Text ):
"""
until we know more about quality score formats
"""
- file_ext = "qual"
+ file_ext = "qualsolid"
def set_peek( self, dataset, line_count=None ):
if not dataset.dataset.purged:
@@ -21,7 +21,7 @@
if line_count is None:
dataset.blurb = data.nice_size( dataset.get_size() )
else:
- dataset.blurb = "%s lines, Quality score file" % util.commaify( str( line_count ) )
+ dataset.blurb = "%s lines, SOLiD Quality score file" % util.commaify( str( line_count ) )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
@@ -30,15 +30,80 @@
try:
return dataset.peek
except:
- return "Quality score file (%s)" % ( data.nice_size( dataset.get_size() ) )
+ return "SOLiD Quality score file (%s)" % ( data.nice_size( dataset.get_size() ) )
def sniff( self, filename ):
"""
>>> fname = get_test_fname( 'sequence.fasta' )
- >>> QualityScore().sniff( fname )
+ >>> QualityScoreSOLiD().sniff( fname )
False
- >>> fname = get_test_fname( 'sequence.qual' )
- >>> QualityScore().sniff( fname )
+ >>> fname = get_test_fname( 'sequence.qualsolid' )
+ >>> QualityScoreSOLiD().sniff( fname )
+ True
+ """
+ try:
+ fh = open( filename )
+ readlen = None
+ goodblock = 0
+ while True:
+ line = fh.readline()
+ if not line:
+ if goodblock > 0:
+ return True
+ else:
+ break #EOF
+ line = line.strip()
+ if line and not line.startswith( '#' ): #first non-empty non-comment line
+ if line.startswith( '>' ):
+ line = fh.readline().strip()
+ if line == '' or line.startswith( '>' ):
+ break
+ try:
+ [ int( x ) for x in line.split() ]
+ if not(readlen):
+ readlen = len(line.split())
+ assert len(line.split()) == readlen #SOLiD reads should be of the same length
+ except:
+ break
+ goodblock += 1
+ if goodblock > 10:
+ return True
+ else:
+ break #we found a non-empty line, but it's not a header
+ except:
+ pass
+ return False
+
+class QualityScore454 ( data.Text ):
+ """
+ until we know more about quality score formats
+ """
+ file_ext = "qual454"
+
+ def set_peek( self, dataset, line_count=None ):
+ if not dataset.dataset.purged:
+ dataset.peek = data.get_file_peek( dataset.file_name )
+ if line_count is None:
+ dataset.blurb = data.nice_size( dataset.get_size() )
+ else:
+ dataset.blurb = "%s lines, 454 Quality score file" % util.commaify( str( line_count ) )
+ else:
+ dataset.peek = 'file does not exist'
+ dataset.blurb = 'file purged from disk'
+
+ def display_peek(self, dataset):
+ try:
+ return dataset.peek
+ except:
+ return "454 Quality score file (%s)" % ( data.nice_size( dataset.get_size() ) )
+
+ def sniff( self, filename ):
+ """
+ >>> fname = get_test_fname( 'sequence.fasta' )
+ >>> QualityScore454().sniff( fname )
+ False
+ >>> fname = get_test_fname( 'sequence.qual454' )
+ >>> QualityScore454().sniff( fname )
True
"""
try:
@@ -63,3 +128,59 @@
except:
pass
return False
+
+class QualityScoreSolexa ( data.Text ):
+ """
+ until we know more about quality score formats
+ """
+ file_ext = "qualsolexa"
+
+ def set_peek( self, dataset, line_count=None ):
+ if not dataset.dataset.purged:
+ dataset.peek = data.get_file_peek( dataset.file_name )
+ if line_count is None:
+ dataset.blurb = data.nice_size( dataset.get_size() )
+ else:
+ dataset.blurb = "%s lines, Solexa Quality score file" % util.commaify( str( line_count ) )
+ else:
+ dataset.peek = 'file does not exist'
+ dataset.blurb = 'file purged from disk'
+
+ def display_peek(self, dataset):
+ try:
+ return dataset.peek
+ except:
+ return "Solexa Quality score file (%s)" % ( data.nice_size( dataset.get_size() ) )
+
+ def sniff( self, filename ):
+ """
+ >>> fname = get_test_fname( 'sequence.fasta' )
+ >>> QualityScoreSolexa().sniff( fname )
+ False
+ >>> fname = get_test_fname( 'sequence.qualsolexa' )
+ >>> QualityScoreSolexa().sniff( fname )
+ True
+ """
+ try:
+ fh = open( filename )
+ readlen = None
+ while True:
+ line = fh.readline()
+ if not line:
+ break #EOF
+ line = line.strip()
+ if line and not line.startswith( '#' ):
+ if len(line.split('\t')) > 1:
+ break
+ try:
+ [ int( x ) for x in line.split() ]
+ if not(readlen):
+ readlen = len(line.split())
+ assert len(line.split()) == readlen #Solexa reads should be of the same length
+ except:
+ break
+
+ except:
+ pass
+ return False
+
diff -r 5a4aac327bad -r 22b08d47f7ba lib/galaxy/datatypes/registry.py
--- a/lib/galaxy/datatypes/registry.py Wed Apr 29 10:27:57 2009 -0400
+++ b/lib/galaxy/datatypes/registry.py Wed Apr 29 11:53:22 2009 -0400
@@ -116,7 +116,9 @@
'laj' : images.Laj(),
'lav' : sequence.Lav(),
'maf' : sequence.Maf(),
- 'qual' : qualityscore.QualityScore(),
+ 'qualsolid' : qualityscore.QualityScoreSOLiD(),
+ 'qualsolexa' : qualityscore.QualityScoreSolexa(),
+ 'qual454' : qualityscore.QualityScore454(),
'scf' : images.Scf(),
'tabular' : tabular.Tabular(),
'taxonomy' : tabular.Taxonomy(),
@@ -140,7 +142,9 @@
'laj' : 'text/plain',
'lav' : 'text/plain',
'maf' : 'text/plain',
- 'qual' : 'text/plain',
+ 'qualsolid' : 'text/plain',
+ 'qualsolexa' : 'text/plain',
+ 'qual454' : 'text/plain',
'scf' : 'application/octet-stream',
'tabular' : 'text/plain',
'taxonomy' : 'text/plain',
diff -r 5a4aac327bad -r 22b08d47f7ba lib/galaxy/model/migrate/versions/0006_change_qual_datatype.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0006_change_qual_datatype.py Wed Apr 29 11:53:22 2009 -0400
@@ -0,0 +1,47 @@
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from migrate import *
+import sys, logging
+
+log = logging.getLogger( __name__ )
+log.setLevel(logging.DEBUG)
+handler = logging.StreamHandler( sys.stdout )
+format = "%(name)s %(levelname)s %(asctime)s %(message)s"
+formatter = logging.Formatter( format )
+handler.setFormatter( formatter )
+log.addHandler( handler )
+
+metadata = MetaData( migrate_engine )
+db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, transactional=False ) )
+HistoryDatasetAssociation_table = Table( "history_dataset_association", metadata, autoload=True )
+
+def upgrade():
+ # Load existing tables
+ metadata.reflect()
+ # Add 2 indexes to the galaxy_user table
+ i = Index( 'ix_hda_extension', HistoryDatasetAssociation_table.c.extension )
+ try:
+ i.create()
+ except Exception, e:
+ log.debug( "Adding index 'ix_hda_extension' to history_dataset_association table failed: %s" % ( str( e ) ) )
+
+ # Set the default data in the galaxy_user table, but only for null values
+ cmd = "UPDATE history_dataset_association SET extension = 'qual454' WHERE extension = 'qual' and peek like \'>%%\'"
+ try:
+ db_session.execute( cmd )
+ except Exception, e:
+ log.debug( "Resetting extension qual to qual454 in history_dataset_association failed: %s" % ( str( e ) ) )
+ cmd = "UPDATE history_dataset_association SET extension = 'qualsolexa' WHERE extension = 'qual' and peek not like \'>%%\'"
+ try:
+ db_session.execute( cmd )
+ except Exception, e:
+ log.debug( "Resetting extension qual to qualsolexa in history_dataset_association failed: %s" % ( str( e ) ) )
+ # Add 1 index to the history_dataset_association table
+ try:
+ i.drop()
+ except Exception, e:
+ log.debug( "Dropping index 'ix_hda_extension' to history_dataset_association table failed: %s" % ( str( e ) ) )
+
+
+def downgrade():
+ pass
diff -r 5a4aac327bad -r 22b08d47f7ba tools/metag_tools/fastqsolexa_to_fasta_qual.xml
--- a/tools/metag_tools/fastqsolexa_to_fasta_qual.xml Wed Apr 29 10:27:57 2009 -0400
+++ b/tools/metag_tools/fastqsolexa_to_fasta_qual.xml Wed Apr 29 11:53:22 2009 -0400
@@ -6,7 +6,7 @@
</inputs>
<outputs>
<data name="output1" format="fasta"/>
- <data name="output2" format="qual"/>
+ <data name="output2" format="qualsolexa"/>
</outputs>
<tests>
<!-- NOTE: this tool generates 2 output files, but our functional tests currently only handle the last one generated -->
diff -r 5a4aac327bad -r 22b08d47f7ba tools/metag_tools/rmapq_wrapper.xml
--- a/tools/metag_tools/rmapq_wrapper.xml Wed Apr 29 10:27:57 2009 -0400
+++ b/tools/metag_tools/rmapq_wrapper.xml Wed Apr 29 11:53:22 2009 -0400
@@ -13,7 +13,7 @@
</options>
</param>
<param name="input_seq" type="data" format="fasta" label="Sequence file"/>
- <param name="input_score" type="data" format="qual" label="Quality score file"/>
+ <param name="input_score" type="data" format="qualsolexa" label="Quality score file"/>
<param name="high_score" type="float" size="15" value="40" label="Minimum score for high-quality base (-q)"/>
<param name="high_len" type="integer" size="15" value="36" label="Minimal high-quality bases (-M)"/>
<param name="align_len" type="integer" size="15" value="11" label="Minimal length of a hit (-h)" help="seed"/>
@@ -46,7 +46,7 @@
<test>
<param name="database" value="/depot/data2/galaxy/faseq/test" />
<param name="input_seq" value="rmapq_wrapper_test1.fasta" ftype="fasta"/>
- <param name="input_score" value="rmapq_wrapper_test1.qual" ftype="qual" />
+ <param name="input_score" value="rmapq_wrapper_test1.qual" ftype="qualsolexa" />
<param name="high_score" value="40" />
<param name="high_len" value="36" />
<param name="read_len" value="36" />
diff -r 5a4aac327bad -r 22b08d47f7ba tools/metag_tools/short_reads_figure_high_quality_length.xml
--- a/tools/metag_tools/short_reads_figure_high_quality_length.xml Wed Apr 29 10:27:57 2009 -0400
+++ b/tools/metag_tools/short_reads_figure_high_quality_length.xml Wed Apr 29 11:53:22 2009 -0400
@@ -5,7 +5,7 @@
<inputs>
<page>
- <param name="input1" type="data" format="qual,txtseq.zip" label="Quality score file" help="No dataset? Read tip below"/>
+ <param name="input1" type="data" format="qualsolexa,qual454,txtseq.zip" label="Quality score file" help="No dataset? Read tip below"/>
<param name="input2" type="integer" size="5" value="20" label="Quality score threshold" />
</page>
</inputs>
@@ -17,12 +17,12 @@
</requirements>
<tests>
<test>
- <param name="input1" value="solexa.qual" ftype="qual" />
+ <param name="input1" value="solexa.qual" ftype="qualsolexa" />
<param name="input2" value="5" />
<output name="output1" file="solexa_high_quality_hist.pdf" ftype="pdf"/>
</test>
<test>
- <param name="input1" value="454.qual" ftype="qual" />
+ <param name="input1" value="454.qual" ftype="qual454" />
<param name="input2" value="5" />
<output name="output1" file="454_high_quality_hist.pdf" ftype="pdf"/>
</test>
diff -r 5a4aac327bad -r 22b08d47f7ba tools/metag_tools/short_reads_figure_score.xml
--- a/tools/metag_tools/short_reads_figure_score.xml Wed Apr 29 10:27:57 2009 -0400
+++ b/tools/metag_tools/short_reads_figure_score.xml Wed Apr 29 11:53:22 2009 -0400
@@ -5,7 +5,7 @@
<inputs>
<page>
- <param name="input1" type="data" format="qual, txtseq.zip" label="Quality score file" help="No dataset? Read tip below"/>
+ <param name="input1" type="data" format="qualsolexa, qual454, txtseq.zip" label="Quality score file" help="No dataset? Read tip below"/>
</page>
</inputs>
@@ -17,11 +17,11 @@
</requirements>
<tests>
<test>
- <param name="input1" value="solexa.qual" ftype="qual" />
+ <param name="input1" value="solexa.qual" ftype="qualsolexa" />
<output name="output1" file="solexaScore.png" ftype="png" />
</test>
<test>
- <param name="input1" value="454.qual" ftype="qual" />
+ <param name="input1" value="454.qual" ftype="qual454" />
<output name="output1" file="454Score.png" ftype="png" />
</test>
</tests>
diff -r 5a4aac327bad -r 22b08d47f7ba tools/metag_tools/short_reads_trim_seq.xml
--- a/tools/metag_tools/short_reads_trim_seq.xml Wed Apr 29 10:27:57 2009 -0400
+++ b/tools/metag_tools/short_reads_trim_seq.xml Wed Apr 29 11:53:22 2009 -0400
@@ -7,7 +7,7 @@
<inputs>
<page>
<param name="input1" type="data" format="fasta,txtseq.zip" label="Reads" />
- <param name="input2" type="data" format="qual,txtseq.zip" label="Quality scores" />
+ <param name="input2" type="data" format="qualsolexa,qual454,txtseq.zip" label="Quality scores" />
<param name="trim" type="integer" size="5" value="20" label="Minimal quality score" help="bases scoring below this value will trigger splitting"/>
<param name="length" type="integer" size="5" value="100" label="Minimal length of contiguous segment" help="report all high quality segments above this length. Setting this option to '0' will cause the program to return a single longest run of high quality bases per read" />
<conditional name="sequencing_method_choice">
@@ -36,7 +36,7 @@
<test>
<param name="sequencer" value="454" />
<param name="input1" value="454.fasta" ftype="fasta" />
- <param name="input2" value="454.qual" ftype="qual" />
+ <param name="input2" value="454.qual" ftype="qual454" />
<param name="input3" value="no" />
<param name="trim" value="20" />
<param name="length" value="0" />
@@ -45,7 +45,7 @@
<test>
<param name="sequencer" value="Solexa" />
<param name="input1" value="solexa.fasta" ftype="fasta" />
- <param name="input2" value="solexa.qual" ftype="qual" />
+ <param name="input2" value="solexa.qual" ftype="qualsolexa" />
<param name="input3" value="0" />
<param name="trim" value="20" />
<param name="length" value="0" />
1
0
30 Apr '09
details: http://www.bx.psu.edu/hg/galaxy/rev/dae0313bf5bb
changeset: 2381:dae0313bf5bb
user: James Taylor <james(a)jamestaylor.org>
date: Wed Apr 29 13:46:02 2009 -0400
description:
Handle the addition of new tool parameters better in the workflow editor. Default values will be filled in and a message displayed to the user
7 file(s) affected in this change:
lib/galaxy/tools/__init__.py
lib/galaxy/tools/parameters/grouping.py
lib/galaxy/web/controllers/workflow.py
lib/galaxy/workflow/modules.py
static/scripts/galaxy.workflow_editor.canvas.js
templates/workflow/editor.mako
tools/maf/interval2maf.xml
diffs (252 lines):
diff -r 22b08d47f7ba -r dae0313bf5bb lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py Wed Apr 29 11:53:22 2009 -0400
+++ b/lib/galaxy/tools/__init__.py Wed Apr 29 13:46:02 2009 -0400
@@ -676,25 +676,7 @@
"""
context = ExpressionContext( state, context )
for input in inputs.itervalues():
- if isinstance( input, Repeat ):
- # Repeat elements are always initialized to have 0 units.
- state[ input.name ] = []
- elif isinstance( input, Conditional ):
- # State for a conditional is a plain dictionary.
- s = state[ input.name ] = {}
- # Get the default value for the 'test element' and use it
- # to determine the current case
- test_value = input.test_param.get_initial_value( trans, context )
- current_case = input.get_current_case( test_value, trans )
- # Store the current case in a special value
- s['__current_case__'] = current_case
- # Store the value of the test element
- s[ input.test_param.name ] = test_value
- # Recursively fill in state for selected case
- self.fill_in_new_state( trans, input.cases[current_case].inputs, s, context )
- else:
- # `input` is just a plain parameter, get its default value
- state[ input.name ] = input.get_initial_value( trans, context )
+ state[ input.name ] = input.get_initial_value( trans, context )
def get_param_html_map( self, trans, page=0, other_values={} ):
"""
@@ -1057,6 +1039,41 @@
def params_from_strings( self, params, app, ignore_errors=False ):
return params_from_strings( self.inputs, params, app, ignore_errors )
+
+
+ def check_and_update_param_values( self, values, trans ):
+ """
+ Check that all parameters have values, and fill in with default
+ values where neccesary. This could be called after loading values
+ from a database in case new parameters have been added.
+ """
+ messages = []
+ self.check_and_update_param_values_helper( self.inputs, values, trans, messages )
+ return messages
+
+ def check_and_update_param_values_helper( self, inputs, values, trans, messages, context=None, prefix="" ):
+ """
+ Recursive helper for `check_and_update_param_values_helper`
+ """
+ context = ExpressionContext( values, context )
+ for input in inputs.itervalues():
+ # No value, insert the default
+ if input.name not in values:
+ messages.append( prefix + input.label )
+ values[input.name] = input.get_initial_value( trans, context )
+ # Value, visit recursively as usual
+ else:
+ if isinstance( input, Repeat ):
+ for i, d in enumerate( values[ input.name ] ):
+ rep_prefix = prefix + "%s %d > " % ( input.title, i + 1 )
+ self.check_and_update_param_values_helper( input.inputs, d, trans, messages, context, rep_prefix )
+ elif isinstance( input, Conditional ):
+ group_values = values[ input.name ]
+ current = group_values["__current_case__"]
+ self.check_and_update_param_values_helper( input.cases[current].inputs, group_values, trans, messages, context, prefix )
+ else:
+ # Regular tool parameter, no recursion needed
+ pass
def handle_unvalidated_param_values( self, input_values, app ):
"""
diff -r 22b08d47f7ba -r dae0313bf5bb lib/galaxy/tools/parameters/grouping.py
--- a/lib/galaxy/tools/parameters/grouping.py Wed Apr 29 11:53:22 2009 -0400
+++ b/lib/galaxy/tools/parameters/grouping.py Wed Apr 29 13:46:02 2009 -0400
@@ -3,6 +3,7 @@
"""
from basic import ToolParameter
+from galaxy.util.expressions import ExpressionContext
class Group( object ):
def __init__( self ):
@@ -19,6 +20,11 @@
into the preferred value form.
"""
return value
+ def get_initial_value( self, trans, context ):
+ """
+ Return the initial state/value for this group
+ """
+ raise TypeError( "Not implemented" )
class Repeat( Group ):
type = "repeat"
@@ -65,7 +71,9 @@
if isinstance( input, ToolParameter ):
callback( new_prefix, input, d[input.name], parent = d )
else:
- input.visit_inputs( new_prefix, d[input.name], callback )
+ input.visit_inputs( new_prefix, d[input.name], callback )
+ def get_initial_value( self, trans, context ):
+ return []
class Conditional( Group ):
type = "conditional"
@@ -109,6 +117,22 @@
callback( prefix, input, value[input.name], parent = value )
else:
input.visit_inputs( prefix, value[input.name], callback )
+ def get_initial_value( self, trans, context ):
+ # State for a conditional is a plain dictionary.
+ rval = {}
+ # Get the default value for the 'test element' and use it
+ # to determine the current case
+ test_value = self.test_param.get_initial_value( trans, context )
+ current_case = self.get_current_case( test_value, trans )
+ # Store the current case in a special value
+ rval['__current_case__'] = current_case
+ # Store the value of the test element
+ rval[ self.test_param.name ] = test_value
+ # Fill in state for selected case
+ child_context = ExpressionContext( rval, context )
+ for child_input in self.cases[current_case].inputs.itervalues():
+ rval[ child_input.name ] = child_input.get_initial_value( trans, child_context )
+ return rval
class ConditionalWhen( object ):
def __init__( self ):
diff -r 22b08d47f7ba -r dae0313bf5bb lib/galaxy/web/controllers/workflow.py
--- a/lib/galaxy/web/controllers/workflow.py Wed Apr 29 11:53:22 2009 -0400
+++ b/lib/galaxy/web/controllers/workflow.py Wed Apr 29 13:46:02 2009 -0400
@@ -300,9 +300,16 @@
data = {}
data['name'] = workflow.name
data['steps'] = {}
+ data['upgrade_messages'] = {}
# For each step, rebuild the form and encode the state
for step in workflow.steps:
+ # Load from database representation
module = module_factory.from_workflow_step( trans, step )
+ # Fix any missing parameters
+ upgrade_message = module.check_and_update_state()
+ if upgrade_message:
+ data['upgrade_messages'][step.order_index] = upgrade_message
+ # Pack atrributes into plain dictionary
step_dict = {
'id': step.order_index,
'type': module.type,
@@ -312,7 +319,7 @@
'tool_errors': module.get_errors(),
'data_inputs': module.get_data_inputs(),
'data_outputs': module.get_data_outputs(),
- 'form_html': module.get_config_form()
+ 'form_html': module.get_config_form(),
}
# Connections
input_conn_dict = {}
@@ -324,6 +331,7 @@
step_dict['position'] = step.position
# Add to return value
data['steps'][step.order_index] = step_dict
+ print data['upgrade_messages']
return data
@web.json
diff -r 22b08d47f7ba -r dae0313bf5bb lib/galaxy/workflow/modules.py
--- a/lib/galaxy/workflow/modules.py Wed Apr 29 11:53:22 2009 -0400
+++ b/lib/galaxy/workflow/modules.py Wed Apr 29 13:46:02 2009 -0400
@@ -60,6 +60,13 @@
pass
def get_config_form( self ):
raise TypeError( "Abstract method" )
+
+ def check_and_update_state( self ):
+ """
+ If the state is not in sync with the current implementation of the
+ module, try to update. Returns a list of messages to be displayed
+ """
+ pass
## ---- Run time ---------------------------------------------------------
@@ -236,7 +243,11 @@
return value, error
# Update state using incoming values
errors = self.tool.update_state( self.trans, self.tool.inputs, self.state.inputs, incoming, item_callback=item_callback )
- self.errors = errors or None
+ self.errors = errors or None
+
+ def check_and_update_state( self ):
+ return self.tool.check_and_update_param_values( self.state.inputs, self.trans )
+
def add_dummy_datasets( self, connections=None):
if connections:
# Store onnections by input name
diff -r 22b08d47f7ba -r dae0313bf5bb static/scripts/galaxy.workflow_editor.canvas.js
--- a/static/scripts/galaxy.workflow_editor.canvas.js Wed Apr 29 11:53:22 2009 -0400
+++ b/static/scripts/galaxy.workflow_editor.canvas.js Wed Apr 29 13:46:02 2009 -0400
@@ -255,6 +255,7 @@
if ( data.type ) {
this.type = data.type;
}
+ this.name = data.name;
this.form_html = data.form_html;
this.tool_state = data.tool_state;
this.tool_errors = data.tool_errors;
diff -r 22b08d47f7ba -r dae0313bf5bb templates/workflow/editor.mako
--- a/templates/workflow/editor.mako Wed Apr 29 11:53:22 2009 -0400
+++ b/templates/workflow/editor.mako Wed Apr 29 13:46:02 2009 -0400
@@ -78,7 +78,18 @@
workflow.fit_canvas_to_nodes();
scroll_to_nodes();
canvas_manager.draw_overview();
- hide_modal();
+ // Determine if any parameters were 'upgraded' and provide message
+ upgrade_message = ""
+ $.each( data['upgrade_messages'], function( k, v ) {
+ upgrade_message += ( "<li>Step " + ( parseInt(k) + 1 ) + ": " + workflow.nodes[k].name + " -- " + v.join( ", " ) );
+ });
+ if ( upgrade_message ) {
+ show_modal( "Workflow loaded with changes",
+ "Values were not found for the following parameters (possibly a result of tool upgrades), <br/> default values have been used. Please review the following parameters and then save.<ul>" + upgrade_message + "</ul>",
+ { "Continue" : hide_modal } );
+ } else {
+ hide_modal();
+ }
},
beforeSubmit: function( data ) {
show_modal( "Loading workflow", "progress" );
@@ -88,7 +99,9 @@
});
$(document).ajaxError( function ( e, x ) {
- show_modal( "Server error", x.responseText, { "Ignore error" : hide_modal } );
+ console.log( e, x );
+ var message = x.responseText || x.statusText || "Could not connect to server";
+ show_modal( "Server error", message, { "Ignore error" : hide_modal } );
return false;
});
diff -r 22b08d47f7ba -r dae0313bf5bb tools/maf/interval2maf.xml
--- a/tools/maf/interval2maf.xml Wed Apr 29 11:53:22 2009 -0400
+++ b/tools/maf/interval2maf.xml Wed Apr 29 13:46:02 2009 -0400
@@ -6,6 +6,7 @@
#end if
</command>
<inputs>
+ <param type="text" name="TEST" label="Test" />
<param format="interval" name="input1" type="data" label="Choose intervals">
<validator type="unspecified_build" />
</param>
1
0
30 Apr '09
details: http://www.bx.psu.edu/hg/galaxy/rev/cd1bcec32519
changeset: 2384:cd1bcec32519
user: guru
date: Wed Apr 29 16:32:23 2009 -0400
description:
Added functional tests for testing sniffing and metadata settings of qual454 and qualsolid datatypes.
6 file(s) affected in this change:
datatypes_conf.xml.sample
lib/galaxy/datatypes/qualityscore.py
lib/galaxy/datatypes/registry.py
test-data/qualscores.qual454
test-data/qualscores.qualsolid
test/functional/test_sniffing_and_metadata_settings.py
diffs (194 lines):
diff -r 656f1df80b46 -r cd1bcec32519 datatypes_conf.xml.sample
--- a/datatypes_conf.xml.sample Wed Apr 29 14:47:18 2009 -0400
+++ b/datatypes_conf.xml.sample Wed Apr 29 16:32:23 2009 -0400
@@ -188,7 +188,6 @@
<sniffer type="galaxy.datatypes.sequence:Maf"/>
<sniffer type="galaxy.datatypes.sequence:Lav"/>
<sniffer type="galaxy.datatypes.sequence:csFasta"/>
- <sniffer type="galaxy.datatypes.qualityscore:QualityScoreSolexa"/>
<sniffer type="galaxy.datatypes.qualityscore:QualityScoreSOLiD"/>
<sniffer type="galaxy.datatypes.qualityscore:QualityScore454"/>
<sniffer type="galaxy.datatypes.sequence:Fasta"/>
diff -r 656f1df80b46 -r cd1bcec32519 lib/galaxy/datatypes/qualityscore.py
--- a/lib/galaxy/datatypes/qualityscore.py Wed Apr 29 14:47:18 2009 -0400
+++ b/lib/galaxy/datatypes/qualityscore.py Wed Apr 29 16:32:23 2009 -0400
@@ -152,35 +152,4 @@
except:
return "Solexa Quality score file (%s)" % ( data.nice_size( dataset.get_size() ) )
- def sniff( self, filename ):
- """
- >>> fname = get_test_fname( 'sequence.fasta' )
- >>> QualityScoreSolexa().sniff( fname )
- False
- >>> fname = get_test_fname( 'sequence.qualsolexa' )
- >>> QualityScoreSolexa().sniff( fname )
- True
- """
- try:
- fh = open( filename )
- readlen = None
- while True:
- line = fh.readline()
- if not line:
- break #EOF
- line = line.strip()
- if line and not line.startswith( '#' ):
- if len(line.split('\t')) > 1:
- break
- try:
- [ int( x ) for x in line.split() ]
- if not(readlen):
- readlen = len(line.split())
- assert len(line.split()) == readlen #Solexa reads should be of the same length
- except:
- break
-
- except:
- pass
- return False
-
+
\ No newline at end of file
diff -r 656f1df80b46 -r cd1bcec32519 lib/galaxy/datatypes/registry.py
--- a/lib/galaxy/datatypes/registry.py Wed Apr 29 14:47:18 2009 -0400
+++ b/lib/galaxy/datatypes/registry.py Wed Apr 29 16:32:23 2009 -0400
@@ -159,6 +159,9 @@
xml.BlastXml(),
sequence.Maf(),
sequence.Lav(),
+ sequence.csFasta(),
+ qualityscore.QualityScoreSOLiD(),
+ qualityscore.QualityScore454(),
sequence.Fasta(),
sequence.FastqSolexa(),
interval.Wiggle(),
diff -r 656f1df80b46 -r cd1bcec32519 test-data/qualscores.qual454
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/qualscores.qual454 Wed Apr 29 16:32:23 2009 -0400
@@ -0,0 +1,49 @@
+>EYKX4VC04IWAEA length=68 xy=3531_0528 region=4 run=R_2007_11_07_16_15_57_
+22 13 9 6 4 3 2 2 1 1 1 1 24 44 33 23 16 11 7 2 28 33 23 18 28 27 27 28 20 21 42 35 21 6 24 25 31 21 28 27 41 34 15 28 28 27 28 28 33 24 27 28 28 24 27 36 27 28 28 28
+28 28 36 30 8 34 25 18
+>EYKX4VC04JKOGH length=48 xy=3808_3903 region=4 run=R_2007_11_07_16_15_57_
+28 28 27 28 38 31 10 28 28 27 27 34 25 28 24 26 27 28 27 37 29 34 25 31 21 28 21 36 28 31 20 24 27 37 28 28 34 27 3 34 25 24 28 28 26 28 35 28
+>EYKX4VC04JIUVK length=84 xy=3788_0830 region=4 run=R_2007_11_07_16_15_57_
+29 20 14 11 8 6 3 1 25 27 25 28 28 27 28 27 28 28 36 28 27 28 36 29 7 28 28 28 27 27 27 35 26 35 26 27 36 28 28 28 38 32 11 28 36 28 27 26 35 25 28 38 31 11 27 28 37 28 27 27
+28 36 29 8 33 24 41 34 19 3 26 28 28 28 35 26 36 29 8 38 32 11 28 28
+>EYKX4VC04JWDRY length=78 xy=3942_1068 region=4 run=R_2007_11_07_16_15_57_
+36 24 14 5 27 20 28 27 28 32 22 28 27 43 36 23 11 27 28 28 28 32 23 36 27 28 28 26 38 32 11 34 25 27 43 36 23 11 38 31 11 37 28 28 28 27 28 30 20 28 32 22 28 36 27 37 30 9 27 28
+28 27 28 42 35 20 5 28 28 28 35 26 27 27 26 39 32 12
+>EYKX4VC04JWMUW length=55 xy=3945_0550 region=4 run=R_2007_11_07_16_15_57_
+36 24 14 4 28 17 34 25 35 25 31 20 28 28 36 27 28 28 24 27 28 28 37 28 27 27 35 25 31 21 27 39 32 12 28 36 28 28 26 27 28 27 26 28 42 35 20 6 28 27 28 28 28 28 28
+>EYKX4VC04JH4RG length=85 xy=3779_3850 region=4 run=R_2007_11_07_16_15_57_
+37 28 35 26 38 31 10 27 37 28 28 38 31 10 27 35 25 25 28 28 28 28 28 28 28 28 27 28 33 23 28 32 22 35 25 31 20 34 25 31 21 26 28 27 26 26 15 36 29 7 27 27 24 36 27 28 37 28 36 28
+27 28 28 28 37 28 28 40 34 14 37 28 28 26 28 36 28 26 28 37 28 28 28 28 27
+>EYKX4VC04JDAWO length=117 xy=3724_3814 region=4 run=R_2007_11_07_16_15_57_
+25 28 28 28 34 25 28 28 28 28 28 28 28 27 27 30 19 28 28 34 25 28 28 26 32 23 28 28 27 28 28 34 25 28 27 28 25 25 36 27 36 29 7 36 27 33 23 28 28 36 27 31 21 28 30 20 28 34 24 28
+27 34 25 28 28 28 28 28 28 27 28 27 37 30 9 28 28 27 28 27 28 28 28 27 33 23 28 28 28 17 28 31 20 28 21 26 28 33 23 26 28 27 26 28 35 26 28 28 21 28 26 28 33 23 36 27 27
+>EYKX4VC04JEY0S length=57 xy=3743_3898 region=4 run=R_2007_11_07_16_15_57_
+23 28 35 28 6 26 24 27 33 23 26 28 28 28 28 33 27 3 27 30 19 28 28 36 27 33 24 24 32 23 25 26 27 28 31 21 34 27 5 28 22 27 28 24 26 28 28 27 28 24 37 29 26 35 26 26 15
+>EYKX4VC04JKOGB length=68 xy=3808_3897 region=4 run=R_2007_11_07_16_15_57_
+28 34 27 4 27 28 24 28 28 28 28 27 27 31 20 28 27 27 26 28 35 26 37 28 28 28 28 28 41 34 19 4 35 26 27 32 22 28 25 36 28 26 28 28 25 36 29 8 28 28 28 27 24 28 41 34 16 27 21 28
+21 26 33 26 21 40 34 14
+>EYKX4VC04JOZA4 length=160 xy=3857_3886 region=4 run=R_2007_11_07_16_15_57_
+35 24 15 7 1 26 28 41 34 15 28 28 28 28 28 27 25 40 34 14 28 28 44 35 24 15 8 2 27 24 27 27 35 26 28 27 36 27 26 36 28 24 27 37 29 27 28 26 35 26 28 28 28 27 26 35 26 37 29 36
+28 28 26 36 28 28 28 37 28 28 28 28 28 28 28 28 36 28 43 36 22 10 19 37 28 27 37 28 24 27 37 28 38 32 11 28 37 29 26 25 34 25 36 27 24 25 24 36 27 23 27 28 39 32 12 39 32 12 28 37
+29 25 27 27 27 24 28 39 32 12 28 26 44 36 24 14 5 33 26 2 33 24 26 24 28 28 27 35 25 34 25 26 41 34 16 43 36 22 8 28
+>EYKX4VC04JLDPN length=75 xy=3816_3865 region=4 run=R_2007_11_07_16_15_57_
+28 28 36 27 28 28 36 27 28 27 28 28 28 27 26 36 27 28 28 27 28 28 28 28 28 28 28 28 27 28 40 33 14 28 28 28 40 33 13 24 27 28 28 28 27 23 32 22 25 18 26 21 24 36 27 26 24 25 28 26
+27 37 28 22 28 28 34 24 28 25 23 26 23 25 27
+>EYKX4VC04IEKBT length=167 xy=3329_0983 region=4 run=R_2007_11_07_16_15_57_
+26 17 12 9 7 5 4 2 1 28 44 31 21 15 11 8 5 2 34 25 44 35 24 15 7 1 35 26 37 29 28 36 27 37 28 28 27 36 27 27 39 32 12 36 27 28 37 28 28 35 26 36 27 28 28 24 27 27 28 28
+28 36 27 40 33 14 27 28 43 36 23 12 2 28 27 27 36 27 43 36 22 8 27 37 28 35 25 28 28 28 28 36 27 41 35 16 39 32 12 28 36 28 28 27 28 38 31 11 28 27 28 28 28 37 28 35 25 28 39 33
+13 28 28 28 28 28 26 26 28 28 27 27 28 27 25 39 32 12 28 28 35 25 25 27 28 28 28 26 36 27 28 28 35 25 28 28 34 25 28 27 36 28 26 28 28 28 28
+>EYKX4VC04H76LH length=104 xy=3256_2259 region=4 run=R_2007_11_07_16_15_57_
+20 12 7 5 3 2 2 1 1 1 1 1 1 28 30 19 26 28 33 23 27 37 28 28 36 28 28 37 28 28 36 28 27 42 35 21 7 27 27 30 19 31 21 28 27 33 23 28 28 28 27 35 25 28 35 26 27 36 27 28
+36 27 28 36 27 28 36 27 28 36 28 28 36 28 28 36 27 27 35 25 27 33 23 28 36 27 28 31 21 28 35 25 27 34 25 28 33 23 28 32 22 26 34 24
+>EYKX4VC04I6APD length=156 xy=3645_0543 region=4 run=R_2007_11_07_16_15_57_
+21 12 8 5 4 2 2 1 1 1 1 1 1 28 38 32 11 28 44 36 24 14 5 24 28 28 28 21 27 42 35 21 7 27 28 27 24 26 28 37 28 35 26 28 26 28 27 24 28 28 27 28 44 18 9 5 3 2 1 1
+1 1 1 1 1 1 1 1 1 35 26 41 34 17 25 24 12 28 28 27 38 32 11 28 28 23 28 27 28 27 20 39 32 12 22 26 36 27 27 27 24 36 28 28 36 27 35 26 40 33 18 1 26 34 27 28 24 22 40 34
+15 26 32 22 28 28 28 25 28 28 39 33 13 27 28 32 22 32 23 42 35 21 7 28 43 36 23 10 27 43 36 23 10 36 28 25
+>EYKX4VC04IPT6U length=143 xy=3457_2692 region=4 run=R_2007_11_07_16_15_57_
+30 20 15 11 8 5 3 1 28 19 27 34 24 44 36 23 13 3 28 35 25 28 27 28 28 28 28 27 35 25 28 36 27 27 28 28 28 28 36 27 36 28 28 42 35 21 6 28 28 27 27 44 35 24 15 8 1 27 27 28
+36 27 28 28 27 28 28 35 26 43 36 23 12 2 28 36 27 26 28 28 36 27 28 28 36 27 41 34 16 25 28 41 34 16 36 28 40 34 14 39 33 13 36 27 40 34 18 2 28 40 34 18 2 35 25 28 27 41 34 19
+4 26 27 28 27 28 28 34 25 36 27 28 28 28 36 27 37 28 28 27 28 27 28
+>EYKX4VC04JX6Y2 length=68 xy=3962_3644 region=4 run=R_2007_11_07_16_15_57_
+28 28 28 27 35 28 6 27 28 27 36 27 28 25 37 28 33 26 2 28 27 28 24 28 37 29 28 28 28 28 28 23 28 32 22 28 36 27 28 26 34 25 28 28 35 26 28 28 26 27 25 28 28 24 28 28 27 27 28 28
+28 28 33 23 28 40 34 14
\ No newline at end of file
diff -r 656f1df80b46 -r cd1bcec32519 test-data/qualscores.qualsolid
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/qualscores.qualsolid Wed Apr 29 16:32:23 2009 -0400
@@ -0,0 +1,48 @@
+>946_21_302_F3
+2 10 2 2 4 2 21 2 2 4 4 17 5 2 5 2 11 4 2 2 2 10 7 3 15 2 2 2 3 19 3 2 6 3 2
+>946_21_659_F3
+3 31 3 2 2 2 34 3 2 2 2 31 2 2 3 4 31 8 3 2 2 30 4 8 3 3 2 2 6 9 4 4 6 2 2
+>946_21_1071_F3
+5 5 2 2 2 8 5 3 2 3 7 7 2 3 4 6 5 2 2 2 5 5 2 2 2 3 8 2 3 3 3 8 2 3 2
+>946_21_1115_F3
+21 5 2 8 13 31 6 2 17 24 10 27 4 21 29 8 20 2 11 21 13 24 5 5 6 24 31 2 13 6 22 17 6 27 10
+>946_21_1218_F3
+11 21 2 13 13 16 27 16 19 27 22 28 14 26 24 23 29 10 15 13 6 4 7 16 26 22 11 6 16 22 21 6 4 7 21
+>946_21_1232_F3
+17 16 2 28 21 31 15 16 10 11 8 20 6 5 18 6 13 23 7 13 4 12 19 8 6 9 10 19 7 10 6 10 20 14 8
+>946_21_1368_F3
+28 30 31 31 31 20 29 24 27 31 31 31 24 26 31 31 26 15 27 31 27 30 29 27 30 27 30 21 23 26 24 31 17 30 19
+>946_21_1406_F3
+4 29 3 2 4 5 34 8 2 2 3 29 4 2 8 2 11 4 2 2 2 28 8 5 3 4 31 2 2 2 2 28 5 2 2
+>946_21_1695_F3
+25 31 8 29 19 31 19 12 16 31 30 13 16 21 31 22 13 13 19 23 32 16 6 14 16 24 13 6 6 14 8 8 5 11 6
+>946_21_1945_F3
+23 27 14 10 17 31 29 31 10 13 31 29 23 8 24 30 31 28 10 20 26 28 31 5 22 31 24 28 9 7 15 7 20 5 4
+>946_21_2013_F3
+2 26 2 2 2 2 2 2 9 2 2 2 2 2 2 2 2 4 2 2 2 2 3 2 2 2 2 2 2 2 2 2 4 2 2
+>946_22_108_F3
+17 14 12 28 12 17 18 28 19 13 14 6 17 5 3 6 2 7 6 12 16 7 13 9 11 8 2 5 5 6 6 7 20 11 19
+>946_22_1241_F3
+16 8 5 20 11 6 13 5 19 13 8 9 17 31 8 17 2 6 31 21 12 11 8 12 5 8 15 8 5 5 7 5 11 6 10
+>946_22_1296_F3
+10 8 33 31 27 31 26 5 17 11 6 23 8 24 6 8 14 14 27 11 16 8 8 29 5 21 8 5 14 8 7 16 4 5 5
+>946_22_1356_F3
+2 31 4 3 2 6 31 4 2 2 4 2 3 8 2 2 2 3 4 8 2 2 3 3 11 2 2 2 4 2 2 2 9 2 2
+>946_22_1520_F3
+8 11 12 21 13 15 16 25 20 21 14 23 31 23 30 18 25 23 27 30 21 30 15 14 25 22 22 21 21 22 16 23 26 13 21
+>946_22_1532_F3
+27 30 34 26 25 29 31 31 24 29 29 33 28 22 24 31 30 30 24 22 25 31 19 9 26 29 23 22 13 28 30 31 24 7 29
+>946_22_1582_F3
+28 19 4 5 6 25 30 4 5 8 15 19 5 9 8 29 27 4 4 2 27 17 2 2 5 30 22 2 11 6 26 16 3 2 2
+>946_22_1598_F3
+23 5 33 6 19 8 13 4 30 17 11 13 6 16 8 8 27 5 24 8 8 26 16 8 17 2 23 25 4 23 10 16 10 7 13
+>946_22_1834_F3
+31 28 30 31 31 34 27 29 31 28 31 29 16 31 31 25 31 11 27 21 26 29 16 18 21 19 29 13 24 24 27 24 8 24 10
+>946_22_1939_F3
+30 29 31 31 25 31 33 24 31 30 25 31 27 33 31 27 31 26 30 19 16 27 29 31 21 25 27 9 26 27 23 15 20 27 27
+>946_23_975_F3
+31 31 3 3 2 32 29 3 3 4 28 27 2 4 4 30 24 3 7 3 28 24 5 7 8 29 22 2 6 6 2 24 2 2 2
+>946_23_1133_F3
+19 28 22 31 25 14 28 30 32 27 10 33 26 31 31 14 30 30 16 30 10 28 23 16 6 14 17 10 6 8 18 5 8 9 5
+>946_23_1221_F3
+30 26 30 32 23 29 29 27 28 20 26 23 14 29 27 29 28 29 23 14 30 27 30 27 8 24 13 26 11 12 20 17 15 5 21
\ No newline at end of file
diff -r 656f1df80b46 -r cd1bcec32519 test/functional/test_sniffing_and_metadata_settings.py
--- a/test/functional/test_sniffing_and_metadata_settings.py Wed Apr 29 14:47:18 2009 -0400
+++ b/test/functional/test_sniffing_and_metadata_settings.py Wed Apr 29 16:32:23 2009 -0400
@@ -114,6 +114,18 @@
self.check_history_for_string( 'NCBI Blast XML data' )
self.check_history_for_string( 'format: <span class="blastxml">blastxml</span>' )
self.delete_history_item( 1 )
+ def test_65_qualsolid_datatype( self ):
+ """Testing correctly sniffing qualsolid data type upon upload"""
+ self.upload_file( 'qualscores.qualsolid' )
+ self.verify_dataset_correctness('qualscores.qualsolid')
+ self.check_history_for_string('qualscores.qualsolid format: <span class="qualsolid">qualsolid</span>, database: \? Info: uploaded qualsolid file')
+ self.delete_history_item( 1 )
+ def test_70_qual454_datatype( self ):
+ """Testing correctly sniffing qual454 data type upon upload"""
+ self.upload_file( 'qualscores.qual454' )
+ self.verify_dataset_correctness('qualscores.qual454')
+ self.check_history_for_string('qualscores.qual454 format: <span class="qual454">qual454</span>, database: \? Info: uploaded qual454 file')
+ self.delete_history_item( 1 )
def test_9999_clean_up( self ):
self.delete_history()
self.logout()
1
0
30 Apr '09
details: http://www.bx.psu.edu/hg/galaxy/rev/add0911007d9
changeset: 2385:add0911007d9
user: Nate Coraor <nate(a)bx.psu.edu>
date: Wed Apr 29 17:37:22 2009 -0400
description:
PyTables and GeneTrack eggs. Still need to bundle up pychartdir.
2 file(s) affected in this change:
dist-eggs.ini
eggs.ini
diffs (55 lines):
diff -r cd1bcec32519 -r add0911007d9 dist-eggs.ini
--- a/dist-eggs.ini Wed Apr 29 16:32:23 2009 -0400
+++ b/dist-eggs.ini Wed Apr 29 17:37:22 2009 -0400
@@ -24,9 +24,15 @@
py2.5-solaris-2.10-sun4u-ucs2 = v880.bx.psu.edu /depot/projects/pythons/solaris-2.10-sun4u-ucs2/bin/python2.5
[groups]
-py2.4-linux = py2.4-linux-i686-ucs2 py2.4-linux-i686-ucs4 py2.4-linux-x86_64-ucs2 py2.4-linux-x86_64-ucs4
-py2.5-linux = py2.5-linux-i686-ucs2 py2.5-linux-i686-ucs4 py2.5-linux-x86_64-ucs2 py2.5-linux-x86_64-ucs4
-linux = py2.4-linux py2.5-linux
+py2.4-linux-i686 = py2.4-linux-i686-ucs2 py2.4-linux-i686-ucs4
+py2.4-linux-x86_64 = py2.4-linux-x86_64-ucs2 py2.4-linux-x86_64-ucs4
+py2.5-linux-i686 = py2.5-linux-i686-ucs2 py2.5-linux-i686-ucs4
+py2.5-linux-x86_64 = py2.5-linux-x86_64-ucs2 py2.5-linux-x86_64-ucs4
+py2.4-linux = py2.4-linux-i686 py2.4-linux-x86_64
+py2.5-linux = py2.5-linux-i686 py2.5-linux-x86_64
+linux-i686 = py2.4-linux-i686 py2.5-linux-i686
+linux-x86_64 = py2.4-linux-x86_64 py2.5-linux-x86_64
+linux = linux-i686 linux-x86_64
py2.4-macosx = py2.4-macosx-10.3-fat-ucs2
py2.5-macosx = py2.5-macosx-10.3-fat-ucs2 py2.5-macosx-10.5-i386-ucs2
macosx = py2.4-macosx py2.5-macosx
diff -r cd1bcec32519 -r add0911007d9 eggs.ini
--- a/eggs.ini Wed Apr 29 16:32:23 2009 -0400
+++ b/eggs.ini Wed Apr 29 17:37:22 2009 -0400
@@ -24,6 +24,7 @@
threadframe = 0.2
guppy = 0.1.8
numpy = 1.2.1
+tables = 2.1.1
[eggs:noplatform]
Beaker = 0.5
@@ -51,6 +52,7 @@
wsgiref = 0.1.2
Babel = 0.9.4
wchartype = 0.1
+GeneTrack = 1.0.3
; extra version information
[tags]
@@ -61,6 +63,7 @@
flup = .dev_r2311
bx_python = _dev_r4bf1f32e6b76
nose = .dev_r101
+tables = _hdf5_1.8.2_lzo_2.03_bzip2_1.0.5_static
; source location, necessary for scrambling
[source]
@@ -100,3 +103,5 @@
wsgiref = http://pypi.python.org/packages/source/w/wsgiref/wsgiref-0.1.2.zip
Babel = http://ftp.edgewall.com/pub/babel/Babel-0.9.4.zip
wchartype = http://ginstrom.com/code/wchartype-0.1.zip
+tables = http://www.pytables.org/download/stable/tables-2.1.1.tar.gz ftp://ftp.hdfgroup.org/HDF5/current/src/hdf5-1.8.2.tar.gz http://www.oberhumer.com/opensource/lzo/download/lzo-2.03.tar.gz http://www.bzip.org/1.0.5/bzip2-1.0.5.tar.gz
+GeneTrack = http://genetrack.googlecode.com/files/GeneTrack-1.0.3.zip http://dalchemy.com/opensource/formkit/FormKit_0.9b2.tar.gz
1
0
30 Apr '09
details: http://www.bx.psu.edu/hg/galaxy/rev/5a4aac327bad
changeset: 2379:5a4aac327bad
user: James Taylor <james(a)jamestaylor.org>
date: Wed Apr 29 10:27:57 2009 -0400
description:
Fix for issubtype in workflow editor, would cause problems drawing connections for types with no inheritence relationships
1 file(s) affected in this change:
static/scripts/galaxy.workflow_editor.canvas.js
diffs (12 lines):
diff -r e0534b25c282 -r 5a4aac327bad static/scripts/galaxy.workflow_editor.canvas.js
--- a/static/scripts/galaxy.workflow_editor.canvas.js Tue Apr 28 15:32:44 2009 -0400
+++ b/static/scripts/galaxy.workflow_editor.canvas.js Wed Apr 29 10:27:57 2009 -0400
@@ -645,7 +645,7 @@
function issubtype( child, parent ) {
child = ext_to_type[child];
parent = ext_to_type[parent];
- return ( parent in type_to_type[child] );
+ return ( type_to_type[child] ) && ( parent in type_to_type[child] );
};
function populate_datatype_info( data ) {
1
0
29 Apr '09
details: http://www.bx.psu.edu/hg/galaxy/rev/d0c905db68db
changeset: 2374:d0c905db68db
user: Greg Von Kuster <greg(a)bx.psu.edu>
date: Mon Apr 27 16:03:44 2009 -0400
description:
Display only the number of associations on the main users, groups, roles pages rather than the associated objects - much faster display times. Also a tweek to my last fix for the column_maker tool.
6 file(s) affected in this change:
lib/galaxy/web/controllers/admin.py
templates/admin/dataset_security/groups.mako
templates/admin/dataset_security/roles.mako
templates/admin/dataset_security/users.mako
test/base/twilltestcase.py
tools/stats/column_maker.py
diffs (483 lines):
diff -r 22118cf46b0a -r d0c905db68db lib/galaxy/web/controllers/admin.py
--- a/lib/galaxy/web/controllers/admin.py Mon Apr 27 13:33:47 2009 -0400
+++ b/lib/galaxy/web/controllers/admin.py Mon Apr 27 16:03:44 2009 -0400
@@ -280,23 +280,12 @@
params = util.Params( kwd )
msg = util.restore_text( params.get( 'msg', '' ) )
messagetype = params.get( 'messagetype', 'done' )
- # Build a list of tuples which are groups followed by lists of members and roles
- # [ ( group, [ member, member, member ], [ role, role ] ), ( group, [ member, member ], [ role ] ) ]
- groups_members_roles = []
groups = trans.app.model.Group.query() \
.filter( trans.app.model.Group.table.c.deleted==False ) \
.order_by( trans.app.model.Group.table.c.name ) \
.all()
- for group in groups:
- members = []
- for uga in group.members:
- members.append( trans.app.model.User.get( uga.user_id ) )
- roles = []
- for gra in group.roles:
- roles.append( trans.app.model.Role.get( gra.role_id ) )
- groups_members_roles.append( ( group, members, roles ) )
return trans.fill_template( '/admin/dataset_security/groups.mako',
- groups_members_roles=groups_members_roles,
+ groups=groups,
msg=msg,
messagetype=messagetype )
@web.expose
@@ -647,20 +636,9 @@
params = util.Params( kwd )
msg = util.restore_text( params.get( 'msg', '' ) )
messagetype = params.get( 'messagetype', 'done' )
- # Build a list of tuples which are users followed by lists of groups and roles
- # [ ( user, [ group, group, group ], [ role, role ] ), ( user, [ group, group ], [ role ] ) ]
- users_groups_roles = []
users = trans.app.model.User.filter( trans.app.model.User.table.c.deleted==False ).order_by( trans.app.model.User.table.c.email ).all()
- for user in users:
- groups = []
- for uga in user.groups:
- groups.append( trans.app.model.Group.get( uga.group_id ) )
- roles = []
- for ura in user.non_private_roles:
- roles.append( trans.app.model.Role.get( ura.role_id ) )
- users_groups_roles.append( ( user, groups, roles ) )
return trans.fill_template( '/admin/dataset_security/users.mako',
- users_groups_roles=users_groups_roles,
+ users=users,
allow_user_deletion=trans.app.config.allow_user_deletion,
msg=msg,
messagetype=messagetype )
@@ -702,91 +680,6 @@
out_groups=out_groups,
msg=msg,
messagetype=messagetype )
- # Utility methods to enable removal of associations - redirects are key
- @web.expose
- @web.require_admin
- def remove_group_from_role( self, trans, **kwd ):
- params = util.Params( kwd )
- group_id = int( params.group_id )
- group = trans.app.model.Group.get( group_id )
- role_id = int( params.role_id )
- role = trans.app.model.Role.get( role_id )
- gra = trans.app.model.GroupRoleAssociation.filter( and_( trans.app.model.GroupRoleAssociation.table.c.group_id==group_id,
- trans.app.model.GroupRoleAssociation.table.c.role_id==role_id ) ).first()
- gra.delete()
- gra.flush()
- msg = "Group '%s' removed from role '%s'" % ( group.name, role.name )
- trans.response.send_redirect( web.url_for( action='roles', msg=util.sanitize_text( msg ), messagetype='done' ) )
- @web.expose
- @web.require_admin
- def remove_group_from_user( self, trans, **kwd ):
- params = util.Params( kwd )
- group_id = int( params.group_id )
- group = trans.app.model.Group.get( group_id )
- user_id = int( params.user_id )
- user = trans.app.model.User.get( user_id )
- uga = trans.app.model.UserGroupAssociation.filter( and_( trans.app.model.UserGroupAssociation.table.c.group_id==group_id,
- trans.app.model.UserGroupAssociation.table.c.user_id==user_id ) ).first()
- uga.delete()
- uga.flush()
- msg = "Group '%s' removed from user '%s'" % ( group.name, user.email )
- trans.response.send_redirect( web.url_for( action='users', msg=util.sanitize_text( msg ), messagetype='done' ) )
- @web.expose
- @web.require_admin
- def remove_role_from_group( self, trans, **kwd ):
- params = util.Params( kwd )
- role_id = int( params.role_id )
- role = trans.app.model.Role.get( role_id )
- group_id = int( params.group_id )
- group = trans.app.model.Group.get( group_id )
- gra = trans.app.model.GroupRoleAssociation.filter( and_( trans.app.model.GroupRoleAssociation.table.c.role_id==role_id,
- trans.app.model.GroupRoleAssociation.table.c.group_id==group_id ) ).first()
- gra.delete()
- gra.flush()
- msg = "Role '%s' removed from group '%s'" % ( role.name, group.name )
- trans.response.send_redirect( web.url_for( action='groups', msg=util.sanitize_text( msg ), messagetype='done' ) )
- @web.expose
- @web.require_admin
- def remove_role_from_user( self, trans, **kwd ):
- params = util.Params( kwd )
- user_id = int( params.user_id )
- user = trans.app.model.User.get( user_id )
- role_id = int( params.role_id )
- role = trans.app.model.Role.get( role_id )
- ura = trans.app.model.UserRoleAssociation.filter( and_( trans.app.model.UserRoleAssociation.table.c.user_id==user_id,
- trans.app.model.UserRoleAssociation.table.c.role_id==role_id ) ).first()
- ura.delete()
- ura.flush()
- msg = "Role '%s' removed from user '%s'" % ( role.name, user.email )
- trans.response.send_redirect( web.url_for( action='users', msg=util.sanitize_text( msg ), messagetype='done' ) )
- @web.expose
- @web.require_admin
- def remove_user_from_group( self, trans, **kwd ):
- params = util.Params( kwd )
- user_id = int( params.user_id )
- user = trans.app.model.User.get( user_id )
- group_id = int( params.group_id )
- group = trans.app.model.Group.get( group_id )
- uga = trans.app.model.UserGroupAssociation.filter( and_( trans.app.model.UserGroupAssociation.table.c.user_id==user_id,
- trans.app.model.UserGroupAssociation.table.c.group_id==group_id ) ).first()
- uga.delete()
- uga.flush()
- msg = "User '%s' removed from group '%s'" % ( user.email, group.name )
- trans.response.send_redirect( web.url_for( action='groups', msg=util.sanitize_text( msg ), messagetype='done' ) )
- @web.expose
- @web.require_admin
- def remove_user_from_role( self, trans, **kwd ):
- params = util.Params( kwd )
- user_id = int( params.user_id )
- user = trans.app.model.User.get( user_id )
- role_id = int( params.role_id )
- role = trans.app.model.Role.get( role_id )
- ura = trans.app.model.UserRoleAssociation.filter( and_( trans.app.model.UserRoleAssociation.table.c.user_id==user_id,
- trans.app.model.UserRoleAssociation.table.c.role_id==role_id ) ).first()
- ura.delete()
- ura.flush()
- msg = "User '%s' removed from role '%s'" % ( user.email, role.name )
- trans.response.send_redirect( web.url_for( action='roles', msg=util.sanitize_text( msg ), messagetype='done' ) )
# Galaxy Library Stuff
@web.expose
diff -r 22118cf46b0a -r d0c905db68db templates/admin/dataset_security/groups.mako
--- a/templates/admin/dataset_security/groups.mako Mon Apr 27 13:33:47 2009 -0400
+++ b/templates/admin/dataset_security/groups.mako Mon Apr 27 16:03:44 2009 -0400
@@ -2,7 +2,7 @@
<%namespace file="/message.mako" import="render_msg" />
## Render a row
-<%def name="render_row( group, members, roles, ctr, anchored, curr_anchor )">
+<%def name="render_row( group, ctr, anchored, curr_anchor )">
%if ctr % 2 == 1:
<tr class="odd_row">
%else:
@@ -18,34 +18,10 @@
</div>
</td>
<td>
- <ul>
- %for user in members:
- <li>
- <a href="${h.url_for( controller='admin', action='user', user_id=user.id )}">${user.email}</a>
- <a id="user-${user.id}-popup" class="popup-arrow" style="display: none;">▼</a>
- <div popupmenu="user-${user.id}-popup">
- <a class="action-button" href="${h.url_for( controller='admin', action='remove_user_from_group', group_id=group.id, user_id=user.id )}">Remove user from group</a>
- </div>
- </li>
- %endfor
- </ul>
+ ${len( group.members )}
</td>
<td>
- <ul>
- %for role in roles:
- <li>
- %if not role.type == trans.app.model.Role.types.PRIVATE:
- <a href="${h.url_for( controller='admin', action='role', role_id=role.id )}">${role.name}</a>
- <a id="role-${role.id}-popup" class="popup-arrow" style="display: none;">▼</a>
- <div popupmenu="role-${role.id}-popup">
- <a class="action-button" href="${h.url_for( controller='admin', action='remove_role_from_group', group_id=group.id, role_id=role.id )}">Remove role from group</a>
- </div>
- %else:
- ${role.name}
- %endif
- </li>
- %endfor
- </ul>
+ ${len( group.roles )}
%if not anchored:
<a name="${curr_anchor}"></a>
<div style="float: right;"><a href="#TOP">top</a></div>
@@ -65,12 +41,12 @@
${render_msg( msg, messagetype )}
%endif
-%if len( groups_members_roles ) == 0:
+%if len( groups ) == 0:
There are no Galaxy groups
%else:
<table class="manage-table colored" border="0" cellspacing="0" cellpadding="0" width="100%">
<%
- render_quick_find = len( groups_members_roles ) > 50
+ render_quick_find = len( groups ) > 50
ctr = 0
%>
%if render_quick_find:
@@ -91,34 +67,29 @@
%endif
<tr class="header">
<td>Name</td>
- <td>Associated Users</td>
- <td>Associated Roles</td>
+ <td>Users</td>
+ <td>Roles</td>
</tr>
- %for ctr, group_tuple in enumerate( groups_members_roles ):
- <%
- group = group_tuple[0]
- members = group_tuple[1]
- roles = group_tuple[2]
- %>
+ %for ctr, group in enumerate( groups ):
%if render_quick_find and not group.name.upper().startswith( curr_anchor ):
<% anchored = False %>
%endif
%if render_quick_find and group.name.upper().startswith( curr_anchor ):
%if not anchored:
- ${render_row( group, members, roles, ctr, anchored, curr_anchor )}
+ ${render_row( group, ctr, anchored, curr_anchor )}
<% anchored = True %>
%else:
- ${render_row( group, members, roles, ctr, anchored, curr_anchor )}
+ ${render_row( group, ctr, anchored, curr_anchor )}
%endif
%elif render_quick_find:
%for anchor in anchors[ anchor_loc: ]:
%if group.name.upper().startswith( anchor ):
%if not anchored:
<% curr_anchor = anchor %>
- ${render_row( group, members, roles, ctr, anchored, curr_anchor )}
+ ${render_row( group, ctr, anchored, curr_anchor )}
<% anchored = True %>
%else:
- ${render_row( group, members, roles, ctr, anchored, curr_anchor )}
+ ${render_row( group, ctr, anchored, curr_anchor )}
%endif
<%
anchor_loc = anchors.index( anchor )
@@ -127,7 +98,7 @@
%endif
%endfor
%else:
- ${render_row( group, members, roles, ctr, True, '' )}
+ ${render_row( group, ctr, True, '' )}
%endif
%endfor
</table>
diff -r 22118cf46b0a -r d0c905db68db templates/admin/dataset_security/roles.mako
--- a/templates/admin/dataset_security/roles.mako Mon Apr 27 13:33:47 2009 -0400
+++ b/templates/admin/dataset_security/roles.mako Mon Apr 27 16:03:44 2009 -0400
@@ -20,30 +20,10 @@
<td>${role.description}</td>
<td>${role.type}</td>
<td>
- <ul>
- %for ura in role.users:
- <li>
- <a href="${h.url_for( controller='admin', action='user', user_id=ura.user.id )}">${ura.user.email}</a>
- <a id="user-${ura.user.id}-popup" class="popup-arrow" style="display: none;">▼</a>
- <div popupmenu="user-${ura.user.id}-popup">
- <a class="action-button" href="${h.url_for( controller='admin', action='remove_role_from_user', role_id=role.id, user_id=ura.user.id )}">Remove user from role</a>
- </div>
- </li>
- %endfor
- </ul>
+ ${len( role.users )}
</td>
<td>
- <ul>
- %for gra in role.groups:
- <li>
- <a href="${h.url_for( controller='admin', action='group', group_id=gra.group.id )}">${gra.group.name}</a>
- <a id="group-${gra.group.id}-popup" class="popup-arrow" style="display: none;">▼</a>
- <div popupmenu="group-${gra.group.id}-popup">
- <a class="action-button" href="${h.url_for( controller='admin', action='remove_group_from_role', role_id=role.id, group_id=gra.group.id )}">Remove group from role</a>
- </div>
- </li>
- %endfor
- </ul>
+ ${len( role.groups )}
%if not anchored:
<a name="${curr_anchor}"></a>
<div style="float: right;"><a href="#TOP">top</a></div>
@@ -91,8 +71,8 @@
<td>Name</td>
<td>Description</td>
<td>Type</td>
- <td>Associated Users</td>
- <td>Associated Groups</td>
+ <td>Users</td>
+ <td>Groups</td>
</tr>
%for ctr, role in enumerate( roles ):
%if render_quick_find and not role.name.upper().startswith( curr_anchor ):
diff -r 22118cf46b0a -r d0c905db68db templates/admin/dataset_security/users.mako
--- a/templates/admin/dataset_security/users.mako Mon Apr 27 13:33:47 2009 -0400
+++ b/templates/admin/dataset_security/users.mako Mon Apr 27 16:03:44 2009 -0400
@@ -2,7 +2,7 @@
<%namespace file="/message.mako" import="render_msg" />
## Render a row
-<%def name="render_row( user, groups, roles, ctr, anchored, curr_anchor )">
+<%def name="render_row( user, ctr, anchored, curr_anchor )">
%if ctr % 2 == 1:
<tr class="odd_row">
%else:
@@ -20,30 +20,10 @@
</div>
</td>
<td>
- <ul>
- %for group in groups:
- <li>
- <a href="${h.url_for( controller='admin', action='group', group_id=group.id )}">${group.name}</a>
- <a id="group-${group.id}-popup" class="popup-arrow" style="display: none;">▼</a>
- <div popupmenu="group-${group.id}-popup">
- <a class="action-button" href="${h.url_for( controller='admin', action='remove_group_from_user', user_id=user.id, group_id=group.id )}">Remove group from user</a>
- </div>
- </li>
- %endfor
- </ul>
+ ${len( user.groups )}
</td>
<td>
- <ul>
- %for role in roles:
- <li>
- <a href="${h.url_for( controller='admin', action='role', role_id=role.id )}">${role.name}</a>
- <a id="role-${role.id}-popup" class="popup-arrow" style="display: none;">▼</a>
- <div popupmenu="role-${role.id}-popup">
- <a class="action-button" href="${h.url_for( controller='admin', action='remove_role_from_user', user_id=user.id, role_id=role.id )}">Remove role from user</a>
- </div>
- </li>
- %endfor
- </ul>
+ ${len( user.roles )}
%if not anchored:
<a name="${curr_anchor}"></a>
<div style="float: right;"><a href="#TOP">top</a></div>
@@ -65,12 +45,12 @@
${render_msg( msg, messagetype )}
%endif
-%if len( users_groups_roles ) == 0:
+%if len( users ) == 0:
There are no Galaxy users
%else:
<table class="manage-table colored" border="0" cellspacing="0" cellpadding="0" width="100%">
<%
- render_quick_find = len( users_groups_roles ) > 50
+ render_quick_find = len( users ) > 50
ctr = 0
%>
%if render_quick_find:
@@ -91,34 +71,29 @@
%endif
<tr class="header">
<td>Email</td>
- <td>Associated Groups</td>
- <td>Associated Roles</td>
+ <td>Groups</td>
+ <td>Roles</td>
</tr>
- %for ctr, user_tuple in enumerate( users_groups_roles ):
- <%
- user = user_tuple[0]
- groups = user_tuple[1]
- roles = user_tuple[2]
- %>
+ %for ctr, user in enumerate( users ):
%if render_quick_find and not user.email.upper().startswith( curr_anchor ):
<% anchored = False %>
%endif
%if render_quick_find and user.email.upper().startswith( curr_anchor ):
%if not anchored:
- ${render_row( user, groups, roles, ctr, anchored, curr_anchor )}
+ ${render_row( user, ctr, anchored, curr_anchor )}
<% anchored = True %>
%else:
- ${render_row( user, groups, roles, ctr, anchored, curr_anchor )}
+ ${render_row( user, ctr, anchored, curr_anchor )}
%endif
%elif render_quick_find:
%for anchor in anchors[ anchor_loc: ]:
%if user.email.upper().startswith( anchor ):
%if not anchored:
<% curr_anchor = anchor %>
- ${render_row( user, groups, roles, ctr, anchored, curr_anchor )}
+ ${render_row( user, ctr, anchored, curr_anchor )}
<% anchored = True %>
%else:
- ${render_row( user, groups, roles, ctr, anchored, curr_anchor )}
+ ${render_row( user, ctr, anchored, curr_anchor )}
%endif
<%
anchor_loc = anchors.index( anchor )
@@ -127,7 +102,7 @@
%endif
%endfor
%else:
- ${render_row( user, groups, roles, ctr, True, '' )}
+ ${render_row( user, ctr, True, '' )}
%endif
%endfor
</table>
diff -r 22118cf46b0a -r d0c905db68db test/base/twilltestcase.py
--- a/test/base/twilltestcase.py Mon Apr 27 13:33:47 2009 -0400
+++ b/test/base/twilltestcase.py Mon Apr 27 16:03:44 2009 -0400
@@ -787,29 +787,6 @@
self.check_page_for_string( check_str )
self.home()
- # Utility methods to test removal of associations
- def remove_role_from_group( self, role_id, role_name, group_id, group_name ):
- """Remove a role from a group"""
- self.home()
- self.visit_url( "%s/admin/remove_role_from_group?role_id=%s&group_id=%s" % ( self.url, role_id, group_id ) )
- check_str = "Role '%s' removed from group '%s'" % ( role_name, group_name )
- self.check_page_for_string( check_str )
- self.home()
- def remove_user_from_group( self, user_id, email, group_id, group_name ):
- """Remove a user from a group"""
- self.home()
- self.visit_url( "%s/admin/remove_user_from_group?user_id=%s&group_id=%s" % ( self.url, user_id, group_id ) )
- check_str = "User '%s' removed from group '%s'" % ( email, group_name )
- self.check_page_for_string( check_str )
- self.home()
- def remove_user_from_role( self, user_id, email, role_id, role_name ):
- """Remove a user from a role"""
- self.home()
- self.visit_url( "%s/admin/remove_user_from_role?user_id=%s&role_id=%s" % ( self.url, user_id, role_id ) )
- check_str = "User '%s' removed from role '%s'" % ( email, role_name )
- self.check_page_for_string( check_str )
- self.home()
-
# Library stuff
def create_library( self, name='Library One', description='This is Library One' ):
"""Create a new library"""
diff -r 22118cf46b0a -r d0c905db68db tools/stats/column_maker.py
--- a/tools/stats/column_maker.py Mon Apr 27 13:33:47 2009 -0400
+++ b/tools/stats/column_maker.py Mon Apr 27 16:03:44 2009 -0400
@@ -13,10 +13,6 @@
sys.stderr.write( msg )
sys.exit()
-data_err = "This tool can only be used with tab-delimited data."
-columns_err = "Missing or invalid 'columns' metadata value, click the pencil icon in the history item and select the Auto-detect option to correct it. "
-column_types_err = "Missing or invalid 'column_types' metadata value, click the pencil icon in the history item and select the Auto-detect option to correct it. "
-invalid_metadata_err = "The 'columns' metadata setting does not conform to the 'column_types' metadata setting, click the pencil icon in the history item and select the Auto-detect option to correct it. "
inp_file = sys.argv[1]
out_file = sys.argv[2]
expr = sys.argv[3]
@@ -24,16 +20,16 @@
try:
in_columns = int( sys.argv[5] )
except:
- stop_err( columns_err + data_err )
+ stop_err( "Missing or invalid 'columns' metadata value, click the pencil icon in the history item and select the Auto-detect option to correct it. This tool can only be used with tab-delimited data." )
if in_columns < 2:
# To be considered tabular, data must fulfill requirements of the sniff.is_column_based() method.
- stop_err( columns_err + data_err )
+ stop_err( "Missing or invalid 'columns' metadata value, click the pencil icon in the history item and select the Auto-detect option to correct it. This tool can only be used with tab-delimited data." )
try:
in_column_types = sys.argv[6].split( ',' )
except:
- stop_err( column_types_err + data_err )
+ stop_err( "Missing or invalid 'column_types' metadata value, click the pencil icon in the history item and select the Auto-detect option to correct it. This tool can only be used with tab-delimited data." )
if len( in_column_types ) != in_columns:
- stop_err( invalid_metadata_err + data_err )
+ stop_err( "The 'columns' metadata setting does not conform to the 'column_types' metadata setting, click the pencil icon in the history item and select the Auto-detect option to correct it. This tool can only be used with tab-delimited data." )
# Unescape if input has been escaped
mapped_str = {
1
0
details: http://www.bx.psu.edu/hg/galaxy/rev/e0534b25c282
changeset: 2378:e0534b25c282
user: guru
date: Tue Apr 28 15:32:44 2009 -0400
description:
Updated bx-python.
1 file(s) affected in this change:
eggs.ini
diffs (19 lines):
diff -r 635d97a3a228 -r e0534b25c282 eggs.ini
--- a/eggs.ini Tue Apr 28 14:18:21 2009 -0400
+++ b/eggs.ini Tue Apr 28 15:32:44 2009 -0400
@@ -59,13 +59,13 @@
MySQL_python = _5.0.67_static
python_lzo = _static
flup = .dev_r2311
-bx_python = _dev_r130f083b56b9
+bx_python = _dev_r4bf1f32e6b76
nose = .dev_r101
; source location, necessary for scrambling
[source]
numpy = http://downloads.sourceforge.net/numpy/numpy-1.2.1.tar.gz
-bx_python = http://bitbucket.org/james_taylor/bx-python/get/130f083b56b9.bz2
+bx_python = http://bitbucket.org/james_taylor/bx-python/get/4bf1f32e6b76.bz2
Cheetah = http://voxel.dl.sourceforge.net/sourceforge/cheetahtemplate/Cheetah-1.0.tar.gz
DRMAA_python = http://gridengine.sunsource.net/files/documents/7/36/DRMAA-python-0.2.tar.gz
MySQL_python = http://superb-west.dl.sourceforge.net/sourceforge/mysql-python/MySQL-python… http://mysql.mirrors.pair.com/Downloads/MySQL-5.0/mysql-5.0.67.tar.gz
1
0
29 Apr '09
details: http://www.bx.psu.edu/hg/galaxy/rev/635d97a3a228
changeset: 2377:635d97a3a228
user: James Taylor <james(a)jamestaylor.org>
date: Tue Apr 28 14:18:21 2009 -0400
description:
Fix a problem with dynamic selects depending on other dynamic selects when setting values at runtime in workflows
2 file(s) affected in this change:
lib/galaxy/tools/__init__.py
lib/galaxy/tools/parameters/basic.py
diffs (43 lines):
diff -r 87885ab394a0 -r 635d97a3a228 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py Tue Apr 28 09:44:35 2009 -0400
+++ b/lib/galaxy/tools/__init__.py Tue Apr 28 14:18:21 2009 -0400
@@ -686,12 +686,12 @@
# to determine the current case
test_value = input.test_param.get_initial_value( trans, context )
current_case = input.get_current_case( test_value, trans )
- # Recursively fill in state for selected case
- self.fill_in_new_state( trans, input.cases[current_case].inputs, s, context )
# Store the current case in a special value
s['__current_case__'] = current_case
# Store the value of the test element
s[ input.test_param.name ] = test_value
+ # Recursively fill in state for selected case
+ self.fill_in_new_state( trans, input.cases[current_case].inputs, s, context )
else:
# `input` is just a plain parameter, get its default value
state[ input.name ] = input.get_initial_value( trans, context )
diff -r 87885ab394a0 -r 635d97a3a228 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py Tue Apr 28 09:44:35 2009 -0400
+++ b/lib/galaxy/tools/parameters/basic.py Tue Apr 28 14:18:21 2009 -0400
@@ -569,6 +569,11 @@
def value_to_basic( self, value, app ):
if isinstance( value, UnvalidatedValue ):
return { "__class__": "UnvalidatedValue", "value": value.value }
+ elif isinstance( value, RuntimeValue ):
+ # Need to handle runtime value's ourself since delegating to the
+ # parent method causes the value to be turned into a string, which
+ # breaks multiple selection
+ return { "__class__": "RuntimeValue" }
return value
def value_from_basic( self, value, app, ignore_errors=False ):
if isinstance( value, dict ) and value["__class__"] == "UnvalidatedValue":
@@ -598,6 +603,9 @@
dep_value = context[ dep_name ]
# Dependency on a dataset that does not yet exist
if isinstance( dep_value, DummyDataset ):
+ return True
+ # Dependency on a value that has not been checked
+ if isinstance( dep_value, UnvalidatedValue ):
return True
# Dependency on a value that does not yet exist
if isinstance( dep_value, RuntimeValue ):
1
0
29 Apr '09
details: http://www.bx.psu.edu/hg/galaxy/rev/7fd4f748b0ca
changeset: 2375:7fd4f748b0ca
user: Dan Blankenberg <dan(a)bx.psu.edu>
date: Mon Apr 27 16:30:08 2009 -0400
description:
Remove direct references of the model from the cleanup_datasets_fix migration script.
1 file(s) affected in this change:
lib/galaxy/model/migrate/versions/0005_cleanup_datasets_fix.py
diffs (709 lines):
diff -r d0c905db68db -r 7fd4f748b0ca lib/galaxy/model/migrate/versions/0005_cleanup_datasets_fix.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0005_cleanup_datasets_fix.py Mon Apr 27 16:30:08 2009 -0400
@@ -0,0 +1,705 @@
+import sys, logging, os, time, datetime, errno
+
+log = logging.getLogger( __name__ )
+log.setLevel(logging.DEBUG)
+handler = logging.StreamHandler( sys.stdout )
+format = "%(name)s %(levelname)s %(asctime)s %(message)s"
+formatter = logging.Formatter( format )
+handler.setFormatter( formatter )
+log.addHandler( handler )
+
+from migrate import migrate_engine
+from sqlalchemy import and_
+
+from sqlalchemy import *
+now = datetime.datetime.utcnow
+from sqlalchemy.orm import *
+
+from galaxy.model.orm.ext.assignmapper import assign_mapper
+
+from galaxy.model.custom_types import *
+
+from galaxy.util.bunch import Bunch
+
+
+metadata = MetaData( migrate_engine )
+context = scoped_session( sessionmaker( autoflush=False, transactional=False ) )
+
+
+## classes
+def get_permitted_actions( **kwds ):
+ return Bunch()
+
+def directory_hash_id( id ):
+ s = str( id )
+ l = len( s )
+ # Shortcut -- ids 0-999 go under ../000/
+ if l < 4:
+ return [ "000" ]
+ # Pad with zeros until a multiple of three
+ padded = ( ( 3 - len( s ) % 3 ) * "0" ) + s
+ # Drop the last three digits -- 1000 files per directory
+ padded = padded[:-3]
+ # Break into chunks of three
+ return [ padded[i*3:(i+1)*3] for i in range( len( padded ) // 3 ) ]
+
+
+class Dataset( object ):
+ states = Bunch( NEW = 'new',
+ UPLOAD = 'upload',
+ QUEUED = 'queued',
+ RUNNING = 'running',
+ OK = 'ok',
+ EMPTY = 'empty',
+ ERROR = 'error',
+ DISCARDED = 'discarded' )
+ permitted_actions = get_permitted_actions( filter='DATASET' )
+ file_path = "/tmp/"
+ engine = None
+ def __init__( self, id=None, state=None, external_filename=None, extra_files_path=None, file_size=None, purgable=True ):
+ self.id = id
+ self.state = state
+ self.deleted = False
+ self.purged = False
+ self.purgable = purgable
+ self.external_filename = external_filename
+ self._extra_files_path = extra_files_path
+ self.file_size = file_size
+ def get_file_name( self ):
+ if not self.external_filename:
+ assert self.id is not None, "ID must be set before filename used (commit the object)"
+ # First try filename directly under file_path
+ filename = os.path.join( self.file_path, "dataset_%d.dat" % self.id )
+ # Only use that filename if it already exists (backward compatibility),
+ # otherwise construct hashed path
+ if not os.path.exists( filename ):
+ dir = os.path.join( self.file_path, *directory_hash_id( self.id ) )
+ # Create directory if it does not exist
+ try:
+ os.makedirs( dir )
+ except OSError, e:
+ # File Exists is okay, otherwise reraise
+ if e.errno != errno.EEXIST:
+ raise
+ # Return filename inside hashed directory
+ return os.path.abspath( os.path.join( dir, "dataset_%d.dat" % self.id ) )
+ else:
+ filename = self.external_filename
+ # Make filename absolute
+ return os.path.abspath( filename )
+ def set_file_name ( self, filename ):
+ if not filename:
+ self.external_filename = None
+ else:
+ self.external_filename = filename
+ file_name = property( get_file_name, set_file_name )
+ @property
+ def extra_files_path( self ):
+ if self._extra_files_path:
+ path = self._extra_files_path
+ else:
+ path = os.path.join( self.file_path, "dataset_%d_files" % self.id )
+ #only use path directly under self.file_path if it exists
+ if not os.path.exists( path ):
+ path = os.path.join( os.path.join( self.file_path, *directory_hash_id( self.id ) ), "dataset_%d_files" % self.id )
+ # Make path absolute
+ return os.path.abspath( path )
+ def get_size( self ):
+ """Returns the size of the data on disk"""
+ if self.file_size:
+ return self.file_size
+ else:
+ try:
+ return os.path.getsize( self.file_name )
+ except OSError:
+ return 0
+ def set_size( self ):
+ """Returns the size of the data on disk"""
+ try:
+ if not self.file_size:
+ self.file_size = os.path.getsize( self.file_name )
+ except OSError:
+ self.file_size = 0
+ def has_data( self ):
+ """Detects whether there is any data"""
+ return self.get_size() > 0
+ def mark_deleted( self, include_children=True ):
+ self.deleted = True
+ # FIXME: sqlalchemy will replace this
+ def _delete(self):
+ """Remove the file that corresponds to this data"""
+ try:
+ os.remove(self.data.file_name)
+ except OSError, e:
+ log.critical('%s delete error %s' % (self.__class__.__name__, e))
+
+class DatasetInstance( object ):
+ """A base class for all 'dataset instances', HDAs, LDAs, etc"""
+ states = Dataset.states
+ permitted_actions = Dataset.permitted_actions
+ def __init__( self, id=None, hid=None, name=None, info=None, blurb=None, peek=None, extension=None,
+ dbkey=None, metadata=None, history=None, dataset=None, deleted=False, designation=None,
+ parent_id=None, validation_errors=None, visible=True, create_dataset = False ):
+ self.name = name or "Unnamed dataset"
+ self.id = id
+ self.info = info
+ self.blurb = blurb
+ self.peek = peek
+ self.extension = extension
+ self.designation = designation
+ self.metadata = metadata or dict()
+ if dbkey: #dbkey is stored in metadata, only set if non-zero, or else we could clobber one supplied by input 'metadata'
+ self.dbkey = dbkey
+ self.deleted = deleted
+ self.visible = visible
+ # Relationships
+ if not dataset and create_dataset:
+ dataset = Dataset( state=Dataset.states.NEW )
+ dataset.flush()
+ self.dataset = dataset
+ self.parent_id = parent_id
+ self.validation_errors = validation_errors
+ @property
+ def ext( self ):
+ return self.extension
+ def get_dataset_state( self ):
+ return self.dataset.state
+ def set_dataset_state ( self, state ):
+ self.dataset.state = state
+ self.dataset.flush() #flush here, because hda.flush() won't flush the Dataset object
+ state = property( get_dataset_state, set_dataset_state )
+ def get_file_name( self ):
+ return self.dataset.get_file_name()
+ def set_file_name (self, filename):
+ return self.dataset.set_file_name( filename )
+ file_name = property( get_file_name, set_file_name )
+ @property
+ def extra_files_path( self ):
+ return self.dataset.extra_files_path
+ @property
+ def datatype( self ):
+ return datatypes_registry.get_datatype_by_extension( self.extension )
+ def get_metadata( self ):
+ if not hasattr( self, '_metadata_collection' ) or self._metadata_collection.parent != self: #using weakref to store parent (to prevent circ ref), does a context.clear() cause parent to be invalidated, while still copying over this non-database attribute?
+ self._metadata_collection = MetadataCollection( self )
+ return self._metadata_collection
+ def set_metadata( self, bunch ):
+ # Needs to accept a MetadataCollection, a bunch, or a dict
+ self._metadata = self.metadata.make_dict_copy( bunch )
+ metadata = property( get_metadata, set_metadata )
+ # This provide backwards compatibility with using the old dbkey
+ # field in the database. That field now maps to "old_dbkey" (see mapping.py).
+ def get_dbkey( self ):
+ dbkey = self.metadata.dbkey
+ if not isinstance(dbkey, list): dbkey = [dbkey]
+ if dbkey in [[None], []]: return "?"
+ return dbkey[0]
+ def set_dbkey( self, value ):
+ if "dbkey" in self.datatype.metadata_spec:
+ if not isinstance(value, list):
+ self.metadata.dbkey = [value]
+ else:
+ self.metadata.dbkey = value
+ dbkey = property( get_dbkey, set_dbkey )
+ def change_datatype( self, new_ext ):
+ self.clear_associated_files()
+ datatypes_registry.change_datatype( self, new_ext )
+ def get_size( self ):
+ """Returns the size of the data on disk"""
+ return self.dataset.get_size()
+ def set_size( self ):
+ """Returns the size of the data on disk"""
+ return self.dataset.set_size()
+ def has_data( self ):
+ """Detects whether there is any data"""
+ return self.dataset.has_data()
+ def get_raw_data( self ):
+ """Returns the full data. To stream it open the file_name and read/write as needed"""
+ return self.datatype.get_raw_data( self )
+ def write_from_stream( self, stream ):
+ """Writes data from a stream"""
+ self.datatype.write_from_stream(self, stream)
+ def set_raw_data( self, data ):
+ """Saves the data on the disc"""
+ self.datatype.set_raw_data(self, data)
+ def get_mime( self ):
+ """Returns the mime type of the data"""
+ return datatypes_registry.get_mimetype_by_extension( self.extension.lower() )
+ def set_peek( self ):
+ return self.datatype.set_peek( self )
+ def set_multi_byte_peek( self ):
+ return self.datatype.set_multi_byte_peek( self )
+ def init_meta( self, copy_from=None ):
+ return self.datatype.init_meta( self, copy_from=copy_from )
+ def set_meta( self, **kwd ):
+ self.clear_associated_files( metadata_safe = True )
+ return self.datatype.set_meta( self, **kwd )
+ def set_readonly_meta( self, **kwd ):
+ return self.datatype.set_readonly_meta( self, **kwd )
+ def missing_meta( self, **kwd ):
+ return self.datatype.missing_meta( self, **kwd )
+ def as_display_type( self, type, **kwd ):
+ return self.datatype.as_display_type( self, type, **kwd )
+ def display_peek( self ):
+ return self.datatype.display_peek( self )
+ def display_name( self ):
+ return self.datatype.display_name( self )
+ def display_info( self ):
+ return self.datatype.display_info( self )
+ def get_converted_files_by_type( self, file_type ):
+ valid = []
+ for assoc in self.implicitly_converted_datasets:
+ if not assoc.deleted and assoc.type == file_type:
+ valid.append( assoc.dataset )
+ return valid
+ def clear_associated_files( self, metadata_safe = False, purge = False ):
+ raise 'Unimplemented'
+ def get_child_by_designation(self, designation):
+ for child in self.children:
+ if child.designation == designation:
+ return child
+ return None
+ def get_converter_types(self):
+ return self.datatype.get_converter_types( self, datatypes_registry)
+ def find_conversion_destination( self, accepted_formats, **kwd ):
+ """Returns ( target_ext, exisiting converted dataset )"""
+ return self.datatype.find_conversion_destination( self, accepted_formats, datatypes_registry, **kwd )
+ def add_validation_error( self, validation_error ):
+ self.validation_errors.append( validation_error )
+ def extend_validation_errors( self, validation_errors ):
+ self.validation_errors.extend(validation_errors)
+ def mark_deleted( self, include_children=True ):
+ self.deleted = True
+ if include_children:
+ for child in self.children:
+ child.mark_deleted()
+ def mark_undeleted( self, include_children=True ):
+ self.deleted = False
+ if include_children:
+ for child in self.children:
+ child.mark_undeleted()
+ def undeletable( self ):
+ if self.purged:
+ return False
+ return True
+ @property
+ def source_library_dataset( self ):
+ def get_source( dataset ):
+ if isinstance( dataset, LibraryDatasetDatasetAssociation ):
+ if dataset.library_dataset:
+ return ( dataset, dataset.library_dataset )
+ if dataset.copied_from_library_dataset_dataset_association:
+ source = get_source( dataset.copied_from_library_dataset_dataset_association )
+ if source:
+ return source
+ if dataset.copied_from_history_dataset_association:
+ source = get_source( dataset.copied_from_history_dataset_association )
+ if source:
+ return source
+ return ( None, None )
+ return get_source( self )
+
+
+class HistoryDatasetAssociation( DatasetInstance ):
+ def __init__( self,
+ hid = None,
+ history = None,
+ copied_from_history_dataset_association = None,
+ copied_from_library_dataset_dataset_association = None,
+ **kwd ):
+ DatasetInstance.__init__( self, **kwd )
+ self.hid = hid
+ # Relationships
+ self.history = history
+ self.copied_from_history_dataset_association = copied_from_history_dataset_association
+ self.copied_from_library_dataset_dataset_association = copied_from_library_dataset_dataset_association
+ def copy( self, copy_children = False, parent_id = None, target_history = None ):
+ hda = HistoryDatasetAssociation( hid=self.hid,
+ name=self.name,
+ info=self.info,
+ blurb=self.blurb,
+ peek=self.peek,
+ extension=self.extension,
+ dbkey=self.dbkey,
+ dataset = self.dataset,
+ visible=self.visible,
+ deleted=self.deleted,
+ parent_id=parent_id,
+ copied_from_history_dataset_association=self,
+ history = target_history )
+ hda.flush()
+ hda.set_size()
+ # Need to set after flushed, as MetadataFiles require dataset.id
+ hda.metadata = self.metadata
+ if copy_children:
+ for child in self.children:
+ child_copy = child.copy( copy_children = copy_children, parent_id = hda.id )
+ if not self.datatype.copy_safe_peek:
+ # In some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
+ hda.set_peek()
+ hda.flush()
+ return hda
+ def to_library_dataset_dataset_association( self, target_folder, replace_dataset=None, parent_id=None ):
+ if replace_dataset:
+ # The replace_dataset param ( when not None ) refers to a LibraryDataset that is being replaced with a new version.
+ library_dataset = replace_dataset
+ else:
+ # If replace_dataset is None, the Library level permissions will be taken from the folder and applied to the new
+ # LibraryDataset, and the current user's DefaultUserPermissions will be applied to the associated Dataset.
+ library_dataset = LibraryDataset( folder=target_folder, name=self.name, info=self.info )
+ library_dataset.flush()
+ ldda = LibraryDatasetDatasetAssociation( name=self.name,
+ info=self.info,
+ blurb=self.blurb,
+ peek=self.peek,
+ extension=self.extension,
+ dbkey=self.dbkey,
+ dataset=self.dataset,
+ library_dataset=library_dataset,
+ visible=self.visible,
+ deleted=self.deleted,
+ parent_id=parent_id,
+ copied_from_history_dataset_association=self,
+ user=self.history.user )
+ ldda.flush()
+ # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
+ # Must set metadata after ldda flushed, as MetadataFiles require ldda.id
+ ldda.metadata = self.metadata
+ if not replace_dataset:
+ target_folder.add_library_dataset( library_dataset, genome_build=ldda.dbkey )
+ target_folder.flush()
+ library_dataset.library_dataset_dataset_association_id = ldda.id
+ library_dataset.flush()
+ for child in self.children:
+ child_copy = child.to_library_dataset_dataset_association( target_folder=target_folder, replace_dataset=replace_dataset, parent_id=ldda.id )
+ if not self.datatype.copy_safe_peek:
+ # In some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
+ ldda.set_peek()
+ ldda.flush()
+ return ldda
+ def clear_associated_files( self, metadata_safe = False, purge = False ):
+ # metadata_safe = True means to only clear when assoc.metadata_safe == False
+ for assoc in self.implicitly_converted_datasets:
+ if not metadata_safe or not assoc.metadata_safe:
+ assoc.clear( purge = purge )
+
+
+
+class LibraryDatasetDatasetAssociation( DatasetInstance ):
+ def __init__( self,
+ copied_from_history_dataset_association=None,
+ copied_from_library_dataset_dataset_association=None,
+ library_dataset=None,
+ user=None,
+ **kwd ):
+ DatasetInstance.__init__( self, **kwd )
+ self.copied_from_history_dataset_association = copied_from_history_dataset_association
+ self.copied_from_library_dataset_dataset_association = copied_from_library_dataset_dataset_association
+ self.library_dataset = library_dataset
+ self.user = user
+ def to_history_dataset_association( self, target_history, parent_id=None ):
+ hid = target_history._next_hid()
+ hda = HistoryDatasetAssociation( name=self.name,
+ info=self.info,
+ blurb=self.blurb,
+ peek=self.peek,
+ extension=self.extension,
+ dbkey=self.dbkey,
+ dataset=self.dataset,
+ visible=self.visible,
+ deleted=self.deleted,
+ parent_id=parent_id,
+ copied_from_library_dataset_dataset_association=self,
+ history=target_history,
+ hid=hid )
+ hda.flush()
+ hda.metadata = self.metadata #need to set after flushed, as MetadataFiles require dataset.id
+ for child in self.children:
+ child_copy = child.to_history_dataset_association( target_history=target_history, parent_id=hda.id )
+ if not self.datatype.copy_safe_peek:
+ hda.set_peek() #in some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
+ hda.flush()
+ return hda
+ def copy( self, copy_children = False, parent_id = None, target_folder = None ):
+ ldda = LibraryDatasetDatasetAssociation( name=self.name,
+ info=self.info,
+ blurb=self.blurb,
+ peek=self.peek,
+ extension=self.extension,
+ dbkey=self.dbkey,
+ dataset=self.dataset,
+ visible=self.visible,
+ deleted=self.deleted,
+ parent_id=parent_id,
+ copied_from_library_dataset_dataset_association=self,
+ folder=target_folder )
+ ldda.flush()
+ # Need to set after flushed, as MetadataFiles require dataset.id
+ ldda.metadata = self.metadata
+ if copy_children:
+ for child in self.children:
+ child_copy = child.copy( copy_children = copy_children, parent_id = ldda.id )
+ if not self.datatype.copy_safe_peek:
+ # In some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
+ ldda.set_peek()
+ ldda.flush()
+ return ldda
+ def clear_associated_files( self, metadata_safe = False, purge = False ):
+ return
+ def get_library_item_info_templates( self, template_list=[], restrict=False ):
+ # If restrict is True, we'll return only those templates directly associated with this LibraryDatasetDatasetAssociation
+ if self.library_dataset_dataset_info_template_associations:
+ template_list.extend( [ lddita.library_item_info_template for lddita in self.library_dataset_dataset_info_template_associations if lddita.library_item_info_template not in template_list ] )
+ self.library_dataset.get_library_item_info_templates( template_list, restrict )
+ return template_list
+
+
+
+class LibraryDataset( object ):
+ # This class acts as a proxy to the currently selected LDDA
+ def __init__( self, folder=None, order_id=None, name=None, info=None, library_dataset_dataset_association=None, **kwd ):
+ self.folder = folder
+ self.order_id = order_id
+ self.name = name
+ self.info = info
+ self.library_dataset_dataset_association = library_dataset_dataset_association
+ def set_library_dataset_dataset_association( self, ldda ):
+ self.library_dataset_dataset_association = ldda
+ ldda.library_dataset = self
+ ldda.flush()
+ self.flush()
+ def get_info( self ):
+ if self.library_dataset_dataset_association:
+ return self.library_dataset_dataset_association.info
+ elif self._info:
+ return self._info
+ else:
+ return 'no info'
+ def set_info( self, info ):
+ self._info = info
+ info = property( get_info, set_info )
+ def get_name( self ):
+ if self.library_dataset_dataset_association:
+ return self.library_dataset_dataset_association.name
+ elif self._name:
+ return self._name
+ else:
+ return 'Unnamed dataset'
+ def set_name( self, name ):
+ self._name = name
+ name = property( get_name, set_name )
+ def display_name( self ):
+ self.library_dataset_dataset_association.display_name()
+ def get_purged( self ):
+ return self.library_dataset_dataset_association.dataset.purged
+ def set_purged( self, purged ):
+ if purged:
+ raise Exception( "Not implemented" )
+ if not purged and self.purged:
+ raise Exception( "Cannot unpurge once purged" )
+ purged = property( get_purged, set_purged )
+ def get_library_item_info_templates( self, template_list=[], restrict=False ):
+ # If restrict is True, we'll return only those templates directly associated with this LibraryDataset
+ if self.library_dataset_info_template_associations:
+ template_list.extend( [ ldita.library_item_info_template for ldita in self.library_dataset_info_template_associations if ldita.library_item_info_template not in template_list ] )
+ if restrict not in [ 'True', True ]:
+ self.folder.get_library_item_info_templates( template_list, restrict )
+ return template_list
+
+##tables
+
+
+Dataset.table = Table( "dataset", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
+ Column( "state", TrimmedString( 64 ) ),
+ Column( "deleted", Boolean, index=True, default=False ),
+ Column( "purged", Boolean, index=True, default=False ),
+ Column( "purgable", Boolean, default=True ),
+ Column( "external_filename" , TEXT ),
+ Column( "_extra_files_path", TEXT ),
+ Column( 'file_size', Numeric( 15, 0 ) ) )
+
+
+
+HistoryDatasetAssociation.table = Table( "history_dataset_association", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "dataset_id", Integer, ForeignKey( "dataset.id" ), index=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "copied_from_history_dataset_association_id", Integer, ForeignKey( "history_dataset_association.id" ), nullable=True ),
+ Column( "copied_from_library_dataset_dataset_association_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), nullable=True ),
+ Column( "hid", Integer ),
+ Column( "name", TrimmedString( 255 ) ),
+ Column( "info", TrimmedString( 255 ) ),
+ Column( "blurb", TrimmedString( 255 ) ),
+ Column( "peek" , TEXT ),
+ Column( "extension", TrimmedString( 64 ) ),
+ Column( "metadata", MetadataType(), key="_metadata" ),
+ Column( "parent_id", Integer, ForeignKey( "history_dataset_association.id" ), nullable=True ),
+ Column( "designation", TrimmedString( 255 ) ),
+ Column( "deleted", Boolean, index=True, default=False ),
+ Column( "visible", Boolean ) )
+
+
+LibraryDatasetDatasetAssociation.table = Table( "library_dataset_dataset_association", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "library_dataset_id", Integer, ForeignKey( "library_dataset.id" ), index=True ),
+ Column( "dataset_id", Integer, ForeignKey( "dataset.id" ), index=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "copied_from_history_dataset_association_id", Integer, ForeignKey( "history_dataset_association.id", use_alter=True, name='history_dataset_association_dataset_id_fkey' ), nullable=True ),
+ Column( "copied_from_library_dataset_dataset_association_id", Integer, ForeignKey( "library_dataset_dataset_association.id", use_alter=True, name='library_dataset_dataset_association_id_fkey' ), nullable=True ),
+ Column( "name", TrimmedString( 255 ) ),
+ Column( "info", TrimmedString( 255 ) ),
+ Column( "blurb", TrimmedString( 255 ) ),
+ Column( "peek" , TEXT ),
+ Column( "extension", TrimmedString( 64 ) ),
+ Column( "metadata", MetadataType(), key="_metadata" ),
+ Column( "parent_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), nullable=True ),
+ Column( "designation", TrimmedString( 255 ) ),
+ Column( "deleted", Boolean, index=True, default=False ),
+ Column( "visible", Boolean ),
+ Column( "message", TrimmedString( 255 ) ) )
+
+LibraryDataset.table = Table( "library_dataset", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "library_dataset_dataset_association_id", Integer, ForeignKey( "library_dataset_dataset_association.id", use_alter=True, name="library_dataset_dataset_association_id_fk" ), nullable=True, index=True ),#current version of dataset, if null, there is not a current version selected
+ Column( "order_id", Integer ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "name", TrimmedString( 255 ), key="_name" ), #when not None/null this will supercede display in library (but not when imported into user's history?)
+ Column( "info", TrimmedString( 255 ), key="_info" ), #when not None/null this will supercede display in library (but not when imported into user's history?)
+ Column( "deleted", Boolean, index=True, default=False ) )
+
+
+
+##mappers
+
+
+assign_mapper( context, Dataset, Dataset.table,
+ properties=dict(
+ history_associations=relation(
+ HistoryDatasetAssociation,
+ primaryjoin=( Dataset.table.c.id == HistoryDatasetAssociation.table.c.dataset_id ) ),
+ active_history_associations=relation(
+ HistoryDatasetAssociation,
+ primaryjoin=( ( Dataset.table.c.id == HistoryDatasetAssociation.table.c.dataset_id ) & ( HistoryDatasetAssociation.table.c.deleted == False ) ) ),
+ library_associations=relation(
+ LibraryDatasetDatasetAssociation,
+ primaryjoin=( Dataset.table.c.id == LibraryDatasetDatasetAssociation.table.c.dataset_id ) ),
+ active_library_associations=relation(
+ LibraryDatasetDatasetAssociation,
+ primaryjoin=( ( Dataset.table.c.id == LibraryDatasetDatasetAssociation.table.c.dataset_id ) & ( LibraryDatasetDatasetAssociation.table.c.deleted == False ) ) )
+ ) )
+
+
+assign_mapper( context, HistoryDatasetAssociation, HistoryDatasetAssociation.table,
+ properties=dict(
+ dataset=relation(
+ Dataset,
+ primaryjoin=( Dataset.table.c.id == HistoryDatasetAssociation.table.c.dataset_id ), lazy=False ),
+ # .history defined in History mapper
+ copied_to_history_dataset_associations=relation(
+ HistoryDatasetAssociation,
+ primaryjoin=( HistoryDatasetAssociation.table.c.copied_from_history_dataset_association_id == HistoryDatasetAssociation.table.c.id ),
+ backref=backref( "copied_from_history_dataset_association", primaryjoin=( HistoryDatasetAssociation.table.c.copied_from_history_dataset_association_id == HistoryDatasetAssociation.table.c.id ), remote_side=[HistoryDatasetAssociation.table.c.id], uselist=False ) ),
+ copied_to_library_dataset_dataset_associations=relation(
+ LibraryDatasetDatasetAssociation,
+ primaryjoin=( HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id == LibraryDatasetDatasetAssociation.table.c.id ),
+ backref=backref( "copied_from_history_dataset_association", primaryjoin=( HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id == LibraryDatasetDatasetAssociation.table.c.id ), remote_side=[LibraryDatasetDatasetAssociation.table.c.id], uselist=False ) ),
+ children=relation(
+ HistoryDatasetAssociation,
+ primaryjoin=( HistoryDatasetAssociation.table.c.parent_id == HistoryDatasetAssociation.table.c.id ),
+ backref=backref( "parent", primaryjoin=( HistoryDatasetAssociation.table.c.parent_id == HistoryDatasetAssociation.table.c.id ), remote_side=[HistoryDatasetAssociation.table.c.id], uselist=False ) ),
+ visible_children=relation(
+ HistoryDatasetAssociation,
+ primaryjoin=( ( HistoryDatasetAssociation.table.c.parent_id == HistoryDatasetAssociation.table.c.id ) & ( HistoryDatasetAssociation.table.c.visible == True ) ) )
+ ) )
+
# ORM configuration for LibraryDatasetDatasetAssociation (LDDA), the library
# counterpart of an HDA: it links a Dataset into a LibraryDataset and tracks
# copies to/from other library and history items.
assign_mapper( context, LibraryDatasetDatasetAssociation, LibraryDatasetDatasetAssociation.table,
    properties=dict(
        # Underlying physical dataset (join inferred from the foreign key).
        dataset=relation( Dataset ),
        # The LibraryDataset this association version belongs to.
        library_dataset = relation( LibraryDataset,
            primaryjoin=( LibraryDatasetDatasetAssociation.table.c.library_dataset_id == LibraryDataset.table.c.id ) ),
        # Self-referential copy tracking among LDDAs, with a backref to the
        # source LDDA on the copy.
        copied_to_library_dataset_dataset_associations=relation(
            LibraryDatasetDatasetAssociation,
            primaryjoin=( LibraryDatasetDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id == LibraryDatasetDatasetAssociation.table.c.id ),
            backref=backref( "copied_from_library_dataset_dataset_association", primaryjoin=( LibraryDatasetDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id == LibraryDatasetDatasetAssociation.table.c.id ), remote_side=[LibraryDatasetDatasetAssociation.table.c.id] ) ),
        # HDAs that were copied from this LDDA, with a scalar backref on the
        # HDA pointing back at the source LDDA.
        copied_to_history_dataset_associations=relation(
            HistoryDatasetAssociation,
            primaryjoin=( HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id == LibraryDatasetDatasetAssociation.table.c.id ),
            backref=backref( "copied_from_library_dataset_dataset_association", primaryjoin=( HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id == LibraryDatasetDatasetAssociation.table.c.id ), remote_side=[LibraryDatasetDatasetAssociation.table.c.id], uselist=False ) ),
        # Parent/child structure among LDDAs; "parent" is the inverse.
        children=relation(
            LibraryDatasetDatasetAssociation,
            primaryjoin=( LibraryDatasetDatasetAssociation.table.c.parent_id == LibraryDatasetDatasetAssociation.table.c.id ),
            backref=backref( "parent", primaryjoin=( LibraryDatasetDatasetAssociation.table.c.parent_id == LibraryDatasetDatasetAssociation.table.c.id ), remote_side=[LibraryDatasetDatasetAssociation.table.c.id] ) ),
        # Children additionally filtered on the visible flag.
        visible_children=relation(
            LibraryDatasetDatasetAssociation,
            primaryjoin=( ( LibraryDatasetDatasetAssociation.table.c.parent_id == LibraryDatasetDatasetAssociation.table.c.id ) & ( LibraryDatasetDatasetAssociation.table.c.visible == True ) ) )
    ) )
+
# ORM configuration for LibraryDataset: points at its current LDDA version
# and exposes the superseded versions via expired_datasets.
assign_mapper( context, LibraryDataset, LibraryDataset.table,
    properties=dict(
        # The current/active LDDA version of this library dataset.
        library_dataset_dataset_association=relation( LibraryDatasetDatasetAssociation, primaryjoin=( LibraryDataset.table.c.library_dataset_dataset_association_id == LibraryDatasetDatasetAssociation.table.c.id ) ),
        # LDDAs belonging to this LibraryDataset that are NOT the current one;
        # viewonly=True so SQLAlchemy never persists through this relation.
        expired_datasets = relation( LibraryDatasetDatasetAssociation, foreign_keys=[LibraryDataset.table.c.id,LibraryDataset.table.c.library_dataset_dataset_association_id ], primaryjoin=( ( LibraryDataset.table.c.id == LibraryDatasetDatasetAssociation.table.c.library_dataset_id ) & ( not_( LibraryDataset.table.c.library_dataset_dataset_association_id == LibraryDatasetDatasetAssociation.table.c.id ) ) ), viewonly=True, uselist=True )
    ) )
+
+
def __guess_dataset_by_filename( filename ):
    """Return the Dataset whose id is encoded in ``filename``, or None.

    Dataset files follow the ``dataset_<id>.dat`` naming convention; the id
    is parsed out of the basename and looked up.  Returns None when the
    basename does not match that pattern or the id cannot be parsed.
    """
    try:
        # os.path.split always returns a 2-tuple; the last element is the
        # basename (the original "if fields:" truthiness check was dead code).
        base = os.path.split( filename )[-1]
        if base.startswith( 'dataset_' ) and base.endswith( '.dat' ): #dataset_%d.dat
            return Dataset.get( int( base[ len( 'dataset_' ): -len( '.dat' ) ] ) )
    # Narrowed from a bare "except:" which also swallowed SystemExit and
    # KeyboardInterrupt; any parsing error still means "cannot guess".
    except Exception:
        pass #some parsing error, we can't guess Dataset
    return None
+
def upgrade():
    """Repair two data discrepancies involving deleted shared history items.

    Pass 1: resurrect Dataset rows marked deleted (but not purged) while at
    least one of their history/library associations is still live, restoring
    the recorded file size where it is missing.

    Pass 2: datasets created by sharing before HDAs existed may point at a
    file owned by a different Dataset row; re-point their associations at the
    Dataset that actually owns the file and retire the duplicate row.
    """
    log.debug( "Fixing a discrepancy concerning deleted shared history items." )
    affected_items = 0
    start_time = time.time()
    # Only deleted-but-not-purged datasets are candidates for resurrection.
    for dataset in Dataset.filter( and_( Dataset.c.deleted == True, Dataset.c.purged == False ) ).all():
        for dataset_instance in dataset.history_associations + dataset.library_associations:
            if not dataset_instance.deleted:
                # A live association exists, so the dataset must not be deleted.
                dataset.deleted = False
                if dataset.file_size in [ None, 0 ]:
                    dataset.set_size() #Restore filesize
                affected_items += 1
                break # one live association is enough; stop scanning
    context.flush()
    log.debug( "%i items affected, and restored." % ( affected_items ) )
    log.debug( "Time elapsed: %s" % ( time.time() - start_time ) )

    #fix share before hda
    log.debug( "Fixing a discrepancy concerning cleaning up deleted history items shared before HDAs." )
    dataset_by_filename = {} # cache: file_name -> guessed Dataset (or None)
    changed_associations = 0
    start_time = time.time()
    # Only externally-named datasets matching the dataset_%d.dat convention
    # can have been affected.
    for dataset in Dataset.filter( Dataset.external_filename.like( '%dataset_%.dat' ) ).all():
        if dataset.file_name in dataset_by_filename:
            guessed_dataset = dataset_by_filename[ dataset.file_name ]
        else:
            guessed_dataset = __guess_dataset_by_filename( dataset.file_name )
            if guessed_dataset and dataset.file_name != guessed_dataset.file_name:#not os.path.samefile( dataset.file_name, guessed_dataset.file_name ):
                # Guessed dataset resolves to a different path: not a match.
                guessed_dataset = None
            dataset_by_filename[ dataset.file_name ] = guessed_dataset

        if guessed_dataset is not None and guessed_dataset.id != dataset.id: #could we have a self referential dataset?
            # Re-point every association at the Dataset that owns the file.
            for dataset_instance in dataset.history_associations + dataset.library_associations:
                dataset_instance.dataset = guessed_dataset
                changed_associations += 1
            #mark original Dataset as deleted and purged, it is no longer in use, but do not delete file_name contents
            dataset.deleted = True
            dataset.external_filename = "Dataset was result of share before HDA, and has been replaced: %s mapped to Dataset %s" % ( dataset.external_filename, guessed_dataset.id )
            dataset.purged = True #we don't really purge the file here, but we mark it as purged, since this dataset is now defunct
    context.flush()
    log.debug( "%i items affected, and restored." % ( changed_associations ) )
    log.debug( "Time elapsed: %s" % ( time.time() - start_time ) )
+
def downgrade():
    """Intentional no-op: this migration rewrites data in place and the
    original state cannot be reconstructed."""
    message = "Downgrade is not possible."
    log.debug( message )
1
0