galaxy-commits
May 2010: 2 participants, 158 discussions

25 May '10
details: http://www.bx.psu.edu/hg/galaxy/rev/d22853e5963d
changeset: 3810:d22853e5963d
user: jeremy goecks <jeremy.goecks@emory.edu>
date: Mon May 24 10:53:55 2010 -0400
description:
In Page editor, prevent extra paragraphs from being inserted with embedded items.
diffstat:
templates/page/editor.mako | 29 ++++++++++++++++++++++++-----
1 files changed, 24 insertions(+), 5 deletions(-)
diffs (48 lines):
diff -r 18bd3fa93bed -r d22853e5963d templates/page/editor.mako
--- a/templates/page/editor.mako Sun May 23 08:46:07 2010 -0400
+++ b/templates/page/editor.mako Mon May 24 10:53:55 2010 -0400
@@ -474,20 +474,39 @@
// Embedded item HTML; item class is embedded in div container classes; this is necessary because the editor strips
// all non-standard attributes when it returns its content (e.g. it will not return an element attribute of the form
// item_class='History').
+ var item_elt_id = item_info.iclass + "-" + item_id;
var item_embed_html =
"\
- <div id='" + item_info.iclass + "-" + item_id + "' class='embedded-item " + item_info.singular.toLowerCase() +
+ <p><div id='" + item_elt_id + "' class='embedded-item " + item_info.singular.toLowerCase() +
" placeholder'> \
<p class='title'>Embedded Galaxy " + item_info.singular + " '" + item_name + "'</p> \
<p class='content'> \
[Do not edit this block; Galaxy will fill it in with the annotated " +
- item_info.singular.toLowerCase() + " when it is displayed.]</div> \
+ item_info.singular.toLowerCase() + " when it is displayed.] \
</p> \
- </div><p></p>";
+ </div></p>";
- // Insert embedded representation into document.
- // TODO: maybe try replace() instead to handle indenting?
+ // Insert embedded item into document.
wym.insert(item_embed_html);
+
+ // TODO: can we fix this?
+ // Due to oddities of wym.insert() [likely due to inserting a <div> and/or a complete paragraph], an
+ // empty paragraph may be included either before or after an embedded item. Remove these paragraphs.
+ $("#" + item_elt_id, wym._doc.body).each( function() {
+ // Remove previous empty paragraphs.
+ var prev_elt = $(this).prev();
+ if ( prev_elt.length != 0 && jQuery.trim(prev_elt.text()) == "" )
+ prev_elt.remove();
+
+ // Remove subsequent empty paragraphs.
+ /*
+ var next_elt = $(this).next();
+ var next_next_elt = next_elt.next();
+ if (next_next_elt.length != 0)
+ next_elt.remove();
+ */
+ });
+
});
hide_modal();
},
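
A rough Python analogue of the cleanup this changeset adds: after the embedded-item <div> is inserted, an empty sibling paragraph left next to it is removed. The sketch below is a hypothetical illustration using xml.etree on a well-formed snippet; the actual change runs jQuery inside the WYMeditor document, and the 'History-42' id stands in for item_info.iclass + "-" + item_id.

import xml.etree.ElementTree as ET

# A body with a stray empty <p> in front of the embedded item,
# mimicking what wym.insert() can leave behind.
body = ET.fromstring(
    "<body><p>intro</p><p> </p><div id='History-42'>embedded item</div></body>"
)
children = list(body)
for i, elt in enumerate(children):
    if elt.tag == 'div' and elt.get('id', '').startswith('History-'):
        prev = children[i - 1] if i > 0 else None
        # Remove the previous sibling when it is an empty paragraph.
        if prev is not None and prev.tag == 'p' and not (prev.text or '').strip():
            body.remove(prev)

print(ET.tostring(body, encoding='unicode'))
# -> <body><p>intro</p><div id="History-42">embedded item</div></body>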
details: http://www.bx.psu.edu/hg/galaxy/rev/18bd3fa93bed
changeset: 3809:18bd3fa93bed
user: fubar: ross Lazarus at gmail period com
date: Sun May 23 08:46:07 2010 -0400
description:
branch merge again?
diffstat:
test-data/sanger_full_range_masked_N.fastqsanger | 8 +
test-data/sanger_full_range_masked_lowercase.fastqsanger | 8 +
tools/fastq/fastq_masker_by_quality.py | 83 ++++++++++++++++
tools/fastq/fastq_masker_by_quality.xml | 53 ++++++++++
4 files changed, 152 insertions(+), 0 deletions(-)
diffs (168 lines):
diff -r f175a156d7e0 -r 18bd3fa93bed test-data/sanger_full_range_masked_N.fastqsanger
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/sanger_full_range_masked_N.fastqsanger Sun May 23 08:46:07 2010 -0400
@@ -0,0 +1,8 @@
+@FAKE0001 Original version has PHRED scores from 0 to 93 inclusive (in that order)
+NNNNNNNNNNNNNNNNNNNNNCGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+@FAKE0002 Original version has PHRED scores from 93 to 0 inclusive (in that order)
+CATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCNNNNNNNNNNNNNNNNNNNNN
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#"!
diff -r f175a156d7e0 -r 18bd3fa93bed test-data/sanger_full_range_masked_lowercase.fastqsanger
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/sanger_full_range_masked_lowercase.fastqsanger Sun May 23 08:46:07 2010 -0400
@@ -0,0 +1,8 @@
+@FAKE0001 Original version has PHRED scores from 0 to 93 inclusive (in that order)
+acgtacgtacgtacgtacgtaCGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+@FAKE0002 Original version has PHRED scores from 93 to 0 inclusive (in that order)
+CATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCatgcatgcatgcatgcatgca
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#"!
diff -r f175a156d7e0 -r 18bd3fa93bed tools/fastq/fastq_masker_by_quality.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fastq/fastq_masker_by_quality.py Sun May 23 08:46:07 2010 -0400
@@ -0,0 +1,83 @@
+#Dan Blankenberg
+import string
+from optparse import OptionParser
+from galaxy_utils.sequence.fastq import fastqReader, fastqWriter
+
+
+def get_score_comparer( operator ):
+ if operator == 'gt':
+ return compare_gt
+ elif operator == 'ge':
+ return compare_ge
+ elif operator == 'eq':
+ return compare_eq
+ elif operator == 'lt':
+ return compare_lt
+ elif operator == 'le':
+ return compare_le
+ elif operator == 'ne':
+ return compare_ne
+ raise 'Invalid operator provided: %s' % operator
+
+def compare_gt( quality_score, threshold_value ):
+ return quality_score > threshold_value
+
+def compare_ge( quality_score, threshold_value ):
+ return quality_score >= threshold_value
+
+def compare_eq( quality_score, threshold_value ):
+ return quality_score == threshold_value
+
+def compare_ne( quality_score, threshold_value ):
+ return quality_score != threshold_value
+
+def compare_lt( quality_score, threshold_value ):
+ return quality_score < threshold_value
+
+def compare_le( quality_score, threshold_value ):
+ return quality_score <= threshold_value
+
+class BaseReplacer( object ):
+ def __init__( self, replace_character ):
+ self.replace_character = replace_character
+ def __call__( self, base_character ):
+ return self.replace_character
+
+def main():
+ usage = "usage: %prog [options] input_file output_file"
+ parser = OptionParser( usage=usage )
+ parser.add_option( '-f', '--format', dest='format', type='choice', default='sanger', choices=( 'sanger', 'cssanger', 'solexa', 'illumina' ), help='FASTQ variant type' )
+ parser.add_option( '-m', '--mask_character', dest='mask_character', default='N', help='Mask Character to use' )
+ parser.add_option( '-c', '--score_comparison', type="choice", dest='score_comparison', default='le', choices=('gt','ge','eq','lt', 'le', 'ne' ), help='Mask base when score is' )
+ parser.add_option( '-s', '--quality_score', type="float", dest='quality_score', default='0', help='Quality Score' )
+ parser.add_option( "-l", "--lowercase", action="store_true", dest="lowercase", default=False, help="Use lowercase masking")
+ ( options, args ) = parser.parse_args()
+
+ if len ( args ) != 2:
+ parser.error( "Need to specify an input file and an output file" )
+
+ score_comparer = get_score_comparer( options.score_comparison )
+
+ if options.lowercase:
+ base_masker = string.lower
+ else:
+ base_masker = BaseReplacer( options.mask_character )
+
+ out = fastqWriter( open( args[1], 'wb' ), format = options.format )
+
+ num_reads = None
+ num_reads_excluded = 0
+ for num_reads, fastq_read in enumerate( fastqReader( open( args[0] ), format = options.format ) ):
+ sequence_list = list( fastq_read.sequence )
+ for i, quality_score in enumerate( fastq_read.get_decimal_quality_scores() ):
+ if score_comparer( quality_score, options.quality_score ):
+ sequence_list[ i ] = base_masker( sequence_list[ i ] )
+ fastq_read.sequence = "".join( sequence_list )
+ out.write( fastq_read )
+
+ if num_reads is not None:
+ print "Processed %i %s reads." % ( num_reads + 1, options.format )
+ else:
+ print "No valid FASTQ reads were provided."
+
+if __name__ == "__main__": main()
diff -r f175a156d7e0 -r 18bd3fa93bed tools/fastq/fastq_masker_by_quality.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fastq/fastq_masker_by_quality.xml Sun May 23 08:46:07 2010 -0400
@@ -0,0 +1,53 @@
+<tool id="fastq_masker_by_quality" name="FASTQ Masker" version="1.0.0">
+ <description>by quality score</description>
+ <command interpreter="python">fastq_masker_by_quality.py '$input_file' '$output_file' -f '${input_file.extension[len( 'fastq' ):]}' -s '${quality_score}' -c '${score_comparison}'
+ #if $mask_type.value == 'lowercase'
+ --lowercase
+ #else
+ -m '${mask_type}'
+ #end if
+ </command>
+ <inputs>
+ <param name="input_file" type="data" format="fastqsanger" label="File to mask" />
+ <param name="mask_type" type="select" label="Mask input with">
+ <option value="N">N's</option>
+ <option value="lowercase">Lowercase</option>
+ </param>
+ <param name="score_comparison" type="select" label="When score is">
+ <option value="le" selected="True">Less than or equal</option>
+ <option value="lt">Less than</option>
+ <option value="eq">Equal to</option>
+ <option value="ne">Not Equal to</option>
+ <option value="ge">Greater than</option>
+ <option value="gt">Greater than or equal</option>
+ </param>
+ <param name="quality_score" type="integer" value="0"/>
+ </inputs>
+ <outputs>
+ <data name="output_file" format="fastqsanger" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="input_file" value="sanger_full_range_original_sanger.fastqsanger" ftype="fastqsanger" />
+ <param name="mask_type" value="N" />
+ <param name="score_comparison" value="le" />
+ <param name="quality_score" value="20" />
+ <output name="output_file" file="sanger_full_range_masked_N.fastqsanger" />
+ </test>
+ <test>
+ <param name="input_file" value="sanger_full_range_original_sanger.fastqsanger" ftype="fastqsanger" />
+ <param name="mask_type" value="lowercase" />
+ <param name="score_comparison" value="le" />
+ <param name="quality_score" value="20" />
+ <output name="output_file" file="sanger_full_range_masked_lowercase.fastqsanger" />
+ </test>
+ </tests>
+ <help>
+**What it does**
+
+This tool allows masking base characters in FASTQ format files dependent upon user specified quality score value and comparison method.
+
+This tool is not available for use on color space (csSanger) formats.
+
+ </help>
+</tool>
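
The masking logic above is small enough to demonstrate standalone. The sketch below is a minimal re-creation, not the tool itself: it inlines a quality string instead of going through galaxy_utils' fastqReader, and uses the operator module in place of the hand-written compare_* functions (an equivalent substitution). The tool proper is invoked as built by the <command> tag, e.g. fastq_masker_by_quality.py input.fastqsanger output.fastqsanger -f sanger -s 20 -c le -m N (hypothetical file names).

import operator

# Equivalent of get_score_comparer(): map the -c choices to comparisons.
SCORE_COMPARERS = {
    'gt': operator.gt, 'ge': operator.ge, 'eq': operator.eq,
    'lt': operator.lt, 'le': operator.le, 'ne': operator.ne,
}

def mask_sequence(sequence, decimal_scores, threshold=20,
                  comparison='le', mask_character='N', lowercase=False):
    # Mask each base whose score satisfies the chosen comparison,
    # mirroring the BaseReplacer / string.lower branch in the tool.
    compare = SCORE_COMPARERS[comparison]
    bases = list(sequence)
    for i, score in enumerate(decimal_scores):
        if compare(score, threshold):
            bases[i] = bases[i].lower() if lowercase else mask_character
    return "".join(bases)

# Sanger qualities encode score s as chr(s + 33); decode, then mask.
quality_string = '!(2<F'                       # decimal scores 0, 7, 17, 27, 37
scores = [ord(c) - 33 for c in quality_string]
print(mask_sequence('ACGTA', scores))                    # NNNTA
print(mask_sequence('ACGTA', scores, lowercase=True))    # acgTA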
details: http://www.bx.psu.edu/hg/galaxy/rev/f175a156d7e0
changeset: 3808:f175a156d7e0
user: fubar: ross Lazarus at gmail period com
date: Sun May 23 08:45:01 2010 -0400
description:
Branch merge
diffstat:
eggs.ini | 2 +-
lib/galaxy/jobs/runners/pbs.py | 133 +++++++++++++-----------------------
tool_conf.xml.sample | 1 +
tools/rgenetics/rgtest_one_tool.sh | 16 ++--
4 files changed, 58 insertions(+), 94 deletions(-)
diffs (243 lines):
diff -r 72d709b2c198 -r f175a156d7e0 eggs.ini
--- a/eggs.ini Sat May 22 19:47:50 2010 -0400
+++ b/eggs.ini Sun May 23 08:45:01 2010 -0400
@@ -17,7 +17,7 @@
DRMAA_python = 0.2
MySQL_python = 1.2.3c1
numpy = 1.3.0
-pbs_python = 2.9.8
+pbs_python = 2.9.4
psycopg2 = 2.0.13
pycrypto = 2.0.1
pysam = 0.1.1
diff -r 72d709b2c198 -r f175a156d7e0 lib/galaxy/jobs/runners/pbs.py
--- a/lib/galaxy/jobs/runners/pbs.py Sat May 22 19:47:50 2010 -0400
+++ b/lib/galaxy/jobs/runners/pbs.py Sun May 23 08:45:01 2010 -0400
@@ -50,19 +50,6 @@
%s
"""
-# From pbs' job.h
-JOB_EXIT_STATUS = {
- 0: "job exec successful",
- -1: "job exec failed, before files, no retry",
- -2: "job exec failed, after files, no retry",
- -3: "job execution failed, do retry",
- -4: "job aborted on MOM initialization",
- -5: "job aborted on MOM init, chkpt, no migrate",
- -6: "job aborted on MOM init, chkpt, ok migrate",
- -7: "job restart failed",
- -8: "exec() of user command failed",
-}
-
class PBSJobState( object ):
def __init__( self ):
"""
@@ -78,7 +65,6 @@
self.efile = None
self.runner_url = None
self.check_count = 0
- self.stop_job = False
class PBSJobRunner( object ):
"""
@@ -207,9 +193,8 @@
pbs_options = self.determine_pbs_options( runner_url )
c = pbs.pbs_connect( pbs_server_name )
if c <= 0:
- errno, text = pbs.error()
job_wrapper.fail( "Unable to queue job for execution. Resubmitting the job may succeed." )
- log.error( "Connection to PBS server for submit failed: %s: %s" % ( errno, text ) )
+ log.error( "Connection to PBS server for submit failed" )
return
# define job attributes
@@ -351,78 +336,58 @@
log.debug( "(%s/%s) Skipping state check because PBS server connection failed" % ( galaxy_job_id, job_id ) )
new_watched.append( pbs_job_state )
continue
- try:
+ if statuses.has_key( job_id ):
status = statuses[job_id]
- except KeyError:
+ if status.job_state != old_state:
+ log.debug("(%s/%s) job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
+ if status.job_state == "R" and not pbs_job_state.running:
+ pbs_job_state.running = True
+ pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
+ if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
+ # Every 20th time the job status is checked, do limit checks (if configured)
+ if self.app.config.output_size_limit > 0:
+ # Check the size of the job outputs
+ fail = False
+ for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
+ if size > self.app.config.output_size_limit:
+ pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
+ % nice_size( self.app.config.output_size_limit )
+ log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
+ % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
+ self.work_queue.put( ( 'fail', pbs_job_state ) )
+ fail = True
+ break
+ if fail:
+ continue
+ if self.job_walltime is not None:
+ # Check the job's execution time
+ if status.get( 'resources_used', False ):
+ # resources_used may not be in the status for new jobs
+ h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
+ time_executing = timedelta( 0, s, 0, 0, m, h )
+ if time_executing > self.job_walltime:
+ pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
+ % self.app.config.job_walltime
+ log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
+ % ( galaxy_job_id, job_id ) )
+ self.work_queue.put( ( 'fail', pbs_job_state ) )
+ continue
+ pbs_job_state.old_state = status.job_state
+ new_watched.append( pbs_job_state )
+ else:
try:
- # Recheck to make sure it wasn't a communication problem
+ # recheck to make sure it wasn't a communication problem
self.check_single_job( pbs_server_name, job_id )
- log.warning( "(%s/%s) PBS job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
+ log.warning( "(%s/%s) job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
new_watched.append( pbs_job_state )
except:
errno, text = pbs.error()
- if errno == 15001:
- # 15001 == job not in queue
- log.debug("(%s/%s) PBS job has left queue" % (galaxy_job_id, job_id) )
+ if errno != 15001:
+ log.info("(%s/%s) state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
+ new_watched.append( pbs_job_state )
+ else:
+ log.debug("(%s/%s) job has left queue" % (galaxy_job_id, job_id) )
self.work_queue.put( ( 'finish', pbs_job_state ) )
- else:
- # Unhandled error, continue to monitor
- log.info("(%s/%s) PBS state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
- new_watched.append( pbs_job_state )
- continue
- if status.job_state != old_state:
- log.debug("(%s/%s) PBS job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
- if status.job_state == "R" and not pbs_job_state.running:
- pbs_job_state.running = True
- pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
- if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
- # Every 20th time the job status is checked, do limit checks (if configured)
- if self.app.config.output_size_limit > 0:
- # Check the size of the job outputs
- fail = False
- for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
- if size > self.app.config.output_size_limit:
- pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
- % nice_size( self.app.config.output_size_limit )
- log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
- % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
- pbs_job_state.stop_job = True
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- fail = True
- break
- if fail:
- continue
- if self.job_walltime is not None:
- # Check the job's execution time
- if status.get( 'resources_used', False ):
- # resources_used may not be in the status for new jobs
- h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
- time_executing = timedelta( 0, s, 0, 0, m, h )
- if time_executing > self.job_walltime:
- pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
- % self.app.config.job_walltime
- log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
- % ( galaxy_job_id, job_id ) )
- pbs_job_state.stop_job = True
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- continue
- elif status.job_state == "C":
- # "keep_completed" is enabled in PBS, so try to check exit status
- try:
- assert int( status.exit_status ) == 0
- log.debug("(%s/%s) PBS job has completed successfully" % ( galaxy_job_id, job_id ) )
- except AssertionError:
- pbs_job_state.fail_message = 'Job cannot be completed due to a cluster error. Please retry or'
- log.error( '(%s/%s) PBS job failed: %s' % ( galaxy_job_id, job_id, JOB_EXIT_STATUS.get( int( status.exit_status ), 'Unknown error: %s' % status.exit_status ) ) )
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- continue
- except AttributeError:
- # No exit_status, can't verify proper completion so we just have to assume success.
- log.debug("(%s/%s) PBS job has completed" % ( galaxy_job_id, job_id ) )
- self.work_queue.put( ( 'finish', pbs_job_state ) )
- continue
- pbs_job_state.old_state = status.job_state
- new_watched.append( pbs_job_state )
# Replace the watch list with the updated version
self.watched = new_watched
@@ -446,10 +411,9 @@
log.debug("connection to PBS server %s for state check failed" % pbs_server_name )
failures.append( pbs_server_name )
continue
- stat_attrl = pbs.new_attrl(3)
+ stat_attrl = pbs.new_attrl(2)
stat_attrl[0].name = pbs.ATTR_state
stat_attrl[1].name = pbs.ATTR_used
- stat_attrl[2].name = pbs.ATTR_exitstat
jobs = pbs.pbs_statjob( c, None, stat_attrl, None )
pbs.pbs_disconnect( c )
statuses.update( self.convert_statjob_to_bunches( jobs ) )
@@ -516,8 +480,7 @@
"""
Seperated out so we can use the worker threads for it.
"""
- if pbs_job_state.stop_job:
- self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) )
+ self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) )
pbs_job_state.job_wrapper.fail( pbs_job_state.fail_message )
self.cleanup( ( pbs_job_state.ofile, pbs_job_state.efile, pbs_job_state.job_file ) )
diff -r 72d709b2c198 -r f175a156d7e0 tool_conf.xml.sample
--- a/tool_conf.xml.sample Sat May 22 19:47:50 2010 -0400
+++ b/tool_conf.xml.sample Sun May 23 08:45:01 2010 -0400
@@ -221,6 +221,7 @@
<tool file="fastq/fastq_filter.xml" />
<tool file="fastq/fastq_trimmer.xml" />
<tool file="fastq/fastq_trimmer_by_quality.xml" />
+ <tool file="fastq/fastq_masker_by_quality.xml" />
<tool file="fastq/fastq_manipulation.xml" />
<tool file="fastq/fastq_to_fasta.xml" />
<tool file="fastq/fastq_to_tabular.xml" />
diff -r 72d709b2c198 -r f175a156d7e0 tools/rgenetics/rgtest_one_tool.sh
--- a/tools/rgenetics/rgtest_one_tool.sh Sat May 22 19:47:50 2010 -0400
+++ b/tools/rgenetics/rgtest_one_tool.sh Sun May 23 08:45:01 2010 -0400
@@ -1,20 +1,20 @@
#!/bin/sh
# script to generate all functional test outputs for each rgenetics tool
# could be run at installation to ensure all dependencies are in place?
-case $# in 0) echo "USAGE: ${0##*/} TooltoTest"; exit 1;;
- [2-10]*) echo "Too many arguments - name of tool only"; exit 2;;
+case $# in 0) echo "USAGE: ${0##*/} TooltoTest galaxyRoot outRoot"; exit 1;;
+ [1-3]*) echo "Need ToolToTest and paths for galaxyRoot outRoot as parameters"; exit 2;;
+ [5-10]*) echo "Too many arguments - ToolToTest and paths for galaxyRoot outRoot as parameters"; exit 2;;
*)
esac
-GALAXYROOT=`pwd`
-#PATHTOGALAXY='/opt/galaxy' # whatever
-PATHTOGALAXY='/share/shared/galaxy' # whatever
+GALAXYROOT=$2
+OUTROOT=$3
echo "using $GALAXYROOT"
# change this as needed for your local install
INPATH="${GALAXYROOT}/test-data"
BINPATH="${GALAXYROOT}/tool-data/rg/bin"
-TOOLPATH="${PATHTOGALAXY}/tools/rgenetics"
-OROOT="${GALAXYROOT}/test-data/rgtestouts"
-NORMALOROOT="${GALAXYROOT}/test-data"
+TOOLPATH="${GALAXYROOT}/tools/rgenetics"
+OROOT="${OUTROOT}/test-data/rgtestouts"
+NORMALOROOT="${OUTROOT}/test-data"
case "$1" in
'rgManQQ')

25 May '10
details: http://www.bx.psu.edu/hg/galaxy/rev/72d709b2c198
changeset: 3807:72d709b2c198
user: fubar: ross Lazarus at gmail period com
date: Sat May 22 19:47:50 2010 -0400
description:
Require two paths for rgtest.sh galaxyroot and outroot
test with a /tmp path for outroot...this script will recreate all the
snp/wga test outputs if you ask it to - which may or may not be what
you want...
diffstat:
tools/rgenetics/rgEigPCA.xml | 2 +-
tools/rgenetics/rgtest.sh | 27 +++++++++++++++++++++++----
2 files changed, 24 insertions(+), 5 deletions(-)
diffs (53 lines):
diff -r 3b8e4af25be2 -r 72d709b2c198 tools/rgenetics/rgEigPCA.xml
--- a/tools/rgenetics/rgEigPCA.xml Fri May 21 15:50:49 2010 -0400
+++ b/tools/rgenetics/rgEigPCA.xml Sat May 22 19:47:50 2010 -0400
@@ -48,7 +48,7 @@
<param name="t" value="2" />
<param name="s" value="2" />
<output name='out_file1' file='rgtestouts/rgEigPCA/rgEigPCAtest1.html' ftype='html' compare='diff' lines_diff='195'>
- <extra_files type="file" name='rgEigPCAtest1_PCAPlot.pdf' value="rgtestouts/rgEigPCA/rgEigPCAtest1_PCAPlot.pdf" compare="sim_size" delta="30000"/>
+ <extra_files type="file" name='rgEigPCAtest1_PCAPlot.pdf' value="rgtestouts/rgEigPCA/rgEigPCAtest1_PCAPlot.pdf" compare="sim_size" delta="3000"/>
</output>
<output name='pca' file='rgtestouts/rgEigPCA/rgEigPCAtest1.txt' compare='diff'/>
</test>
diff -r 3b8e4af25be2 -r 72d709b2c198 tools/rgenetics/rgtest.sh
--- a/tools/rgenetics/rgtest.sh Fri May 21 15:50:49 2010 -0400
+++ b/tools/rgenetics/rgtest.sh Sat May 22 19:47:50 2010 -0400
@@ -1,14 +1,33 @@
#!/bin/sh
# script to generate all functional test outputs for each rgenetics tool
# could be run at installation to ensure all dependencies are in place?
-GALAXYROOT=`pwd`
-echo "using $GALAXYROOT"
+if test $# -lt 2
+then
+ echo "We need to agree on 2 parameters - GalaxyRoot and OutRoot - use paths to galaxy and galaxy to re-create all test outputs"
+ echo "or more prudently, galaxy and /tmp/foo for checking without updating all your test-data"
+ echo "Exiting with no changes"
+ exit 1
+fi
+if [ $1 ]
+then
+ GALAXYROOT=$1
+else
+ GALAXYROOT=`pwd`
+fi
+if [ $2 ]
+then
+ OUTROOT=$2
+else
+ OUTROOT=`pwd`
+ OUTROOT="$OUTROOT/test-data"
+fi
+echo "using $GALAXYROOT as galaxyroot and $OUTROOT as outroot"
# change this as needed for your local install
INPATH="${GALAXYROOT}/test-data"
BINPATH="${GALAXYROOT}/tool-data/rg/bin"
TOOLPATH="${GALAXYROOT}/tools/rgenetics"
-OROOT="${GALAXYROOT}/test-data/rgtestouts"
-NORMALOROOT="${GALAXYROOT}/test-data"
+OROOT="${OUTROOT}/test-data/rgtestouts"
+NORMALOROOT="${OUTROOT}/test-data"
mkdir -p $OROOT
rm -rf $OROOT/*
# needed for testing - but tool versions should be bumped if this is rerun?
details: http://www.bx.psu.edu/hg/galaxy/rev/aa84d012cb50
changeset: 3806:aa84d012cb50
user: fubar/ross period lazarus at gmail d0t com
date: Sat May 22 20:04:38 2010 -0400
description:
Require 2 paths for rgtest.sh
This script will overwrite all the snp/wga test outputs if you ask it to
diffstat:
tools/rgenetics/rgtest.sh | 27 +++++++++++++++++++++++----
1 files changed, 23 insertions(+), 4 deletions(-)
diffs (41 lines):
diff -r 88afe0a30dc7 -r aa84d012cb50 tools/rgenetics/rgtest.sh
--- a/tools/rgenetics/rgtest.sh Fri May 21 16:53:42 2010 -0400
+++ b/tools/rgenetics/rgtest.sh Sat May 22 20:04:38 2010 -0400
@@ -1,14 +1,33 @@
#!/bin/sh
# script to generate all functional test outputs for each rgenetics tool
# could be run at installation to ensure all dependencies are in place?
-GALAXYROOT=`pwd`
-echo "using $GALAXYROOT"
+if test $# -lt 2
+then
+ echo "We need to agree on 2 parameters - GalaxyRoot and OutRoot - use paths to galaxy and galaxy to re-create all test outputs"
+ echo "or more prudently, galaxy and /tmp/foo for checking without updating all your test-data"
+ echo "Exiting with no changes"
+ exit 1
+fi
+if [ $1 ]
+then
+ GALAXYROOT=$1
+else
+ GALAXYROOT=`pwd`
+fi
+if [ $2 ]
+then
+ OUTROOT=$2
+else
+ OUTROOT=`pwd`
+ OUTROOT="$OUTROOT/test-data"
+fi
+echo "using $GALAXYROOT as galaxyroot and $OUTROOT as outroot"
# change this as needed for your local install
INPATH="${GALAXYROOT}/test-data"
BINPATH="${GALAXYROOT}/tool-data/rg/bin"
TOOLPATH="${GALAXYROOT}/tools/rgenetics"
-OROOT="${GALAXYROOT}/test-data/rgtestouts"
-NORMALOROOT="${GALAXYROOT}/test-data"
+OROOT="${OUTROOT}/test-data/rgtestouts"
+NORMALOROOT="${OUTROOT}/test-data"
mkdir -p $OROOT
rm -rf $OROOT/*
# needed for testing - but tool versions should be bumped if this is rerun?
details: http://www.bx.psu.edu/hg/galaxy/rev/88afe0a30dc7
changeset: 3805:88afe0a30dc7
user: Nate Coraor <nate@bx.psu.edu>
date: Fri May 21 16:53:42 2010 -0400
description:
merge backout
diffstat:
eggs.ini | 2 +-
lib/galaxy/jobs/runners/pbs.py | 133 ++++++++++++++--------------------------
2 files changed, 49 insertions(+), 86 deletions(-)
diffs (200 lines):
diff -r c9b41f94d707 -r 88afe0a30dc7 eggs.ini
--- a/eggs.ini Fri May 21 16:48:59 2010 -0400
+++ b/eggs.ini Fri May 21 16:53:42 2010 -0400
@@ -17,7 +17,7 @@
DRMAA_python = 0.2
MySQL_python = 1.2.3c1
numpy = 1.3.0
-pbs_python = 2.9.8
+pbs_python = 2.9.4
psycopg2 = 2.0.13
pycrypto = 2.0.1
pysam = 0.1.1
diff -r c9b41f94d707 -r 88afe0a30dc7 lib/galaxy/jobs/runners/pbs.py
--- a/lib/galaxy/jobs/runners/pbs.py Fri May 21 16:48:59 2010 -0400
+++ b/lib/galaxy/jobs/runners/pbs.py Fri May 21 16:53:42 2010 -0400
@@ -50,19 +50,6 @@
%s
"""
-# From pbs' job.h
-JOB_EXIT_STATUS = {
- 0: "job exec successful",
- -1: "job exec failed, before files, no retry",
- -2: "job exec failed, after files, no retry",
- -3: "job execution failed, do retry",
- -4: "job aborted on MOM initialization",
- -5: "job aborted on MOM init, chkpt, no migrate",
- -6: "job aborted on MOM init, chkpt, ok migrate",
- -7: "job restart failed",
- -8: "exec() of user command failed",
-}
-
class PBSJobState( object ):
def __init__( self ):
"""
@@ -78,7 +65,6 @@
self.efile = None
self.runner_url = None
self.check_count = 0
- self.stop_job = False
class PBSJobRunner( object ):
"""
@@ -207,9 +193,8 @@
pbs_options = self.determine_pbs_options( runner_url )
c = pbs.pbs_connect( pbs_server_name )
if c <= 0:
- errno, text = pbs.error()
job_wrapper.fail( "Unable to queue job for execution. Resubmitting the job may succeed." )
- log.error( "Connection to PBS server for submit failed: %s: %s" % ( errno, text ) )
+ log.error( "Connection to PBS server for submit failed" )
return
# define job attributes
@@ -351,78 +336,58 @@
log.debug( "(%s/%s) Skipping state check because PBS server connection failed" % ( galaxy_job_id, job_id ) )
new_watched.append( pbs_job_state )
continue
- try:
+ if statuses.has_key( job_id ):
status = statuses[job_id]
- except KeyError:
+ if status.job_state != old_state:
+ log.debug("(%s/%s) job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
+ if status.job_state == "R" and not pbs_job_state.running:
+ pbs_job_state.running = True
+ pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
+ if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
+ # Every 20th time the job status is checked, do limit checks (if configured)
+ if self.app.config.output_size_limit > 0:
+ # Check the size of the job outputs
+ fail = False
+ for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
+ if size > self.app.config.output_size_limit:
+ pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
+ % nice_size( self.app.config.output_size_limit )
+ log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
+ % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
+ self.work_queue.put( ( 'fail', pbs_job_state ) )
+ fail = True
+ break
+ if fail:
+ continue
+ if self.job_walltime is not None:
+ # Check the job's execution time
+ if status.get( 'resources_used', False ):
+ # resources_used may not be in the status for new jobs
+ h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
+ time_executing = timedelta( 0, s, 0, 0, m, h )
+ if time_executing > self.job_walltime:
+ pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
+ % self.app.config.job_walltime
+ log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
+ % ( galaxy_job_id, job_id ) )
+ self.work_queue.put( ( 'fail', pbs_job_state ) )
+ continue
+ pbs_job_state.old_state = status.job_state
+ new_watched.append( pbs_job_state )
+ else:
try:
- # Recheck to make sure it wasn't a communication problem
+ # recheck to make sure it wasn't a communication problem
self.check_single_job( pbs_server_name, job_id )
- log.warning( "(%s/%s) PBS job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
+ log.warning( "(%s/%s) job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
new_watched.append( pbs_job_state )
except:
errno, text = pbs.error()
- if errno == 15001:
- # 15001 == job not in queue
- log.debug("(%s/%s) PBS job has left queue" % (galaxy_job_id, job_id) )
+ if errno != 15001:
+ log.info("(%s/%s) state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
+ new_watched.append( pbs_job_state )
+ else:
+ log.debug("(%s/%s) job has left queue" % (galaxy_job_id, job_id) )
self.work_queue.put( ( 'finish', pbs_job_state ) )
- else:
- # Unhandled error, continue to monitor
- log.info("(%s/%s) PBS state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
- new_watched.append( pbs_job_state )
- continue
- if status.job_state != old_state:
- log.debug("(%s/%s) PBS job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
- if status.job_state == "R" and not pbs_job_state.running:
- pbs_job_state.running = True
- pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
- if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
- # Every 20th time the job status is checked, do limit checks (if configured)
- if self.app.config.output_size_limit > 0:
- # Check the size of the job outputs
- fail = False
- for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
- if size > self.app.config.output_size_limit:
- pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
- % nice_size( self.app.config.output_size_limit )
- log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
- % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
- pbs_job_state.stop_job = True
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- fail = True
- break
- if fail:
- continue
- if self.job_walltime is not None:
- # Check the job's execution time
- if status.get( 'resources_used', False ):
- # resources_used may not be in the status for new jobs
- h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
- time_executing = timedelta( 0, s, 0, 0, m, h )
- if time_executing > self.job_walltime:
- pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
- % self.app.config.job_walltime
- log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
- % ( galaxy_job_id, job_id ) )
- pbs_job_state.stop_job = True
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- continue
- elif status.job_state == "C":
- # "keep_completed" is enabled in PBS, so try to check exit status
- try:
- assert int( status.exit_status ) == 0
- log.debug("(%s/%s) PBS job has completed successfully" % ( galaxy_job_id, job_id ) )
- except AssertionError:
- pbs_job_state.fail_message = 'Job cannot be completed due to a cluster error. Please retry or'
- log.error( '(%s/%s) PBS job failed: %s' % ( galaxy_job_id, job_id, JOB_EXIT_STATUS.get( int( status.exit_status ), 'Unknown error: %s' % status.exit_status ) ) )
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- continue
- except AttributeError:
- # No exit_status, can't verify proper completion so we just have to assume success.
- log.debug("(%s/%s) PBS job has completed" % ( galaxy_job_id, job_id ) )
- self.work_queue.put( ( 'finish', pbs_job_state ) )
- continue
- pbs_job_state.old_state = status.job_state
- new_watched.append( pbs_job_state )
# Replace the watch list with the updated version
self.watched = new_watched
@@ -446,10 +411,9 @@
log.debug("connection to PBS server %s for state check failed" % pbs_server_name )
failures.append( pbs_server_name )
continue
- stat_attrl = pbs.new_attrl(3)
+ stat_attrl = pbs.new_attrl(2)
stat_attrl[0].name = pbs.ATTR_state
stat_attrl[1].name = pbs.ATTR_used
- stat_attrl[2].name = pbs.ATTR_exitstat
jobs = pbs.pbs_statjob( c, None, stat_attrl, None )
pbs.pbs_disconnect( c )
statuses.update( self.convert_statjob_to_bunches( jobs ) )
@@ -516,8 +480,7 @@
"""
Seperated out so we can use the worker threads for it.
"""
- if pbs_job_state.stop_job:
- self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) )
+ self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) )
pbs_job_state.job_wrapper.fail( pbs_job_state.fail_message )
self.cleanup( ( pbs_job_state.ofile, pbs_job_state.efile, pbs_job_state.job_file ) )

25 May '10
details: http://www.bx.psu.edu/hg/galaxy/rev/b1619e50417f
changeset: 3804:b1619e50417f
user: Nate Coraor <nate@bx.psu.edu>
date: Fri May 21 16:53:29 2010 -0400
description:
Backed out changeset 48432330228e. pbs_python 2.9.8 is causing the job runner to segfault.
diffstat:
eggs.ini | 2 +-
lib/galaxy/jobs/runners/pbs.py | 133 ++++++++++++++--------------------------
2 files changed, 49 insertions(+), 86 deletions(-)
diffs (200 lines):
diff -r 48432330228e -r b1619e50417f eggs.ini
--- a/eggs.ini Fri May 14 10:37:36 2010 -0400
+++ b/eggs.ini Fri May 21 16:53:29 2010 -0400
@@ -17,7 +17,7 @@
DRMAA_python = 0.2
MySQL_python = 1.2.3c1
numpy = 1.3.0
-pbs_python = 2.9.8
+pbs_python = 2.9.4
psycopg2 = 2.0.13
pycrypto = 2.0.1
pysam = 0.1.1
diff -r 48432330228e -r b1619e50417f lib/galaxy/jobs/runners/pbs.py
--- a/lib/galaxy/jobs/runners/pbs.py Fri May 14 10:37:36 2010 -0400
+++ b/lib/galaxy/jobs/runners/pbs.py Fri May 21 16:53:29 2010 -0400
@@ -50,19 +50,6 @@
%s
"""
-# From pbs' job.h
-JOB_EXIT_STATUS = {
- 0: "job exec successful",
- -1: "job exec failed, before files, no retry",
- -2: "job exec failed, after files, no retry",
- -3: "job execution failed, do retry",
- -4: "job aborted on MOM initialization",
- -5: "job aborted on MOM init, chkpt, no migrate",
- -6: "job aborted on MOM init, chkpt, ok migrate",
- -7: "job restart failed",
- -8: "exec() of user command failed",
-}
-
class PBSJobState( object ):
def __init__( self ):
"""
@@ -78,7 +65,6 @@
self.efile = None
self.runner_url = None
self.check_count = 0
- self.stop_job = False
class PBSJobRunner( object ):
"""
@@ -207,9 +193,8 @@
pbs_options = self.determine_pbs_options( runner_url )
c = pbs.pbs_connect( pbs_server_name )
if c <= 0:
- errno, text = pbs.error()
job_wrapper.fail( "Unable to queue job for execution. Resubmitting the job may succeed." )
- log.error( "Connection to PBS server for submit failed: %s: %s" % ( errno, text ) )
+ log.error( "Connection to PBS server for submit failed" )
return
# define job attributes
@@ -351,78 +336,58 @@
log.debug( "(%s/%s) Skipping state check because PBS server connection failed" % ( galaxy_job_id, job_id ) )
new_watched.append( pbs_job_state )
continue
- try:
+ if statuses.has_key( job_id ):
status = statuses[job_id]
- except KeyError:
+ if status.job_state != old_state:
+ log.debug("(%s/%s) job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
+ if status.job_state == "R" and not pbs_job_state.running:
+ pbs_job_state.running = True
+ pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
+ if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
+ # Every 20th time the job status is checked, do limit checks (if configured)
+ if self.app.config.output_size_limit > 0:
+ # Check the size of the job outputs
+ fail = False
+ for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
+ if size > self.app.config.output_size_limit:
+ pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
+ % nice_size( self.app.config.output_size_limit )
+ log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
+ % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
+ self.work_queue.put( ( 'fail', pbs_job_state ) )
+ fail = True
+ break
+ if fail:
+ continue
+ if self.job_walltime is not None:
+ # Check the job's execution time
+ if status.get( 'resources_used', False ):
+ # resources_used may not be in the status for new jobs
+ h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
+ time_executing = timedelta( 0, s, 0, 0, m, h )
+ if time_executing > self.job_walltime:
+ pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
+ % self.app.config.job_walltime
+ log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
+ % ( galaxy_job_id, job_id ) )
+ self.work_queue.put( ( 'fail', pbs_job_state ) )
+ continue
+ pbs_job_state.old_state = status.job_state
+ new_watched.append( pbs_job_state )
+ else:
try:
- # Recheck to make sure it wasn't a communication problem
+ # recheck to make sure it wasn't a communication problem
self.check_single_job( pbs_server_name, job_id )
- log.warning( "(%s/%s) PBS job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
+ log.warning( "(%s/%s) job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
new_watched.append( pbs_job_state )
except:
errno, text = pbs.error()
- if errno == 15001:
- # 15001 == job not in queue
- log.debug("(%s/%s) PBS job has left queue" % (galaxy_job_id, job_id) )
+ if errno != 15001:
+ log.info("(%s/%s) state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
+ new_watched.append( pbs_job_state )
+ else:
+ log.debug("(%s/%s) job has left queue" % (galaxy_job_id, job_id) )
self.work_queue.put( ( 'finish', pbs_job_state ) )
- else:
- # Unhandled error, continue to monitor
- log.info("(%s/%s) PBS state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
- new_watched.append( pbs_job_state )
- continue
- if status.job_state != old_state:
- log.debug("(%s/%s) PBS job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
- if status.job_state == "R" and not pbs_job_state.running:
- pbs_job_state.running = True
- pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
- if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
- # Every 20th time the job status is checked, do limit checks (if configured)
- if self.app.config.output_size_limit > 0:
- # Check the size of the job outputs
- fail = False
- for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
- if size > self.app.config.output_size_limit:
- pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
- % nice_size( self.app.config.output_size_limit )
- log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
- % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
- pbs_job_state.stop_job = True
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- fail = True
- break
- if fail:
- continue
- if self.job_walltime is not None:
- # Check the job's execution time
- if status.get( 'resources_used', False ):
- # resources_used may not be in the status for new jobs
- h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
- time_executing = timedelta( 0, s, 0, 0, m, h )
- if time_executing > self.job_walltime:
- pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
- % self.app.config.job_walltime
- log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
- % ( galaxy_job_id, job_id ) )
- pbs_job_state.stop_job = True
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- continue
- elif status.job_state == "C":
- # "keep_completed" is enabled in PBS, so try to check exit status
- try:
- assert int( status.exit_status ) == 0
- log.debug("(%s/%s) PBS job has completed successfully" % ( galaxy_job_id, job_id ) )
- except AssertionError:
- pbs_job_state.fail_message = 'Job cannot be completed due to a cluster error. Please retry or'
- log.error( '(%s/%s) PBS job failed: %s' % ( galaxy_job_id, job_id, JOB_EXIT_STATUS.get( int( status.exit_status ), 'Unknown error: %s' % status.exit_status ) ) )
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- continue
- except AttributeError:
- # No exit_status, can't verify proper completion so we just have to assume success.
- log.debug("(%s/%s) PBS job has completed" % ( galaxy_job_id, job_id ) )
- self.work_queue.put( ( 'finish', pbs_job_state ) )
- continue
- pbs_job_state.old_state = status.job_state
- new_watched.append( pbs_job_state )
# Replace the watch list with the updated version
self.watched = new_watched
@@ -446,10 +411,9 @@
log.debug("connection to PBS server %s for state check failed" % pbs_server_name )
failures.append( pbs_server_name )
continue
- stat_attrl = pbs.new_attrl(3)
+ stat_attrl = pbs.new_attrl(2)
stat_attrl[0].name = pbs.ATTR_state
stat_attrl[1].name = pbs.ATTR_used
- stat_attrl[2].name = pbs.ATTR_exitstat
jobs = pbs.pbs_statjob( c, None, stat_attrl, None )
pbs.pbs_disconnect( c )
statuses.update( self.convert_statjob_to_bunches( jobs ) )
@@ -516,8 +480,7 @@
"""
Seperated out so we can use the worker threads for it.
"""
- if pbs_job_state.stop_job:
- self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) )
+ self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) )
pbs_job_state.job_wrapper.fail( pbs_job_state.fail_message )
self.cleanup( ( pbs_job_state.ofile, pbs_job_state.efile, pbs_job_state.job_file ) )
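
One detail in the pbs.py hunks above that is easy to misread: timedelta's positional arguments are (days, seconds, microseconds, milliseconds, minutes, hours), so timedelta(0, s, 0, 0, m, h) really does build an H:M:S duration. A minimal sketch of the walltime check, with a hypothetical 24-hour limit standing in for app.config.job_walltime:

from datetime import timedelta

def walltime_exceeded(walltime_str, limit):
    # PBS reports resources_used.walltime as "HH:MM:SS".
    h, m, s = [int(i) for i in walltime_str.split(':')]
    # Positional timedelta: days, seconds, microseconds, milliseconds, minutes, hours.
    return timedelta(0, s, 0, 0, m, h) > limit

job_walltime = timedelta(hours=24)   # assumed limit for illustration
print(walltime_exceeded('25:10:05', job_walltime))   # True
print(walltime_exceeded('03:00:00', job_walltime))   # False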

25 May '10
details: http://www.bx.psu.edu/hg/galaxy/rev/c9b41f94d707
changeset: 3803:c9b41f94d707
user: Dan Blankenberg <dan@bx.psu.edu>
date: Fri May 21 16:48:59 2010 -0400
description:
Add a tool to mask FASTQ bases according to quality score. Currently replacement by Ns or lowercase is allowed.
diffstat:
test-data/sanger_full_range_masked_N.fastqsanger | 8 +
test-data/sanger_full_range_masked_lowercase.fastqsanger | 8 +
tool_conf.xml.sample | 1 +
tools/fastq/fastq_masker_by_quality.py | 83 ++++++++++++++++
tools/fastq/fastq_masker_by_quality.xml | 53 ++++++++++
5 files changed, 153 insertions(+), 0 deletions(-)
diffs (179 lines):
diff -r 3b8e4af25be2 -r c9b41f94d707 test-data/sanger_full_range_masked_N.fastqsanger
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/sanger_full_range_masked_N.fastqsanger Fri May 21 16:48:59 2010 -0400
@@ -0,0 +1,8 @@
+@FAKE0001 Original version has PHRED scores from 0 to 93 inclusive (in that order)
+NNNNNNNNNNNNNNNNNNNNNCGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+@FAKE0002 Original version has PHRED scores from 93 to 0 inclusive (in that order)
+CATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCNNNNNNNNNNNNNNNNNNNNN
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#"!
diff -r 3b8e4af25be2 -r c9b41f94d707 test-data/sanger_full_range_masked_lowercase.fastqsanger
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/sanger_full_range_masked_lowercase.fastqsanger Fri May 21 16:48:59 2010 -0400
@@ -0,0 +1,8 @@
+@FAKE0001 Original version has PHRED scores from 0 to 93 inclusive (in that order)
+acgtacgtacgtacgtacgtaCGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+@FAKE0002 Original version has PHRED scores from 93 to 0 inclusive (in that order)
+CATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCatgcatgcatgcatgcatgca
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#"!
diff -r 3b8e4af25be2 -r c9b41f94d707 tool_conf.xml.sample
--- a/tool_conf.xml.sample Fri May 21 15:50:49 2010 -0400
+++ b/tool_conf.xml.sample Fri May 21 16:48:59 2010 -0400
@@ -221,6 +221,7 @@
<tool file="fastq/fastq_filter.xml" />
<tool file="fastq/fastq_trimmer.xml" />
<tool file="fastq/fastq_trimmer_by_quality.xml" />
+ <tool file="fastq/fastq_masker_by_quality.xml" />
<tool file="fastq/fastq_manipulation.xml" />
<tool file="fastq/fastq_to_fasta.xml" />
<tool file="fastq/fastq_to_tabular.xml" />
diff -r 3b8e4af25be2 -r c9b41f94d707 tools/fastq/fastq_masker_by_quality.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fastq/fastq_masker_by_quality.py Fri May 21 16:48:59 2010 -0400
@@ -0,0 +1,83 @@
+#Dan Blankenberg
+import string
+from optparse import OptionParser
+from galaxy_utils.sequence.fastq import fastqReader, fastqWriter
+
+
+def get_score_comparer( operator ):
+ if operator == 'gt':
+ return compare_gt
+ elif operator == 'ge':
+ return compare_ge
+ elif operator == 'eq':
+ return compare_eq
+ elif operator == 'lt':
+ return compare_lt
+ elif operator == 'le':
+ return compare_le
+ elif operator == 'ne':
+ return compare_ne
+ raise 'Invalid operator provided: %s' % operator
+
+def compare_gt( quality_score, threshold_value ):
+ return quality_score > threshold_value
+
+def compare_ge( quality_score, threshold_value ):
+ return quality_score >= threshold_value
+
+def compare_eq( quality_score, threshold_value ):
+ return quality_score == threshold_value
+
+def compare_ne( quality_score, threshold_value ):
+ return quality_score != threshold_value
+
+def compare_lt( quality_score, threshold_value ):
+ return quality_score < threshold_value
+
+def compare_le( quality_score, threshold_value ):
+ return quality_score <= threshold_value
+
+class BaseReplacer( object ):
+ def __init__( self, replace_character ):
+ self.replace_character = replace_character
+ def __call__( self, base_character ):
+ return self.replace_character
+
+def main():
+ usage = "usage: %prog [options] input_file output_file"
+ parser = OptionParser( usage=usage )
+ parser.add_option( '-f', '--format', dest='format', type='choice', default='sanger', choices=( 'sanger', 'cssanger', 'solexa', 'illumina' ), help='FASTQ variant type' )
+ parser.add_option( '-m', '--mask_character', dest='mask_character', default='N', help='Mask Character to use' )
+ parser.add_option( '-c', '--score_comparison', type="choice", dest='score_comparison', default='le', choices=('gt','ge','eq','lt', 'le', 'ne' ), help='Mask base when score is' )
+ parser.add_option( '-s', '--quality_score', type="float", dest='quality_score', default='0', help='Quality Score' )
+ parser.add_option( "-l", "--lowercase", action="store_true", dest="lowercase", default=False, help="Use lowercase masking")
+ ( options, args ) = parser.parse_args()
+
+ if len ( args ) != 2:
+ parser.error( "Need to specify an input file and an output file" )
+
+ score_comparer = get_score_comparer( options.score_comparison )
+
+ if options.lowercase:
+ base_masker = string.lower
+ else:
+ base_masker = BaseReplacer( options.mask_character )
+
+ out = fastqWriter( open( args[1], 'wb' ), format = options.format )
+
+ num_reads = None
+ num_reads_excluded = 0
+ for num_reads, fastq_read in enumerate( fastqReader( open( args[0] ), format = options.format ) ):
+ sequence_list = list( fastq_read.sequence )
+ for i, quality_score in enumerate( fastq_read.get_decimal_quality_scores() ):
+ if score_comparer( quality_score, options.quality_score ):
+ sequence_list[ i ] = base_masker( sequence_list[ i ] )
+ fastq_read.sequence = "".join( sequence_list )
+ out.write( fastq_read )
+
+ if num_reads is not None:
+ print "Processed %i %s reads." % ( num_reads + 1, options.format )
+ else:
+ print "No valid FASTQ reads were provided."
+
+if __name__ == "__main__": main()
diff -r 3b8e4af25be2 -r c9b41f94d707 tools/fastq/fastq_masker_by_quality.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fastq/fastq_masker_by_quality.xml Fri May 21 16:48:59 2010 -0400
@@ -0,0 +1,53 @@
+<tool id="fastq_masker_by_quality" name="FASTQ Masker" version="1.0.0">
+ <description>by quality score</description>
+ <command interpreter="python">fastq_masker_by_quality.py '$input_file' '$output_file' -f '${input_file.extension[len( 'fastq' ):]}' -s '${quality_score}' -c '${score_comparison}'
+ #if $mask_type.value == 'lowercase'
+ --lowercase
+ #else
+ -m '${mask_type}'
+ #end if
+ </command>
+ <inputs>
+ <param name="input_file" type="data" format="fastqsanger" label="File to mask" />
+ <param name="mask_type" type="select" label="Mask input with">
+ <option value="N">N's</option>
+ <option value="lowercase">Lowercase</option>
+ </param>
+ <param name="score_comparison" type="select" label="When score is">
+ <option value="le" selected="True">Less than or equal</option>
+ <option value="lt">Less than</option>
+ <option value="eq">Equal to</option>
+ <option value="ne">Not Equal to</option>
+ <option value="ge">Greater than</option>
+ <option value="gt">Greater than or equal</option>
+ </param>
+ <param name="quality_score" type="integer" value="0"/>
+ </inputs>
+ <outputs>
+ <data name="output_file" format="fastqsanger" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="input_file" value="sanger_full_range_original_sanger.fastqsanger" ftype="fastqsanger" />
+ <param name="mask_type" value="N" />
+ <param name="score_comparison" value="le" />
+ <param name="quality_score" value="20" />
+ <output name="output_file" file="sanger_full_range_masked_N.fastqsanger" />
+ </test>
+ <test>
+ <param name="input_file" value="sanger_full_range_original_sanger.fastqsanger" ftype="fastqsanger" />
+ <param name="mask_type" value="lowercase" />
+ <param name="score_comparison" value="le" />
+ <param name="quality_score" value="20" />
+ <output name="output_file" file="sanger_full_range_masked_lowercase.fastqsanger" />
+ </test>
+ </tests>
+ <help>
+**What it does**
+
+This tool allows masking base characters in FASTQ format files dependent upon user specified quality score value and comparison method.
+
+This tool is not available for use on color space (csSanger) formats.
+
+ </help>
+</tool>

25 May '10
details: http://www.bx.psu.edu/hg/galaxy/rev/3b8e4af25be2
changeset: 3802:3b8e4af25be2
user: jeremy goecks <jeremy.goecks@emory.edu>
date: Fri May 21 15:50:49 2010 -0400
description:
Another pass on search+select for autocomplete that handles user input much better.
diffstat:
lib/galaxy/web/framework/helpers/__init__.py | 2 +-
static/scripts/galaxy.base.js | 68 +++++++++++++++------------
static/scripts/packed/galaxy.base.js | 2 +-
3 files changed, 39 insertions(+), 33 deletions(-)
diffs (105 lines):
diff -r 37586a11c13a -r 3b8e4af25be2 lib/galaxy/web/framework/helpers/__init__.py
--- a/lib/galaxy/web/framework/helpers/__init__.py Fri May 21 15:25:56 2010 -0400
+++ b/lib/galaxy/web/framework/helpers/__init__.py Fri May 21 15:50:49 2010 -0400
@@ -44,7 +44,7 @@
TODO: This has a hardcoded "?v=2" to defeat caching. This should be done
in a better way.
"""
- return "\n".join( [ javascript_include_tag( "/static/scripts/" + name + ".js?v=2" ) for name in args ] )
+ return "\n".join( [ javascript_include_tag( "/static/scripts/" + name + ".js?v=3" ) for name in args ] )
# Hashes
diff -r 37586a11c13a -r 3b8e4af25be2 static/scripts/galaxy.base.js
--- a/static/scripts/galaxy.base.js Fri May 21 15:25:56 2010 -0400
+++ b/static/scripts/galaxy.base.js Fri May 21 15:50:49 2010 -0400
@@ -242,44 +242,50 @@
// Get refresh vals.
var refresh_vals = select_elt.attr('refresh_on_change_values');
if (refresh_vals !== undefined)
- refresh_vals = refresh_vals.split(",")
- text_input_elt.keyup( function( e )
+ refresh_vals = refresh_vals.split(",");
+
+ // Function that attempts to refresh based on the value in the text element.
+ var try_refresh_fn = function()
{
- if ( ( e.keyCode == 13 ) && // Return Key
- ( return_key_pressed_for_autocomplete == true ) ) // Make sure return key was for autocomplete.
+ //
+ // If value entered can be matched to value, do so and refresh by submitting parent form.
+ //
+
+ // Get new value and see if it can be matched.
+ var cur_value = text_input_elt.attr('value');
+ var new_value = select_mapping[cur_value];
+ if (new_value !== null && new_value !== undefined)
{
- //
- // If value entered can be matched to value, do so and refresh by submitting parent form.
- //
-
- // Get new value and see if it can be matched.
- var cur_value = text_input_elt.attr('value');
- var new_value = select_mapping[cur_value];
- if (new_value !== null && new_value !== undefined)
+ // Do refresh if new value is refresh value or if there are no refresh values.
+ refresh = false;
+ if (refresh_vals !== undefined)
{
- // Do refresh if new value is refresh value or if there are no refresh values.
- refresh = false;
- if (refresh_vals !== undefined)
- {
- for (var i= 0; i < refresh_vals.length; i++ )
- if (new_value == refresh_vals[i])
- {
- refresh = true;
- break;
- }
- }
- else
- // Refresh for all values.
- refresh = true;
+ for (var i= 0; i < refresh_vals.length; i++ )
+ if (new_value == refresh_vals[i])
+ {
+ refresh = true;
+ break;
+ }
+ }
+ else
+ // Refresh for all values.
+ refresh = true;
- if (refresh)
- {
- text_input_elt.attr('value', new_value);
- text_input_elt.parents('form').submit();
- }
+ if (refresh)
+ {
+ text_input_elt.attr('value', new_value);
+ text_input_elt.parents('form').submit();
}
}
+ };
+
+ // Attempt refresh if (a) result event fired by autocomplete (indicating autocomplete occurred) or (b) on keyup (in which
+ // case a user may have manually entered a value that needs to be refreshed).
+ text_input_elt.bind("result", try_refresh_fn);
+ text_input_elt.keyup( function(e) {
+ try_refresh_fn();
});
+
}
});
}
diff -r 37586a11c13a -r 3b8e4af25be2 static/scripts/packed/galaxy.base.js
--- a/static/scripts/packed/galaxy.base.js Fri May 21 15:25:56 2010 -0400
+++ b/static/scripts/packed/galaxy.base.js Fri May 21 15:50:49 2010 -0400
@@ -1,1 +1,1 @@
-$(document).ready(function(){replace_big_select_inputs()});$.fn.makeAbsolute=function(a){return this.each(function(){var b=$(this);var c=b.position();b.css({position:"absolute",marginLeft:0,marginTop:0,top:c.top,left:c.left,right:$(window).width()-(c.left+b.width())});if(a){b.remove().appendTo("body")}})};function ensure_popup_helper(){if($("#popup-helper").length===0){$("<div id='popup-helper'/>").css({background:"white",opacity:0,zIndex:15000,position:"absolute",top:0,left:0,width:"100%",height:"100%"}).appendTo("body").hide()}}function attach_popupmenu(b,d){var a=function(){d.unbind().hide();$("#popup-helper").unbind("click.popupmenu").hide()};var c=function(g){$("#popup-helper").bind("click.popupmenu",a).show();d.click(a).css({left:0,top:-1000}).show();var f=g.pageX-d.width()/2;f=Math.min(f,$(document).scrollLeft()+$(window).width()-$(d).width()-20);f=Math.max(f,$(document).scrollLeft()+20);d.css({top:g.pageY-5,left:f});return false};$(b).click(c)}function make_popupmen!
u(c,b){ensure_popup_helper();var a=$("<ul id='"+c.attr("id")+"-menu'></ul>");$.each(b,function(f,e){if(e){$("<li/>").html(f).click(e).appendTo(a)}else{$("<li class='head'/>").html(f).appendTo(a)}});var d=$("<div class='popmenu-wrapper'>");d.append(a).append("<div class='overlay-border'>").css("position","absolute").appendTo("body").hide();attach_popupmenu(c,d)}function make_popup_menus(){jQuery("div[popupmenu]").each(function(){var c={};$(this).find("a").each(function(){var b=$(this).attr("confirm"),d=$(this).attr("href"),e=$(this).attr("target");c[$(this).text()]=function(){if(!b||confirm(b)){var g=window;if(e=="_parent"){g=window.parent}else{if(e=="_top"){g=window.top}}g.location=d}}});var a=$("#"+$(this).attr("popupmenu"));make_popupmenu(a,c);$(this).remove();a.addClass("popup").show()})}function array_length(b){if(b.length){return b.length}var c=0;for(var a in b){c++}return c}function naturalSort(i,g){var n=/(-?[0-9\.]+)/g,j=i.toString().toLowerCase()||"",f=g.toString()!
.toLowerCase()||"",k=String.fromCharCode(0),l=j.replace(n,k+"$1"+k).sp
lit(k),e=f.replace(n,k+"$1"+k).split(k),d=(new Date(j)).getTime(),m=d?(new Date(f)).getTime():null;if(m){if(d<m){return -1}else{if(d>m){return 1}}}for(var h=0,c=Math.max(l.length,e.length);h<c;h++){oFxNcL=parseFloat(l[h])||l[h];oFyNcL=parseFloat(e[h])||e[h];if(oFxNcL<oFyNcL){return -1}else{if(oFxNcL>oFyNcL){return 1}}}return 0}function replace_big_select_inputs(a){if(typeof jQuery().autocomplete=="undefined"){return}if(a===undefined){a=20}$("select").each(function(){var b=$(this);if(b.find("option").length<a){return}var c=b.attr("value");var d=$("<input type='text' class='text-and-autocomplete-select'></input>");d.attr("size",40);d.attr("name",b.attr("name"));d.attr("id",b.attr("id"));d.click(function(){var j=$(this).attr("value");$(this).attr("value","Loading...");$(this).showAllInCache();$(this).attr("value",j);$(this).select()});var i=[];var h={};b.children("option").each(function(){var k=$(this).text();var j=$(this).attr("value");i.push(k);h[k]=j;h[j]=j;if(j==c){d.attr("!
value",k)}});if(c==""||c=="?"){d.attr("value","Click to Search or Select")}i=i.sort(naturalSort);var g={selectFirst:false,autoFill:false,mustMatch:false,matchContains:true,max:1000,minChars:0,hideForLessThanMinChars:false};d.autocomplete(i,g);b.replaceWith(d);var e=function(){var k=d.attr("value");var j=h[k];if(j!==null&&j!==undefined){d.attr("value",j)}else{if(c!=""){d.attr("value",c)}else{d.attr("value","?")}}};d.parents("form").submit(function(){e()});$(document).bind("convert_dbkeys",function(){e()});if(b.attr("refresh_on_change")=="true"){var f=b.attr("refresh_on_change_values");if(f!==undefined){f=f.split(",")}d.keyup(function(m){if((m.keyCode==13)&&(return_key_pressed_for_autocomplete==true)){var l=d.attr("value");var k=h[l];if(k!==null&&k!==undefined){refresh=false;if(f!==undefined){for(var j=0;j<f.length;j++){if(k==f[j]){refresh=true;break}}}else{refresh=true}if(refresh){d.attr("value",k);d.parents("form").submit()}}}})}})}function async_save_text(d,f,e,a,c,h,i,g,b!
){if(c===undefined){c=30}if(i===undefined){i=4}$("#"+d).live("click",f
unction(){if($("#renaming-active").length>0){return}var l=$("#"+f),k=l.text(),j;if(h){j=$("<textarea></textarea>").attr({rows:i,cols:c}).text(k)}else{j=$("<input type='text'></input>").attr({value:k,size:c})}j.attr("id","renaming-active");j.blur(function(){$(this).remove();l.show();if(b){b(j)}});j.keyup(function(n){if(n.keyCode===27){$(this).trigger("blur")}else{if(n.keyCode===13){var m={};m[a]=$(this).val();$(this).trigger("blur");$.ajax({url:e,data:m,error:function(){alert("Text editing for elt "+f+" failed")},success:function(o){l.text(o);if(b){b(j)}}})}}});if(g){g(j)}l.hide();j.insertAfter(l);j.focus();j.select();return})}function init_history_items(d,a,c){var b=function(){try{var e=$.jStore.store("history_expand_state");if(e){for(var g in e){$("#"+g+" div.historyItemBody").show()}}}catch(f){$.jStore.remove("history_expand_state")}if($.browser.mozilla){$("div.historyItemBody").each(function(){if(!$(this).is(":visible")){$(this).find("pre.peek").css("overflow","hidden")}}!
)}d.each(function(){var j=this.id;var h=$(this).children("div.historyItemBody");var i=h.find("pre.peek");$(this).find(".historyItemTitleBar > .historyItemTitle").wrap("<a href='javascript:void(0);'></a>").click(function(){if(h.is(":visible")){if($.browser.mozilla){i.css("overflow","hidden")}h.slideUp("fast");if(!c){var k=$.jStore.store("history_expand_state");if(k){delete k[j];$.jStore.store("history_expand_state",k)}}}else{h.slideDown("fast",function(){if($.browser.mozilla){i.css("overflow","auto")}});if(!c){var k=$.jStore.store("history_expand_state");if(k===undefined){k={}}k[j]=true;$.jStore.store("history_expand_state",k)}}return false})});$("#top-links > a.toggle").click(function(){var h=$.jStore.store("history_expand_state");if(h===undefined){h={}}$("div.historyItemBody:visible").each(function(){if($.browser.mozilla){$(this).find("pre.peek").css("overflow","hidden")}$(this).slideUp("fast");if(h){delete h[$(this).parent().attr("id")]}});$.jStore.store("history_expand_s!
tate",h)}).show()};if(a){b()}else{$.jStore.init("galaxy");$.jStore.eng
ineReady(function(){b()})}}$(document).ready(function(){$("a[confirm]").click(function(){return confirm($(this).attr("confirm"))});if($.fn.tipsy){$(".tooltip").tipsy({gravity:"s"})}make_popup_menus()});
\ No newline at end of file
+$(document).ready(function(){replace_big_select_inputs()});$.fn.makeAbsolute=function(a){return this.each(function(){var b=$(this);var c=b.position();b.css({position:"absolute",marginLeft:0,marginTop:0,top:c.top,left:c.left,right:$(window).width()-(c.left+b.width())});if(a){b.remove().appendTo("body")}})};function ensure_popup_helper(){if($("#popup-helper").length===0){$("<div id='popup-helper'/>").css({background:"white",opacity:0,zIndex:15000,position:"absolute",top:0,left:0,width:"100%",height:"100%"}).appendTo("body").hide()}}function attach_popupmenu(b,d){var a=function(){d.unbind().hide();$("#popup-helper").unbind("click.popupmenu").hide()};var c=function(g){$("#popup-helper").bind("click.popupmenu",a).show();d.click(a).css({left:0,top:-1000}).show();var f=g.pageX-d.width()/2;f=Math.min(f,$(document).scrollLeft()+$(window).width()-$(d).width()-20);f=Math.max(f,$(document).scrollLeft()+20);d.css({top:g.pageY-5,left:f});return false};$(b).click(c)}function make_popupmen!
u(c,b){ensure_popup_helper();var a=$("<ul id='"+c.attr("id")+"-menu'></ul>");$.each(b,function(f,e){if(e){$("<li/>").html(f).click(e).appendTo(a)}else{$("<li class='head'/>").html(f).appendTo(a)}});var d=$("<div class='popmenu-wrapper'>");d.append(a).append("<div class='overlay-border'>").css("position","absolute").appendTo("body").hide();attach_popupmenu(c,d)}function make_popup_menus(){jQuery("div[popupmenu]").each(function(){var c={};$(this).find("a").each(function(){var b=$(this).attr("confirm"),d=$(this).attr("href"),e=$(this).attr("target");c[$(this).text()]=function(){if(!b||confirm(b)){var g=window;if(e=="_parent"){g=window.parent}else{if(e=="_top"){g=window.top}}g.location=d}}});var a=$("#"+$(this).attr("popupmenu"));make_popupmenu(a,c);$(this).remove();a.addClass("popup").show()})}function array_length(b){if(b.length){return b.length}var c=0;for(var a in b){c++}return c}function naturalSort(i,g){var n=/(-?[0-9\.]+)/g,j=i.toString().toLowerCase()||"",f=g.toString()!
.toLowerCase()||"",k=String.fromCharCode(0),l=j.replace(n,k+"$1"+k).sp
lit(k),e=f.replace(n,k+"$1"+k).split(k),d=(new Date(j)).getTime(),m=d?(new Date(f)).getTime():null;if(m){if(d<m){return -1}else{if(d>m){return 1}}}for(var h=0,c=Math.max(l.length,e.length);h<c;h++){oFxNcL=parseFloat(l[h])||l[h];oFyNcL=parseFloat(e[h])||e[h];if(oFxNcL<oFyNcL){return -1}else{if(oFxNcL>oFyNcL){return 1}}}return 0}function replace_big_select_inputs(a){if(typeof jQuery().autocomplete=="undefined"){return}if(a===undefined){a=20}$("select").each(function(){var d=$(this);if(d.find("option").length<a){return}var j=d.attr("value");var b=$("<input type='text' class='text-and-autocomplete-select'></input>");b.attr("size",40);b.attr("name",d.attr("name"));b.attr("id",d.attr("id"));b.click(function(){var k=$(this).attr("value");$(this).attr("value","Loading...");$(this).showAllInCache();$(this).attr("value",k);$(this).select()});var e=[];var g={};d.children("option").each(function(){var l=$(this).text();var k=$(this).attr("value");e.push(l);g[l]=k;g[k]=k;if(k==j){b.attr("!
value",l)}});if(j==""||j=="?"){b.attr("value","Click to Search or Select")}e=e.sort(naturalSort);var f={selectFirst:false,autoFill:false,mustMatch:false,matchContains:true,max:1000,minChars:0,hideForLessThanMinChars:false};b.autocomplete(e,f);d.replaceWith(b);var i=function(){var l=b.attr("value");var k=g[l];if(k!==null&&k!==undefined){b.attr("value",k)}else{if(j!=""){b.attr("value",j)}else{b.attr("value","?")}}};b.parents("form").submit(function(){i()});$(document).bind("convert_dbkeys",function(){i()});if(d.attr("refresh_on_change")=="true"){var c=d.attr("refresh_on_change_values");if(c!==undefined){c=c.split(",")}var h=function(){var m=b.attr("value");var l=g[m];if(l!==null&&l!==undefined){refresh=false;if(c!==undefined){for(var k=0;k<c.length;k++){if(l==c[k]){refresh=true;break}}}else{refresh=true}if(refresh){b.attr("value",l);b.parents("form").submit()}}};b.bind("result",h);b.keyup(function(k){h()})}})}function async_save_text(d,f,e,a,c,h,i,g,b){if(c===undefined){c=30}!
if(i===undefined){i=4}$("#"+d).live("click",function(){if($("#renaming
-active").length>0){return}var l=$("#"+f),k=l.text(),j;if(h){j=$("<textarea></textarea>").attr({rows:i,cols:c}).text(k)}else{j=$("<input type='text'></input>").attr({value:k,size:c})}j.attr("id","renaming-active");j.blur(function(){$(this).remove();l.show();if(b){b(j)}});j.keyup(function(n){if(n.keyCode===27){$(this).trigger("blur")}else{if(n.keyCode===13){var m={};m[a]=$(this).val();$(this).trigger("blur");$.ajax({url:e,data:m,error:function(){alert("Text editing for elt "+f+" failed")},success:function(o){l.text(o);if(b){b(j)}}})}}});if(g){g(j)}l.hide();j.insertAfter(l);j.focus();j.select();return})}function init_history_items(d,a,c){var b=function(){try{var e=$.jStore.store("history_expand_state");if(e){for(var g in e){$("#"+g+" div.historyItemBody").show()}}}catch(f){$.jStore.remove("history_expand_state")}if($.browser.mozilla){$("div.historyItemBody").each(function(){if(!$(this).is(":visible")){$(this).find("pre.peek").css("overflow","hidden")}})}d.each(function(){var j!
=this.id;var h=$(this).children("div.historyItemBody");var i=h.find("pre.peek");$(this).find(".historyItemTitleBar > .historyItemTitle").wrap("<a href='javascript:void(0);'></a>").click(function(){if(h.is(":visible")){if($.browser.mozilla){i.css("overflow","hidden")}h.slideUp("fast");if(!c){var k=$.jStore.store("history_expand_state");if(k){delete k[j];$.jStore.store("history_expand_state",k)}}}else{h.slideDown("fast",function(){if($.browser.mozilla){i.css("overflow","auto")}});if(!c){var k=$.jStore.store("history_expand_state");if(k===undefined){k={}}k[j]=true;$.jStore.store("history_expand_state",k)}}return false})});$("#top-links > a.toggle").click(function(){var h=$.jStore.store("history_expand_state");if(h===undefined){h={}}$("div.historyItemBody:visible").each(function(){if($.browser.mozilla){$(this).find("pre.peek").css("overflow","hidden")}$(this).slideUp("fast");if(h){delete h[$(this).parent().attr("id")]}});$.jStore.store("history_expand_state",h)}).show()};if(a){!
b()}else{$.jStore.init("galaxy");$.jStore.engineReady(function(){b()})
}}$(document).ready(function(){$("a[confirm]").click(function(){return confirm($(this).attr("confirm"))});if($.fn.tipsy){$(".tooltip").tipsy({gravity:"s"})}make_popup_menus()});
\ No newline at end of file
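The heart of this change is that the same refresh check now runs both when autocomplete fires its "result" event and on keyup (manual typing), instead of only on a return keypress. Below is a minimal sketch of that decision rule, written in Python for brevity; the real implementation is the jQuery try_refresh_fn above, and select_mapping and refresh_vals mirror the variables in the diff.

def should_refresh( typed_text, select_mapping, refresh_vals=None ):
    """Return the matched option value if the parent form should be submitted, else None."""
    new_value = select_mapping.get( typed_text )
    if new_value is None:
        return None  # typed text matches no option; do nothing
    # Refresh when the matched value is one of the declared refresh values,
    # or when no refresh values are declared (refresh for all values).
    if refresh_vals is None or new_value in refresh_vals:
        return new_value
    return None

# e.g. should_refresh( 'hg18', { 'hg18': 'hg18' }, [ 'hg18' ] ) == 'hg18'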
25 May '10
details: http://www.bx.psu.edu/hg/galaxy/rev/37586a11c13a
changeset: 3801:37586a11c13a
user: Dan Blankenberg <dan(a)bx.psu.edu>
date: Fri May 21 15:25:56 2010 -0400
description:
First pass at adding Ensembl browsers as an external display application. Two different URL generation and data attachment methods are used: one for 'old' Ensembl archives older than ~November 2008, and another for Ensembl sites using the current method. The tool-data/shared/ensembl/ensembl_sites.txt file contains the site and build information for the current method; the tool-data/shared/ensembl/ensembl_sites_data_URL.txt file contains the site and build information for the older method.
The new method follows: http://www.ensembl.org/info/docs/webcode/linking.html
The old method follows: http://aug2007.archive.ensembl.org/Homo_sapiens/helpview?se=1;kw=urlsource
diffstat:
datatypes_conf.xml.sample | 2 +
display_applications/ensembl/ensembl_gff.xml | 127 +++++++++++++++
display_applications/ensembl/ensembl_interval_as_bed.xml | 127 +++++++++++++++
tool-data/shared/ensembl/ensembl_sites.txt | 4 +
tool-data/shared/ensembl/ensembl_sites_data_URL.txt | 8 +
5 files changed, 268 insertions(+), 0 deletions(-)
diffs (301 lines):
diff -r 0539d58e383a -r 37586a11c13a datatypes_conf.xml.sample
--- a/datatypes_conf.xml.sample Fri May 21 14:41:20 2010 -0400
+++ b/datatypes_conf.xml.sample Fri May 21 15:25:56 2010 -0400
@@ -49,6 +49,7 @@
</datatype>
<datatype extension="gff" type="galaxy.datatypes.interval:Gff" display_in_upload="true">
<converter file="gff_to_bed_converter.xml" target_datatype="bed"/>
+ <display file="ensembl/ensembl_gff.xml" inherit="True"/>
</datatype>
<datatype extension="gff3" type="galaxy.datatypes.interval:Gff3" display_in_upload="true"/>
<datatype extension="gif" type="galaxy.datatypes.images:Image" mimetype="image/gif"/>
@@ -63,6 +64,7 @@
<indexer file="interval_awk.xml" />
<!-- <display file="ucsc/interval_as_bed.xml" inherit="True" /> -->
<display file="genetrack.xml" inherit="True"/>
+ <display file="ensembl/ensembl_interval_as_bed.xml" inherit="True"/>
</datatype>
<datatype extension="jpg" type="galaxy.datatypes.images:Image" mimetype="image/jpeg"/>
<datatype extension="laj" type="galaxy.datatypes.images:Laj"/>
diff -r 0539d58e383a -r 37586a11c13a display_applications/ensembl/ensembl_gff.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/display_applications/ensembl/ensembl_gff.xml Fri May 21 15:25:56 2010 -0400
@@ -0,0 +1,127 @@
+<display id="ensembl_gff" version="1.0.0" name="display at Ensembl">
+ <!-- Current Ensembl method of attaching user data via URL; archives older than ~November 2008 will use a different method -->
+ <!-- Load links from file: one line to one link -->
+ <dynamic_links from_file="tool-data/shared/ensembl/ensembl_sites.txt" skip_startswith="#" id="0" name="1">
+
+ <!-- Define parameters by column from file, allow splitting on builds -->
+ <dynamic_param name="site_id" value="0"/>
+ <dynamic_param name="site_name" value="1"/>
+ <dynamic_param name="site_link" value="2"/>
+ <dynamic_param name="site_dbkeys" value="3" split="True" separator="," />
+ <dynamic_param name="site_organisms" value="4" split="True" separator="," />
+
+ <!-- Filter out some of the links based upon matching site_dbkeys to dataset dbkey -->
+ <filter>${dataset.dbkey in $site_dbkeys}</filter>
+
+ <!-- We define url and params as normal, but values defined in dynamic_param are available by specified name -->
+ <url>${site_link}${site_organism}/Location/View?r=${position};contigviewbottom=url:${gff_file.qp}=normal</url>
+
+ <param type="data" name="gff_file" url="galaxy_${DATASET_HASH}.gff" />
+ <param type="template" name="site_organism" strip="True" >
+ #set index = $site_dbkeys.index( $gff_file.dbkey )
+ $site_organisms[ $index ]
+ </param>
+ <param type="template" name="position" strip="True" >
+#set line_count = 0
+#set chrom = None
+#set start = float( 'inf' )
+#set end = 0
+#for $line in open( $gff_file.file_name ):
+ #if $line_count > 10: ##10 max lines to check for view port
+ #break
+ #end if
+ #if not $line.startswith( "#" ):
+ #set $fields = $line.split( "\t" )
+ #try:
+ #if len( $fields ) >= 5:
+ #if $chrom is None or $fields[ 0 ] == $chrom:
+ #set chrom = $fields[ 0 ]
+ #set start = min( $start, int( $fields[ 3 ] ) )
+ #set end = max( $end, int( $fields[ 4 ] ) )
+ #end if
+ #end if
+ #except:
+ #pass
+ #end try
+ #end if
+ #set line_count += 1
+#end for
+#if $chrom is not None:
+##The difference between chr1 and 1 is handled by Ensembl, except for the viewport, we need to provide e.g. '1' instead of 'chr1' here
+##This is rather naive, it would be more ideal to have actual mappings
+#if $chrom == 'chrM':
+ #set $chrom = 'MT'
+#end if
+#if $chrom.startswith( 'chr' ):
+ #set $chrom = $chrom[3:]
+#end if
+${chrom}:${start}-${end}
+#else:
+##default view is of '1'
+1
+#end if
+ </param>
+ </dynamic_links>
+
+ <!-- Old Ensembl method of attaching user data via URL -->
+ <!-- Load links from file: one line to one link -->
+ <dynamic_links from_file="tool-data/shared/ensembl/ensembl_sites_data_URL.txt" skip_startswith="#" id="0" name="1">
+
+ <!-- Define parameters by column from file, allow splitting on builds -->
+ <dynamic_param name="site_id" value="0"/>
+ <dynamic_param name="site_name" value="1"/>
+ <dynamic_param name="site_link" value="2"/>
+ <dynamic_param name="site_dbkeys" value="3" split="True" separator="," />
+ <dynamic_param name="site_organisms" value="4" split="True" separator="," />
+
+ <!-- Filter out some of the links based upon matching site_dbkeys to dataset dbkey -->
+ <filter>${dataset.dbkey in $site_dbkeys}</filter>
+
+ <!-- We define url and params as normal, but values defined in dynamic_param are available by specified name -->
+ <url>${site_link}${site_organism}/contigview?data_URL=${gff_file.qp}${position}</url>
+
+ <param type="data" name="gff_file" url="galaxy_${DATASET_HASH}.gff" />
+ <param type="template" name="site_organism" strip="True" >
+ #set index = $site_dbkeys.index( $gff_file.dbkey )
+ $site_organisms[ $index ]
+ </param>
+ <param type="template" name="position" strip="True" >
+ #set line_count = 0
+ #set chrom = None
+ #set start = float( 'inf' )
+ #set end = 0
+ #for $line in open( $gff_file.file_name ):
+ #if $line_count > 10: ##10 max lines to check for view port
+ #break
+ #end if
+ #if not $line.startswith( "#" ):
+ #set $fields = $line.split( "\t" )
+ #try:
+ #if len( $fields ) >= 5:
+ #if $chrom is None or $fields[ 0 ] == $chrom:
+ #set chrom = $fields[ 0 ]
+ #set start = min( $start, int( $fields[ 3 ] ) )
+ #set end = max( $end, int( $fields[ 4 ] ) )
+ #end if
+ #end if
+ #except:
+ #pass
+ #end try
+ #end if
+ #set line_count += 1
+ #end for
+ #if $chrom is not None:
+ ##The difference between chr1 and 1 is handled by Ensembl, except for the viewport, we need to provide e.g. '1' instead of 'chr1' here
+ ##This is rather naive, it would be more ideal to have actual mappings
+ #if $chrom == 'chrM':
+ #set $chrom = 'MT'
+ #end if
+ #if $chrom.startswith( 'chr' ):
+ #set $chrom = $chrom[3:]
+ #end if
+ &chr=${chrom}&start=${start}&end=${end}
+ #end if
+ </param>
+ </dynamic_links>
+
+</display>
diff -r 0539d58e383a -r 37586a11c13a display_applications/ensembl/ensembl_interval_as_bed.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/display_applications/ensembl/ensembl_interval_as_bed.xml Fri May 21 15:25:56 2010 -0400
@@ -0,0 +1,127 @@
+<display id="ensembl_interval" version="1.0.0" name="display at Ensembl">
+ <!-- Current Ensembl method of attaching user data via URL; archives older than ~November 2008 will use a different method -->
+ <!-- Load links from file: one line to one link -->
+ <dynamic_links from_file="tool-data/shared/ensembl/ensembl_sites.txt" skip_startswith="#" id="0" name="1">
+
+ <!-- Define parameters by column from file, allow splitting on builds -->
+ <dynamic_param name="site_id" value="0"/>
+ <dynamic_param name="site_name" value="1"/>
+ <dynamic_param name="site_link" value="2"/>
+ <dynamic_param name="site_dbkeys" value="3" split="True" separator="," />
+ <dynamic_param name="site_organisms" value="4" split="True" separator="," />
+
+ <!-- Filter out some of the links based upon matching site_dbkeys to dataset dbkey -->
+ <filter>${dataset.dbkey in $site_dbkeys}</filter>
+
+ <!-- We define url and params as normal, but values defined in dynamic_param are available by specified name -->
+ <url>${site_link}${site_organism}/Location/View?r=${position};contigviewbottom=url:${bed_file.qp}=normal</url>
+
+ <param type="data" name="bed_file" url="galaxy_${DATASET_HASH}.bed" format="bedstrict"/>
+ <param type="template" name="site_organism" strip="True" >
+ #set index = $site_dbkeys.index( $bed_file.dbkey )
+ $site_organisms[ $index ]
+ </param>
+ <param type="template" name="position" strip="True" >
+#set line_count = 0
+#set chrom = None
+#set start = float( 'inf' )
+#set end = 0
+#for $line in open( $bed_file.file_name ):
+ #if $line_count > 10: ##10 max lines to check for view port
+ #break
+ #end if
+ #if not $line.startswith( "#" ):
+ #set $fields = $line.split( "\t" )
+ #try:
+ #if len( $fields ) >= max( $bed_file.metadata.startCol, $bed_file.metadata.endCol, $bed_file.metadata.chromCol ):
+ #if $chrom is None or $fields[ $bed_file.metadata.chromCol - 1 ] == $chrom:
+ #set chrom = $fields[ $bed_file.metadata.chromCol - 1 ]
+ #set start = min( $start, int( $fields[ $bed_file.metadata.startCol - 1 ] ) )
+ #set end = max( $end, int( $fields[ $bed_file.metadata.endCol - 1 ] ) )
+ #end if
+ #end if
+ #except:
+ #pass
+ #end try
+ #end if
+ #set line_count += 1
+#end for
+#if $chrom is not None:
+##The difference between chr1 and 1 is handled by Ensembl, except for the viewport, we need to provide e.g. '1' instead of 'chr1' here
+##This is rather naive, it would be more ideal to have actual mappings
+#if $chrom == 'chrM':
+ #set $chrom = 'MT'
+#end if
+#if $chrom.startswith( 'chr' ):
+ #set $chrom = $chrom[3:]
+#end if
+${chrom}:${start + 1}-${end}
+#else:
+##default view is of '1'
+1
+#end if
+ </param>
+ </dynamic_links>
+
+ <!-- Old Ensembl method of attaching user data via URL -->
+ <!-- Load links from file: one line to one link -->
+ <dynamic_links from_file="tool-data/shared/ensembl/ensembl_sites_data_URL.txt" skip_startswith="#" id="0" name="1">
+
+ <!-- Define parameters by column from file, allow splitting on builds -->
+ <dynamic_param name="site_id" value="0"/>
+ <dynamic_param name="site_name" value="1"/>
+ <dynamic_param name="site_link" value="2"/>
+ <dynamic_param name="site_dbkeys" value="3" split="True" separator="," />
+ <dynamic_param name="site_organisms" value="4" split="True" separator="," />
+
+ <!-- Filter out some of the links based upon matching site_dbkeys to dataset dbkey -->
+ <filter>${dataset.dbkey in $site_dbkeys}</filter>
+
+ <!-- We define url and params as normal, but values defined in dynamic_param are available by specified name -->
+ <url>${site_link}${site_organism}/contigview?data_URL=${bed_file.qp}${position}</url>
+
+ <param type="data" name="bed_file" url="galaxy_${DATASET_HASH}.bed" format="bedstrict"/>
+ <param type="template" name="site_organism" strip="True" >
+ #set index = $site_dbkeys.index( $bed_file.dbkey )
+ $site_organisms[ $index ]
+ </param>
+ <param type="template" name="position" strip="True" >
+ #set line_count = 0
+ #set chrom = None
+ #set start = float( 'inf' )
+ #set end = 0
+ #for $line in open( $bed_file.file_name ):
+ #if $line_count > 10: ##10 max lines to check for view port
+ #break
+ #end if
+ #if not $line.startswith( "#" ):
+ #set $fields = $line.split( "\t" )
+ #try:
+ #if len( $fields ) >= max( $bed_file.metadata.startCol, $bed_file.metadata.endCol, $bed_file.metadata.chromCol ):
+ #if $chrom is None or $fields[ $bed_file.metadata.chromCol - 1 ] == $chrom:
+ #set chrom = $fields[ $bed_file.metadata.chromCol - 1 ]
+ #set start = min( $start, int( $fields[ $bed_file.metadata.startCol - 1 ] ) )
+ #set end = max( $end, int( $fields[ $bed_file.metadata.endCol - 1 ] ) )
+ #end if
+ #end if
+ #except:
+ #pass
+ #end try
+ #end if
+ #set line_count += 1
+ #end for
+ #if $chrom is not None:
+ ##The difference between chr1 and 1 is handled by Ensembl, except for the viewport, we need to provide e.g. '1' instead of 'chr1' here
+ ##This is rather naive, it would be more ideal to have actual mappings
+ #if $chrom == 'chrM':
+ #set $chrom = 'MT'
+ #end if
+ #if $chrom.startswith( 'chr' ):
+ #set $chrom = $chrom[3:]
+ #end if
+ &chr=${chrom}&start=${start + 1}&end=${end}
+ #end if
+ </param>
+ </dynamic_links>
+
+</display>
diff -r 0539d58e383a -r 37586a11c13a tool-data/shared/ensembl/ensembl_sites.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/shared/ensembl/ensembl_sites.txt Fri May 21 15:25:56 2010 -0400
@@ -0,0 +1,4 @@
+#These builds are displayed using the method described in:
+#http://www.ensembl.org/info/docs/webcode/linking.html
+ensembl_Current Current http://www.ensembl.org/ hg19,felCat3,galGal3,bosTau4,canFam2,loxAfr3,cavPor3,equCab2,anoCar1,oryLat2,mm9,monDom5,ponAbe2,susScr2,ornAna1,oryCun2,rn4,rheMac2,gasAcu1,tetNig2,xenTro2,taeGut1,danRer5,ci2,dm3,ce6,sacCer2 Homo_sapiens,Felis_catus,Gallus_gallus,Bos_taurus,Canis_familiaris,Loxodonta_africana,Cavia_porcellus,Equus_caballus,Anolis_carolinensis,Oryzias_latipes,Mus_musculus,Monodelphis_domestica,Pongo_pygmaeus,Sus_scrofa,Ornithorhynchus_anatinus,Oryctolagus_cuniculus,Rattus_norvegicus,Macaca_mulatta,Gasterosteus_aculeatus,Tetraodon_nigroviridis,Xenopus_tropicalis,Taeniopygia_guttata,Danio_rerio,Ciona_intestinalis,Drosophila_melanogaster,Caenorhabditis_elegans,Saccharomyces_cerevisiae
+ensembl_May_2009 May 2009 http://may2009.archive.ensembl.org/ hg18 Homo_sapiens
diff -r 0539d58e383a -r 37586a11c13a tool-data/shared/ensembl/ensembl_sites_data_URL.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/shared/ensembl/ensembl_sites_data_URL.txt Fri May 21 15:25:56 2010 -0400
@@ -0,0 +1,8 @@
+#These builds are displayed using the method described in:
+#http://aug2007.archive.ensembl.org/Homo_sapiens/helpview?se=1;kw=urlsource
+ensembl_March_2008 March 2008 http://mar2008.archive.ensembl.org/ bosTau3,tetNig1 Bos_taurus,Tetraodon_nigroviridis
+ensembl_February_2007 February 2007 http://feb2007.archive.ensembl.org/ monDom4,danRer4 Monodelphis_domestica,Danio_rerio
+ensembl_July_2008 July 2008 http://jul2008.archive.ensembl.org/ panTro2 Pan_troglodytes
+ensembl_April_2006 April 2006 http://apr2006.archive.ensembl.org/ galGal2,bosTau2,canFam1,mm7,rheMac1,danRer3,apiMel2,sacCer1 Gallus_gallus,Bos_taurus,Canis_familiaris,Mus_musculus,Macaca_mulatta,Danio_rerio,Apis_mellifera,Saccharomyces_cerevisiae
+ensembl_November_2005 November 2005 http://nov2005.archive.ensembl.org/ hg17,panTro1,bosTau1,mm6,xenTro1,anoGam1,dm2 Homo_sapiens,Pan_troglodytes,Bos_taurus,Mus_musculus,Xenopus_tropicalis,Anopheles_gambiae,Drosophila_melanogaster
+ensembl_August_2007 August 2007 http://aug2007.archive.ensembl.org/ mm8,ce4 Mus_musculus,Caenorhabditis_elegans
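A Python sketch of the viewport computation the Cheetah templates above perform, under the same rules: scan at most the first 10 data lines, keep only rows on the first chromosome seen, map chrM to MT, and strip a leading 'chr', because Ensembl's viewport (unlike its other parameters) wants '1' rather than 'chr1'. The function name is illustrative; the shipped logic is the template code itself.

def gff_viewport( path, max_lines=10 ):
    chrom, start, end = None, float( 'inf' ), 0
    line_count = 0
    for line in open( path ):
        if line_count > max_lines:
            break
        if not line.startswith( '#' ):
            fields = line.split( '\t' )
            try:
                if len( fields ) >= 5 and ( chrom is None or fields[0] == chrom ):
                    chrom = fields[0]
                    start = min( start, int( fields[3] ) )
                    end = max( end, int( fields[4] ) )
            except ValueError:
                pass
        line_count += 1
    if chrom is None:
        return '1'  # the templates' default view
    if chrom == 'chrM':
        chrom = 'MT'
    if chrom.startswith( 'chr' ):
        chrom = chrom[3:]
    return '%s:%s-%s' % ( chrom, start, end )

# The BED variant (ensembl_interval_as_bed.xml) is the same except that it
# reads the chrom/start/end columns from the dataset's metadata and reports
# start + 1, since BED starts are 0-based while Ensembl's viewport is 1-based.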