1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/fd53d9688eb3/
changeset: fd53d9688eb3
user: jgoecks
date: 2011-07-14 12:12:55
summary: Trackster: document FeaturePainter object and methods as abstract.
affected #: 1 file (377 bytes)
--- a/static/scripts/trackster.js Wed Jul 13 16:51:51 2011 -0400
+++ b/static/scripts/trackster.js Thu Jul 14 12:12:55 2011 +0200
@@ -3307,6 +3307,9 @@
ctx.restore();
}
+/**
+ * Abstract object for painting feature tracks. Subclasses must implement draw_element() for painting to work.
+ */
var FeaturePainter = function( data, view_start, view_end, prefs, mode ) {
Painter.call( this, data, view_start, view_end, prefs, mode );
}
@@ -3327,7 +3330,6 @@
},
draw: function( ctx, width, height, slots ) {
-
var data = this.data, view_start = this.view_start, view_end = this.view_end;
ctx.save();
@@ -3356,7 +3358,12 @@
}
ctx.restore();
- }
+ },
+
+ /**
+ * Abstract function for drawing an individual feature. NOTE: this method must be implemented by subclasses for drawing to work.
+ */
+ draw_element: function(ctx, mode, feature, slot, tile_low, tile_high, w_scale, y_scale, width ) {}
});
// Constants specific to feature tracks moved here (HACKING, these should
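The pattern documented here is the classic abstract-base idiom: FeaturePainter defines the shared draw() loop plus a do-nothing draw_element() hook, and concrete painters override the hook. For readers more at home in Python than in trackster.js's prototype chains, a minimal sketch of the same idea (modern Python 3; all names below are illustrative, not Galaxy's):

import abc

class Painter(abc.ABC):
    """Holds the data and viewport shared by every painter."""
    def __init__(self, data, view_start, view_end):
        self.data = data
        self.view_start = view_start
        self.view_end = view_end

class FeaturePainter(Painter):
    """Abstract: draw() walks the features, draw_element() paints one of them."""
    def draw(self, ctx, width, height, slots):
        for feature in self.data:
            self.draw_element(ctx, feature)   # hook implemented by subclasses

    @abc.abstractmethod
    def draw_element(self, ctx, feature):
        """Must be implemented by subclasses for drawing to work."""

class RectPainter(FeaturePainter):            # hypothetical concrete subclass
    def draw_element(self, ctx, feature):
        ctx.append(("rect", feature))         # stand-in for real canvas calls

JavaScript cannot enforce the override, hence the doc comments added by this changeset; Python's abc module turns the same convention into a hard error at instantiation time.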
1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/b2f7abd50f6a/
changeset: b2f7abd50f6a
user: greg
date: 2011-07-13 15:39:02
summary: Add patch from Assaf Gordon to the cleanup_datasets.py script. The changes are:
1. If a dataset is skipped (because it's shared/cloned and was already processed), no message is printed at all.
2. If a dataset cannot be deleted because it is shared and at least one instance is not marked as "deleted", a proper message is printed.
3. If a dataset has metadata files, the message is changed depending on the "info_only" and "remove_from_disk" flags.
4. The final summary message is slightly changed.
affected #: 1 file (514 bytes)
--- a/scripts/cleanup_datasets/cleanup_datasets.py Tue Jul 12 18:14:56 2011 -0400
+++ b/scripts/cleanup_datasets/cleanup_datasets.py Wed Jul 13 09:39:02 2011 -0400
@@ -310,17 +310,21 @@
dataset_ids.extend( [ row.id for row in history_dataset_ids_query.execute() ] )
# Process each of the Dataset objects
for dataset_id in dataset_ids:
+ dataset = app.sa_session.query( app.model.Dataset ).get( dataset_id )
+ if dataset.id in skip:
+ continue
+ skip.append( dataset.id )
print "######### Processing dataset id:", dataset_id
- dataset = app.sa_session.query( app.model.Dataset ).get( dataset_id )
- if dataset.id not in skip and _dataset_is_deletable( dataset ):
- deleted_dataset_count += 1
- for dataset_instance in dataset.history_associations + dataset.library_associations:
- # Mark each associated HDA as deleted
- _purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children=True, info_only=info_only, is_deletable=True )
- deleted_instance_count += 1
- skip.append( dataset.id )
+ if not _dataset_is_deletable( dataset ):
+ print "Dataset is not deletable (shared between multiple histories/libraries, at least one is not deleted)"
+ continue
+ deleted_dataset_count += 1
+ for dataset_instance in dataset.history_associations + dataset.library_associations:
+ # Mark each associated HDA as deleted
+ _purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children=True, info_only=info_only, is_deletable=True )
+ deleted_instance_count += 1
stop = time.time()
- print "Examined %d datasets, marked %d as deleted and purged %d dataset instances" % ( len( skip ), deleted_dataset_count, deleted_instance_count )
+ print "Examined %d datasets, marked %d datasets and %d dataset instances (HDA) as deleted" % ( len( skip ), deleted_dataset_count, deleted_instance_count )
print "Total elapsed time: ", stop - start
print "##########################################"
@@ -396,8 +400,13 @@
.filter( app.model.MetadataFile.table.c.lda_id==ldda.id ):
metadata_files.append( metadata_file )
for metadata_file in metadata_files:
- print "The following metadata files attached to associations of Dataset '%s' have been purged:" % dataset.id
- if not info_only:
+ op_description = "marked as deleted"
+ if remove_from_disk:
+ op_description = op_description + " and purged from disk"
+ if info_only:
+ print "The following metadata files attached to associations of Dataset '%s' will be %s (without 'info_only' mode):" % ( dataset.id, op_description )
+ else:
+ print "The following metadata files attached to associations of Dataset '%s' have been %s:" % ( dataset.id, op_description )
if remove_from_disk:
try:
print "Removing disk file ", metadata_file.file_name
1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/26bffb1b7d57/
changeset: 26bffb1b7d57
user: kanwei
date: 2011-07-13 00:14:56
summary: Improve BLAST parser tool error handling, and improve the elementtree import as well.
affected #: 3 files (260 bytes)
--- a/tools/metag_tools/megablast_xml_parser.py Tue Jul 12 17:04:16 2011 -0400
+++ b/tools/metag_tools/megablast_xml_parser.py Tue Jul 12 18:14:56 2011 -0400
@@ -2,12 +2,12 @@
import sys, os, re
-assert sys.version_info[:2] >= ( 2, 4 )
-
if sys.version_info[:2] >= ( 2, 5 ):
- import xml.etree.cElementTree as cElementTree
+ import xml.etree.cElementTree as ElementTree
else:
- import cElementTree
+ from galaxy import eggs
+ import pkg_resources; pkg_resources.require( "elementtree" )
+ from elementtree import ElementTree
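The import block above is a version gate: Python 2.5 gained xml.etree in the standard library, so only 2.4 installs need Galaxy's bundled elementtree egg. The same intent is often written as a try/except on the import itself; a sketch of that common idiom (on any Python where the C accelerator is missing, the pure-Python module is a drop-in replacement):

try:
    import xml.etree.cElementTree as ElementTree   # C accelerator (removed in Python 3.9)
except ImportError:
    import xml.etree.ElementTree as ElementTree    # pure-Python fallback, same API

The patch keeps the explicit version check instead, because on 2.4 the fallback is not a stdlib module at all but an egg that must first be activated through pkg_resources.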
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
@@ -34,7 +34,7 @@
# get an iterable
try:
- context = cElementTree.iterparse( source, events=( "start", "end" ) )
+ context = ElementTree.iterparse( source, events=( "start", "end" ) )
except:
stop_err( "Invalid data format." )
# turn it into an iterator
@@ -46,7 +46,7 @@
stop_err( "Invalid data format." )
outfile = open( sys.argv[2], 'w' )
- try:
+ try:
for event, elem in context:
# for every <Iteration> tag
if event == "end" and elem.tag == "Iteration":
@@ -71,7 +71,7 @@
elem.clear()
except:
outfile.close()
- stop_err( "The input data contains tags that are not recognizable by the tool." )
+ stop_err( "The input data is malformed, or there is more than one dataset in the input file. Error: %s" % sys.exc_info()[1] )
outfile.close()
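Both parsers rely on ElementTree.iterparse so the full BLAST XML document never has to be held in memory: each <Iteration> element is processed on its "end" event and then cleared. A minimal standalone version of that loop (modern Python 3; the tag names come from BLAST XML, the filename is illustrative):

import xml.etree.ElementTree as ElementTree

def iterations(source):
    """Yield each <Iteration> element of a BLAST XML file with flat memory use."""
    context = iter(ElementTree.iterparse(source, events=("start", "end")))
    event, root = next(context)        # first event is the start of the root element
    for event, elem in context:
        if event == "end" and elem.tag == "Iteration":
            yield elem
            elem.clear()               # free the subtree we just handled
            root.clear()               # drop the reference the root keeps to it

for iteration in iterations("megablast_output.xml"):
    print(iteration.findtext("Iteration_query-def"))

Clearing both the finished element and the root is the standard recipe; without it, the root silently accumulates every parsed <Iteration> and the memory advantage of iterparse is lost.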
--- a/tools/metag_tools/megablast_xml_parser.xml Tue Jul 12 17:04:16 2011 -0400
+++ b/tools/metag_tools/megablast_xml_parser.xml Tue Jul 12 18:14:56 2011 -0400
@@ -2,26 +2,23 @@
<description></description><command interpreter="python">megablast_xml_parser.py $input1 $output1</command><inputs>
- <param name="input1" type="data" format="blastxml" label="Megablast XML output" />
+ <param name="input1" type="data" format="blastxml" label="Megablast XML output" /></inputs><outputs>
- <data name="output1" format="tabular"/>
+ <data name="output1" format="tabular"/></outputs>
-<requirements>
- <requirement type="python-module">cElementTree</requirement>
-</requirements><tests>
- <test>
- <param name="input1" value="megablast_xml_parser_test1.gz" ftype="blastxml" />
- <output name="output1" file="megablast_xml_parser_test1_out.tabular" ftype="tabular" />
- </test>
+ <test>
+ <param name="input1" value="megablast_xml_parser_test1.gz" ftype="blastxml" />
+ <output name="output1" file="megablast_xml_parser_test1_out.tabular" ftype="tabular" />
+ </test></tests><help>
**What it does**
This tool processes the XML output of any NCBI blast tool (if you run your own blast jobs, the XML output can be generated with **-m 7** option).
-
+
-----
**Output fields**
--- a/tools/ncbi_blast_plus/blastxml_to_tabular.py Tue Jul 12 17:04:16 2011 -0400
+++ b/tools/ncbi_blast_plus/blastxml_to_tabular.py Tue Jul 12 18:14:56 2011 -0400
@@ -5,7 +5,7 @@
BLAST filename, output format (std for standard 12 columns, or ext for the
extended 24 columns offered in the BLAST+ wrappers).
-The 12 colums output are 'qseqid sseqid pident length mismatch gapopen qstart
+The 12 columns output are 'qseqid sseqid pident length mismatch gapopen qstart
qend sstart send evalue bitscore' or 'std' at the BLAST+ command line, which
mean:
@@ -51,22 +51,23 @@
Be aware that the sequence in the extended tabular output or XML direct from
BLAST+ may or may not use XXXX masking on regions of low complexity. This
can throw the off the calculation of percentage identity and gap openings.
-[In fact, both BLAST 2.2.24+ and 2.2.25+ have a sutle bug in this regard,
+[In fact, both BLAST 2.2.24+ and 2.2.25+ have a subtle bug in this regard,
with these numbers changing depending on whether or not the low complexity
filter is used.]
-This script attempts to produce idential output to what BLAST+ would have done.
+This script attempts to produce identical output to what BLAST+ would have done.
However, check this with "diff -b ..." since BLAST+ sometimes includes an extra
space character (probably a bug).
"""
import sys
import re
-assert sys.version_info[:2] >= ( 2, 4 )
if sys.version_info[:2] >= ( 2, 5 ):
- import xml.etree.cElementTree as cElementTree
+ import xml.etree.cElementTree as ElementTree
else:
- import cElementTree
+ from galaxy import eggs
+ import pkg_resources; pkg_resources.require( "elementtree" )
+ from elementtree import ElementTree
def stop_err( msg ):
sys.stderr.write("%s\n" % msg)
@@ -90,7 +91,7 @@
# get an iterable
try:
- context = cElementTree.iterparse(in_file, events=("start", "end"))
+ context = ElementTree.iterparse(in_file, events=("start", "end"))
except:
stop_err("Invalid data format.")
# turn it into an iterator
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.