commit/galaxy-central: 6 new changesets
6 new commits in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/commits/aed2268f79d1/
Changeset:   aed2268f79d1
User:        jgoecks
Date:        2014-07-24 19:54:50
Summary:     Trackster: variable clean up for read track painter, preventing features from being cutoff at the bottom.
Affected #:  1 file

diff -r 9d8967752787556b3d814e086fe2bd9eb1992170 -r aed2268f79d13f5604d69db109c20374cf311511 static/scripts/viz/trackster/painters.js
--- a/static/scripts/viz/trackster/painters.js
+++ b/static/scripts/viz/trackster/painters.js
@@ -783,7 +783,9 @@
         char_width_px = ctx.canvas.manager.char_width_px,
         block_color = (strand === "+" ? this.prefs.detail_block_color : this.prefs.reverse_strand_color),
         pack_mode = (mode === 'Pack'),
-        paint_utils = new ReadPainterUtils(ctx, (pack_mode ? PACK_FEATURE_HEIGHT : SQUISH_FEATURE_HEIGHT), w_scale, mode),
+        draw_height = (pack_mode ? PACK_FEATURE_HEIGHT : SQUISH_FEATURE_HEIGHT),
+        rect_y = y_start + 1,
+        paint_utils = new ReadPainterUtils(ctx, draw_height, w_scale, mode),
         drawing_blocks = [];
 
     // Keep list of items that need to be drawn on top of initial drawing layer.
@@ -813,10 +815,7 @@
 
                 // Draw read base as rectangle.
                 ctx.fillStyle = block_color;
-                ctx.fillRect(s_start,
-                             y_start + (pack_mode ? 1 : 4 ),
-                             s_end - s_start,
-                             (pack_mode ? PACK_FEATURE_HEIGHT : SQUISH_FEATURE_HEIGHT));
+                ctx.fillRect(s_start, rect_y, s_end - s_start, draw_height);
             }
         }
 
@@ -893,10 +892,7 @@
             }
             // Require a minimum w_scale so that variants are only drawn when somewhat zoomed in.
             else if (w_scale > 0.05) {
-                ctx.fillRect(c_start - gap,
-                             y_start + (pack_mode ? 1 : 4),
-                             Math.max( 1, Math.round(w_scale) ),
-                             (pack_mode ? PACK_FEATURE_HEIGHT : SQUISH_FEATURE_HEIGHT));
+                ctx.fillRect(c_start - gap, rect_y, Math.max( 1, Math.round(w_scale) ), draw_height);
             }
         }
     }
@@ -913,7 +909,7 @@
                 base_offset += cig_len;
                 break;
             case "D": // Deletion.
-                paint_utils.draw_deletion(s_start, y_start + (pack_mode ? 1 : 4), cig_len);
+                paint_utils.draw_deletion(s_start, rect_y, cig_len);
                 base_offset += cig_len;
                 break;
             case "I": // Insertion.

https://bitbucket.org/galaxy/galaxy-central/commits/9f94c75733e9/
Changeset:   9f94c75733e9
User:        jgoecks
Date:        2014-07-24 20:34:53
Summary:     Client-side visualization framework: remove extend aliasing.
Affected #:  1 file

diff -r aed2268f79d13f5604d69db109c20374cf311511 -r 9f94c75733e9e61f94658ecdf436d57f70eec9a6 static/scripts/viz/trackster/painters.js
--- a/static/scripts/viz/trackster/painters.js
+++ b/static/scripts/viz/trackster/painters.js
@@ -1,7 +1,5 @@
 define( ["libs/underscore"], function( _ ) {
 
-var extend = _.extend;
-
 /**
  * Compute the type of overlap between two regions. They are assumed to be on the same chrom/contig.
 * The overlap is computed relative to the second region; hence, OVERLAP_START indicates that the first
@@ -128,7 +126,7 @@
     this.view_start = view_start;
     this.view_end = view_end;
     // Drawing prefs
-    this.prefs = extend({}, this.default_prefs, prefs);
+    this.prefs = _.extend({}, this.default_prefs, prefs);
     this.mode = mode;
 };
 
@@ -354,7 +352,7 @@
 
 FeaturePainter.prototype.default_prefs = { block_color: "#FFF", connector_color: "#FFF" };
 
-extend(FeaturePainter.prototype, {
+_.extend(FeaturePainter.prototype, {
     get_required_height: function(rows_required, width) {
         // y_scale is the height per row
         var required_height = this.get_row_height(),
@@ -451,7 +449,7 @@
     this.draw_individual_connectors = false;
 };
 
-extend(LinkedFeaturePainter.prototype, FeaturePainter.prototype, {
+_.extend(LinkedFeaturePainter.prototype, FeaturePainter.prototype, {
 
     /**
     * Height of a single row, depends on mode
@@ -692,7 +690,7 @@
     this.base_color_fn = base_color_fn;
 };
 
-extend(ReadPainter.prototype, FeaturePainter.prototype, {
+_.extend(ReadPainter.prototype, FeaturePainter.prototype, {
     /**
     * Returns height based on mode.
     */
@@ -1077,7 +1075,7 @@
     this.draw_individual_connectors = true;
 };
 
-extend(ArcLinkedFeaturePainter.prototype, FeaturePainter.prototype, LinkedFeaturePainter.prototype, {
+_.extend(ArcLinkedFeaturePainter.prototype, FeaturePainter.prototype, LinkedFeaturePainter.prototype, {
 
     calculate_longest_feature_length: function () {
         var longest_feature_length = 0;
@@ -1336,7 +1334,7 @@
     this.delete_details_thickness = 0.2;
 };
 
-extend(ReadPainterUtils.prototype, {
+_.extend(ReadPainterUtils.prototype, {
 
     /**
     * Draw deletion of base(s).
    * @param draw_detail if true, drawing in detail and deletion is drawn more subtly
@@ -1358,7 +1356,7 @@
     this.divider_height = 1;
 };
 
-extend(VariantPainter.prototype, Painter.prototype, {
+_.extend(VariantPainter.prototype, Painter.prototype, {
     /**
     * Height of a single row, depends on mode
     */

https://bitbucket.org/galaxy/galaxy-central/commits/537e1c3b80ee/
Changeset:   537e1c3b80ee
User:        jgoecks
Date:        2014-07-25 15:38:05
Summary:     Trackster: better handling for drawing features across tiles.
Affected #:  2 files

diff -r 9f94c75733e9e61f94658ecdf436d57f70eec9a6 -r 537e1c3b80eec65f83871272f49f6de6393523c9 static/scripts/viz/trackster/painters.js
--- a/static/scripts/viz/trackster/painters.js
+++ b/static/scripts/viz/trackster/painters.js
@@ -1055,10 +1055,10 @@
             ctx.fillStyle = this.prefs.label_color;
             if (tile_low === 0 && f_start - ctx.measureText(feature_name).width < 0) {
                 ctx.textAlign = "left";
-                ctx.fillText(feature_name, f_end + LABEL_SPACING, y_start + 8, this.max_label_length);
+                ctx.fillText(feature_name, f_end + LABEL_SPACING, y_start + 9, this.max_label_length);
             } else {
                 ctx.textAlign = "right";
-                ctx.fillText(feature_name, f_start - LABEL_SPACING, y_start + 8, this.max_label_length);
+                ctx.fillText(feature_name, f_start - LABEL_SPACING, y_start + 9, this.max_label_length);
             }
         }
 
diff -r 9f94c75733e9e61f94658ecdf436d57f70eec9a6 -r 537e1c3b80eec65f83871272f49f6de6393523c9 static/scripts/viz/trackster/tracks.js
--- a/static/scripts/viz/trackster/tracks.js
+++ b/static/scripts/viz/trackster/tracks.js
@@ -1968,6 +1968,7 @@
  * Sets up support for popups.
  */
 FeatureTrackTile.prototype.predisplay_actions = function() {
+    /*
     //
     // Add support for popups.
     //
@@ -2062,6 +2063,7 @@
         .mouseleave(function() {
             $(this).parents(".track-content").children(".overlay").children(".feature-popup").remove();
         });
+*/
 };
 
 /**
@@ -3722,11 +3724,22 @@
             });
         });
 
-        // Draw features on each tile.
+        // Draw incomplete features on each tile.
         var self = this;
         _.each(tiles, function(tile) {
-            self.draw_tile({ 'data': _.values(all_incomplete_features) }, tile.canvas.getContext('2d'),
-                           tile.mode, tile.region, w_scale, tile.seq_data, true);
+            // To draw incomplete features, copy original canvas and then draw incomplete features
+            // on the canvas.
+            var features = { data: _.values( all_incomplete_features ) },
+                canvas = self.view.canvas_manager.new_canvas();
+            canvas.height = self.get_canvas_height(features, tile.mode, tile.w_scale, 100);
+            canvas.width = tile.canvas.width;
+            canvas.getContext('2d').drawImage(tile.canvas, 0, 0);
+            canvas.getContext('2d').translate(track.left_offset, 0);
+            var new_tile = self.draw_tile(features, canvas.getContext('2d'),
+                tile.mode, tile.region, tile.w_scale, tile.seq_data);
+            $(new_tile.canvas).addClass('incomplete_features');
+            $(tile.canvas).replaceWith($(new_tile.canvas));
+            tile.canvas = canvas;
         });
     }

https://bitbucket.org/galaxy/galaxy-central/commits/4ca83b9de85f/
Changeset:   4ca83b9de85f
User:        jgoecks
Date:        2014-07-25 15:45:28
Summary:     Trackster: disable track popups because they are slow and don't work well.
Affected #:  1 file

diff -r 537e1c3b80eec65f83871272f49f6de6393523c9 -r 4ca83b9de85f10b371c457fff1e06470f1a1944c static/scripts/viz/trackster/tracks.js
--- a/static/scripts/viz/trackster/tracks.js
+++ b/static/scripts/viz/trackster/tracks.js
@@ -1969,6 +1969,7 @@
  */
 FeatureTrackTile.prototype.predisplay_actions = function() {
     /*
+    FIXME: use a canvas library to handle popups.
     //
     // Add support for popups.
     //
@@ -2063,7 +2064,7 @@
         .mouseleave(function() {
             $(this).parents(".track-content").children(".overlay").children(".feature-popup").remove();
         });
-*/
+    */
 };
 
 /**

https://bitbucket.org/galaxy/galaxy-central/commits/b2613d9978a4/
Changeset:   b2613d9978a4
User:        jgoecks
Date:        2014-07-25 15:48:31
Summary:     Trackster: better variable naming.
Affected #:  1 file

diff -r 4ca83b9de85f10b371c457fff1e06470f1a1944c -r b2613d9978a499db6e905ba8f4e452029b1aff61 static/scripts/viz/trackster/tracks.js
--- a/static/scripts/viz/trackster/tracks.js
+++ b/static/scripts/viz/trackster/tracks.js
@@ -3731,16 +3731,16 @@
             // To draw incomplete features, copy original canvas and then draw incomplete features
             // on the canvas.
             var features = { data: _.values( all_incomplete_features ) },
-                canvas = self.view.canvas_manager.new_canvas();
-            canvas.height = self.get_canvas_height(features, tile.mode, tile.w_scale, 100);
-            canvas.width = tile.canvas.width;
-            canvas.getContext('2d').drawImage(tile.canvas, 0, 0);
-            canvas.getContext('2d').translate(track.left_offset, 0);
-            var new_tile = self.draw_tile(features, canvas.getContext('2d'),
+                new_canvas = self.view.canvas_manager.new_canvas();
+            new_canvas.height = self.get_canvas_height(features, tile.mode, tile.w_scale, 100);
+            new_canvas.width = tile.canvas.width;
+            new_canvas.getContext('2d').drawImage(tile.canvas, 0, 0);
+            new_canvas.getContext('2d').translate(track.left_offset, 0);
+            var new_tile = self.draw_tile(features, new_canvas.getContext('2d'),
                 tile.mode, tile.region, tile.w_scale, tile.seq_data);
             $(new_tile.canvas).addClass('incomplete_features');
             $(tile.canvas).replaceWith($(new_tile.canvas));
-            tile.canvas = canvas;
+            tile.canvas = new_canvas;
         });
     }

https://bitbucket.org/galaxy/galaxy-central/commits/8b98752f4fbc/
Changeset:   8b98752f4fbc
User:        jgoecks
Date:        2014-07-25 15:48:56
Summary:     Automated merge
Affected #:  13 files

diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 datatypes_conf.xml.sample
--- a/datatypes_conf.xml.sample
+++ b/datatypes_conf.xml.sample
@@ -177,6 +177,7 @@
     <datatype extension="taxonomy" type="galaxy.datatypes.tabular:Taxonomy" display_in_upload="true"/>
     <datatype extension="tabular" type="galaxy.datatypes.tabular:Tabular" display_in_upload="true" description="Any data in tab delimited format (tabular)." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Tabular_.28tab_delimited.29"/>
     <datatype extension="twobit" type="galaxy.datatypes.binary:TwoBit" mimetype="application/octet-stream" display_in_upload="true"/>
+    <datatype extension="sqlite" type="galaxy.datatypes.binary:SQlite" mimetype="application/octet-stream" display_in_upload="true"/>
     <datatype extension="txt" type="galaxy.datatypes.data:Text" display_in_upload="true" description="Any text file." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Plain_text"/>
     <datatype extension="linecount" type="galaxy.datatypes.data:LineCount" display_in_upload="false"/>
     <datatype extension="memexml" type="galaxy.datatypes.xml:MEMEXml" mimetype="application/xml" display_in_upload="true"/>
@@ -262,6 +263,7 @@
     -->
     <sniffer type="galaxy.datatypes.tabular:Vcf"/>
     <sniffer type="galaxy.datatypes.binary:TwoBit"/>
+    <sniffer type="galaxy.datatypes.binary:SQlite"/>
     <sniffer type="galaxy.datatypes.binary:Bam"/>
     <sniffer type="galaxy.datatypes.binary:Sff"/>
     <sniffer type="galaxy.datatypes.xml:Phyloxml"/>

diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 lib/galaxy/datatypes/binary.py
--- a/lib/galaxy/datatypes/binary.py
+++ b/lib/galaxy/datatypes/binary.py
@@ -12,6 +12,7 @@
 import subprocess
 import tempfile
 import zipfile
+import sqlite3
 
 from urllib import urlencode, quote_plus
 from galaxy import eggs
@@ -545,3 +546,45 @@
         return "Binary TwoBit format nucleotide file (%s)" % (data.nice_size(dataset.get_size()))
 
 Binary.register_sniffable_binary_format("twobit", "twobit", TwoBit)
+
+
+@dataproviders.decorators.has_dataproviders
+class SQlite ( Binary ):
+    file_ext = "sqlite"
+
+    # Connects and runs a query that should work on any real database
+    # If the file is not sqlite, an exception will be thrown and the sniffer will return false
+    def sniff( self, filename ):
+        try:
+            conn = sqlite3.connect(filename)
+            schema_version=conn.cursor().execute("pragma schema_version").fetchone()
+            conn.close()
+            if schema_version is not None:
+                return True
+            return False
+        except:
+            return False
+
+    def set_peek( self, dataset, is_multi_byte=False ):
+        if not dataset.dataset.purged:
+            dataset.peek = "SQLite Database"
+            dataset.blurb = data.nice_size( dataset.get_size() )
+        else:
+            dataset.peek = 'file does not exist'
+            dataset.blurb = 'file purged from disk'
+
+    def display_peek( self, dataset ):
+        try:
+            return dataset.peek
+        except:
+            return "SQLite Database (%s)" % ( data.nice_size( dataset.get_size() ) )
+
+
+    @dataproviders.decorators.dataprovider_factory( 'sqlite', dataproviders.dataset.SQliteDataProvider.settings )
+    def sqlite_dataprovider( self, dataset, **settings ):
+        dataset_source = dataproviders.dataset.DatasetDataProvider( dataset )
+        return dataproviders.dataset.SQliteDataProvider( dataset_source, **settings )
+
+
+Binary.register_sniffable_binary_format("sqlite","sqlite",SQlite)
+

diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 lib/galaxy/datatypes/dataproviders/dataset.py
--- a/lib/galaxy/datatypes/dataproviders/dataset.py
+++ b/lib/galaxy/datatypes/dataproviders/dataset.py
@@ -11,6 +11,8 @@
 import line
 import column
 import external
+import sqlite3
+import re
 
 from galaxy import eggs
 eggs.require( 'bx-python' )
@@ -700,3 +702,39 @@
         #TODO: as samtools - need more info on output format
         raise NotImplementedError()
         super( BGzipTabixDataProvider, self ).__init__( dataset, **kwargs )
+
+
+
+class SQliteDataProvider ( base.DataProvider ):
+    """
+    Data provider that uses a sqlite database file as its source.
+
+    Allows any query to be run and returns the resulting rows as sqlite3 row objects
+    """
+    settings = {
+        'query' : 'str'
+    }
+
+    def __init__( self, source, query=None, **kwargs ):
+        self.query=query
+        self.connection = sqlite3.connect(source.dataset.file_name);
+        self.connection.row_factory = sqlite3.Row
+        super( SQliteDataProvider, self ).__init__( source, **kwargs )
+
+    def query_matches_whitelist(self,query):
+        if re.match("select ",query,re.IGNORECASE):
+            if re.search("^([^\"]|\"[^\"]*\")*?;",query) or re.search("^([^\']|\'[^\']*\')*?;",query):
+                return False
+            else:
+                return True
+        return False
+
+
+
+    def __iter__( self ):
+        if (self.query is not None) and self.query_matches_whitelist(self.query):
+            for row in self.connection.cursor().execute(self.query):
+                yield row
+        else:
+            yield
+

diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -34,6 +34,7 @@
 from galaxy.util.bunch import Bunch
 from galaxy.util.hash_util import new_secure_hash
 from galaxy.util.directory_hash import directory_hash_id
+from galaxy.util.sanitize_html import sanitize_html
 from galaxy.web.framework.helpers import to_unicode
 from galaxy.web.form_builder import (AddressField, CheckboxField, HistoryField,
                                      PasswordField, SelectField, TextArea, TextField,
                                      WorkflowField,
@@ -2589,8 +2590,8 @@
 class DatasetCollection( object, Dictifiable, UsesAnnotations ):
     """
     """
-    dict_collection_visible_keys = ( 'id', 'name', 'collection_type' )
-    dict_element_visible_keys = ( 'id', 'name', 'collection_type' )
+    dict_collection_visible_keys = ( 'id', 'collection_type' )
+    dict_element_visible_keys = ( 'id', 'collection_type' )
 
     def __init__( self,
@@ -3703,23 +3704,27 @@
         self.country = country
         self.phone = phone
 
     def get_html(self):
+        # This should probably be deprecated eventually. It should currently
+        # sanitize.
+        # TODO Find out where else uses this and replace with
+        # templates
         html = ''
         if self.name:
-            html = html + self.name
+            html = html + sanitize_html(self.name)
         if self.institution:
-            html = html + '<br/>' + self.institution
+            html = html + '<br/>' + sanitize_html(self.institution)
         if self.address:
-            html = html + '<br/>' + self.address
+            html = html + '<br/>' + sanitize_html(self.address)
         if self.city:
-            html = html + '<br/>' + self.city
+            html = html + '<br/>' + sanitize_html(self.city)
         if self.state:
-            html = html + ' ' + self.state
+            html = html + ' ' + sanitize_html(self.state)
         if self.postal_code:
-            html = html + ' ' + self.postal_code
+            html = html + ' ' + sanitize_html(self.postal_code)
         if self.country:
-            html = html + '<br/>' + self.country
+            html = html + '<br/>' + sanitize_html(self.country)
         if self.phone:
-            html = html + '<br/>' + 'Phone: ' + self.phone
+            html = html + '<br/>' + 'phone: ' + sanitize_html(self.phone)
         return html
 
 class UserOpenID( object ):

diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1391,7 +1391,7 @@
         # Load parameters (optional)
         input_elem = root.find("inputs")
         enctypes = set()
-        if input_elem:
+        if input_elem is not None:
             # Handle properties of the input form
             self.check_values = string_as_bool( input_elem.get("check_values", self.check_values ) )
             self.nginx_upload = string_as_bool( input_elem.get( "nginx_upload", self.nginx_upload ) )

diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1712,10 +1712,15 @@
         if self.__display_multirun_option():
             # Select multiple datasets, run multiple jobs.
multirun_key = "%s|__multirun__" % self.name + collection_multirun_key = "%s|__collection_multirun__" % self.name if multirun_key in (other_values or {}): multirun_value = listify( other_values[ multirun_key ] ) if multirun_value and len( multirun_value ) > 1: default_field = "select_multiple" + elif collection_multirun_key in (other_values or {}): + multirun_value = listify( other_values[ collection_multirun_key ] ) + if multirun_value: + default_field = "select_collection" else: multirun_value = value multi_dataset_matcher = DatasetMatcher( trans, self, multirun_value, other_values ) @@ -2014,9 +2019,17 @@ default_field = "select_single_collection" fields = odict() + collection_multirun_key = "%s|__collection_multirun__" % self.name + if collection_multirun_key in (other_values or {}): + multirun_value = other_values[ collection_multirun_key ] + if multirun_value: + default_field = "select_map_over_collections" + else: + multirun_value = value + history = self._get_history( trans ) fields[ "select_single_collection" ] = self._get_single_collection_field( trans=trans, history=history, value=value, other_values=other_values ) - fields[ "select_map_over_collections" ] = self._get_select_dataset_collection_field( trans=trans, history=history, value=value, other_values=other_values ) + fields[ "select_map_over_collections" ] = self._get_select_dataset_collection_field( trans=trans, history=history, value=multirun_value, other_values=other_values ) return self._switch_fields( fields, default_field=default_field ) diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 lib/galaxy/util/__init__.py --- a/lib/galaxy/util/__init__.py +++ b/lib/galaxy/util/__init__.py @@ -48,10 +48,10 @@ from .inflection import Inflector, English inflector = Inflector(English) -log = logging.getLogger(__name__) +log = logging.getLogger(__name__) _lock = threading.RLock() -CHUNK_SIZE = 65536 #64k +CHUNK_SIZE = 65536 # 64k DATABASE_MAX_STRING_SIZE = 32768 DATABASE_MAX_STRING_SIZE_PRETTY = '32K' @@ -62,6 +62,7 @@ NULL_CHAR = '\000' BINARY_CHARS = [ NULL_CHAR ] + def is_multi_byte( chars ): for char in chars: try: @@ -69,18 +70,15 @@ except UnicodeDecodeError: # Probably binary return False - if wchartype.is_asian( char ) or \ - wchartype.is_full_width( char ) or \ - wchartype.is_kanji( char ) or \ - wchartype.is_hiragana( char ) or \ - wchartype.is_katakana( char ) or \ - wchartype.is_half_katakana( char ) or \ - wchartype.is_hangul( char ) or \ - wchartype.is_full_digit( char ) or \ - wchartype.is_full_letter( char ): + if ( wchartype.is_asian( char ) or wchartype.is_full_width( char ) or + wchartype.is_kanji( char ) or wchartype.is_hiragana( char ) or + wchartype.is_katakana( char ) or wchartype.is_half_katakana( char ) + or wchartype.is_hangul( char ) or wchartype.is_full_digit( char ) + or wchartype.is_full_letter( char )): return True return False + def is_binary( value, binary_chars=None ): """ File is binary if it contains a null-byte by default (e.g. behavior of grep, etc.). @@ -99,6 +97,7 @@ return True return False + def get_charset_from_http_headers( headers, default=None ): rval = headers.get('content-type', None ) if rval and 'charset=' in rval: @@ -107,16 +106,18 @@ return rval return default + def synchronized(func): """This wrapper will serialize access to 'func' to a single thread. 
Use it as a decorator.""" def caller(*params, **kparams): - _lock.acquire(True) # Wait + _lock.acquire(True) # Wait try: return func(*params, **kparams) finally: _lock.release() return caller + def file_iter(fname, sep=None): """ This generator iterates over a file and yields its lines @@ -131,6 +132,7 @@ if line and line[0] != '#': yield line.split(sep) + def file_reader( fp, chunk_size=CHUNK_SIZE ): """This generator yields the open fileobject in chunks (default 64k). Closes the file at the end""" while 1: @@ -140,6 +142,7 @@ yield data fp.close() + def unique_id(KEY_SIZE=128): """ Generates an unique id @@ -148,8 +151,8 @@ >>> len(set(ids)) 1000 """ - id = str( random.getrandbits( KEY_SIZE ) ) - return md5(id).hexdigest() + return md5(str( random.getrandbits( KEY_SIZE ) )).hexdigest() + def parse_xml(fname): """Returns a parsed xml tree""" @@ -163,6 +166,7 @@ tree = ElementTree.fromstring(xml_string) return tree + def xml_to_string( elem, pretty=False ): """Returns a string from an xml tree""" if pretty: @@ -170,12 +174,13 @@ try: return ElementTree.tostring( elem ) except TypeError, e: - #assume this is a comment + # we assume this is a comment if hasattr( elem, 'text' ): return "<!-- %s -->\n" % ( elem.text ) else: raise e + def xml_element_compare( elem1, elem2 ): if not isinstance( elem1, dict ): elem1 = xml_element_to_dict( elem1 ) @@ -183,9 +188,11 @@ elem2 = xml_element_to_dict( elem2 ) return elem1 == elem2 + def xml_element_list_compare( elem_list1, elem_list2 ): return [ xml_element_to_dict( elem ) for elem in elem_list1 ] == [ xml_element_to_dict( elem ) for elem in elem_list2 ] + def xml_element_to_dict( elem ): rval = {} if elem.attrib: @@ -220,7 +227,6 @@ return rval - def pretty_print_xml( elem, level=0 ): pad = ' ' i = "\n" + level * pad @@ -238,26 +244,28 @@ elem.tail = i + pad return elem + def get_file_size( value, default=None ): try: - #try built-in + # try built-in return os.path.getsize( value ) except: try: - #try built-in one name attribute + # try built-in one name attribute return os.path.getsize( value.name ) except: try: - #try tell() of end of object + # try tell() of end of object offset = value.tell() value.seek( 0, 2 ) rval = value.tell() value.seek( offset ) return rval except: - #return default value + # return default value return default + def shrink_stream_by_size( value, size, join_by="..", left_larger=True, beginning_on_size_error=False, end_on_size_error=False ): rval = '' if get_file_size( value ) > size: @@ -292,6 +300,7 @@ rval += data return rval + def shrink_string_by_size( value, size, join_by="..", left_larger=True, beginning_on_size_error=False, end_on_size_error=False ): if len( value ) > size: len_join_by = len( join_by ) @@ -311,29 +320,30 @@ value = "%s%s%s" % ( value[:left_index], join_by, value[-right_index:] ) return value + def pretty_print_json(json_data, is_json_string=False): if is_json_string: json_data = json.from_json_string(json_data) return json.to_json_string(json_data, sort_keys=True, indent=4) # characters that are valid -valid_chars = set(string.letters + string.digits + " -=_.()/+*^,:?!") +valid_chars = set(string.letters + string.digits + " -=_.()/+*^,:?!") # characters that are allowed but need to be escaped -mapped_chars = { '>' :'__gt__', - '<' :'__lt__', - "'" :'__sq__', - '"' :'__dq__', - '[' :'__ob__', - ']' :'__cb__', - '{' :'__oc__', - '}' :'__cc__', - '@' : '__at__', - '\n' : '__cn__', - '\r' : '__cr__', - '\t' : '__tc__', - '#' : '__pd__' - } +mapped_chars = { '>': '__gt__', + '<': '__lt__', + "'": 
'__sq__', + '"': '__dq__', + '[': '__ob__', + ']': '__cb__', + '{': '__oc__', + '}': '__cc__', + '@': '__at__', + '\n': '__cn__', + '\r': '__cr__', + '\t': '__tc__', + '#': '__pd__'} + def restore_text(text): """Restores sanitized text""" @@ -343,6 +353,7 @@ text = text.replace(value, key) return text + def sanitize_text(text): """ Restricts the characters that are allowed in text; accepts both strings @@ -353,6 +364,7 @@ elif isinstance( text, list ): return [ _sanitize_text_helper(t) for t in text ] + def _sanitize_text_helper(text): """Restricts the characters that are allowed in a string""" @@ -363,9 +375,10 @@ elif c in mapped_chars: out.append(mapped_chars[c]) else: - out.append('X') # makes debugging easier + out.append('X') # makes debugging easier return ''.join(out) + def sanitize_param(value): """Clean incoming parameters (strings or lists)""" if isinstance( value, basestring ): @@ -373,10 +386,12 @@ elif isinstance( value, list ): return map(sanitize_text, value) else: - raise Exception, 'Unknown parameter type (%s)' % ( type( value ) ) + raise Exception('Unknown parameter type (%s)' % ( type( value ) )) valid_filename_chars = set( string.ascii_letters + string.digits + '_.' ) invalid_filenames = [ '', '.', '..' ] + + def sanitize_for_filename( text, default=None ): """ Restricts the characters that are allowed in a filename portion; Returns default value or a unique id string if result is not a valid name. @@ -512,7 +527,7 @@ def __init__( self, params, sanitize=True ): if sanitize: for key, value in params.items(): - if key not in self.NEVER_SANITIZE and True not in [ key.endswith( "|%s" % nonsanitize_parameter ) for nonsanitize_parameter in self.NEVER_SANITIZE ]: #sanitize check both ungrouped and grouped parameters by name. Anything relying on NEVER_SANITIZE should be changed to not require this and NEVER_SANITIZE should be removed. + if key not in self.NEVER_SANITIZE and True not in [ key.endswith( "|%s" % nonsanitize_parameter ) for nonsanitize_parameter in self.NEVER_SANITIZE ]: # sanitize check both ungrouped and grouped parameters by name. Anything relying on NEVER_SANITIZE should be changed to not require this and NEVER_SANITIZE should be removed. 
                     self.__dict__[ key ] = sanitize_param( value )
                 else:
                     self.__dict__[ key ] = value
@@ -525,7 +540,7 @@
         """
         flat = []
         for key, value in self.__dict__.items():
-            if type(value) == type([]):
+            if isinstance(value, list):
                 for v in value:
                     flat.append( (key, v) )
             else:
@@ -551,16 +566,19 @@
     def update(self, values):
         self.__dict__.update(values)
 
+
 def rst_to_html( s ):
     """Convert a blob of reStructuredText to HTML"""
     log = logging.getLogger( "docutils" )
+
     class FakeStream( object ):
         def write( self, str ):
             if len( str ) > 0 and not str.isspace():
                 log.warn( str )
     return unicodify( docutils.core.publish_string( s,
-        writer=docutils.writers.html4css1.Writer(),
-        settings_overrides={ "embed_stylesheet": False, "template": os.path.join(os.path.dirname(__file__), "docutils_template.txt"), "warning_stream": FakeStream() } ) )
+                      writer=docutils.writers.html4css1.Writer(),
+                      settings_overrides={ "embed_stylesheet": False, "template": os.path.join(os.path.dirname(__file__), "docutils_template.txt"), "warning_stream": FakeStream() } ) )
+
 
 def xml_text(root, name=None):
     """Returns the text inside an element"""
@@ -582,6 +600,8 @@
 # asbool implementation pulled from PasteDeploy
 truthy = frozenset(['true', 'yes', 'on', 'y', 't', '1'])
 falsy = frozenset(['false', 'no', 'off', 'n', 'f', '0'])
+
+
 def asbool(obj):
     if isinstance(obj, basestring):
         obj = obj.strip().lower()
@@ -600,6 +620,7 @@
     else:
         return False
 
+
 def string_as_bool_or_none( string ):
     """
     Returns True, None or False based on the argument:
@@ -618,6 +639,7 @@
     else:
         return False
 
+
 def listify( item, do_strip=False ):
     """
     Make a single item a single item list, or return a list if passed a
@@ -635,6 +657,7 @@
     else:
         return [ item ]
 
+
 def commaify(amount):
     orig = amount
     new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', amount)
@@ -643,7 +666,8 @@
     else:
         return commaify(new)
 
-def roundify(amount, sfs = 2):
+
+def roundify(amount, sfs=2):
     """
     Take a number in string form and truncate to 'sfs' significant figures.
""" @@ -652,6 +676,7 @@ else: return amount[0:sfs] + '0'*(len(amount) - sfs) + def unicodify( value, encoding=DEFAULT_ENCODING, error='replace', default=None ): """ Returns a unicode string or None @@ -691,6 +716,7 @@ def object_to_string( obj ): return binascii.hexlify( pickle.dumps( obj, 2 ) ) + def string_to_object( s ): return pickle.loads( binascii.unhexlify( s ) ) @@ -743,19 +769,23 @@ return False return True + def get_ucsc_by_build(build): sites = [] for site in ucsc_build_sites: if build in site['builds']: - sites.append((site['name'],site['url'])) + sites.append((site['name'], site['url'])) return sites + + def get_gbrowse_sites_by_build(build): sites = [] for site in gbrowse_build_sites: if build in site['builds']: - sites.append((site['name'],site['url'])) + sites.append((site['name'], site['url'])) return sites + def read_dbnames(filename): """ Read build names from file """ class DBNames( list ): @@ -764,48 +794,54 @@ db_names = DBNames() try: ucsc_builds = {} - man_builds = [] #assume these are integers + man_builds = [] # assume these are integers name_to_db_base = {} for line in open(filename): try: - if line[0:1] == "#": continue - fields = line.replace("\r","").replace("\n","").split("\t") - #Special case of unspecified build is at top of list + if line[0:1] == "#": + continue + fields = line.replace("\r", "").replace("\n", "").split("\t") + # Special case of unspecified build is at top of list if fields[0] == "?": - db_names.insert(0,(fields[0],fields[1])) + db_names.insert(0, (fields[0], fields[1])) continue - try: #manual build (i.e. microbes) + try: # manual build (i.e. microbes) int(fields[0]) man_builds.append((fields[1], fields[0])) - except: #UCSC build + except: # UCSC build db_base = fields[0].rstrip('0123456789') if db_base not in ucsc_builds: ucsc_builds[db_base] = [] name_to_db_base[fields[1]] = db_base - #we want to sort within a species numerically by revision number + # we want to sort within a species numerically by revision number build_rev = re.compile(r'\d+$') - try: build_rev = int(build_rev.findall(fields[0])[0]) - except: build_rev = 0 - ucsc_builds[db_base].append((build_rev, fields[0],fields[1])) - except: continue + try: + build_rev = int(build_rev.findall(fields[0])[0]) + except: + build_rev = 0 + ucsc_builds[db_base].append((build_rev, fields[0], fields[1])) + except: + continue sort_names = name_to_db_base.keys() sort_names.sort() for name in sort_names: db_base = name_to_db_base[name] ucsc_builds[db_base].sort() ucsc_builds[db_base].reverse() - ucsc_builds[db_base] = [(build, name) for build_rev, build, name in ucsc_builds[db_base]] + ucsc_builds[db_base] = [(build, name) for _, build, name in ucsc_builds[db_base]] db_names = DBNames( db_names + ucsc_builds[db_base] ) - if len( db_names ) > 1 and len( man_builds ) > 0: db_names.append( ( db_names.default_value, '----- Additional Species Are Below -----' ) ) + if len( db_names ) > 1 and len( man_builds ) > 0: + db_names.append( ( db_names.default_value, '----- Additional Species Are Below -----' ) ) man_builds.sort() - man_builds = [(build, name) for name, build in man_builds] + man_builds = [(build, name) for name, build in man_builds] db_names = DBNames( db_names + man_builds ) except Exception, e: print "ERROR: Unable to read builds file:", e - if len(db_names)<1: + if len(db_names) < 1: db_names = DBNames( [( db_names.default_value, db_names.default_name )] ) return db_names + def read_ensembl( filename, ucsc ): """ Read Ensembl build names from file """ ucsc_builds = [] @@ -814,47 
+850,55 @@ ensembl_builds = list() try: for line in open( filename ): - if line[0:1] in [ '#', '\t' ]: continue - fields = line.replace("\r","").replace("\n","").split("\t") - if fields[0] in ucsc_builds: continue + if line[0:1] in [ '#', '\t' ]: + continue + fields = line.replace("\r", "").replace("\n", "").split("\t") + if fields[0] in ucsc_builds: + continue ensembl_builds.append( dict( dbkey=fields[0], release=fields[1], name=fields[2].replace( '_', ' ' ) ) ) except Exception, e: print "ERROR: Unable to read builds file:", e return ensembl_builds + def read_ncbi( filename ): """ Read NCBI build names from file """ ncbi_builds = list() try: for line in open( filename ): - if line[0:1] in [ '#', '\t' ]: continue - fields = line.replace("\r","").replace("\n","").split("\t") + if line[0:1] in [ '#', '\t' ]: + continue + fields = line.replace("\r", "").replace("\n", "").split("\t") ncbi_builds.append( dict( dbkey=fields[0], name=fields[1] ) ) except Exception, e: print "ERROR: Unable to read builds file:", e return ncbi_builds + def read_build_sites( filename, check_builds=True ): """ read db names to ucsc mappings from file, this file should probably be merged with the one above """ build_sites = [] try: for line in open(filename): try: - if line[0:1] == "#": continue - fields = line.replace("\r","").replace("\n","").split("\t") + if line[0:1] == "#": + continue + fields = line.replace("\r", "").replace("\n", "").split("\t") site_name = fields[0] site = fields[1] if check_builds: site_builds = fields[2].split(",") - site_dict = {'name':site_name, 'url':site, 'builds':site_builds} + site_dict = {'name': site_name, 'url': site, 'builds': site_builds} else: - site_dict = {'name':site_name, 'url':site} + site_dict = {'name': site_name, 'url': site} build_sites.append( site_dict ) - except: continue + except: + continue except: - print "ERROR: Unable to read builds for site file %s" %filename + print "ERROR: Unable to read builds for site file %s" % filename return build_sites + def relativize_symlinks( path, start=None, followlinks=False): for root, dirs, files in os.walk( path, followlinks=followlinks ): rel_start = None @@ -871,23 +915,26 @@ os.remove( symlink_file_name ) os.symlink( rel_path, symlink_file_name ) + def stringify_dictionary_keys( in_dict ): - #returns a new dictionary - #changes unicode keys into strings, only works on top level (does not recurse) - #unicode keys are not valid for expansion into keyword arguments on method calls + # returns a new dictionary + # changes unicode keys into strings, only works on top level (does not recurse) + # unicode keys are not valid for expansion into keyword arguments on method calls out_dict = {} for key, value in in_dict.iteritems(): out_dict[ str( key ) ] = value return out_dict + def recursively_stringify_dictionary_keys( d ): if isinstance(d, dict): - return dict([(k.encode( DEFAULT_ENCODING ), recursively_stringify_dictionary_keys(v)) for k,v in d.iteritems()]) + return dict([(k.encode( DEFAULT_ENCODING ), recursively_stringify_dictionary_keys(v)) for k, v in d.iteritems()]) elif isinstance(d, list): return [recursively_stringify_dictionary_keys(x) for x in d] else: return d + def mkstemp_ln( src, prefix='mkstemp_ln_' ): """ From tempfile._mkstemp_inner, generate a hard link in the same dir with a @@ -904,9 +951,10 @@ return (os.path.abspath(file)) except OSError, e: if e.errno == errno.EEXIST: - continue # try again + continue # try again raise - raise IOError, (errno.EEXIST, "No usable temporary file name found") + raise 
IOError(errno.EEXIST, "No usable temporary file name found") + def umask_fix_perms( path, umask, unmasked_perms, gid=None ): """ @@ -923,7 +971,7 @@ try: os.chmod( path, perms ) except Exception, e: - log.warning( 'Unable to honor umask (%s) for %s, tried to set: %s but mode remains %s, error was: %s' % ( oct( umask ), \ + log.warning( 'Unable to honor umask (%s) for %s, tried to set: %s but mode remains %s, error was: %s' % ( oct( umask ), path, oct( perms ), oct( stat.S_IMODE( st.st_mode ) ), @@ -939,11 +987,12 @@ except: desired_group = gid current_group = st.st_gid - log.warning( 'Unable to honor primary group (%s) for %s, group remains %s, error was: %s' % ( desired_group, \ + log.warning( 'Unable to honor primary group (%s) for %s, group remains %s, error was: %s' % ( desired_group, path, current_group, e ) ) + def docstring_trim(docstring): """Trimming python doc strings. Taken from: http://www.python.org/dev/peps/pep-0257/""" if not docstring: @@ -970,6 +1019,7 @@ # Return a single string: return '\n'.join(trimmed) + def nice_size(size): """ Returns a readably formatted string with the size @@ -989,14 +1039,15 @@ except: return '??? bytes' for ind, word in enumerate(words): - step = 1024 ** (ind + 1) + step = 1024 ** (ind + 1) if step > size: size = size / float(1024 ** ind) - if word == 'bytes': # No decimals for bytes + if word == 'bytes': # No decimals for bytes return "%d bytes" % size return "%.1f %s" % (size, word) return '??? bytes' + def size_to_bytes( size ): """ Returns a number of bytes if given a reasonably formatted string with the size @@ -1023,6 +1074,7 @@ elif multiple.startswith( 'b' ): return int( size ) + def send_mail( frm, to, subject, body, config ): """ Sends an email. @@ -1072,6 +1124,7 @@ s.sendmail( frm, to, msg.as_string() ) s.quit() + def force_symlink( source, link_name ): try: os.symlink( source, link_name ) @@ -1082,12 +1135,13 @@ else: raise e + def move_merge( source, target ): - #when using shutil and moving a directory, if the target exists, - #then the directory is placed inside of it - #if the target doesn't exist, then the target is made into the directory - #this makes it so that the target is always the target, and if it exists, - #the source contents are moved into the target + # when using shutil and moving a directory, if the target exists, + # then the directory is placed inside of it + # if the target doesn't exist, then the target is made into the directory + # this makes it so that the target is always the target, and if it exists, + # the source contents are moved into the target if os.path.isdir( source ) and os.path.exists( target ) and os.path.isdir( target ): for name in os.listdir( source ): move_merge( os.path.join( source, name ), os.path.join( target, name ) ) @@ -1103,7 +1157,7 @@ rv |= ord(x) ^ ord(y) return rv == 0 -galaxy_root_path = os.path.join(__path__[0], "..","..","..") +galaxy_root_path = os.path.join(__path__[0], "..", "..", "..") # The dbnames list is used in edit attributes and the upload tool dbnames = read_dbnames( os.path.join( galaxy_root_path, "tool-data", "shared", "ucsc", "builds.txt" ) ) @@ -1114,6 +1168,7 @@ gbrowse_build_sites = read_build_sites( os.path.join( galaxy_root_path, "tool-data", "shared", "gbrowse", "gbrowse_build_sites.txt" ) ) dlnames = dict(ucsc=ucsc_names, ensembl=ensembl_names, ncbi=ncbi_names) + def galaxy_directory(): return os.path.abspath(galaxy_root_path) diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 static/scripts/galaxy.tools.js --- 
a/static/scripts/galaxy.tools.js +++ b/static/scripts/galaxy.tools.js @@ -86,7 +86,7 @@ }).attr( 'title', selectionType['select_by'] - ); + ).data( "index", iIndex ); view.formRow().find( "label" ).append( button ); } }); @@ -114,11 +114,13 @@ } else { $("div#remap-row").css("display", "none"); } - this.formRow().find( "i" ).each(function(index, iElement) { + this.formRow().find( "i" ).each(function(_, iElement) { + var $iElement = $(iElement); + var index = $iElement.data("index"); if(index == enableIndex) { - $(iElement).css('color', 'black'); + $iElement.css('color', 'black'); } else { - $(iElement).css('color', 'Gray'); + $iElement.css('color', 'Gray'); } }); var $select = this.$( "select" ); diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 templates/webapps/galaxy/user/manage_info.mako --- a/templates/webapps/galaxy/user/manage_info.mako +++ b/templates/webapps/galaxy/user/manage_info.mako @@ -49,7 +49,7 @@ <div class="form-row"><div class="grid-header"> %for i, filter in enumerate( ['Active', 'Deleted', 'All'] ): - %if i > 0: + %if i > 0: <span>|</span> %endif %if show_filter == filter: @@ -62,11 +62,11 @@ </div><table class="grid"><tbody> - %for index, address in enumerate(addresses): + %for index, address in enumerate(addresses): <tr class="libraryRow libraryOrFolderRow" id="libraryRow"><td> - <div class="form-row"> - <label>${address.desc}:</label> + <div class="form-row"> + <label>${address.desc | h}:</label> ${address.get_html()} </div><div class="form-row"> @@ -82,7 +82,7 @@ </ul></div></td> - </tr> + </tr> %endfor </tbody></table> diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 test/base/interactor.py --- a/test/base/interactor.py +++ b/test/base/interactor.py @@ -1,4 +1,5 @@ import os +import re from StringIO import StringIO from galaxy.tools.parameters import grouping from galaxy.tools import test @@ -330,10 +331,11 @@ try: test_user = [ user for user in all_users if user["email"] == email ][0] except IndexError: + username = re.sub('[^a-z-]', '--', email.lower()) data = dict( email=email, password='testuser', - username='admin-user', + username=username, ) test_user = self._post( 'users', data, key=admin_key ).json() return test_user diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 test/functional/tools/collection_two_paired.xml --- /dev/null +++ b/test/functional/tools/collection_two_paired.xml @@ -0,0 +1,69 @@ +<tool id="collection_two_paired" name="collection_two_paired" version="0.1.0"> + <command> + #if $kind.collection_type == "paired" + cat $kind.f1.forward $kind.f1['reverse'] >> $out1; + cat $kind.f2.forward $kind.f2['reverse'] >> $out1; + #else + #for $i, $_ in enumerate($kind.f1): + cat $kind.f1[$i] $kind.f2[$i] >> $out1; + #end for + #end if + </command> + <inputs> + <conditional name="kind"> + <param type="select" name="collection_type"> + <option value="paired">Paired Datasets</option> + <option value="list">List of Datasets</option> + </param> + <when value="paired"> + <param name="f1" type="data_collection" collection_type="paired" /> + <param name="f2" type="data_collection" collection_type="paired" /> + </when> + <when value="list"> + <param name="f1" type="data_collection" collection_type="list" /> + <param name="f2" type="data_collection" collection_type="list" /> + </when> + </conditional> + </inputs> + <outputs> + <data format="txt" name="out1" /> + </outputs> + <tests> + <test> + <conditional name="kind"> + <param 
name="collection_type" value="paired" /> + <param name="f1"> + <collection type="paired"> + <element name="forward" value="simple_line.txt" /> + <element name="reverse" value="simple_line_alternative.txt" /> + </collection> + </param> + <param name="f2"> + <collection type="paired"> + <element name="forward" value="simple_line.txt" /> + <element name="reverse" value="simple_line_alternative.txt" /> + </collection> + </param> + </conditional> + <output name="out1" file="simple_lines_interleaved.txt"/> + </test> + <test> + <conditional name="kind"> + <param name="collection_type" value="list" /> + <param name="f1"> + <collection type="list"> + <element name="l11" value="simple_line.txt" /> + <element name="l12" value="simple_line.txt" /> + </collection> + </param> + <param name="f2"> + <collection type="list"> + <element name="l21" value="simple_line_alternative.txt" /> + <element name="l22" value="simple_line_alternative.txt" /> + </collection> + </param> + </conditional> + <output name="out1" file="simple_lines_interleaved.txt"/> + </test> + </tests> +</tool> diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 test/functional/tools/samples_tool_conf.xml --- a/test/functional/tools/samples_tool_conf.xml +++ b/test/functional/tools/samples_tool_conf.xml @@ -21,4 +21,5 @@ <tool file="collection_paired_test.xml" /><tool file="collection_nested_test.xml" /><tool file="collection_mixed_param.xml" /> + <tool file="collection_two_paired.xml" /></toolbox> \ No newline at end of file diff -r b2613d9978a499db6e905ba8f4e452029b1aff61 -r 8b98752f4fbc11fb50f23b219f9037e7afa9a234 test/unit/tools/test_execution.py --- a/test/unit/tools/test_execution.py +++ b/test/unit/tools/test_execution.py @@ -16,13 +16,12 @@ eggs.require( "Paste" ) from paste import httpexceptions -# Tool with a repeat parameter, to test state update. -REPEAT_TOOL_CONTENTS = '''<tool id="test_tool" name="Test Tool"> +BASE_REPEAT_TOOL_CONTENTS = '''<tool id="test_tool" name="Test Tool"><command>echo "$param1" #for $r in $repeat# "$r.param2" #end for# < $out1</command><inputs><param type="text" name="param1" value="" /><repeat name="repeat1" label="Repeat 1"> - <param type="text" name="param2" value="" /> + %s </repeat></inputs><outputs> @@ -31,6 +30,10 @@ </tool> ''' +# Tool with a repeat parameter, to test state update. 
+REPEAT_TOOL_CONTENTS = BASE_REPEAT_TOOL_CONTENTS % '''<param type="text" name="param2" value="" />'''
+REPEAT_COLLECTION_PARAM_CONTENTS = BASE_REPEAT_TOOL_CONTENTS % '''<param type="data_collection" name="param2" collection_type="paired" />'''
+
 
 class ToolExecutionTestCase( TestCase, tools_support.UsesApp, tools_support.UsesTools ):
 
@@ -287,13 +290,48 @@
         } )
         self.__assert_exeuted( template, template_vars )
 
-    def __history_dataset_collection_for( self, hdas, id=1234 ):
-        collection = galaxy.model.DatasetCollection()
+    def test_subcollection_multirun_with_state_updates( self ):
+        self._init_tool( REPEAT_COLLECTION_PARAM_CONTENTS )
+        hda1, hda2 = self.__add_dataset( 1 ), self.__add_dataset( 2 )
+        collection = self.__history_dataset_collection_for( [ hda1, hda2 ], collection_type="list:paired" )
+        collection_id = self.app.security.encode_id( collection.id )
+        self.app.dataset_collections_service = Bunch(
+            match_collections=lambda collections: None
+        )
+        template, template_vars = self.__handle_with_incoming(
+            repeat1_add="dummy",
+        )
+        state = self.__assert_rerenders_tool_without_errors( template, template_vars )
+        assert len( state.inputs[ "repeat1" ] ) == 1
+        template, template_vars = self.__handle_with_incoming( state, **{
+            "repeat1_0|param2|__collection_multirun__": "%s|paired" % collection_id,
+            "repeat1_add": "dummy",
+        } )
+        state = self.__assert_rerenders_tool_without_errors( template, template_vars )
+        assert state.inputs[ "repeat1" ][ 0 ][ "param2|__collection_multirun__" ] == "%s|paired" % collection_id
+
+    def __history_dataset_collection_for( self, hdas, collection_type="list", id=1234 ):
+        collection = galaxy.model.DatasetCollection(
+            collection_type=collection_type,
+        )
         to_element = lambda hda: galaxy.model.DatasetCollectionElement(
             collection=collection,
             element=hda,
         )
-        collection.datasets = map(to_element, hdas)
+        elements = map(to_element, hdas)
+        if collection_type == "list:paired":
+            paired_collection = galaxy.model.DatasetCollection(
+                collection_type="paired",
+            )
+            paired_collection.elements = elements
+            list_dce = galaxy.model.DatasetCollectionElement(
+                collection=collection,
+                element=paired_collection,
+            )
+            elements = [ list_dce ]
+
+        collection.elements = elements
+
         history_dataset_collection_association = galaxy.model.HistoryDatasetCollectionAssociation(
             id=id,
             collection=collection,
@@ -349,13 +387,13 @@
             self.history.datasets.append( hda )
         return hda
 
-    def __add_collection_dataset( self, id, *hdas ):
+    def __add_collection_dataset( self, id, collection_type="paired", *hdas ):
         hdca = galaxy.model.HistoryDatasetCollectionAssociation()
         hdca.id = id
         collection = galaxy.model.DatasetCollection()
         hdca.collection = collection
         collection.elements = [ galaxy.model.DatasetCollectionElement(element=self.__add_dataset( 1 )) ]
-
+        collection.type = collection_type
         self.trans.sa_session.model_objects[ galaxy.model.HistoryDatasetCollectionAssociation ][ id ] = hdca
         self.history.dataset_collections.append( hdca )
         return hdca

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving this
because you have the service enabled, addressing the recipient of this
email.
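The query whitelist added to SQliteDataProvider in changeset 8b98752f4fbc is self-contained enough to try outside of Galaxy. Below is a minimal sketch, assuming only the Python 2 standard library: query_matches_whitelist mirrors the logic from the diff above, while the in-memory database and the reads table are stand-ins for a Galaxy dataset's file_name and its contents.

    import re
    import sqlite3

    def query_matches_whitelist(query):
        # Same checks as SQliteDataProvider.query_matches_whitelist: accept only
        # queries that start with "select " and reject any query containing a
        # semicolon outside of a quoted string (i.e. stacked statements).
        if re.match("select ", query, re.IGNORECASE):
            if re.search("^([^\"]|\"[^\"]*\")*?;", query) or re.search("^([^\']|\'[^\']*\')*?;", query):
                return False
            return True
        return False

    # An in-memory database stands in for a dataset file on disk.
    conn = sqlite3.connect(":memory:")
    conn.row_factory = sqlite3.Row
    conn.execute("CREATE TABLE reads (name TEXT, length INTEGER)")
    conn.execute("INSERT INTO reads VALUES ('r1', 76)")

    for query in ["SELECT name, length FROM reads",  # accepted
                  "SELECT 1; DROP TABLE reads",      # rejected: semicolon outside quotes
                  "DELETE FROM reads"]:              # rejected: not a SELECT
        if query_matches_whitelist(query):
            for row in conn.cursor().execute(query):
                print query, "->", tuple(row)
        else:
            print query, "-> rejected"

Note that this whitelist is the only guard between the user-supplied 'query' setting and the dataset's database, which is presumably why queries with an embedded statement-terminating semicolon are rejected outright rather than split.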