11 new commits in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/commits/320ae026f73a/
Changeset: 320ae026f73a
Branch: search
User: Kyle Ellrott
Date: 2013-06-21 22:31:02
Summary: Adding 'state' field to the job search view. Also adding '!=' comparator to GQL
Affected #: 2 files

diff -r fd3a82d33bb6896ba6395a5e83add5c6ac7f7fbf -r 320ae026f73a849c77a580410a5a3d6927ef1b91 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -158,8 +158,8 @@

 class Job( object, APIItem ):
-    api_collection_visible_keys = [ 'id' ]
-    api_element_visible_keys = [ 'id' ]
+    api_collection_visible_keys = [ 'id', 'state' ]
+    api_element_visible_keys = [ 'id', 'state' ]
     """
     A job represents a request to run a tool given input datasets, tool

diff -r fd3a82d33bb6896ba6395a5e83add5c6ac7f7fbf -r 320ae026f73a849c77a580410a5a3d6927ef1b91 lib/galaxy/model/search.py
--- a/lib/galaxy/model/search.py
+++ b/lib/galaxy/model/search.py
@@ -107,6 +107,8 @@
         if operator == "=":
             #print field.sqlalchemy_field == right, field.sqlalchemy_field, right
             self.query = self.query.filter( field.sqlalchemy_field == right )
+        elif operator == "!=":
+            self.query = self.query.filter( field.sqlalchemy_field != right )
         elif operator == "like":
             self.query = self.query.filter( field.sqlalchemy_field.like(right) )
         else:
@@ -433,6 +435,7 @@
     DOMAIN = "job"
     FIELDS = {
         'tool_name' : ViewField('tool_name', sqlalchemy_field=Job.tool_id),
+        'state' : ViewField('state', sqlalchemy_field=Job.state),
         'param' : ViewField('param', handler=job_param_filter),
         'input_hda' : ViewField('input_hda', handler=job_input_hda_filter, id_decode=True),
         'output_hda' : ViewField('output_hda', handler=job_output_hda_filter, id_decode=True)
@@ -499,6 +502,7 @@
     comparison = ( '=' -> '='
                  | '>' -> '>'
                  | '<' -> '<'
+                 | '!=' -> '!='
                  | '>=' -> '>='
                  | '<=' -> '<='
                  | 'like' -> 'like'

https://bitbucket.org/galaxy/galaxy-central/commits/460c7dc52f92/
Changeset: 460c7dc52f92
Branch: search
User: Kyle Ellrott
Date: 2013-06-27 20:59:24
Summary: Default merge
Affected #: 197 files

diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/binary.py
--- a/lib/galaxy/datatypes/binary.py
+++ b/lib/galaxy/datatypes/binary.py
@@ -267,25 +267,25 @@
     # bam does not use '#' to indicate comments/headers - we need to strip out those headers from the std. providers
     #TODO:?? seems like there should be an easier way to do/inherit this - metadata.comment_char?
     #TODO: incorporate samtools options to control output: regions first, then flags, etc.
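For readers skimming the search.py hunks above: a minimal, self-contained sketch (throwaway model and session names, not Galaxy's real model layer) of how a GQL comparison such as the new '!=' becomes a SQLAlchemy filter:

    # Illustrative only: 'JobStub' stands in for Galaxy's mapped Job model.
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class JobStub( Base ):
        __tablename__ = 'job'
        id = Column( Integer, primary_key=True )
        state = Column( String )

    def apply_comparison( query, sqlalchemy_field, operator, right ):
        # mirrors the if/elif chain in search.py, including the new '!='
        if operator == '=':
            return query.filter( sqlalchemy_field == right )
        elif operator == '!=':
            return query.filter( sqlalchemy_field != right )
        elif operator == 'like':
            return query.filter( sqlalchemy_field.like( right ) )
        raise ValueError( 'unknown operator: %s' % operator )

    session = sessionmaker( bind=create_engine( 'sqlite://' ) )()
    Base.metadata.create_all( session.bind )
    # e.g. GQL "select * from job where state != 'ok'":
    not_ok = apply_comparison( session.query( JobStub ), JobStub.state, '!=', 'ok' )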
- @dataproviders.decorators.dataprovider_factory( 'line' ) + @dataproviders.decorators.dataprovider_factory( 'line', dataproviders.line.FilteredLineDataProvider.settings ) def line_dataprovider( self, dataset, **settings ): samtools_source = dataproviders.dataset.SamtoolsDataProvider( dataset ) settings[ 'comment_char' ] = '@' return dataproviders.line.FilteredLineDataProvider( samtools_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'regex-line' ) + @dataproviders.decorators.dataprovider_factory( 'regex-line', dataproviders.line.RegexLineDataProvider.settings ) def regex_line_dataprovider( self, dataset, **settings ): samtools_source = dataproviders.dataset.SamtoolsDataProvider( dataset ) settings[ 'comment_char' ] = '@' return dataproviders.line.RegexLineDataProvider( samtools_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'column' ) + @dataproviders.decorators.dataprovider_factory( 'column', dataproviders.column.ColumnarDataProvider.settings ) def column_dataprovider( self, dataset, **settings ): samtools_source = dataproviders.dataset.SamtoolsDataProvider( dataset ) settings[ 'comment_char' ] = '@' return dataproviders.column.ColumnarDataProvider( samtools_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'map' ) + @dataproviders.decorators.dataprovider_factory( 'map', dataproviders.column.MapDataProvider.settings ) def map_dataprovider( self, dataset, **settings ): samtools_source = dataproviders.dataset.SamtoolsDataProvider( dataset ) settings[ 'comment_char' ] = '@' @@ -293,30 +293,30 @@ # these can't be used directly - may need BamColumn, BamMap (Bam metadata -> column/map) # OR - see genomic_region_dataprovider - #@dataproviders.decorators.dataprovider_factory( 'dataset-column' ) + #@dataproviders.decorators.dataprovider_factory( 'dataset-column', dataproviders.column.ColumnarDataProvider.settings ) #def dataset_column_dataprovider( self, dataset, **settings ): # settings[ 'comment_char' ] = '@' # return super( Sam, self ).dataset_column_dataprovider( dataset, **settings ) - #@dataproviders.decorators.dataprovider_factory( 'dataset-map' ) + #@dataproviders.decorators.dataprovider_factory( 'dataset-map', dataproviders.column.MapDataProvider.settings ) #def dataset_map_dataprovider( self, dataset, **settings ): # settings[ 'comment_char' ] = '@' # return super( Sam, self ).dataset_map_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'header' ) + @dataproviders.decorators.dataprovider_factory( 'header', dataproviders.line.RegexLineDataProvider.settings ) def header_dataprovider( self, dataset, **settings ): # in this case we can use an option of samtools view to provide just what we need (w/o regex) samtools_source = dataproviders.dataset.SamtoolsDataProvider( dataset, '-H' ) return dataproviders.line.RegexLineDataProvider( samtools_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'id-seq-qual' ) + @dataproviders.decorators.dataprovider_factory( 'id-seq-qual', dataproviders.column.MapDataProvider.settings ) def id_seq_qual_dataprovider( self, dataset, **settings ): settings[ 'indeces' ] = [ 0, 9, 10 ] settings[ 'column_types' ] = [ 'str', 'str', 'str' ] settings[ 'column_names' ] = [ 'id', 'seq', 'qual' ] return self.map_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'genomic-region' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region', dataproviders.column.ColumnarDataProvider.settings ) def 
genomic_region_dataprovider( self, dataset, **settings ): # GenomicRegionDataProvider currently requires a dataset as source - may not be necc. #TODO:?? consider (at least) the possible use of a kwarg: metadata_source (def. to source.dataset), @@ -330,7 +330,7 @@ settings[ 'column_types' ] = [ 'str', 'int', 'int' ] return self.column_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region-map', dataproviders.column.MapDataProvider.settings ) def genomic_region_map_dataprovider( self, dataset, **settings ): settings[ 'indeces' ] = [ 2, 3, 3 ] settings[ 'column_types' ] = [ 'str', 'int', 'int' ] diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/converters/bam_to_bigwig_converter.xml --- a/lib/galaxy/datatypes/converters/bam_to_bigwig_converter.xml +++ b/lib/galaxy/datatypes/converters/bam_to_bigwig_converter.xml @@ -1,5 +1,9 @@ <tool id="CONVERTER_bam_to_bigwig_0" name="Convert BAM to BigWig" version="1.0.0" hidden="true"><!-- <description>__NOT_USED_CURRENTLY_FOR_CONVERTERS__</description> --> + <requirements> + <requirement type="package">ucsc_tools</requirement> + <requirement type="package">bedtools</requirement> + </requirements><command> bedtools genomecov -bg -split -ibam $input -g $chromInfo diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/converters/bed_gff_or_vcf_to_bigwig_converter.xml --- a/lib/galaxy/datatypes/converters/bed_gff_or_vcf_to_bigwig_converter.xml +++ b/lib/galaxy/datatypes/converters/bed_gff_or_vcf_to_bigwig_converter.xml @@ -1,5 +1,9 @@ <tool id="CONVERTER_bed_gff_or_vcf_to_bigwig_0" name="Convert BED, GFF, or VCF to BigWig" version="1.0.0" hidden="true"><!-- <description>__NOT_USED_CURRENTLY_FOR_CONVERTERS__</description> --> + <requirements> + <requirement type="package">ucsc_tools</requirement> + <requirement type="package">bedtools</requirement> + </requirements><command> ## Remove comments and sort by chromosome. grep -v '^#' $input | sort -k1,1 | diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/converters/interval_to_bigwig_converter.xml --- a/lib/galaxy/datatypes/converters/interval_to_bigwig_converter.xml +++ b/lib/galaxy/datatypes/converters/interval_to_bigwig_converter.xml @@ -1,6 +1,10 @@ <tool id="CONVERTER_interval_to_bigwig_0" name="Convert Genomic Intervals To Coverage"><!-- <description>__NOT_USED_CURRENTLY_FOR_CONVERTERS__</description> --><!-- Used on the metadata edit page. --> + <requirements> + <requirement type="package">ucsc_tools</requirement> + <requirement type="package">bedtools</requirement> + </requirements><command> ## Remove comments and sort by chromosome. 
diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/converters/sam_to_bigwig_converter.xml --- a/lib/galaxy/datatypes/converters/sam_to_bigwig_converter.xml +++ b/lib/galaxy/datatypes/converters/sam_to_bigwig_converter.xml @@ -1,4 +1,9 @@ <tool id="CONVERTER_sam_to_bigwig_0" name="Convert SAM to BigWig" version="1.0.0" hidden="true"> + <requirements> + <requirement type="package">ucsc_tools</requirement> + <requirement type="package">samtools</requirement> + <requirement type="package">bedtools</requirement> + </requirements><command> samtools view -bh $input | bedtools genomecov -bg -split -ibam stdin -g $chromInfo diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/data.py --- a/lib/galaxy/datatypes/data.py +++ b/lib/galaxy/datatypes/data.py @@ -593,7 +593,6 @@ Base dataprovider factory for all datatypes that returns the proper provider for the given `data_format` or raises a `NoProviderAvailable`. """ - #TODO:?? is this handling super class providers? if self.has_dataprovider( data_format ): return self.dataproviders[ data_format ]( self, dataset, **settings ) raise dataproviders.exceptions.NoProviderAvailable( self, data_format ) @@ -603,12 +602,12 @@ dataset_source = dataproviders.dataset.DatasetDataProvider( dataset ) return dataproviders.base.DataProvider( dataset_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'chunk' ) + @dataproviders.decorators.dataprovider_factory( 'chunk', dataproviders.chunk.ChunkDataProvider.settings ) def chunk_dataprovider( self, dataset, **settings ): dataset_source = dataproviders.dataset.DatasetDataProvider( dataset ) return dataproviders.chunk.ChunkDataProvider( dataset_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'chunk64' ) + @dataproviders.decorators.dataprovider_factory( 'chunk64', dataproviders.chunk.Base64ChunkDataProvider.settings ) def chunk64_dataprovider( self, dataset, **settings ): dataset_source = dataproviders.dataset.DatasetDataProvider( dataset ) return dataproviders.chunk.Base64ChunkDataProvider( dataset_source, **settings ) @@ -785,7 +784,7 @@ split = classmethod(split) # ------------- Dataproviders - @dataproviders.decorators.dataprovider_factory( 'line' ) + @dataproviders.decorators.dataprovider_factory( 'line', dataproviders.line.FilteredLineDataProvider.settings ) def line_dataprovider( self, dataset, **settings ): """ Returns an iterator over the dataset's lines (that have been `strip`ed) @@ -794,7 +793,7 @@ dataset_source = dataproviders.dataset.DatasetDataProvider( dataset ) return dataproviders.line.FilteredLineDataProvider( dataset_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'regex-line' ) + @dataproviders.decorators.dataprovider_factory( 'regex-line', dataproviders.line.RegexLineDataProvider.settings ) def regex_line_dataprovider( self, dataset, **settings ): """ Returns an iterator over the dataset's lines diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/dataproviders/base.py --- a/lib/galaxy/datatypes/dataproviders/base.py +++ b/lib/galaxy/datatypes/dataproviders/base.py @@ -22,8 +22,13 @@ icorporate existing visualization/dataproviders some of the sources (esp. 
in datasets) don't need to be re-created +YAGNI: InterleavingMultiSourceDataProvider, CombiningMultiSourceDataProvider -YAGNI: InterleavingMultiSourceDataProvider, CombiningMultiSourceDataProvider +datasets API entry point: + kwargs should be parsed from strings 2 layers up (in the DatasetsAPI) - that's the 'proper' place for that. + but how would it know how/what to parse if it doesn't have access to the classes used in the provider? + Building a giant list by sweeping all possible dprov classes doesn't make sense + For now - I'm burying them in the class __init__s - but I don't like that """ import logging @@ -31,6 +36,31 @@ # ----------------------------------------------------------------------------- base classes +class HasSettings( type ): + """ + Metaclass for data providers that allows defining and inheriting + a dictionary named 'settings'. + + Useful for allowing class level access to expected variable types + passed to class `__init__` functions so they can be parsed from a query string. + """ + # yeah - this is all too acrobatic + def __new__( cls, name, base_classes, attributes ): + settings = {} + # get settings defined in base classes + for base_class in base_classes: + base_settings = getattr( base_class, 'settings', None ) + if base_settings: + settings.update( base_settings ) + # get settings defined in this class + new_settings = attributes.pop( 'settings', None ) + if new_settings: + settings.update( new_settings ) + attributes[ 'settings' ] = settings + return type.__new__( cls, name, base_classes, attributes ) + + +# ----------------------------------------------------------------------------- base classes class DataProvider( object ): """ Base class for all data providers. Data providers: @@ -39,6 +69,12 @@ (c) do not allow write methods (but otherwise implement the other file object interface methods) """ + # a definition of expected types for keyword arguments sent to __init__ + # useful for controlling how query string dictionaries can be parsed into correct types for __init__ + # empty in this base class + __metaclass__ = HasSettings + settings = {} + def __init__( self, source, **kwargs ): """ :param source: the source that this iterator will loop over. @@ -130,13 +166,16 @@ - `num_valid_data_read`: how many data have been returned from `filter`. - `num_data_returned`: how many data has this provider yielded. """ + # not useful here - we don't want functions over the query string + #settings.update({ 'filter_fn': 'function' }) + def __init__( self, source, filter_fn=None, **kwargs ): """ :param filter_fn: a lambda or function that will be passed a datum and return either the (optionally modified) datum or None. """ super( FilteredDataProvider, self ).__init__( source, **kwargs ) - self.filter_fn = filter_fn + self.filter_fn = filter_fn if hasattr( filter_fn, '__call__' ) else None # count how many data we got from the source self.num_data_read = 0 # how many valid data have we gotten from the source @@ -179,6 +218,12 @@ Useful for grabbing sections from a source (e.g. pagination). 
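The HasSettings metaclass above is easiest to see with a toy hierarchy. A condensed, runnable sketch (class names are made up) of the inherit-and-merge behavior:

    # Condensed from the HasSettings hunk above; Python 2 metaclass syntax.
    class HasSettings( type ):
        def __new__( cls, name, base_classes, attributes ):
            settings = {}
            for base_class in base_classes:
                settings.update( getattr( base_class, 'settings', None ) or {} )
            settings.update( attributes.pop( 'settings', None ) or {} )
            attributes[ 'settings' ] = settings
            return type.__new__( cls, name, base_classes, attributes )

    class BaseProvider( object ):
        __metaclass__ = HasSettings
        settings = { 'limit' : 'int', 'offset' : 'int' }

    class ColumnProvider( BaseProvider ):
        # declares only its own keys; limit/offset are merged in from the base
        settings = { 'deliminator' : 'str' }

    print ColumnProvider.settings
    # {'limit': 'int', 'offset': 'int', 'deliminator': 'str'}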
""" + # define the expected types of these __init__ arguments so they can be parsed out from query strings + settings = { + 'limit' : 'int', + 'offset': 'int' + } + #TODO: may want to squash this into DataProvider def __init__( self, source, offset=0, limit=None, **kwargs ): """ diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/dataproviders/chunk.py --- a/lib/galaxy/datatypes/dataproviders/chunk.py +++ b/lib/galaxy/datatypes/dataproviders/chunk.py @@ -26,6 +26,10 @@ """ MAX_CHUNK_SIZE = 2**16 DEFAULT_CHUNK_SIZE = MAX_CHUNK_SIZE + settings = { + 'chunk_index' : 'int', + 'chunk_size' : 'int' + } #TODO: subclass from LimitedOffsetDataProvider? # see web/framework/base.iterate_file, util/__init__.file_reader, and datatypes.tabular @@ -38,8 +42,8 @@ (gen. in bytes). """ super( ChunkDataProvider, self ).__init__( source, **kwargs ) - self.chunk_size = chunk_size - self.chunk_pos = chunk_index * self.chunk_size + self.chunk_size = int( chunk_size ) + self.chunk_pos = int( chunk_index ) * self.chunk_size def validate_source( self, source ): """ diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/dataproviders/column.py --- a/lib/galaxy/datatypes/dataproviders/column.py +++ b/lib/galaxy/datatypes/dataproviders/column.py @@ -29,6 +29,14 @@ the same number of columns as the number of indeces asked for (even if they are filled with None). """ + settings = { + 'indeces' : 'list:int', + 'column_count' : 'int', + 'column_types' : 'list:str', + 'parse_columns' : 'bool', + 'deliminator' : 'str' + } + def __init__( self, source, indeces=None, column_count=None, column_types=None, parsers=None, parse_columns=True, deliminator='\t', **kwargs ): @@ -91,11 +99,11 @@ # how/whether to parse each column value self.parsers = {} if parse_columns: - self.parsers = self._get_default_parsers() + self.parsers = self.get_default_parsers() # overwrite with user desired parsers self.parsers.update( parsers or {} ) - def _get_default_parsers( self ): + def get_default_parsers( self ): """ Return parser dictionary keyed for each columnar type (as defined in datatypes). @@ -132,7 +140,7 @@ #'gffstrand': # -, +, ?, or '.' for None, etc. } - def _parse_value( self, val, type ): + def parse_value( self, val, type ): """ Attempt to parse and return the given value based on the given type. @@ -153,7 +161,7 @@ return None return val - def _get_column_type( self, index ): + def get_column_type( self, index ): """ Get the column type for the parser from `self.column_types` or `None` if the type is unavailable. @@ -165,18 +173,18 @@ except IndexError, ind_err: return None - def _parse_column_at_index( self, columns, parser_index, index ): + def parse_column_at_index( self, columns, parser_index, index ): """ Get the column type for the parser from `self.column_types` or `None` if the type is unavailable. """ try: - return self._parse_value( columns[ index ], self._get_column_type( parser_index ) ) + return self.parse_value( columns[ index ], self.get_column_type( parser_index ) ) # if a selected index is not within columns, return None except IndexError, index_err: return None - def _parse_columns_from_line( self, line ): + def parse_columns_from_line( self, line ): """ Returns a list of the desired, parsed columns. 
:param line: the line to parse @@ -188,13 +196,13 @@ selected_indeces = self.selected_column_indeces or list( xrange( len( all_columns ) ) ) parsed_columns = [] for parser_index, column_index in enumerate( selected_indeces ): - parsed_columns.append( self._parse_column_at_index( all_columns, parser_index, column_index ) ) + parsed_columns.append( self.parse_column_at_index( all_columns, parser_index, column_index ) ) return parsed_columns def __iter__( self ): parent_gen = super( ColumnarDataProvider, self ).__iter__() for line in parent_gen: - columns = self._parse_columns_from_line( line ) + columns = self.parse_columns_from_line( line ) yield columns #TODO: implement column filters here and not below - flatten hierarchy @@ -223,6 +231,10 @@ .. note: that the subclass constructors are passed kwargs - so they're params (limit, offset, etc.) are also applicable here. """ + settings = { + 'column_names' : 'list:str', + } + def __init__( self, source, column_names=None, **kwargs ): """ :param column_names: an ordered list of strings that will be used as the keys diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/dataproviders/dataset.py --- a/lib/galaxy/datatypes/dataproviders/dataset.py +++ b/lib/galaxy/datatypes/dataproviders/dataset.py @@ -141,7 +141,7 @@ """ # metadata columns are 1-based indeces column = getattr( self.dataset.metadata, name ) - return ( column - 1 ) if isinstance( column, int ) else None + return ( column - 1 ) if ( isinstance( column, int ) and column > 0 ) else None def get_genomic_region_indeces( self, check=False ): """ @@ -271,6 +271,12 @@ """ # dictionary keys when named_columns=True COLUMN_NAMES = [ 'chrom', 'start', 'end' ] + settings = { + 'chrom_column' : 'int', + 'start_column' : 'int', + 'end_column' : 'int', + 'named_columns' : 'bool', + } def __init__( self, dataset, chrom_column=None, start_column=None, end_column=None, named_columns=False, **kwargs ): """ @@ -333,6 +339,14 @@ 'chrom', 'start', 'end' (and 'strand' and 'name' if available). 
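The one-line change to get_metadata_column_index_by_name above fixes a subtle off-by-one trap; in miniature:

    # Metadata column numbers are 1-based; without the 'column > 0' guard a
    # non-positive value (e.g. an unset 0) became index -1, which silently
    # selects the *last* column in Python.
    def to_zero_based( column ):
        return ( column - 1 ) if ( isinstance( column, int ) and column > 0 ) else None

    print to_zero_based( 1 )     # 0 (first column)
    print to_zero_based( 0 )     # None (was -1 before the fix)
    print to_zero_based( None )  # None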
""" COLUMN_NAMES = [ 'chrom', 'start', 'end', 'strand', 'name' ] + settings = { + 'chrom_column' : 'int', + 'start_column' : 'int', + 'end_column' : 'int', + 'strand_column' : 'int', + 'name_column' : 'int', + 'named_columns' : 'bool', + } def __init__( self, dataset, chrom_column=None, start_column=None, end_column=None, strand_column=None, name_column=None, named_columns=False, **kwargs ): @@ -349,25 +363,40 @@ dataset_source = DatasetDataProvider( dataset ) # get genomic indeces and add strand and name + self.column_names = [] + indeces = [] + #TODO: this is sort of involved and oogly if chrom_column == None: chrom_column = dataset_source.get_metadata_column_index_by_name( 'chromCol' ) + if chrom_column != None: + self.column_names.append( 'chrom' ) + indeces.append( chrom_column ) if start_column == None: start_column = dataset_source.get_metadata_column_index_by_name( 'startCol' ) + if start_column != None: + self.column_names.append( 'start' ) + indeces.append( start_column ) if end_column == None: end_column = dataset_source.get_metadata_column_index_by_name( 'endCol' ) + if end_column != None: + self.column_names.append( 'end' ) + indeces.append( end_column ) if strand_column == None: strand_column = dataset_source.get_metadata_column_index_by_name( 'strandCol' ) + if strand_column != None: + self.column_names.append( 'strand' ) + indeces.append( strand_column ) if name_column == None: name_column = dataset_source.get_metadata_column_index_by_name( 'nameCol' ) - indeces = [ chrom_column, start_column, end_column, strand_column, name_column ] + if name_column != None: + self.column_names.append( 'name' ) + indeces.append( name_column ) + kwargs.update({ 'indeces' : indeces }) - if not kwargs.get( 'column_types', None ): kwargs.update({ 'column_types' : dataset_source.get_metadata_column_types( indeces=indeces ) }) self.named_columns = named_columns - if self.named_columns: - self.column_names = self.COLUMN_NAMES super( IntervalDataProvider, self ).__init__( dataset_source, **kwargs ) @@ -390,6 +419,10 @@ sequence: <joined lines of nucleotide/amino data> } """ + settings = { + 'ids' : 'list:str', + } + def __init__( self, source, ids=None, **kwargs ): """ :param ids: optionally return only ids (and sequences) that are in this list. @@ -419,6 +452,10 @@ sequence: <joined lines of nucleotide/amino data> } """ + settings = { + 'ids' : 'list:str', + } + def __init__( self, source, ids=None, **kwargs ): """ :param ids: optionally return only ids (and sequences) that are in this list. @@ -445,6 +482,10 @@ Class that returns chrom, pos, data from a wiggle source. """ COLUMN_NAMES = [ 'chrom', 'pos', 'value' ] + settings = { + 'named_columns' : 'bool', + 'column_names' : 'list:str', + } def __init__( self, source, named_columns=False, column_names=None, **kwargs ): """ @@ -483,6 +524,10 @@ Class that returns chrom, pos, data from a wiggle source. 
""" COLUMN_NAMES = [ 'chrom', 'pos', 'value' ] + settings = { + 'named_columns' : 'bool', + 'column_names' : 'list:str', + } def __init__( self, source, chrom, start, end, named_columns=False, column_names=None, **kwargs ): """ diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/dataproviders/decorators.py --- a/lib/galaxy/datatypes/dataproviders/decorators.py +++ b/lib/galaxy/datatypes/dataproviders/decorators.py @@ -87,17 +87,40 @@ # log.debug( '\t\t ', fn.__doc__ ) return cls -def dataprovider_factory( name ): +def dataprovider_factory( name, settings=None ): """ - Wraps a class method and marks it as a dataprovider factory. + Wraps a class method and marks it as a dataprovider factory and creates a + function to parse query strings to __init__ arguments as the + `parse_query_string_settings` attribute of the factory function. + + An example use of the `parse_query_string_settings`: + ..example:: + kwargs = dataset.datatype.dataproviders[ provider ].parse_query_string_settings( query_kwargs ) + return list( dataset.datatype.dataprovider( dataset, provider, **kwargs ) ) :param name: what name/key to register the factory under in `cls.dataproviders` - :param type: any hashable var + :type name: any hashable var + :param settings: dictionary containing key/type pairs for parsing query strings + to __init__ arguments + :type settings: dictionary """ + #TODO:?? use *args for settings allowing mulitple dictionaries + # make a function available through the name->provider dispatch to parse query strings + # callable like: + # settings_dict = dataproviders[ provider_name ].parse_query_string_settings( query_kwargs ) + #TODO: ugh - overly complicated but the best I could think of + def parse_query_string_settings( query_kwargs ): + return _parse_query_string_settings( query_kwargs, settings ) + #log.debug( 'dataprovider:', name ) def named_dataprovider_factory( func ): #log.debug( 'named_dataprovider_factory:', name, '->', func.__name__ ) setattr( func, _DATAPROVIDER_METHOD_NAME_KEY, name ) + + setattr( func, 'parse_query_string_settings', parse_query_string_settings ) + setattr( func, 'settings', settings ) + #TODO: I want a way to inherit settings from the previous provider( this_name ) instead of defining over and over + #log.debug( '\t setting:', getattr( func, _DATAPROVIDER_METHOD_NAME_KEY ) ) @wraps( func ) def wrapped_dataprovider_factory( self, *args, **kwargs ): @@ -105,3 +128,38 @@ return func( self, *args, **kwargs ) return wrapped_dataprovider_factory return named_dataprovider_factory + +def _parse_query_string_settings( query_kwargs, settings=None ): + """ + Parse the values in `query_kwargs` from strings to the proper types + listed in the same key in `settings`. + """ + def list_from_query_string( s ): + # assume csv + return s.split( ',' ) + + parsers = { + 'int' : int, + 'float' : float, + 'bool' : bool, + 'list:str' : lambda s: list_from_query_string( s ), + 'list:int' : lambda s: [ int( i ) for i in list_from_query_string( s ) ], + } + settings = settings or {} + # yay! yet another set of query string parsers! 
<-- sarcasm + # work through the keys in settings finding matching keys in query_kwargs + # if found in both, get the expected/needed type from settings and store the new parsed value + # if we can't parse it (no parser, bad value), delete the key from query_kwargs so the provider will use the defaults + for key in settings: + if key in query_kwargs: + #TODO: this would be the place to sanitize any strings + query_value = query_kwargs[ key ] + needed_type = settings[ key ] + try: + query_kwargs[ key ] = parsers[ needed_type ]( query_value ) + except ( KeyError, ValueError ): + del query_kwargs[ key ] + + #TODO:?? do we want to remove query_kwarg entries NOT in settings? + return query_kwargs + diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/dataproviders/line.py --- a/lib/galaxy/datatypes/dataproviders/line.py +++ b/lib/galaxy/datatypes/dataproviders/line.py @@ -27,6 +27,12 @@ to return. """ DEFAULT_COMMENT_CHAR = '#' + settings = { + 'string_lines' : 'bool', + 'provide_blank' : 'bool', + 'comment_char' : 'str', + } + def __init__( self, source, strip_lines=True, provide_blank=False, comment_char=DEFAULT_COMMENT_CHAR, **kwargs ): """ :param strip_lines: remove whitespace from the beginning an ending @@ -78,6 +84,11 @@ .. note:: the regex matches are effectively OR'd (if **any** regex matches the line it is considered valid and will be provided). """ + settings = { + 'regex_list' : 'list:str', + 'invert' : 'bool', + } + def __init__( self, source, regex_list=None, invert=False, **kwargs ): """ :param regex_list: list of strings or regular expression strings that will diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/interval.py --- a/lib/galaxy/datatypes/interval.py +++ b/lib/galaxy/datatypes/interval.py @@ -334,20 +334,24 @@ return None # ------------- Dataproviders - @dataproviders.decorators.dataprovider_factory( 'genomic-region' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region', + dataproviders.dataset.GenomicRegionDataProvider.settings ) def genomic_region_dataprovider( self, dataset, **settings ): return dataproviders.dataset.GenomicRegionDataProvider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region-map', + dataproviders.dataset.GenomicRegionDataProvider.settings ) def genomic_region_map_dataprovider( self, dataset, **settings ): settings[ 'named_columns' ] = True return self.genomic_region_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'interval' ) + @dataproviders.decorators.dataprovider_factory( 'interval', + dataproviders.dataset.IntervalDataProvider.settings ) def interval_dataprovider( self, dataset, **settings ): return dataproviders.dataset.IntervalDataProvider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'interval-map' ) + @dataproviders.decorators.dataprovider_factory( 'interval-map', + dataproviders.dataset.IntervalDataProvider.settings ) def interval_map_dataprovider( self, dataset, **settings ): settings[ 'named_columns' ] = True return self.interval_dataprovider( dataset, **settings ) @@ -809,20 +813,24 @@ # ------------- Dataproviders # redefine bc super is Tabular - @dataproviders.decorators.dataprovider_factory( 'genomic-region' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region', + 
dataproviders.dataset.GenomicRegionDataProvider.settings ) def genomic_region_dataprovider( self, dataset, **settings ): return dataproviders.dataset.GenomicRegionDataProvider( dataset, 0, 3, 4, **settings ) - @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region-map', + dataproviders.dataset.GenomicRegionDataProvider.settings ) def genomic_region_map_dataprovider( self, dataset, **settings ): settings[ 'named_columns' ] = True return self.genomic_region_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'interval' ) + @dataproviders.decorators.dataprovider_factory( 'interval', + dataproviders.dataset.IntervalDataProvider.settings ) def interval_dataprovider( self, dataset, **settings ): return dataproviders.dataset.IntervalDataProvider( dataset, 0, 3, 4, 6, 2, **settings ) - @dataproviders.decorators.dataprovider_factory( 'interval-map' ) + @dataproviders.decorators.dataprovider_factory( 'interval-map', + dataproviders.dataset.IntervalDataProvider.settings ) def interval_map_dataprovider( self, dataset, **settings ): settings[ 'named_columns' ] = True return self.interval_dataprovider( dataset, **settings ) @@ -1193,12 +1201,12 @@ return resolution # ------------- Dataproviders - @dataproviders.decorators.dataprovider_factory( 'wiggle' ) + @dataproviders.decorators.dataprovider_factory( 'wiggle', dataproviders.dataset.WiggleDataProvider.settings ) def wiggle_dataprovider( self, dataset, **settings ): dataset_source = dataproviders.dataset.DatasetDataProvider( dataset ) return dataproviders.dataset.WiggleDataProvider( dataset_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'wiggle-map' ) + @dataproviders.decorators.dataprovider_factory( 'wiggle-map', dataproviders.dataset.WiggleDataProvider.settings ) def wiggle_map_dataprovider( self, dataset, **settings ): dataset_source = dataproviders.dataset.DatasetDataProvider( dataset ) settings[ 'named_columns' ] = True diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/sequence.py --- a/lib/galaxy/datatypes/sequence.py +++ b/lib/galaxy/datatypes/sequence.py @@ -15,8 +15,6 @@ from galaxy import util from sniff import * -from galaxy.datatypes import dataproviders - import pkg_resources pkg_resources.require("simplejson") import simplejson @@ -399,15 +397,6 @@ f.close() _count_split = classmethod(_count_split) - def provider( self, dataset, data_format, **settings ): - from galaxy.dataproviders import dataset as dataset_providers - - if data_format == 'id_seq': - source = dataset_providers.DatasetDataProvider( dataset ) - return dataset_providers.FastaDataProvider( source, **settings ) - - return super( Fasta, self ).provider( dataset, data_format, **settings ) - class csFasta( Sequence ): """ Class representing the SOLID Color-Space sequence ( csfasta ) """ diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/sniff.py --- a/lib/galaxy/datatypes/sniff.py +++ b/lib/galaxy/datatypes/sniff.py @@ -6,6 +6,7 @@ from galaxy import util from galaxy.datatypes.checkers import * from encodings import search_function as encodings_search_function +from binary import Binary log = logging.getLogger(__name__) diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/datatypes/tabular.py --- a/lib/galaxy/datatypes/tabular.py +++ b/lib/galaxy/datatypes/tabular.py 
@@ -345,26 +345,25 @@ return vizs # ------------- Dataproviders - @dataproviders.decorators.dataprovider_factory( 'column' ) + @dataproviders.decorators.dataprovider_factory( 'column', dataproviders.column.ColumnarDataProvider.settings ) def column_dataprovider( self, dataset, **settings ): """Uses column settings that are passed in""" - print 'Tabular.comment_char:', settings.get( 'comment_char', None ) - dataset_source = dataproviders.dataset.DatasetDataProvider( dataset ) return dataproviders.column.ColumnarDataProvider( dataset_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'dataset-column' ) + @dataproviders.decorators.dataprovider_factory( 'dataset-column', + dataproviders.column.ColumnarDataProvider.settings ) def dataset_column_dataprovider( self, dataset, **settings ): """Attempts to get column settings from dataset.metadata""" return dataproviders.dataset.DatasetColumnarDataProvider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'map' ) + @dataproviders.decorators.dataprovider_factory( 'map', dataproviders.column.MapDataProvider.settings ) def map_dataprovider( self, dataset, **settings ): """Uses column settings that are passed in""" dataset_source = dataproviders.dataset.DatasetDataProvider( dataset ) return dataproviders.column.MapDataProvider( dataset_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'dataset-map' ) + @dataproviders.decorators.dataprovider_factory( 'dataset-map', dataproviders.column.MapDataProvider.settings ) def dataset_map_dataprovider( self, dataset, **settings ): """Attempts to get column settings from dataset.metadata""" return dataproviders.dataset.DatasetMapDataProvider( dataset, **settings ) @@ -502,55 +501,58 @@ # ------------- Dataproviders # sam does not use '#' to indicate comments/headers - we need to strip out those headers from the std. providers #TODO:?? seems like there should be an easier way to do this - metadata.comment_char? 
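Tying the settings plumbing together: a hypothetical round trip in the spirit of _parse_query_string_settings from the decorators.py hunk earlier, turning raw query-string values into typed keyword arguments:

    # Values arrive from the query string as strings...
    query_kwargs = { 'limit' : '10', 'indeces' : '0,2,5', 'unknown' : 'kept' }
    # ...and the provider's settings dict says what they should become.
    settings = { 'limit' : 'int', 'indeces' : 'list:int' }

    parsers = {
        'int' : int,
        'float' : float,
        'bool' : bool,
        'list:str' : lambda s: s.split( ',' ),
        'list:int' : lambda s: [ int( i ) for i in s.split( ',' ) ],
    }
    for key in settings:
        if key in query_kwargs:
            try:
                query_kwargs[ key ] = parsers[ settings[ key ] ]( query_kwargs[ key ] )
            except ( KeyError, ValueError ):
                # unparseable values are dropped so provider defaults apply
                del query_kwargs[ key ]

    print query_kwargs
    # 'limit' parsed to 10, 'indeces' to [0, 2, 5]; keys not named in
    # settings pass through untouched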
- @dataproviders.decorators.dataprovider_factory( 'line' ) + @dataproviders.decorators.dataprovider_factory( 'line', dataproviders.line.FilteredLineDataProvider.settings ) def line_dataprovider( self, dataset, **settings ): settings[ 'comment_char' ] = '@' return super( Sam, self ).line_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'regex-line' ) + @dataproviders.decorators.dataprovider_factory( 'regex-line', dataproviders.line.RegexLineDataProvider.settings ) def regex_line_dataprovider( self, dataset, **settings ): settings[ 'comment_char' ] = '@' return super( Sam, self ).regex_line_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'column' ) + @dataproviders.decorators.dataprovider_factory( 'column', dataproviders.column.ColumnarDataProvider.settings ) def column_dataprovider( self, dataset, **settings ): settings[ 'comment_char' ] = '@' return super( Sam, self ).column_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'dataset-column' ) + @dataproviders.decorators.dataprovider_factory( 'dataset-column', + dataproviders.column.ColumnarDataProvider.settings ) def dataset_column_dataprovider( self, dataset, **settings ): settings[ 'comment_char' ] = '@' return super( Sam, self ).dataset_column_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'map' ) + @dataproviders.decorators.dataprovider_factory( 'map', dataproviders.column.MapDataProvider.settings ) def map_dataprovider( self, dataset, **settings ): settings[ 'comment_char' ] = '@' return super( Sam, self ).map_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'dataset-map' ) + @dataproviders.decorators.dataprovider_factory( 'dataset-map', dataproviders.column.MapDataProvider.settings ) def dataset_map_dataprovider( self, dataset, **settings ): settings[ 'comment_char' ] = '@' return super( Sam, self ).dataset_map_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'header' ) + @dataproviders.decorators.dataprovider_factory( 'header', dataproviders.line.RegexLineDataProvider.settings ) def header_dataprovider( self, dataset, **settings ): dataset_source = dataproviders.dataset.DatasetDataProvider( dataset ) headers_source = dataproviders.line.RegexLineDataProvider( dataset_source, regex_list=[ '^@' ] ) return dataproviders.line.RegexLineDataProvider( headers_source, **settings ) - @dataproviders.decorators.dataprovider_factory( 'id-seq-qual' ) + @dataproviders.decorators.dataprovider_factory( 'id-seq-qual', map_dataprovider.settings ) def id_seq_qual_dataprovider( self, dataset, **settings ): # provided as an example of a specified column map (w/o metadata) settings[ 'indeces' ] = [ 0, 9, 10 ] settings[ 'column_names' ] = [ 'id', 'seq', 'qual' ] return self.map_dataprovider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'genomic-region' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region', + dataproviders.dataset.GenomicRegionDataProvider.settings ) def genomic_region_dataprovider( self, dataset, **settings ): settings[ 'comment_char' ] = '@' return dataproviders.dataset.GenomicRegionDataProvider( dataset, 2, 3, 3, **settings ) - @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region-map', + dataproviders.dataset.GenomicRegionDataProvider.settings ) def genomic_region_map_dataprovider( self, 
dataset, **settings ): settings[ 'comment_char' ] = '@' return dataproviders.dataset.GenomicRegionDataProvider( dataset, 2, 3, 3, True, **settings ) @@ -621,11 +623,13 @@ return False # ------------- Dataproviders - @dataproviders.decorators.dataprovider_factory( 'genomic-region' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region', + dataproviders.dataset.GenomicRegionDataProvider.settings ) def genomic_region_dataprovider( self, dataset, **settings ): return dataproviders.dataset.GenomicRegionDataProvider( dataset, **settings ) - @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region-map', + dataproviders.dataset.GenomicRegionDataProvider.settings ) def genomic_region_map_dataprovider( self, dataset, **settings ): settings[ 'named_columns' ] = True return self.genomic_region_dataprovider( dataset, **settings ) @@ -668,11 +672,13 @@ dataset.metadata.sample_names = line.split()[ 9: ] # ------------- Dataproviders - @dataproviders.decorators.dataprovider_factory( 'genomic-region' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region', + dataproviders.dataset.GenomicRegionDataProvider.settings ) def genomic_region_dataprovider( self, dataset, **settings ): return dataproviders.dataset.GenomicRegionDataProvider( dataset, 0, 1, 1, **settings ) - @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' ) + @dataproviders.decorators.dataprovider_factory( 'genomic-region-map', + dataproviders.dataset.GenomicRegionDataProvider.settings ) def genomic_region_map_dataprovider( self, dataset, **settings ): settings[ 'named_columns' ] = True return self.genomic_region_dataprovider( dataset, **settings ) diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/jobs/__init__.py --- a/lib/galaxy/jobs/__init__.py +++ b/lib/galaxy/jobs/__init__.py @@ -692,7 +692,10 @@ incoming['__user_email__'] = incoming['userEmail'] = user_email incoming['__user_name__'] = user_name # Build params, done before hook so hook can use - param_dict = self.tool.build_param_dict( incoming, inp_data, out_data, self.get_output_fnames(), self.working_directory ) + param_dict = self.tool.build_param_dict( incoming, + inp_data, out_data, + self.get_output_fnames(), + self.working_directory ) # Certain tools require tasks to be completed prior to job execution # ( this used to be performed in the "exec_before_job" hook, but hooks are deprecated ). self.tool.exec_before_job( self.queue.app, inp_data, out_data, param_dict ) @@ -919,8 +922,6 @@ return self.fail( "Job %s's output dataset(s) could not be read" % job.id ) job_context = ExpressionContext( dict( stdout = job.stdout, stderr = job.stderr ) ) - #DBTODO unused - #job_tool = self.app.toolbox.tools_by_id.get( job.tool_id, None ) for dataset_assoc in job.output_datasets + job.output_library_datasets: context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset ) #should this also be checking library associations? - can a library item be added from a history before the job has ended? 
- lets not allow this to occur diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/jobs/handler.py --- a/lib/galaxy/jobs/handler.py +++ b/lib/galaxy/jobs/handler.py @@ -10,7 +10,7 @@ from sqlalchemy.sql.expression import and_, or_, select, func -from galaxy import util, model +from galaxy import model from galaxy.jobs import Sleeper, JobWrapper, TaskWrapper, JobDestination log = logging.getLogger( __name__ ) diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/model/__init__.py --- a/lib/galaxy/model/__init__.py +++ b/lib/galaxy/model/__init__.py @@ -3569,7 +3569,7 @@ def repository_dependencies_being_installed( self ): required_repositories_being_installed = [] for required_repository in self.repository_dependencies: - if tool_dependency.status == ToolDependency.installation_status.INSTALLING: + if required_repository.status == self.installation_status.INSTALLING: required_repositories_being_installed.append( required_repository ) return required_repositories_being_installed @property diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/model/mapping.py --- a/lib/galaxy/model/mapping.py +++ b/lib/galaxy/model/mapping.py @@ -16,6 +16,7 @@ from sqlalchemy.orm.collections import attribute_mapped_collection from galaxy import model +from galaxy.model.orm import dialect_to_egg from galaxy.model.custom_types import JSONType, MetadataType, TrimmedString, UUIDType from galaxy.security import GalaxyRBACAgent from galaxy.util.bunch import Bunch @@ -29,11 +30,6 @@ # For backward compatibility with "context.current" context.current = Session -dialect_to_egg = { - "sqlite" : "pysqlite>=2", - "postgres" : "psycopg2", - "mysql" : "MySQL_python" -} # NOTE REGARDING TIMESTAMPS: # It is currently difficult to have the timestamps calculated by the @@ -1616,17 +1612,27 @@ primaryjoin=( model.LibraryDatasetDatasetInfoAssociation.table.c.form_values_id == model.FormValues.table.c.id ) ) ) ) -mapper( model.JobToInputDatasetAssociation, model.JobToInputDatasetAssociation.table, - properties=dict( job=relation( model.Job ), dataset=relation( model.HistoryDatasetAssociation, lazy=False, backref="dependent_jobs" ) ) ) +mapper( model.JobToInputDatasetAssociation, + model.JobToInputDatasetAssociation.table, properties=dict( + job=relation( model.Job ), dataset=relation( + model.HistoryDatasetAssociation, lazy=False, + backref="dependent_jobs" ) ) ) -mapper( model.JobToOutputDatasetAssociation, model.JobToOutputDatasetAssociation.table, - properties=dict( job=relation( model.Job ), dataset=relation( model.HistoryDatasetAssociation, lazy=False ) ) ) +mapper( model.JobToOutputDatasetAssociation, + model.JobToOutputDatasetAssociation.table, properties=dict( + job=relation( model.Job ), dataset=relation( + model.HistoryDatasetAssociation, lazy=False ) ) ) -mapper( model.JobToInputLibraryDatasetAssociation, model.JobToInputLibraryDatasetAssociation.table, - properties=dict( job=relation( model.Job ), dataset=relation( model.LibraryDatasetDatasetAssociation, lazy=False, backref="dependent_jobs" ) ) ) +mapper( model.JobToInputLibraryDatasetAssociation, + model.JobToInputLibraryDatasetAssociation.table, properties=dict( + job=relation( model.Job ), dataset=relation( + model.LibraryDatasetDatasetAssociation, lazy=False, + backref="dependent_jobs" ) ) ) -mapper( model.JobToOutputLibraryDatasetAssociation, model.JobToOutputLibraryDatasetAssociation.table, - properties=dict( 
job=relation( model.Job ), dataset=relation( model.LibraryDatasetDatasetAssociation, lazy=False ) ) ) +mapper( model.JobToOutputLibraryDatasetAssociation, + model.JobToOutputLibraryDatasetAssociation.table, properties=dict( + job=relation( model.Job ), dataset=relation( + model.LibraryDatasetDatasetAssociation, lazy=False ) ) ) mapper( model.JobParameter, model.JobParameter.table ) diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/model/migrate/check.py --- a/lib/galaxy/model/migrate/check.py +++ b/lib/galaxy/model/migrate/check.py @@ -12,16 +12,13 @@ from sqlalchemy.exc import NoSuchTableError from migrate.versioning import repository, schema +from galaxy.model.orm import dialect_to_egg + log = logging.getLogger( __name__ ) # path relative to galaxy migrate_repository_directory = os.path.dirname( __file__ ).replace( os.getcwd() + os.path.sep, '', 1 ) migrate_repository = repository.Repository( migrate_repository_directory ) -dialect_to_egg = { - "sqlite" : "pysqlite>=2", - "postgres" : "psycopg2", - "mysql" : "MySQL_python" -} def create_or_verify_database( url, galaxy_config_file, engine_options={}, app=None ): """ @@ -47,6 +44,7 @@ # Let this go, it could possibly work with db's we don't support log.error( "database_connection contains an unknown SQLAlchemy database dialect: %s" % dialect ) # Create engine and metadata + print url, engine_options engine = create_engine( url, **engine_options ) meta = MetaData( bind=engine ) # Try to load dataset table diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/model/orm/__init__.py --- a/lib/galaxy/model/orm/__init__.py +++ b/lib/galaxy/model/orm/__init__.py @@ -7,3 +7,10 @@ import sqlalchemy.exc from sqlalchemy.ext.orderinglist import ordering_list + +dialect_to_egg = { + "sqlite" : "pysqlite>=2", + "postgres" : "psycopg2", + "postgresql" : "psycopg2", + "mysql" : "MySQL_python" +} diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/tools/__init__.py --- a/lib/galaxy/tools/__init__.py +++ b/lib/galaxy/tools/__init__.py @@ -816,6 +816,7 @@ """ def __init__( self ): self.page = 0 + self.rerun_remap_job_id = None self.inputs = None def encode( self, tool, app, secure=True ): """ @@ -825,6 +826,7 @@ # page in that dict value = params_to_strings( tool.inputs, self.inputs, app ) value["__page__"] = self.page + value["__rerun_remap_job_id__"] = self.rerun_remap_job_id value = simplejson.dumps( value ) # Make it secure if secure: @@ -846,6 +848,10 @@ # Restore from string values = json_fix( simplejson.loads( value ) ) self.page = values.pop( "__page__" ) + if '__rerun_remap_job_id__' in values: + self.rerun_remap_job_id = values.pop( "__rerun_remap_job_id__" ) + else: + self.rerun_remap_job_id = None self.inputs = params_from_strings( tool.inputs, values, app, ignore_errors=True ) class ToolOutput( object ): @@ -933,6 +939,7 @@ self.input_required = False self.display_interface = True self.require_login = False + self.rerun = False # Define a place to keep track of all input These # differ from the inputs dictionary in that inputs can be page # elements like conditionals, but input_params are basic form @@ -1521,7 +1528,8 @@ elif ( re.search( "fatal", err_level, re.IGNORECASE ) ): return_level = StdioErrorLevel.FATAL else: - log.debug( "Error level %s did not match warning/fatal" % err_level ) + log.debug( "Tool %s: error level %s did not match log/warning/fatal" % + ( self.id, err_level ) ) 
except Exception: log.error( "Exception in parse_error_level " + str(sys.exc_info() ) ) @@ -1933,7 +1941,10 @@ # If we've completed the last page we can execute the tool elif state.page == self.last_page: try: - _, out_data = self.execute( trans, incoming=params, history=history ) + rerun_remap_job_id = None + if 'rerun_remap_job_id' in incoming: + rerun_remap_job_id = trans.app.security.decode_id(incoming['rerun_remap_job_id']) + _, out_data = self.execute( trans, incoming=params, history=history, rerun_remap_job_id=rerun_remap_job_id ) except httpexceptions.HTTPFound, e: #if it's a paste redirect exception, pass it up the stack raise e @@ -2502,7 +2513,6 @@ datatypes_registry = self.app.datatypes_registry, tool = self, name = name ) - if data: for child in data.children: param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child ) diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/tools/actions/__init__.py --- a/lib/galaxy/tools/actions/__init__.py +++ b/lib/galaxy/tools/actions/__init__.py @@ -1,15 +1,16 @@ +import os +import galaxy.tools + +from galaxy.exceptions import ObjectInvalid from galaxy.model import LibraryDatasetDatasetAssociation -from galaxy.util.bunch import Bunch +from galaxy.tools.parameters import DataToolParameter, SelectToolParameter +from galaxy.tools.parameters.grouping import Conditional, Repeat +from galaxy.util.json import from_json_string +from galaxy.util.json import to_json_string +from galaxy.util.none_like import NoneDataset from galaxy.util.odict import odict -from galaxy.util.json import to_json_string -from galaxy.tools.parameters import * -from galaxy.tools.parameters.grouping import * from galaxy.util.template import fill_template -from galaxy.util.none_like import NoneDataset from galaxy.web import url_for -from galaxy.exceptions import ObjectInvalid -import galaxy.tools -from types import * import logging log = logging.getLogger( __name__ ) @@ -107,7 +108,7 @@ tool.visit_inputs( param_values, visitor ) return input_datasets - def execute(self, tool, trans, incoming={}, return_job=False, set_output_hid=True, set_output_history=True, history=None, job_params=None ): + def execute(self, tool, trans, incoming={}, return_job=False, set_output_hid=True, set_output_history=True, history=None, job_params=None, rerun_remap_job_id=None): """ Executes a tool, creating job and tool outputs, associating them, and submitting the job to the job queue. If history is not specified, use @@ -409,6 +410,40 @@ job.params = to_json_string( job_params ) job.set_handler(tool.get_job_handler(job_params)) trans.sa_session.add( job ) + # Now that we have a job id, we can remap any outputs if this is a rerun and the user chose to continue dependent jobs + # This functionality requires tracking jobs in the database. 
+ if trans.app.config.track_jobs_in_database and rerun_remap_job_id is not None: + try: + old_job = trans.sa_session.query( trans.app.model.Job ).get(rerun_remap_job_id) + assert old_job is not None, '(%s/%s): Old job id is invalid' % (rerun_remap_job_id, job.id) + assert old_job.tool_id == job.tool_id, '(%s/%s): Old tool id (%s) does not match rerun tool id (%s)' % (old_job.id, job.id, old_job.tool_id, job.tool_id) + if trans.user is not None: + assert old_job.user_id == trans.user.id, '(%s/%s): Old user id (%s) does not match rerun user id (%s)' % (old_job.id, job.id, old_job.user_id, trans.user.id) + elif trans.user is None and type( galaxy_session ) == trans.model.GalaxySession: + assert old_job.session_id == galaxy_session.id, '(%s/%s): Old session id (%s) does not match rerun session id (%s)' % (old_job.id, job.id, old_job.session_id, galaxy_session.id) + else: + raise Exception('(%s/%s): Remapping via the API is not (yet) supported' % (old_job.id, job.id)) + for jtod in old_job.output_datasets: + for (job_to_remap, jtid) in [(jtid.job, jtid) for jtid in jtod.dataset.dependent_jobs]: + if (trans.user is not None and job_to_remap.user_id == trans.user.id) or (trans.user is None and job_to_remap.session_id == galaxy_session.id): + if job_to_remap.state == job_to_remap.states.PAUSED: + job_to_remap.state = job_to_remap.states.NEW + for hda in [ dep_jtod.dataset for dep_jtod in job_to_remap.output_datasets ]: + if hda.state == hda.states.PAUSED: + hda.state = hda.states.NEW + hda.info = None + for p in job_to_remap.parameters: + if p.name == jtid.name and p.value == str(jtod.dataset.id): + p.value = str(out_data[jtod.name].id) + jtid.dataset = out_data[jtod.name] + jtid.dataset.hid = jtod.dataset.hid + log.info('Job %s input HDA %s remapped to new HDA %s' % (job_to_remap.id, jtod.dataset.id, jtid.dataset.id)) + trans.sa_session.add(job_to_remap) + trans.sa_session.add(jtid) + jtod.dataset.visible = False + trans.sa_session.add(jtod) + except Exception, e: + log.exception('Cannot remap rerun dependencies.') trans.sa_session.flush() # Some tools are not really executable, but jobs are still created for them ( for record keeping ). # Examples include tools that redirect to other applications ( epigraph ). These special tools must diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/tools/actions/history_imp_exp.py --- a/lib/galaxy/tools/actions/history_imp_exp.py +++ b/lib/galaxy/tools/actions/history_imp_exp.py @@ -9,7 +9,7 @@ class ImportHistoryToolAction( ToolAction ): """Tool action used for importing a history to an archive. """ - def execute( self, tool, trans, incoming = {}, set_output_hid = False, overwrite = True, history=None ): + def execute( self, tool, trans, incoming = {}, set_output_hid = False, overwrite = True, history=None, **kwargs ): # # Create job. # @@ -57,7 +57,7 @@ class ExportHistoryToolAction( ToolAction ): """Tool action used for exporting a history to an archive. """ - def execute( self, tool, trans, incoming = {}, set_output_hid = False, overwrite = True, history=None ): + def execute( self, tool, trans, incoming = {}, set_output_hid = False, overwrite = True, history=None, **kwargs ): # # Get history to export. 
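The remapping loop in the tools/actions hunk above is dense; distilled to its core with stub objects (hypothetical helper, simplified attributes, runnable on its own):

    # Distillation of the rerun-remap logic: point each paused dependent job
    # at the rerun's new output dataset and wake it up.
    class Bunch( object ):
        def __init__( self, **kwds ): self.__dict__.update( kwds )

    states = Bunch( PAUSED='paused', NEW='new' )
    new_hda = Bunch( id=7 )
    jtid = Bunch( name='input1', dataset=None )
    job_to_remap = Bunch( state=states.PAUSED, states=states,
                          parameters=[ Bunch( name='input1', value='3' ) ] )

    def remap_dependent_job( job_to_remap, jtid, old_dataset_id, new_hda ):
        if job_to_remap.state == job_to_remap.states.PAUSED:
            job_to_remap.state = job_to_remap.states.NEW
        for p in job_to_remap.parameters:
            # rewrite the recorded input parameter to name the new dataset
            if p.name == jtid.name and p.value == str( old_dataset_id ):
                p.value = str( new_hda.id )
        jtid.dataset = new_hda  # the input association now feeds from the rerun

    remap_dependent_job( job_to_remap, jtid, 3, new_hda )
    print job_to_remap.state, job_to_remap.parameters[0].value
    # new 7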
# diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/tools/actions/metadata.py --- a/lib/galaxy/tools/actions/metadata.py +++ b/lib/galaxy/tools/actions/metadata.py @@ -9,7 +9,7 @@ class SetMetadataToolAction( ToolAction ): """Tool action used for setting external metadata on an existing dataset""" - def execute( self, tool, trans, incoming={}, set_output_hid=False, overwrite=True, history=None, job_params=None ): + def execute( self, tool, trans, incoming={}, set_output_hid=False, overwrite=True, history=None, job_params=None, **kwargs ): """ Execute using a web transaction. """ diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/tools/actions/upload.py --- a/lib/galaxy/tools/actions/upload.py +++ b/lib/galaxy/tools/actions/upload.py @@ -5,7 +5,7 @@ log = logging.getLogger( __name__ ) class UploadToolAction( ToolAction ): - def execute( self, tool, trans, incoming={}, set_output_hid = True, history=None ): + def execute( self, tool, trans, incoming={}, set_output_hid = True, history=None, **kwargs ): dataset_upload_inputs = [] for input_name, input in tool.inputs.iteritems(): if input.type == "upload_dataset": diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/visualization/data_providers/registry.py --- a/lib/galaxy/visualization/data_providers/registry.py +++ b/lib/galaxy/visualization/data_providers/registry.py @@ -32,7 +32,7 @@ "bigwig": genome.BigWigDataProvider, "bigbed": genome.BigBedDataProvider, - "column": ColumnDataProvider + "column_with_stats": ColumnDataProvider } def get_data_provider( self, trans, name=None, source='data', raw=False, original_dataset=None ): diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/visualization/registry.py --- a/lib/galaxy/visualization/registry.py +++ b/lib/galaxy/visualization/registry.py @@ -15,22 +15,27 @@ log = logging.getLogger( __name__ ) __TODO__ = """ - BUGS: - anon users clicking a viz link gets 'must be' msg in galaxy_main (w/ masthead) - should not show visualizations (no icon)? - newick files aren't being sniffed prop? - datatype is txt +BUGS: + anon users clicking a viz link gets 'must be' msg in galaxy_main (w/ masthead) + should not show visualizations (no icon)? + newick files aren't being sniffed prop? - datatype is txt - have parsers create objects instead of dicts - allow data_sources with no model_class but have tests (isAdmin, etc.) - maybe that's an instance of User model_class? - some confused vocabulary in docs, var names - tests: - anding, grouping, not - data_sources: - lists of - add description element to visualization. +have parsers create objects instead of dicts +allow data_sources with no model_class but have tests (isAdmin, etc.) + maybe that's an instance of User model_class? +some confused vocabulary in docs, var names +tests: + anding, grouping, not +data_sources: + lists of +add description element to visualization. 
+ +TESTS to add: + has dataprovider + user is admin """ +# ------------------------------------------------------------------- the registry class VisualizationsRegistry( object ): """ Main responsibilities are: @@ -93,6 +98,45 @@ """ self.listings = VisualizationsConfigParser.parse( self.configuration_filepath ) + def get_visualization( self, trans, visualization_name, target_object ): + """ + Return data to build a url to the visualization with the given + `visualization_name` if it's applicable to `target_object` or + `None` if it's not. + """ + # a little weird to pass trans because this registry is part of the trans.app + listing_data = self.listings.get( visualization_name, None ) + if not listing_data: + return None + + data_sources = listing_data[ 'data_sources' ] + for data_source in data_sources: + # currently a model class is required + model_class = data_source[ 'model_class' ] + if not isinstance( target_object, model_class ): + continue + + # tests are optional - default is the above class test + tests = data_source[ 'tests' ] + if tests and not self.is_object_applicable( trans, target_object, tests ): + continue + + param_data = data_source[ 'to_params' ] + url = self.get_visualization_url( trans, target_object, visualization_name, param_data ) + link_text = listing_data.get( 'link_text', None ) + if not link_text: + # default to visualization name, titlecase, and replace underscores + link_text = visualization_name.title().replace( '_', ' ' ) + render_location = listing_data.get( 'render_location' ) + # remap some of these vars for direct use in ui.js, PopupMenu (e.g. text->html) + return { + 'href' : url, + 'html' : link_text, + 'target': render_location + } + + return None + # -- building links to visualizations from objects -- def get_visualizations( self, trans, target_object ): """ @@ -100,36 +144,11 @@ the urls to call in order to render the visualizations. """ #TODO:?? a list of objects? YAGNI? - # a little weird to pass trans because this registry is part of the trans.app applicable_visualizations = [] - for vis_name, listing_data in self.listings.items(): - - data_sources = listing_data[ 'data_sources' ] - for data_source in data_sources: - # currently a model class is required - model_class = data_source[ 'model_class' ] - if not isinstance( target_object, model_class ): - continue - - # tests are optional - default is the above class test - tests = data_source[ 'tests' ] - if tests and not self.is_object_applicable( trans, target_object, tests ): - continue - - param_data = data_source[ 'to_params' ] - url = self.get_visualization_url( trans, target_object, vis_name, param_data ) - link_text = listing_data.get( 'link_text', None ) - if not link_text: - # default to visualization name, titlecase, and replace underscores - link_text = vis_name.title().replace( '_', ' ' ) - render_location = listing_data.get( 'render_location' ) - # remap some of these vars for direct use in ui.js, PopupMenu (e.g. 
text->html) - applicable_visualizations.append({ - 'href' : url, - 'html' : link_text, - 'target': render_location - }) - + for vis_name in self.listings: + url_data = self.get_visualization( trans, vis_name, target_object ) + if url_data: + applicable_visualizations.append( url_data ) return applicable_visualizations def is_object_applicable( self, trans, target_object, data_source_tests ): @@ -151,10 +170,11 @@ # convert datatypes to their actual classes (for use with isinstance) test_result = trans.app.datatypes_registry.get_datatype_class_by_name( test_result ) if not test_result: - # warn if can't find class, but continue + # warn if can't find class, but continue (with other tests) log.warn( 'visualizations_registry cannot find class (%s) for applicability test', test_result ) continue + #NOTE: tests are OR'd, if any test passes - the visualization can be applied if test_fn( target_object, test_result ): #log.debug( 'test passed' ) return True diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/webapps/galaxy/api/datasets.py --- a/lib/galaxy/webapps/galaxy/api/datasets.py +++ b/lib/galaxy/webapps/galaxy/api/datasets.py @@ -6,6 +6,7 @@ from galaxy.web.base.controller import BaseAPIController, UsesVisualizationMixin, UsesHistoryDatasetAssociationMixin from galaxy.web.base.controller import UsesHistoryMixin from galaxy.web.framework.helpers import is_true +from galaxy.datatypes import dataproviders import logging log = logging.getLogger( __name__ ) @@ -217,10 +218,24 @@ return msg registry = trans.app.data_provider_registry + # allow the caller to specifiy which provider is used - if provider and provider in registry.dataset_type_name_to_data_provider: - data_provider = registry.dataset_type_name_to_data_provider[ provider ]( dataset ) - # or have it look up by datatype + # pulling from the original providers if possible, then the new providers + if provider: + if provider in registry.dataset_type_name_to_data_provider: + data_provider = registry.dataset_type_name_to_data_provider[ provider ]( dataset ) + + elif dataset.datatype.has_dataprovider( provider ): + kwargs = dataset.datatype.dataproviders[ provider ].parse_query_string_settings( kwargs ) + # use dictionary to allow more than the data itself to be returned (data totals, other meta, etc.) + return { + 'data': list( dataset.datatype.dataprovider( dataset, provider, **kwargs ) ) + } + + else: + raise dataproviders.exceptions.NoProviderAvailable( dataset.datatype, provider ) + + # no provider name: look up by datatype else: data_provider = registry.get_data_provider( trans, raw=True, original_dataset=dataset ) diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/webapps/galaxy/api/library_contents.py --- a/lib/galaxy/webapps/galaxy/api/library_contents.py +++ b/lib/galaxy/webapps/galaxy/api/library_contents.py @@ -220,7 +220,8 @@ return { 'error' : 'user has no permission to add to library folder (%s)' %( folder_id ) } ldda = self.copy_hda_to_library_folder( trans, hda, folder, ldda_message=ldda_message ) - rval = ldda.get_api_value() + ldda_dict = ldda.get_api_value() + rval = trans.security.encode_dict_ids( ldda_dict ) except Exception, exc: #TODO: grrr... 
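Two of the changes above lend themselves to short usage sketches. First, the registry refactor extracts the per-name lookup into get_visualization(), which get_visualizations() now simply loops over; assuming the registry hangs off trans.app (the diff's own comment suggests it does) and using a made-up visualization name:

    # Returns None if not applicable to the target object, else a dict
    # ready for the UI, e.g.
    # { 'href' : '/visualization/...', 'html' : 'Scatterplot', 'target' : '_top' }
    link = trans.app.visualizations_registry.get_visualization( trans, 'scatterplot', hda )

Second, the datasets API change lets the caller name a dataprovider explicitly, trying the registry's providers first and then the datatype's own. A hedged sketch of that second path (the 'column' provider name and the 'limit'/'offset' settings are illustrative assumptions, not a documented contract):

    provider = 'column'
    kwargs = { 'limit' : '100', 'offset' : '0' }    # raw query-string values
    if dataset.datatype.has_dataprovider( provider ):
        # convert raw query-string strings into typed provider settings
        kwargs = dataset.datatype.dataproviders[ provider ].parse_query_string_settings( kwargs )
        data = list( dataset.datatype.dataprovider( dataset, provider, **kwargs ) )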
@@ -234,7 +235,6 @@ return rval - @web.expose_api def update( self, trans, id, library_id, payload, **kwd ): """ diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/webapps/galaxy/api/provenance.py --- a/lib/galaxy/webapps/galaxy/api/provenance.py +++ b/lib/galaxy/webapps/galaxy/api/provenance.py @@ -3,13 +3,13 @@ """ import logging from galaxy import web -from galaxy.web.base.controller import BaseAPIController +from galaxy.web.base.controller import BaseAPIController, UsesHistoryMixin from paste.httpexceptions import HTTPNotImplemented, HTTPBadRequest log = logging.getLogger( __name__ ) -class BaseProvenanceController( BaseAPIController ): +class BaseProvenanceController( BaseAPIController, UsesHistoryMixin ): """ """ @web.expose_api @@ -43,7 +43,7 @@ item = item.copied_from_library_dataset_dataset_association return { "id" : trans.security.encode_id(item.id), - "uuid" : item.dataset.uuid, + "uuid" : ( lambda uuid: str( uuid ) if uuid else None )( item.dataset.uuid), "tool_id" : item.creating_job.tool_id, "parameters" : self._get_job_record(trans, item.creating_job, follow) } @@ -59,7 +59,7 @@ else: out[in_d.name] = { "id" : trans.security.encode_id(in_d.dataset.id), - "uuid" : in_d.dataset.dataset.uuid + "uuid" : ( lambda uuid: str( uuid ) if uuid else None )( in_d.dataset.dataset.uuid ) } return out diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py --- a/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py +++ b/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py @@ -9,6 +9,7 @@ from tool_shed.galaxy_install import repository_util from tool_shed.util import common_util +from tool_shed.util import encoding_util import tool_shed.util.shed_util_common as suc log = logging.getLogger( __name__ ) @@ -179,6 +180,11 @@ # Get the information about the Galaxy components (e.g., tool pane section, tool config file, etc) that will contain the repository information. install_repository_dependencies = payload.get( 'install_repository_dependencies', False ) install_tool_dependencies = payload.get( 'install_tool_dependencies', False ) + if install_tool_dependencies: + if trans.app.config.tool_dependency_dir is None: + no_tool_dependency_dir_message = "Tool dependencies can be automatically installed only if you set the value of your 'tool_dependency_dir' " + no_tool_dependency_dir_message += "setting in your Galaxy configuration file (universe_wsgi.ini) and restart your Galaxy server." + raise HTTPBadRequest( detail=no_tool_dependency_dir_message ) new_tool_panel_section = payload.get( 'new_tool_panel_section_label', '' ) shed_tool_conf = payload.get( 'shed_tool_conf', None ) if shed_tool_conf: @@ -211,13 +217,8 @@ tool_path=tool_path, tool_shed_url=tool_shed_url ) # Create the tool_shed_repository database records and gather additional information for repository installation. - created_or_updated_tool_shed_repositories, tool_panel_section_keys, repo_info_dicts, filtered_repo_info_dicts, message = \ + created_or_updated_tool_shed_repositories, tool_panel_section_keys, repo_info_dicts, filtered_repo_info_dicts = \ repository_util.handle_tool_shed_repositories( trans, installation_dict, using_api=True ) - if message and len( repo_info_dicts ) == 1: - # We're attempting to install a single repository that has already been installed into this Galaxy instance. 
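The inline ( lambda uuid: str( uuid ) if uuid else None )( ... ) idiom used twice in the provenance controller above is a verbose way to write a small helper; an equivalent sketch (the helper name is made up, and the motivation is presumably that a raw UUID object is not JSON-serializable while an absent UUID should stay None):

    def uuid_to_str( uuid ):
        # stringify a dataset UUID when present, keep None as None
        return str( uuid ) if uuid else None

    # equivalent call site:
    #   "uuid" : uuid_to_str( item.dataset.uuid )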
- log.error( message, exc_info=True ) - trans.response.status = 500 - return dict( status='error', error=message ) if created_or_updated_tool_shed_repositories: # Build the dictionary of information necessary for installing the repositories. installation_dict = dict( created_or_updated_tool_shed_repositories=created_or_updated_tool_shed_repositories, @@ -244,7 +245,7 @@ # order the list of tsr_ids to ensure all repositories install in the required order. tsr_ids = [ trans.security.encode_id( tool_shed_repository.id ) for tool_shed_repository in tool_shed_repositories ] ordered_tsr_ids, ordered_repo_info_dicts, ordered_tool_panel_section_keys = \ - repository_util.order_components_for_installation( trans, tsr_ids, repo_info_dicts, tool_panel_section_keys ) + repository_util.order_components_for_installation( trans, tsr_ids, repo_info_dicts, tool_panel_section_keys=tool_panel_section_keys ) # Install the repositories, keeping track of each one for later display. for index, tsr_id in enumerate( ordered_tsr_ids ): tool_shed_repository = trans.sa_session.query( trans.model.ToolShedRepository ).get( trans.security.decode_id( tsr_id ) ) @@ -266,11 +267,7 @@ action='show', id=trans.security.encode_id( tool_shed_repository.id ) ) installed_tool_shed_repositories.append( tool_shed_repository_dict ) - elif message: - log.error( message, exc_info=True ) - trans.response.status = 500 - return dict( status='error', error=message ) - elif not created_or_updated_tool_shed_repositories and not message: + else: # We're attempting to install more than 1 repository, and all of them have already been installed. return dict( status='error', error='All repositories that you are attempting to install have been previously installed.' ) # Display the list of installed repositories. @@ -362,3 +359,56 @@ elif isinstance( installed_tool_shed_repositories, list ): all_installed_tool_shed_repositories.extend( installed_tool_shed_repositories ) return all_installed_tool_shed_repositories + + @web.expose_api + def repair_repository_revision( self, trans, payload, **kwd ): + """ + POST /api/tool_shed_repositories/repair_repository_revision + Repair a specified repository revision previously installed into Galaxy. + + :param key: the current Galaxy admin user's API key + + The following parameters are included in the payload. + :param tool_shed_url (required): the base URL of the Tool Shed from which the Repository was installed + :param name (required): the name of the Repository + :param owner (required): the owner of the Repository + :param changset_revision (required): the changset_revision of the RepositoryMetadata object associated with the Repository + """ + api_key = kwd.get( 'key', None ) + # Get the information about the repository to be installed from the payload. + tool_shed_url = payload.get( 'tool_shed_url', '' ) + if not tool_shed_url: + raise HTTPBadRequest( detail="Missing required parameter 'tool_shed_url'." ) + name = payload.get( 'name', '' ) + if not name: + raise HTTPBadRequest( detail="Missing required parameter 'name'." ) + owner = payload.get( 'owner', '' ) + if not owner: + raise HTTPBadRequest( detail="Missing required parameter 'owner'." ) + changeset_revision = payload.get( 'changeset_revision', '' ) + if not changeset_revision: + raise HTTPBadRequest( detail="Missing required parameter 'changeset_revision'." 
) + tool_shed_repositories = [] + tool_shed_repository = suc.get_tool_shed_repository_by_shed_name_owner_changeset_revision( trans.app, tool_shed_url, name, owner, changeset_revision ) + repair_dict = repository_util.get_repair_dict( trans, tool_shed_repository ) + ordered_tsr_ids = repair_dict.get( 'ordered_tsr_ids', [] ) + ordered_repo_info_dicts = repair_dict.get( 'ordered_repo_info_dicts', [] ) + if ordered_tsr_ids and ordered_repo_info_dicts: + repositories_for_repair = [] + for index, tsr_id in enumerate( ordered_tsr_ids ): + repository = trans.sa_session.query( trans.model.ToolShedRepository ).get( trans.security.decode_id( tsr_id ) ) + repo_info_dict = ordered_repo_info_dicts[ index ] + # TODO: handle errors in repair_dict. + repair_dict = repository_util.repair_tool_shed_repository( trans, + repository, + encoding_util.tool_shed_encode( repo_info_dict ) ) + repository_dict = repository.get_api_value( value_mapper=default_tool_shed_repository_value_mapper( trans, repository ) ) + repository_dict[ 'url' ] = web.url_for( controller='tool_shed_repositories', + action='show', + id=trans.security.encode_id( repository.id ) ) + if repair_dict: + errors = repair_dict.get( repository.name, [] ) + repository_dict[ 'errors_attempting_repair' ] = ' '.join( errors ) + tool_shed_repositories.append( repository_dict ) + # Display the list of repaired repositories. + return tool_shed_repositories diff -r 320ae026f73a849c77a580410a5a3d6927ef1b91 -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 lib/galaxy/webapps/galaxy/buildapp.py --- a/lib/galaxy/webapps/galaxy/buildapp.py +++ b/lib/galaxy/webapps/galaxy/buildapp.py @@ -163,12 +163,13 @@ webapp.mapper.connect("workflow_dict", '/api/workflows/download/{workflow_id}', controller='workflows', action='workflow_dict', conditions=dict(method=['GET'])) # Galaxy API for tool shed features. webapp.mapper.resource( 'tool_shed_repository', - 'tool_shed_repositories', - controller='tool_shed_repositories', - name_prefix='tool_shed_repository_', - path_prefix='/api', - new={ 'install_repository_revision' : 'POST' }, - parent_resources=dict( member_name='tool_shed_repository', collection_name='tool_shed_repositories' ) ) + 'tool_shed_repositories', + member={ 'repair_repository_revision' : 'POST' }, + controller='tool_shed_repositories', + name_prefix='tool_shed_repository_', + path_prefix='/api', + new={ 'install_repository_revision' : 'POST' }, + parent_resources=dict( member_name='tool_shed_repository', collection_name='tool_shed_repositories' ) ) # Connect logger from app if app.trace_logger: webapp.trace_logger = app.trace_logger This diff is so big that we needed to truncate the remainder. https://bitbucket.org/galaxy/galaxy-central/commits/5f24b2c4f38b/ Changeset: 5f24b2c4f38b Branch: search User: Kyle Ellrott Date: 2013-06-27 21:00:27 Summary: Fixing the job input/output values to format that is consistant with format used in other areas of galaxy. 
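Concretely, this commit swaps the provider-specific key names for the { id, src } pair used in other areas of the Galaxy API. A before/after illustration (the parameter names and ids are made up; the key names come from the diff below):

    # before                               # after
    { "input1" : { "hda_id" : 42 } }       { "input1" : { "id" : 42, "src" : "hda" } }
    { "out1"   : { "ldda_id" : 7 } }       { "out1"   : { "id" : 7, "src" : "ldda" } }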
Affected #: 1 file diff -r 460c7dc52f92abac4d9ea9e5700dc8238b2a7fc4 -r 5f24b2c4f38bd65453cccad0b9bf526eff315918 lib/galaxy/model/__init__.py --- a/lib/galaxy/model/__init__.py +++ b/lib/galaxy/model/__init__.py @@ -372,10 +372,10 @@ input_dict = {} for i in self.input_datasets: if i.dataset is not None: - input_dict[i.name] = {"hda_id" : i.dataset.id} + input_dict[i.name] = {"id" : i.dataset.id, "src" : "hda"} for i in self.input_library_datasets: if i.dataset is not None: - input_dict[i.name] = {"ldda_id" : i.dataset.id} + input_dict[i.name] = {"id" : i.dataset.id, "src" : "ldda"} for k in input_dict: if k in param_dict: del param_dict[k] @@ -384,10 +384,10 @@ output_dict = {} for i in self.output_datasets: if i.dataset is not None: - output_dict[i.name] = {"hda_id" : i.dataset.id} + output_dict[i.name] = {"id" : i.dataset.id, "src" : "hda"} for i in self.output_library_datasets: if i.dataset is not None: - output_dict[i.name] = {"ldda_id" : i.dataset.id} + output_dict[i.name] = {"id" : i.dataset.id, "src" : "ldda"} rval['outputs'] = output_dict return rval https://bitbucket.org/galaxy/galaxy-central/commits/f4272212f722/ Changeset: f4272212f722 Branch: search User: Kyle Ellrott Date: 2013-06-27 21:01:43 Summary: Removing stray print statement Affected #: 1 file diff -r 5f24b2c4f38bd65453cccad0b9bf526eff315918 -r f4272212f72244f5fb045b8d978dc140e7cfa7ca lib/galaxy/model/migrate/check.py --- a/lib/galaxy/model/migrate/check.py +++ b/lib/galaxy/model/migrate/check.py @@ -44,7 +44,6 @@ # Let this go, it could possibly work with db's we don't support log.error( "database_connection contains an unknown SQLAlchemy database dialect: %s" % dialect ) # Create engine and metadata - print url, engine_options engine = create_engine( url, **engine_options ) meta = MetaData( bind=engine ) # Try to load dataset table https://bitbucket.org/galaxy/galaxy-central/commits/c2fb95ef8617/ Changeset: c2fb95ef8617 Branch: search User: Kyle Ellrott Date: 2013-06-27 22:37:14 Summary: Allowing zero length strings to be compared in the search conditions Affected #: 1 file diff -r f4272212f72244f5fb045b8d978dc140e7cfa7ca -r c2fb95ef8617cc8156ba936ee6d064ed098a8b9a lib/galaxy/model/search.py --- a/lib/galaxy/model/search.py +++ b/lib/galaxy/model/search.py @@ -507,7 +507,7 @@ | '<=' -> '<=' | 'like' -> 'like' ) -quote_word = "'" not_quote+:x "'" -> "".join(x) +quote_word = "'" not_quote*:x "'" -> "".join(x) not_quote = anything:x ?(x != "'") -> x not_dquote = anything:x ?(x != '"') -> x """ https://bitbucket.org/galaxy/galaxy-central/commits/0b7b71c9d296/ Changeset: 0b7b71c9d296 Branch: search User: Kyle Ellrott Date: 2013-07-03 01:39:01 Summary: Adding in input_ldda field to job search Affected #: 1 file diff -r c2fb95ef8617cc8156ba936ee6d064ed098a8b9a -r 0b7b71c9d296c8ce92a41905f489d1a109e85f02 lib/galaxy/model/search.py --- a/lib/galaxy/model/search.py +++ b/lib/galaxy/model/search.py @@ -418,6 +418,20 @@ ) ) +def job_input_ldda_filter(view, left, operator, right): + view.do_query = True + alias = aliased( JobToInputLibraryDatasetAssociation ) + param_name = re.sub(r'^input_ldda.', '', left) + view.query = view.query.filter( + and_( + Job.id == alias.job_id, + alias.name == param_name, + alias.dataset_id == right + ) + ) + + + def job_output_hda_filter(view, left, operator, right): view.do_query = True alias = aliased( JobToOutputDatasetAssociation ) @@ -437,6 +451,7 @@ 'tool_name' : ViewField('tool_name', sqlalchemy_field=Job.tool_id), 'state' : ViewField('state', sqlalchemy_field=Job.state), 'param' : 
ViewField('param', handler=job_param_filter), + 'input_ldda' : ViewField('input_ldda', handler=job_input_ldda_filter, id_decode=True), 'input_hda' : ViewField('input_hda', handler=job_input_hda_filter, id_decode=True), 'output_hda' : ViewField('output_hda', handler=job_output_hda_filter, id_decode=True) } https://bitbucket.org/galaxy/galaxy-central/commits/3b419687739b/ Changeset: 3b419687739b Branch: search User: Kyle Ellrott Date: 2013-07-03 01:44:41 Summary: Adding missing import to search Affected #: 1 file diff -r 0b7b71c9d296c8ce92a41905f489d1a109e85f02 -r 3b419687739b3d4a759e73e99ff91633096bbbec lib/galaxy/model/search.py --- a/lib/galaxy/model/search.py +++ b/lib/galaxy/model/search.py @@ -35,7 +35,7 @@ History, Library, LibraryFolder, LibraryDataset,StoredWorkflowTagAssociation, StoredWorkflow, HistoryTagAssociation,HistoryDatasetAssociationTagAssociation, ExtendedMetadata, ExtendedMetadataIndex, HistoryAnnotationAssociation, Job, JobParameter, -JobToInputDatasetAssociation, JobToOutputDatasetAssociation, ToolVersion) +JobToInputDatasetAssociation, JobToInputLibraryDatasetAssociation, JobToOutputDatasetAssociation, ToolVersion) from galaxy.util.json import to_json_string from sqlalchemy import and_ https://bitbucket.org/galaxy/galaxy-central/commits/df1b861724c2/ Changeset: df1b861724c2 Branch: search User: Kyle Ellrott Date: 2013-07-03 01:49:50 Summary: Fixing field name in selection statement Affected #: 1 file diff -r 3b419687739b3d4a759e73e99ff91633096bbbec -r df1b861724c205412a786cce00434dc39db391b1 lib/galaxy/model/search.py --- a/lib/galaxy/model/search.py +++ b/lib/galaxy/model/search.py @@ -426,7 +426,7 @@ and_( Job.id == alias.job_id, alias.name == param_name, - alias.dataset_id == right + alias.ldda_id == right ) ) https://bitbucket.org/galaxy/galaxy-central/commits/74ad9ec46632/ Changeset: 74ad9ec46632 Branch: search User: Kyle Ellrott Date: 2013-07-18 23:46:32 Summary: Adding code to allow for search selection by 'copied_from_ldda_id' field Affected #: 2 files diff -r df1b861724c205412a786cce00434dc39db391b1 -r 74ad9ec46632d97d43f207098b80a824170bea58 lib/galaxy/model/__init__.py --- a/lib/galaxy/model/__init__.py +++ b/lib/galaxy/model/__init__.py @@ -1715,6 +1715,9 @@ misc_info = hda.info, misc_blurb = hda.blurb ) + if hda.copied_from_library_dataset_dataset_association is not None: + rval['copied_from_ldda_id'] = hda.copied_from_library_dataset_dataset_association.id + if hda.history is not None: rval['history_id'] = hda.history.id diff -r df1b861724c205412a786cce00434dc39db391b1 -r 74ad9ec46632d97d43f207098b80a824170bea58 lib/galaxy/model/search.py --- a/lib/galaxy/model/search.py +++ b/lib/galaxy/model/search.py @@ -296,8 +296,11 @@ FIELDS = { 'name' : ViewField('name', sqlalchemy_field=HistoryDatasetAssociation.name), 'id' : ViewField('id',sqlalchemy_field=HistoryDatasetAssociation.id, id_decode=True), - 'tag' : ViewField("tag", handler=history_dataset_handle_tag) - + 'tag' : ViewField("tag", handler=history_dataset_handle_tag), + 'copied_from_ldda_id' : ViewField("copied_from_ldda_id", + sqlalchemy_field=HistoryDatasetAssociation.copied_from_library_dataset_dataset_association_id, + id_decode=True), + 'deleted' : ViewField('deleted', sqlalchemy_field=HistoryDatasetAssociation.deleted) } def search(self, trans): https://bitbucket.org/galaxy/galaxy-central/commits/0e2c8d03786a/ Changeset: 0e2c8d03786a Branch: search User: kellrott Date: 2013-08-05 01:59:14 Summary: Merging Affected #: 1 file diff -r 74ad9ec46632d97d43f207098b80a824170bea58 -r 
0e2c8d03786a9bf22cde5f14da3e868dd4607e0b lib/galaxy/model/search.py --- a/lib/galaxy/model/search.py +++ b/lib/galaxy/model/search.py @@ -173,6 +173,7 @@ 'id' : ViewField('id', sqlalchemy_field=LibraryDatasetDatasetAssociation.id, id_decode=True), 'deleted' : ViewField('deleted', sqlalchemy_field=LibraryDatasetDatasetAssociation.deleted), 'parent_library_id' : ViewField('parent_library_id', id_decode=True, post_filter=ldda_parent_library_filter), + 'data_type' : ViewField('data_type', sqlalchemy_field=LibraryDatasetDatasetAssociation.extension) } def search(self, trans): https://bitbucket.org/galaxy/galaxy-central/commits/506d00954a3e/ Changeset: 506d00954a3e Branch: search User: Kyle Ellrott Date: 2013-08-23 21:05:41 Summary: galaxy-central default merge and resolve Affected #: 417 files diff -r 0e2c8d03786a9bf22cde5f14da3e868dd4607e0b -r 506d00954a3eec4808ed8c0104b5e0404841b6af .coveragerc --- /dev/null +++ b/.coveragerc @@ -0,0 +1,3 @@ +[run] +branch = True +include = lib/galaxy/* diff -r 0e2c8d03786a9bf22cde5f14da3e868dd4607e0b -r 506d00954a3eec4808ed8c0104b5e0404841b6af .hgignore --- a/.hgignore +++ b/.hgignore @@ -60,7 +60,7 @@ job_conf.xml data_manager_conf.xml shed_data_manager_conf.xml -visualizations_conf.xml +config/* static/welcome.html.* static/welcome.html @@ -82,6 +82,10 @@ # Test output run_functional_tests.html test/tool_shed/tmp/* +.coverage +htmlcov +run_unit_tests.html +test/unit/**.log # Project files *.kpf diff -r 0e2c8d03786a9bf22cde5f14da3e868dd4607e0b -r 506d00954a3eec4808ed8c0104b5e0404841b6af .hgtags --- a/.hgtags +++ b/.hgtags @@ -3,3 +3,4 @@ 75f09617abaadbc8cc732bb8ee519decaeb56ea7 release_2013.04.01 2cc8d10988e03257dc7b97f8bb332c7df745d1dd security_2013.04.08 524f246ca85395082719ae7a6ff72260d7ad5612 release_2013.06.03 +1ae95b3aa98d1ccf15b243ac3ce6a895eb7efc53 release_2013.08.12 diff -r 0e2c8d03786a9bf22cde5f14da3e868dd4607e0b -r 506d00954a3eec4808ed8c0104b5e0404841b6af README.txt --- a/README.txt +++ b/README.txt @@ -7,7 +7,7 @@ HOW TO START ============ -Galaxy requires Python 2.5, 2.6 or 2.7. To check your python version, run: +Galaxy requires Python 2.6 or 2.7. To check your python version, run: % python -V Python 2.7.3 diff -r 0e2c8d03786a9bf22cde5f14da3e868dd4607e0b -r 506d00954a3eec4808ed8c0104b5e0404841b6af buildbot_setup.sh --- a/buildbot_setup.sh +++ b/buildbot_setup.sh @@ -65,6 +65,7 @@ " SAMPLES=" +tool_conf.xml.sample datatypes_conf.xml.sample universe_wsgi.ini.sample tool_data_table_conf.xml.sample diff -r 0e2c8d03786a9bf22cde5f14da3e868dd4607e0b -r 506d00954a3eec4808ed8c0104b5e0404841b6af config/plugins/visualizations/README.txt --- /dev/null +++ b/config/plugins/visualizations/README.txt @@ -0,0 +1,34 @@ +Custom visualization plugins +---------------------------- + +Visualizations can be added to your Galaxy instance by creating +sub-directories, templates, and static files here. + +Properly configured and written visualizations will be accessible to +the user when they click the 'visualizations' icon for a dataset +in their history panel. + +The framework must be enabled in your 'universe_wsgi.ini' file by +uncommenting (and having a valid path for) the +'visualizations_plugin_directory' entry. + +For more information, see http://wiki.galaxyproject.org/VisualizationsRegistry + + +Sub-directory structure +----------------------- + +In general, sub-directories should follow the pattern: + + my_visualization/ + config/ + my_visualization.xml + static/ + ... any static files the visualization needs (if any) + templates/ + ... 
any Mako templates the visualization needs + +The XML config file for a visualization plugin can be validated on the command +line using (from your plugin directory): + + xmllint my_visualization/config/my_visualization.xml --valid --noout diff -r 0e2c8d03786a9bf22cde5f14da3e868dd4607e0b -r 506d00954a3eec4808ed8c0104b5e0404841b6af config/plugins/visualizations/circster/config/circster.xml --- /dev/null +++ b/config/plugins/visualizations/circster/config/circster.xml @@ -0,0 +1,28 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE visualization SYSTEM "../../visualization.dtd"> +<visualization name="circster"> + <data_sources> + <data_source> + <model_class>HistoryDatasetAssociation</model_class> + <test type="isinstance" test_attr="datatype" result_type="datatype">data.Data</test> + <to_param param_attr="id">dataset_id</to_param> + <to_param assign="hda">hda_ldda</to_param> + </data_source> + <data_source> + <model_class>LibraryDatasetDatasetAssociation</model_class> + <test type="isinstance" test_attr="datatype" result_type="datatype">data.Data</test> + <to_param param_attr="id">dataset_id</to_param> + <to_param assign="ldda">hda_ldda</to_param> + </data_source> + </data_sources> + <params> + <param type="visualization">id</param> + <param type="hda_or_ldda">dataset_id</param> + <param_modifier type="string" modifies="dataset_id">hda_ldda</param_modifier> + <param type="dbkey">dbkey</param> + </params> + <!-- template_root and template are currently ignored for the 'built-in' visualizations --> + <template_root>webapps/galaxy/visualization</template_root> + <template>circster.mako</template> + <render_location>_top</render_location> +</visualization> diff -r 0e2c8d03786a9bf22cde5f14da3e868dd4607e0b -r 506d00954a3eec4808ed8c0104b5e0404841b6af config/plugins/visualizations/graphview/config/graphview.xml --- /dev/null +++ b/config/plugins/visualizations/graphview/config/graphview.xml @@ -0,0 +1,20 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE visualization SYSTEM "../../visualization.dtd"> +<visualization name="Graphview"> + <data_sources> + <data_source> + <model_class>HistoryDatasetAssociation</model_class> + <test type="has_dataprovider" test_attr="datatype">node-edge</test> + <to_param param_attr="id">dataset_id</to_param> + </data_source> + <data_source> + <model_class>HistoryDatasetAssociation</model_class> + <test type="isinstance" test_attr="datatype" result_type="datatype">graph.Rdf</test> + <to_param param_attr="id">dataset_id</to_param> + </data_source> + </data_sources> + <params> + <param type="dataset" var_name_in_template="hda" required="true">dataset_id</param> + </params> + <template>graphview/templates/graphview.mako</template> +</visualization> diff -r 0e2c8d03786a9bf22cde5f14da3e868dd4607e0b -r 506d00954a3eec4808ed8c0104b5e0404841b6af config/plugins/visualizations/graphview/static/graphview.css --- /dev/null +++ b/config/plugins/visualizations/graphview/static/graphview.css @@ -0,0 +1,70 @@ +svg { + border: 1px solid lightgrey; + background-color: #FFF; + cursor: default; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + -o-user-select: none; + user-select: none; +} + +svg:not(.active):not(.ctrl) { + cursor: crosshair; +} + +path.link { + fill: none; + stroke: #000; + stroke-width: 4px; + cursor: default; +} + +svg:not(.active):not(.ctrl) path.link { + cursor: pointer; +} + +path.link.selected { + stroke-dasharray: 10,2; +} + +path.link.dragline { + pointer-events: none; +} + +path.link.hidden { + stroke-width: 0; +} + 
+circle.node { + stroke-width: 1.0px; + cursor: pointer; +} + +circle.node.reflexive { + stroke: #000 !important; + stroke-width: 2.5px; +} + +text { + font: 12px sans-serif; + pointer-events: none; +} + +text.id { + text-anchor: middle; + font-weight: bold; +} + +div.tooltip { + position: absolute; + text-align: center; + width: 60px; + height: 28px; + padding: 2px; + font: 12px sans-serif; + background: lightsteelblue; + border: 0px; + border-radius: 8px; + pointer-events: none; +} diff -r 0e2c8d03786a9bf22cde5f14da3e868dd4607e0b -r 506d00954a3eec4808ed8c0104b5e0404841b6af config/plugins/visualizations/graphview/static/graphview.js --- /dev/null +++ b/config/plugins/visualizations/graphview/static/graphview.js @@ -0,0 +1,175 @@ +function get_firstChild(n) { + y=n.firstChild; + while (y && y.nodeType!=1) { + y=y.nextSibling; + } + return y; +} + +function get_nextSibling(n) { + y=n.nextSibling; + while (y) { + if (y.nodeType==1) { + return y; + } + y=y.nextSibling; + } + return y; +} + +function parse_xgmml_attr(elm) { + out = {}; + var c = get_firstChild(elm); + while (c) { + if (c.nodeName == "att") { + if (c.attributes['type'].value == "string") { + out[c.attributes['name'].value] = c.attributes['value'].value; + } + } + c = get_nextSibling(c); + } + return out +} + +function parse_xgmml(root, add_node, add_edge) { + graph=root.getElementsByTagName("graph")[0]; + elm = get_firstChild(graph); + while (elm) { + if (elm.nodeName == "node") { + var attr = parse_xgmml_attr(elm); + add_node( elm.attributes['id'].value, elm.attributes['label'].value, attr ); + } + if (elm.nodeName == "edge") { + var attr = parse_xgmml_attr(elm); + add_edge( elm.attributes['source'].value, elm.attributes['target'].value, attr ); + } + + elm = get_nextSibling(elm); + } +} + +function parse_sif(data, add_node, add_edge) { + var lines = data.split("\n") + for (var i in lines) { + var tmp = lines[i].split("\t"); + if (tmp.length == 3) { + add_edge(tmp[0], tmp[2], {type:tmp[1]}); + } + } + +} + +jQuery.fn.graphViewer = function(config) { + + var svg, colors; + var nodes = [], + links = []; + + var height = config.height; + var width = config.width; + + + colors = d3.scale.category10(); + + this.each(function() { + svg = d3.select(this) + .append('svg') + .attr('width', width) + .attr('height', height); + } + ); + + var tooltip_div = d3.select("#tooltip").attr("class", "tooltip").style("opacity", 0);; + + this.add_node = function(node_id, node_label, attr) { + nodes.push( {id: node_id, label:node_label, attr:attr} ); + } + + this.add_edge = function(src_id, dst_id, attr) { + var src, target; + for (var i in nodes) { + if (nodes[i].id == src_id) { + src = nodes[i]; + } + if (nodes[i].id == dst_id) { + target = nodes[i]; + } + } + if (typeof src==="undefined") { + i = nodes.length + nodes.push( {id:src_id, label:src_id, attr:{}} ) + src = nodes[i] + } + if (typeof target==="undefined") { + i = nodes.length + nodes.push( {id:dst_id, label:dst_id, attr:{}} ) + target = nodes[i] + } + if (src && target) { + links.push( {source: src, target: target, left: false, right: true } ); + } + } + + this.render = function() { + var path = svg.append('svg:g').selectAll('path'), + circle = svg.append('svg:g').selectAll('g'); + + + circle = circle.data(nodes, function(d) { return d.id; }); + var g = circle.enter().append('svg:g'); + + circle.on('mouseover', function(d) { + tooltip_div.transition() + .duration(200) + .style("opacity", .9); + tooltip_div.html( "<div>" + d.label + "</div><div>" + d.attr.type + "</div>" ).style("left", 
(d3.event.pageX + 40) + "px") + .style("top", (d3.event.pageY - 35) + "px"); + }) + .on("mouseout", function(d) { + tooltip_div.transition() + .duration(500) + .style("opacity", 0); + }); + + path = path.data(links); + + + function tick() { + path.attr('d', function(d) { + return 'M' + d.source.x + ',' + d.source.y + 'L' + d.target.x + ',' + d.target.y; + }); + circle.attr('transform', function(d) { + return 'translate(' + d.x + ',' + d.y + ')'; + }); + } + + + g.append('svg:circle') + .attr('class', 'node') + .attr('r', 8) + .style('stroke', 'black') + .attr('fill', function(d) { + return colors(d['attr']['type']) + }); + + path.enter().append('svg:path') + .attr('class', 'link') + + + // init D3 force layout + var force = d3.layout.force() + .nodes(nodes) + .links(links) + .size([width, height]) + .linkDistance(25) + .charge(-50) + .on('tick', tick) + circle.call(force.drag) + force.start() + + } + + return this + +} + This diff is so big that we needed to truncate the remainder. Repository URL: https://bitbucket.org/galaxy/galaxy-central/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.
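Taken together, the search-branch changesets above add fields such as 'input_ldda', 'copied_from_ldda_id', 'deleted', and 'data_type', and relax quote_word so that empty quoted strings parse. A hedged sketch of the kinds of queries this enables; the domain names ('job', 'history_dataset', 'ldda') are inferred rather than visible in the truncated diffs, and the encoded ids are placeholders:

    select * from job where state = 'ok'
    select * from job where input_ldda = 'ENCODED_LDDA_ID'
    select * from history_dataset where copied_from_ldda_id = 'ENCODED_LDDA_ID'
    select * from ldda where data_type = 'fastqsanger'
    select * from job where tool_name = ''

The last query would have been a parse error before the quote_word change, since not_quote+ required at least one character between the quotes.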