commit/galaxy-central: greg: Eliminate all references and support for datatype indexers since they have never been used - datatype converters do the same thing.
1 new commit in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/changeset/d48aa9042859/
changeset:   d48aa9042859
user:        greg
date:        2012-01-06 21:43:35
summary:     Eliminate all references and support for datatype indexers since they have never been used - datatype converters do the same thing.
affected #:  13 files

diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f datatypes_conf.xml.sample
--- a/datatypes_conf.xml.sample
+++ b/datatypes_conf.xml.sample
@@ -35,12 +35,6 @@
     <datatype extension="bigwig" type="galaxy.datatypes.binary:BigWig" mimetype="application/octet-stream" display_in_upload="true">
         <display file="ucsc/bigwig.xml" />
     </datatype>
-    <datatype extension="coverage" type="galaxy.datatypes.coverage:LastzCoverage" display_in_upload="true">
-        <indexer file="coverage.xml" />
-    </datatype>
-    <datatype extension="coverage" type="galaxy.datatypes.coverage:LastzCoverage" display_in_upload="true">
-        <indexer file="coverage.xml" />
-    </datatype>
     <!-- MSI added Datatypes -->
     <datatype extension="csv" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="true" /><!-- FIXME: csv is 'tabular'ized data, but not 'tab-delimited'; the class used here is intended for 'tab-delimited' -->
     <!-- End MSI added Datatypes -->
@@ -95,7 +89,6 @@
         <converter file="interval_to_bedstrict_converter.xml" target_datatype="bedstrict"/>
         <converter file="interval_to_bed6_converter.xml" target_datatype="bed6"/>
         <converter file="interval_to_bed12_converter.xml" target_datatype="bed12"/>
-        <indexer file="interval_awk.xml" />
         <!-- <display file="ucsc/interval_as_bed.xml" inherit="True" /> -->
         <display file="genetrack.xml" inherit="True"/>
         <display file="ensembl/ensembl_interval_as_bed.xml" inherit="True"/>
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/app.py
--- a/lib/galaxy/app.py
+++ b/lib/galaxy/app.py
@@ -74,15 +74,12 @@
         self.datatypes_registry.load_datatype_converters( self.toolbox )
         # Load history import/export tools
         load_history_imp_exp_tools( self.toolbox )
-        #load external metadata tool
+        # Load external metadata tool
         self.datatypes_registry.load_external_metadata_tool( self.toolbox )
-        # Load datatype indexers defined in local datatypes_conf.xml
-        self.datatypes_registry.load_datatype_indexers( self.toolbox )
-        # Load proprietary datatypes defined in datatypes_conf.xml files in all installed tool
-        # shed repositories.  This will also load all proprietary datatype converters, indexers
-        # and display_applications.
+        # Load proprietary datatypes defined in datatypes_conf.xml files in all installed tool shed
+        # repositories.  This will also load all proprietary datatype converters and display_applications.
         self.installed_repository_manager.load_proprietary_datatypes()
-        #Load security policy
+        # Load security policy
         self.security_agent = self.model.security_agent
         self.host_security_agent = galaxy.security.HostAgent( model=self.security_agent.model, permitted_actions=self.security_agent.permitted_actions )
         # Load quota management
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/coverage.py
--- a/lib/galaxy/datatypes/indexers/coverage.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Read a chromosome of coverage data, and write it as a npy array, as
-well as averages over regions of progressively larger size in powers of 10
-"""
-
-from __future__ import division
-
-import sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
-import bx.wiggle
-from bx.cookbook import doc_optparse
-from bx import misc
-max2 = max
-pkg_resources.require("numpy>=1.2.1")
-from numpy import *
-import tempfile
-import os
-
-def write_chrom(max, out_base, instream):
-
-    scores = zeros( max, float32 ) * nan
-    # Fill array from wiggle
-    max_value = 0
-    min_value = 0
-    for line in instream:
-        line = line.rstrip("\n\r")
-        (chrom, pos, val) = line.split("\t")
-        pos, val = int(pos), float(val)
-        scores[pos] = val
-
-    # Write ra
-    fname = "%s_%d" % ( out_base, 1 )
-    save( fname, scores )
-    os.rename( fname+".npy", fname )
-
-    # Write average
-    for window in 10, 100, 1000, 10000, 100000:
-        input = scores.copy()
-        size = len( input )
-        input.resize( ( ( size / window ), window ) )
-        masked = ma.masked_array( input, isnan( input ) )
-        averaged = mean( masked, 1 )
-        averaged.set_fill_value( nan )
-        fname = "%s_%d" % ( out_base, window )
-        save( fname, averaged.filled() )
-        del masked, averaged
-        os.rename( fname+".npy", fname )
-
-def main():
-    max = int( 512*1024*1024 )
-    # get chroms and lengths
-    chroms = {}
-    LEN = {}
-    for line in open(sys.argv[1],"r"):
-        line = line.rstrip("\r\n")
-        fields = line.split("\t")
-        (chrom, pos, forward) = fields[0:3]
-        reverse = 0
-        if len(fields) == 4: reverse = int(fields[3])
-        forward = int(forward)+reverse
-        pos = int(pos)
-        chrom_file = chroms.get(chrom, None)
-        if not chrom_file:
-            chrom_file = chroms[chrom] = tempfile.NamedTemporaryFile()
-        chrom_file.write("%s\t%s\t%s\n" % (chrom,pos,forward))
-        LEN[chrom] = max2( LEN.get(chrom,0), pos+1 )
-    for chrom, stream in chroms.items():
-        stream.seek(0)
-        prefix = os.path.join(sys.argv[2], chrom)
-        write_chrom( LEN[chrom], prefix, stream )
-
-    manifest_file = open( os.path.join( sys.argv[2], "manifest.tab" ),"w" )
-    for key, value in LEN.items():
-        print >> manifest_file, "%s\t%s" % (key, value)
-    manifest_file.close()
-
-
-if __name__ == "__main__": main()
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/coverage.xml
--- a/lib/galaxy/datatypes/indexers/coverage.xml
+++ /dev/null
@@ -1,12 +0,0 @@
-<tool id="INDEXER_Coverage_0" name="Index Coverage for Track Viewer">
-    <!-- Used internally to generate track indexes -->
-    <command interpreter="python">coverage.py $input_dataset $store_path 2>&1
-    </command>
-    <inputs>
-        <page>
-            <param format="coverage" name="input_dataset" type="data" label="Choose coverage"/>
-        </page>
-    </inputs>
-    <help>
-    </help>
-</tool>
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/interval.awk
--- a/lib/galaxy/datatypes/indexers/interval.awk
+++ /dev/null
@@ -1,43 +0,0 @@
-BEGIN {
-    # from galaxy.utils
-    mapped_chars[">"] = "__gt__"
-    mapped_chars["<"] = "__lt__"
-    mapped_chars["'"] = "__sq__"
-    mapped_chars["\""] = "__dq__"
-    mapped_chars["\\["] = "__ob__"
-    mapped_chars["\\]"] = "__cb__"
-    mapped_chars["\\{"] = "__oc__"
-    mapped_chars["\\}"] = "__cc__"
-    mapped_chars["@"] = "__at__"
-    # additional, not in galaxy.utils
-    mapped_chars["/"] = "__fs__"
-    mapped_chars["^manifest\.tab$"] = "__manifest.tab__"
-}
-function escape_filename( name )
-{
-    for( char in mapped_chars ) {
-        gsub( char, mapped_chars[char], name )
-    }
-    return name
-}
-!_[$chrom]++ {
-    # close files only when we switch to a new one.
-    fn && close(fn)
-    fn = storepath "/" escape_filename($1) }
-{
-    print $0 >> fn;
-    # the || part is needed to catch 0 length chromosomes, which
-    # should never happen but...
-    if ($end > chroms[$chrom] || !chroms[$chrom])
-        chroms[$chrom] = $end }
-END {
-    fn = storepath "/manifest.tab"
-    for( x in chroms ) {
-        # add line to manifest
-        print x "\t" chroms[x] >> fn
-        chromfile = storepath "/" escape_filename(x)
-        # sort in-place
-        system( "sort -f -n -k " chrom " -k " start " -k " end " -o " chromfile " " chromfile )
-        close(chromfile)
-    }
-}
\ No newline at end of file
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/interval.py
--- a/lib/galaxy/datatypes/indexers/interval.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-"""
-Generate indices for track browsing of an interval file.
-
-usage: %prog bed_file out_directory
-    -1, --cols1=N,N,N,N: Columns for chrom, start, end, strand in interval file
-"""
-import sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
-from bx.intervals import io
-from bx.cookbook import doc_optparse
-import psyco_full
-import commands
-import os
-from os import environ
-import tempfile
-from bisect import bisect
-
-def divide( intervals, out_path ):
-    manifest = {}
-    current_file = None
-    lastchrom = ""
-    for line in intervals:
-        try:
-            chrom = line.chrom
-        except AttributeError, e:
-            continue
-        manifest[chrom] = max(manifest.get(chrom,0),line.end)
-        if not lastchrom == chrom:
-            if current_file:
-                current_file.close()
-            current_file = open( os.path.join( out_path, "%s" % chrom), "a" )
-        print >> current_file, "\t".join(line)
-        lastchrom = chrom
-    if current_file:
-        current_file.close()
-    return manifest
-
-if __name__ == "__main__":
-    options, args = doc_optparse.parse( __doc__ )
-    try:
-        chr_col_1, start_col_1, end_col_1, strand_col_1 = [int(x)-1 for x in options.cols1.split(',')]
-        in_fname, out_path = args
-    except:
-        doc_optparse.exception()
-
-    # Sort through a tempfile first
-    temp_file = tempfile.NamedTemporaryFile(mode="r")
-    environ['LC_ALL'] = 'POSIX'
-    commandline = "sort -f -n -k %d -k %d -k %d -o %s %s" % (chr_col_1+1,start_col_1+1,end_col_1+1, temp_file.name, in_fname)
-    errorcode, stdout = commands.getstatusoutput(commandline)
-
-    temp_file.seek(0)
-    interval = io.NiceReaderWrapper( temp_file,
-                                     chrom_col=chr_col_1,
-                                     start_col=start_col_1,
-                                     end_col=end_col_1,
-                                     strand_col=strand_col_1,
-                                     fix_strand=True )
-    manifest = divide( interval, out_path )
-    manifest_file = open( os.path.join( out_path, "manifest.tab" ),"w" )
-    for key, value in manifest.items():
-        print >> manifest_file, "%s\t%s" % (key, value)
-    manifest_file.close()
-    temp_file.close()
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/interval.xml
--- a/lib/galaxy/datatypes/indexers/interval.xml
+++ /dev/null
@@ -1,14 +0,0 @@
-<tool id="INDEXER_Interval_0" name="Index Interval for Track Viewer">
-    <!-- Used internally to generate track indexes -->
-    <command interpreter="python">interval.py $input_dataset
-        -1 ${input_dataset.metadata.chromCol},${input_dataset.metadata.startCol},${input_dataset.metadata.endCol},${input_dataset.metadata.strandCol}
-        $store_path 2>&1
-    </command>
-    <inputs>
-        <page>
-            <param format="interval" name="input_dataset" type="data" label="Choose intervals"/>
-        </page>
-    </inputs>
-    <help>
-    </help>
-</tool>
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/interval_awk.xml
--- a/lib/galaxy/datatypes/indexers/interval_awk.xml
+++ /dev/null
@@ -1,16 +0,0 @@
-<tool id="INDEXER_Interval_0" name="Index Interval for Track Viewer">
-    <!-- Used internally to generate track indexes -->
-    <command interpreter="awk -f">interval.awk
-        chrom=${input_dataset.metadata.chromCol} start=${input_dataset.metadata.startCol}
-        end=${input_dataset.metadata.endCol} strand=${input_dataset.metadata.strandCol}
-        storepath=${store_path}
-        $input_dataset 2>&1
-    </command>
-    <inputs>
-        <page>
-            <param format="interval" name="input_dataset" type="data" label="Choose intervals"/>
-        </page>
-    </inputs>
-    <help>
-    </help>
-</tool>
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/wiggle.py
--- a/lib/galaxy/datatypes/indexers/wiggle.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Read a chromosome of wiggle data, and write it as a npy array, as
-well as averages over regions of progressively larger size in powers of 10
-"""
-
-from __future__ import division
-
-import sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
-import bx.wiggle
-from bx.cookbook import doc_optparse
-from bx import misc
-max2 = max
-pkg_resources.require("numpy>=1.2.1")
-from numpy import *
-import tempfile
-import os
-from galaxy.tracks.store import sanitize_name
-
-
-def write_chrom(max, out_base, instream):
-
-    scores = zeros( max, float32 ) * nan
-    # Fill array from wiggle
-    for line in instream:
-        line = line.rstrip("\n\r")
-        (chrom, pos, val) = line.split("\t")
-        pos, val = int(pos), float(val)
-        scores[pos] = val
-
-    # Write ra
-    fname = "%s_%d" % ( out_base, 1 )
-    save( fname, scores )
-    os.rename( fname+".npy", fname )
-
-    # Write average
-    for window in 10, 100, 1000, 10000, 100000:
-        input = scores.copy()
-        size = len( input )
-        input.resize( ( ( size / window ), window ) )
-        masked = ma.masked_array( input, isnan( input ) )
-        averaged = mean( masked, 1 )
-        averaged.set_fill_value( nan )
-        fname = "%s_%d" % ( out_base, window )
-        save( fname, averaged.filled() )
-        del masked, averaged
-        os.rename( fname+".npy", fname )
-
-def main():
-    max = int( 512*1024*1024 )
-    # get chroms and lengths
-    chroms = {}
-    LEN = {}
-    for (chrom, pos, val) in bx.wiggle.Reader( open(sys.argv[1],"r") ):
-        chrom_file = chroms.get(chrom, None)
-        if not chrom_file:
-            chrom_file = chroms[chrom] = tempfile.NamedTemporaryFile()
-        chrom_file.write("%s\t%s\t%s\n" % (chrom,pos,val))
-        LEN[chrom] = max2( LEN.get(chrom,0), pos+1 )
-    for chrom, stream in chroms.items():
-        stream.seek(0)
-        prefix = os.path.join(sys.argv[2], sanitize_name(chrom))
-        write_chrom( LEN[chrom], prefix, stream )
-
-    manifest_file = open( os.path.join( sys.argv[2], "manifest.tab" ),"w" )
-    for key, value in LEN.items():
-        print >> manifest_file, "%s\t%s" % (key, value)
-    manifest_file.close()
-
-
-if __name__ == "__main__": main()
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/wiggle.xml
--- a/lib/galaxy/datatypes/indexers/wiggle.xml
+++ /dev/null
@@ -1,12 +0,0 @@
-<tool id="INDEXER_Wiggle_0" name="Index Wiggle for Track Viewer">
-    <!-- Used internally to generate track indexes -->
-    <command interpreter="python">wiggle.py $input_dataset $store_path 2>&1
-    </command>
-    <inputs>
-        <page>
-            <param format="wiggle" name="input_dataset" type="data" label="Choose wiggle"/>
-        </page>
-    </inputs>
-    <help>
-    </help>
-</tool>
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/registry.py
--- a/lib/galaxy/datatypes/registry.py
+++ b/lib/galaxy/datatypes/registry.py
@@ -18,7 +18,6 @@
         self.datatypes_by_extension = {}
         self.mimetypes_by_extension = {}
         self.datatype_converters = odict()
-        self.datatype_indexers = odict()
         # Converters defined in local datatypes_conf.xml
         self.converters = []
         # Converters defined in datatypes_conf.xml included
@@ -27,11 +26,6 @@
         self.converter_deps = {}
         self.available_tracks = []
         self.set_external_metadata_tool = None
-        # Indexers defined in local datatypes_conf.xml
-        self.indexers = []
-        # Indexers defined in datatypes_conf.xml included
-        # in installed tool shed repositories.
-        self.proprietary_indexers = []
         self.sniff_order = []
         self.upload_file_formats = []
         # Map a display application id to a display application
@@ -39,10 +33,6 @@
         self.converters_path_attr = None
         # The 'default' converters_path defined in local datatypes_conf.xml
         self.datatype_converters_path = None
-        self.indexers_path_attr = None
-        # The 'default' indexers_path defined in local datatypes_conf.xml
-        self.datatype_indexers_path = None
-        self.display_path_attr = None
         # The 'default' display_path defined in local datatypes_conf.xml
         self.display_applications_path = None
         self.datatype_elems = []
@@ -63,11 +53,6 @@
             self.datatype_converters_path = os.path.join( root_dir, self.converters_path_attr )
             if not os.path.isdir( self.datatype_converters_path ):
                 raise ConfigurationError( "Directory does not exist: %s" % self.datatype_converters_path )
-            if not self.datatype_indexers_path:
-                self.indexers_path_attr = registration.get( 'indexers_path', 'lib/galaxy/datatypes/indexers' )
-                self.datatype_indexers_path = os.path.join( root_dir, self.indexers_path_attr )
-                if not os.path.isdir( self.datatype_indexers_path ):
-                    raise ConfigurationError( "Directory does not exist: %s" % self.datatype_indexers_path )
             if not self.display_applications_path:
                 self.display_path_attr = registration.get( 'display_path', 'display_applications' )
                 self.display_applications_path = os.path.join( root_dir, self.display_path_attr )
@@ -134,11 +119,6 @@
                        self.proprietary_converters.append( ( converter_config, extension, target_datatype ) )
                    else:
                        self.converters.append( ( converter_config, extension, target_datatype ) )
-                for indexer in elem.findall( 'indexer' ):
-                    # Build the list of datatype indexers for track building
-                    indexer_config = indexer.get( 'file', None )
-                    if indexer_config:
-                        self.indexers.append( (indexer_config, extension) )
                 for composite_file in elem.findall( 'composite_file' ):
                     # add composite files
                     name = composite_file.get( 'name', None )
@@ -393,26 +373,6 @@
         toolbox.tools_by_id[ set_meta_tool.id ] = set_meta_tool
         self.set_external_metadata_tool = set_meta_tool
         self.log.debug( "Loaded external metadata tool: %s", self.set_external_metadata_tool.id )
-    def load_datatype_indexers( self, toolbox, indexer_path=None ):
-        """Adds indexers from self.indexers to the toolbox from app"""
-        if indexer_path:
-            # Load indexers defined by datatypes_conf.xml
-            # included in installed tool shed repository.
-            indexers = self.proprietary_indexers
-        else:
-            # Load indexers defined by local datatypes_conf.xml.
-            indexers = self.indexers
-        for elem in indexers:
-            tool_config = elem[0]
-            datatype = elem[1]
-            if indexer_path:
-                config_path = os.path.join( indexer_path, tool_config )
-            else:
-                config_path = os.path.join( self.datatype_indexers_path, tool_config )
-            indexer = toolbox.load_tool( config_path )
-            toolbox.tools_by_id[ indexer.id ] = indexer
-            self.datatype_indexers[ datatype ] = indexer
-            self.log.debug( "Loaded indexer: %s", indexer.id )
    def get_converters_by_datatype(self, ext):
        """Returns available converters by source type"""
        converters = odict()
@@ -425,18 +385,6 @@
        if ext in self.datatype_converters.keys():
            converters.update(self.datatype_converters[ext])
        return converters
-    def get_indexers_by_datatype( self, ext ):
-        """Returns indexers based on datatype"""
-        class_chain = list()
-        source_datatype = type(self.get_datatype_by_extension(ext))
-        for ext_spec in self.datatype_indexers.keys():
-            datatype = type(self.get_datatype_by_extension(ext_spec))
-            if issubclass( source_datatype, datatype ):
-                class_chain.append( ext_spec )
-        # Prioritize based on class chain
-        ext2type = lambda x: self.get_datatype_by_extension(x)
-        class_chain = sorted(class_chain, lambda x,y: issubclass(ext2type(x),ext2type(y)) and -1 or 1)
-        return [self.datatype_indexers[x] for x in class_chain]
    def get_converter_by_target_type(self, source_ext, target_ext):
        """Returns a converter based on source and target datatypes"""
        converters = self.get_converters_by_datatype(source_ext)
@@ -494,17 +442,13 @@
            converters_path_str = ' converters_path="%s"' % self.converters_path_attr
        else:
            converters_path_str = ''
-        if self.indexers_path_attr:
-            indexers_path_str = ' indexers_path="%s"' % self.indexers_path_attr
-        else:
-            indexers_path_str = ''
        if self.display_path_attr:
            display_path_str = ' display_path="%s"' % self.display_path_attr
        else:
            display_path_str = ''
        os.write( fd, '<?xml version="1.0"?>\n' )
        os.write( fd, '<datatypes>\n' )
-        os.write( fd, '<registration%s%s%s>\n' % ( converters_path_str, indexers_path_str, display_path_str ) )
+        os.write( fd, '<registration%s%s>\n' % ( converters_path_str, display_path_str ) )
        for elem in self.datatype_elems:
            os.write( fd, '%s' % galaxy.util.xml_to_string( elem ) )
        os.write( fd, '</registration>\n' )
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/tool_shed/__init__.py
--- a/lib/galaxy/tool_shed/__init__.py
+++ b/lib/galaxy/tool_shed/__init__.py
@@ -23,12 +23,9 @@
                path_items = datatypes_config.split( 'repos' )
                relative_install_dir = '%srepos/%s/%s/%s' % \
                    ( path_items[0], tool_shed_repository.owner, tool_shed_repository.name, tool_shed_repository.installed_changeset_revision )
-                converter_path, indexer_path = load_datatypes( self.app, datatypes_config, relative_install_dir )
+                converter_path = load_datatypes( self.app, datatypes_config, relative_install_dir )
                if converter_path:
                    # Load proprietary datatype converters
                    self.app.datatypes_registry.load_datatype_converters( self.app.toolbox, converter_path=converter_path )
-                if indexer_path:
-                    # Load proprietary datatype indexers
-                    self.app.datatypes_registry.load_datatype_indexers( self.app.toolbox, indexer_path=indexer_path )
                # TODO: handle display_applications
\ No newline at end of file
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -109,7 +109,6 @@
    Update the received metadata_dict with changes that have been applied to the received
    datatypes_config.  This method is used by the InstallManager, which does not have
    access to trans.
-    TODO: Handle converters, indexers, sniffers, etc...
    """
    # Parse datatypes_config.
    tree = ElementTree.parse( datatypes_config )
@@ -451,7 +450,6 @@
    tree = util.parse_xml( datatypes_config )
    datatypes_config_root = tree.getroot()
    converter_path = None
-    indexer_path = None
    relative_path_to_datatype_file_name = None
    datatype_files = datatypes_config_root.find( 'datatype_files' )
    datatype_class_modules = []
@@ -502,7 +500,7 @@
                        log.debug( "Exception importing datatypes code file %s: %s" % ( str( relative_path_to_datatype_file_name ), str( e ) ) )
                    finally:
                        lock.release()
-    # Handle data type converters and indexers.
+    # Handle data type converters.
    for elem in registration.findall( 'datatype' ):
        if not converter_path:
            # If any of the <datatype> tag sets contain <converter> tags, set the converter_path
@@ -520,23 +518,7 @@
                                    break
                if converter_path:
                    break
-            if not indexer_path:
-                # If any of the <datatype> tag sets contain <indexer> tags, set the indexer_path
-                # if it is not already set.  This requires repsitories to place all indexers in the
-                # same subdirectory within the repository hierarchy.
-                for indexer in elem.findall( 'indexer' ):
-                    indexer_path = None
-                    indexer_config = indexer.get( 'file', None )
-                    if indexer_config:
-                        for root, dirs, files in os.walk( relative_install_dir ):
-                            if root.find( '.hg' ) < 0:
-                                for name in files:
-                                    if name == indexer_config:
-                                        indexer_path = root
-                                        break
-                    if indexer_path:
-                        break
-            if converter_path and indexer_path:
+            else:
                break
    # TODO: handle display_applications
    else:
@@ -546,7 +528,7 @@
    imported_modules = []
    # Load proprietary datatypes
    app.datatypes_registry.load_datatypes( root_dir=app.config.root, config=datatypes_config, imported_modules=imported_modules )
-    return converter_path, indexer_path
+    return converter_path
def load_repository_contents( app, name, description, owner, changeset_revision, tool_path, repository_clone_url, relative_install_dir,
                              current_working_dir, tmp_name, tool_section=None, shed_tool_conf=None, new_install=True ):
    # This method is used by the InstallManager, which does not have access to trans.
@@ -562,13 +544,10 @@
    if 'datatypes_config' in metadata_dict:
        datatypes_config = os.path.abspath( metadata_dict[ 'datatypes_config' ] )
        # Load data types required by tools.
-        converter_path, indexer_path = load_datatypes( app, datatypes_config, relative_install_dir )
+        converter_path = load_datatypes( app, datatypes_config, relative_install_dir )
        if converter_path:
            # Load proprietary datatype converters
            app.datatypes_registry.load_datatype_converters( app.toolbox, converter_path=converter_path )
-        if indexer_path:
-            # Load proprietary datatype indexers
-            app.datatypes_registry.load_datatype_indexers( app.toolbox, indexer_path=indexer_path )
        # TODO: handle display_applications
    if 'tools' in metadata_dict:
        repository_tools_tups = []

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--
This is a commit notification from bitbucket.org.
You are receiving this because you have the service enabled, addressing the recipient of this email.
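For a quick picture of what this change means in practice: an <indexer> was registered inside a <datatype> block of datatypes_conf.xml in exactly the same way a <converter> is, which is why the converter machinery can take over the indexer's role. A minimal sketch of the interval block after this commit, pieced together from the datatypes_conf.xml.sample fragment in the diff above (the enclosing <datatype> tag and its attributes are assumed here, since the diff only shows the child elements):

    <datatype extension="interval" type="galaxy.datatypes.interval:Interval" display_in_upload="true">
        <!-- the <indexer file="interval_awk.xml" /> entry was removed by this changeset -->
        <converter file="interval_to_bedstrict_converter.xml" target_datatype="bedstrict"/>
        <converter file="interval_to_bed6_converter.xml" target_datatype="bed6"/>
        <converter file="interval_to_bed12_converter.xml" target_datatype="bed12"/>
    </datatype>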