galaxy-commits
commit/galaxy-central: jgoecks: Infrastructure for running tools: (a) move rerunning tools from tracks controller into tools (API) controller; (b) rerunning now supports multiple regions.
by Bitbucket 12 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/4fbd05095ca7/
changeset: 4fbd05095ca7
user: jgoecks
date: 2012-06-12 23:37:58
summary: Infrastructure for running tools: (a) move rerunning tools from tracks controller into tools (API) controller; (b) rerunning now supports multiple regions.
affected #: 11 files
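For context, here is a minimal sketch (not part of this changeset) of what a client request to the new rerun path might look like. The payload keys mirror the ones _rerun_tool() reads in the tools API controller below -- 'action', 'target_dataset_id', and a JSON-encoded 'regions' list of chrom/start/end dicts parsed via GenomeRegion.from_dict() -- but the Galaxy URL, API key, and encoded dataset id are placeholders, not values from the commit.

import json
import urllib2

# Hypothetical values; substitute a real Galaxy URL, API key, and encoded HDA id.
galaxy_url = 'http://localhost:8080/api/tools?key=YOUR_API_KEY'
payload = {
    'action': 'rerun',
    'target_dataset_id': 'ebfb8f50c6abde6d',
    # 'regions' travels as a JSON string; _rerun_tool() decodes it with from_json_string().
    'regions': json.dumps( [ { 'chrom': 'chr1', 'start': 1000, 'end': 2000 },
                             { 'chrom': 'chr2', 'start': 5000, 'end': 9000 } ] ),
}
request = urllib2.Request( galaxy_url, data=json.dumps( payload ),
                           headers={ 'Content-Type': 'application/json' } )
print urllib2.urlopen( request ).read()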
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2617,7 +2617,7 @@
elif isinstance( input, SelectToolParameter ):
param_dict.update( { 'type' : 'select',
'html' : urllib.quote( input.get_html( trans ) ),
- 'options': input.static_options
+ 'options': input.static_options
} )
elif isinstance( input, Conditional ):
# TODO.
@@ -2626,7 +2626,8 @@
param_dict.update( { 'type' : 'number', 'init_value' : input.value,
'html' : urllib.quote( input.get_html( trans ) ),
'min': input.min,
- 'max': input.max
+ 'max': input.max,
+ 'value': input.value
} )
else:
param_dict.update( { 'type' : '??', 'init_value' : input.value, \
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/visualization/genomes.py
--- a/lib/galaxy/visualization/genomes.py
+++ b/lib/galaxy/visualization/genomes.py
@@ -23,6 +23,26 @@
return dbkey.split( ':' )
else:
return None, dbkey
+
+class GenomeRegion( object ):
+ """
+ A genomic region on an individual chromosome.
+ """
+
+ def __init__( self, chrom=None, start=None, end=None ):
+ self.chrom = chrom
+ self.start = int( start )
+ self.end = int( end )
+
+ def __str__( self ):
+ return self.chrom + ":" + str( self.start ) + "-" + str( self.end )
+
+ @staticmethod
+ def from_dict( obj_dict ):
+ return GenomeRegion( chrom=obj_dict[ 'chrom' ],
+ start=obj_dict[ 'start' ],
+ end=obj_dict[ 'end' ] )
+
class Genome( object ):
"""
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/visualization/tracks/data_providers.py
--- a/lib/galaxy/visualization/tracks/data_providers.py
+++ b/lib/galaxy/visualization/tracks/data_providers.py
@@ -63,7 +63,7 @@
self.original_dataset = original_dataset
self.dependencies = dependencies
- def write_data_to_file( self, chrom, start, end, filename ):
+ def write_data_to_file( self, regions, filename ):
"""
Write data in region defined by chrom, start, and end to a file.
"""
@@ -257,11 +257,18 @@
return tabix.fetch(reference=chrom, start=start, end=end)
- def write_data_to_file( self, chrom, start, end, filename ):
- iterator = self.get_iterator( chrom, start, end )
+ def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
- for line in iterator:
- out.write( "%s\n" % line )
+
+ for region in regions:
+ # Write data in region.
+ chrom = region.chrom
+ start = region.start
+ end = region.end
+ iterator = self.get_iterator( chrom, start, end )
+ for line in iterator:
+ out.write( "%s\n" % line )
+
out.close()
#
@@ -332,7 +339,7 @@
return { 'data': rval, 'message': message }
- def write_data_to_file( self, chrom, start, end, filename ):
+ def write_data_to_file( self, regions, filename ):
raise Exception( "Unimplemented Function" )
class IntervalTabixDataProvider( TabixDataProvider, IntervalDataProvider ):
@@ -420,11 +427,18 @@
return { 'data': rval, 'message': message }
- def write_data_to_file( self, chrom, start, end, filename ):
- iterator = self.get_iterator( chrom, start, end )
+ def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
- for line in iterator:
- out.write( "%s\n" % line )
+
+ for region in regions:
+ # Write data in region.
+ chrom = region.chrom
+ start = region.start
+ end = region.end
+ iterator = self.get_iterator( chrom, start, end )
+ for line in iterator:
+ out.write( "%s\n" % line )
+
out.close()
class BedTabixDataProvider( TabixDataProvider, BedDataProvider ):
@@ -545,11 +559,17 @@
return { 'data': rval, 'message': message }
- def write_data_to_file( self, chrom, start, end, filename ):
- iterator = self.get_iterator( chrom, start, end )
+ def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
- for line in iterator:
- out.write( "%s\n" % line )
+
+ for region in regions:
+ # Write data in region.
+ chrom = region.chrom
+ start = region.start
+ end = region.end
+ iterator = self.get_iterator( chrom, start, end )
+ for line in iterator:
+ out.write( "%s\n" % line )
out.close()
class VcfTabixDataProvider( TabixDataProvider, VcfDataProvider ):
@@ -669,35 +689,42 @@
return filters
- def write_data_to_file( self, chrom, start, end, filename ):
+ def write_data_to_file( self, regions, filename ):
"""
- Write reads in [chrom:start-end] to file.
+ Write reads in regions to file.
"""
# Open current BAM file using index.
- start, end = int(start), int(end)
bamfile = csamtools.Samfile( filename=self.original_dataset.file_name, mode='rb', \
index_filename=self.converted_dataset.file_name )
- try:
- data = bamfile.fetch(start=start, end=end, reference=chrom)
- except ValueError, e:
- # Some BAM files do not prefix chromosome names with chr, try without
- if chrom.startswith( 'chr' ):
- try:
- data = bamfile.fetch( start=start, end=end, reference=chrom[3:] )
- except ValueError:
- return None
- else:
- return None
-
- # Write new BAM file.
+
# TODO: write headers as well?
new_bamfile = csamtools.Samfile( template=bamfile, filename=filename, mode='wb' )
- for i, read in enumerate( data ):
- new_bamfile.write( read )
- new_bamfile.close()
+
+ for region in regions:
+ # Write data from region.
+ chrom = region.chrom
+ start = region.start
+ end = region.end
+
+ try:
+ data = bamfile.fetch(start=start, end=end, reference=chrom)
+ except ValueError, e:
+ # Some BAM files do not prefix chromosome names with chr, try without
+ if chrom.startswith( 'chr' ):
+ try:
+ data = bamfile.fetch( start=start, end=end, reference=chrom[3:] )
+ except ValueError:
+ return None
+ else:
+ return None
+
+ # Write reads in region.
+ for i, read in enumerate( data ):
+ new_bamfile.write( read )
# Cleanup.
+ new_bamfile.close()
bamfile.close()
def get_iterator( self, chrom, start, end ):
@@ -952,17 +979,24 @@
"""
col_name_data_attr_mapping = { 4 : { 'index': 4 , 'name' : 'Score' } }
- def write_data_to_file( self, chrom, start, end, filename ):
+ def write_data_to_file( self, regions, filename ):
source = open( self.original_dataset.file_name )
index = Indexes( self.converted_dataset.file_name )
out = open( filename, 'w' )
- for start, end, offset in index.find(chrom, start, end):
- source.seek( offset )
+
+ for region in regions:
+ # Write data from region.
+ chrom = region.chrom
+ start = region.start
+ end = region.end
+ for start, end, offset in index.find(chrom, start, end):
+ source.seek( offset )
- reader = GFFReaderWrapper( source, fix_strand=True )
- feature = reader.next()
- for interval in feature.intervals:
- out.write( '\t'.join( interval.fields ) + '\n' )
+ reader = GFFReaderWrapper( source, fix_strand=True )
+ feature = reader.next()
+ for interval in feature.intervals:
+ out.write( '\t'.join( interval.fields ) + '\n' )
+
out.close()
def get_iterator( self, chrom, start, end ):
@@ -1183,13 +1217,6 @@
rval.append( payload )
return { 'data': rval, 'message': message }
-
- def write_data_to_file( self, chrom, start, end, filename ):
- iterator = self.get_iterator( chrom, start, end )
- out = open( filename, "w" )
- for line in iterator:
- out.write( "%s\n" % line )
- out.close()
class ENCODEPeakTabixDataProvider( TabixDataProvider, ENCODEPeakDataProvider ):
"""
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/web/api/datasets.py
--- a/lib/galaxy/web/api/datasets.py
+++ b/lib/galaxy/web/api/datasets.py
@@ -10,7 +10,7 @@
log = logging.getLogger( __name__ )
-class DatasetsController( BaseAPIController, UsesHistoryMixinDatasetAssociationMixin ):
+class DatasetsController( BaseAPIController, UsesHistoryDatasetAssociationMixin ):
@web.expose_api
def index( self, trans, hda_id, **kwd ):
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/web/api/history_contents.py
--- a/lib/galaxy/web/api/history_contents.py
+++ b/lib/galaxy/web/api/history_contents.py
@@ -12,7 +12,7 @@
log = logging.getLogger( __name__ )
-class HistoryContentsController( BaseAPIController, UsesHistoryMixinDatasetAssociationMixin, UsesHistoryMixin, UsesLibraryMixin, UsesLibraryMixinItems ):
+class HistoryContentsController( BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesHistoryMixin, UsesLibraryMixin, UsesLibraryMixinItems ):
@web.expose_api
def index( self, trans, history_id, **kwd ):
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/web/api/tools.py
--- a/lib/galaxy/web/api/tools.py
+++ b/lib/galaxy/web/api/tools.py
@@ -1,12 +1,12 @@
from galaxy import config, tools, web, util
-from galaxy.web.base.controller import BaseController, BaseAPIController
+from galaxy.web.base.controller import BaseController, BaseAPIController, UsesHistoryDatasetAssociationMixin, messages, get_highest_priority_msg
from galaxy.util.bunch import Bunch
+from galaxy.visualization.tracks.visual_analytics import get_dataset_job
+from galaxy.visualization.genomes import GenomeRegion
+from galaxy.util.json import to_json_string, from_json_string
+from galaxy.visualization.tracks.data_providers import *
-messages = Bunch(
- NO_TOOL = "no tool"
-)
-
-class ToolsController( BaseAPIController ):
+class ToolsController( BaseAPIController, UsesHistoryDatasetAssociationMixin ):
"""
RESTful controller for interactions with tools.
"""
@@ -29,7 +29,7 @@
# Create return value.
return self.app.toolbox.to_dict( trans, in_panel=in_panel, trackster=trackster )
- @web.json
+ @web.expose_api
def show( self, trans, id, **kwd ):
"""
GET /api/tools/{tool_id}
@@ -41,16 +41,18 @@
def create( self, trans, payload, **kwd ):
"""
POST /api/tools
- Executes tool using specified inputs, creating new history-dataset
- associations, which are returned.
+ Executes tool using specified inputs and returns tool's outputs.
"""
- # TODO: set target history?
+ # HACK: for now, if action is rerun, rerun tool.
+ action = payload.get( 'action', None )
+ if action == 'rerun':
+ return self._rerun_tool( trans, payload, **kwd )
# -- Execute tool. --
# Get tool.
- tool_id = payload[ 'id' ]
+ tool_id = payload[ 'tool_id' ]
tool = trans.app.toolbox.get_tool( tool_id )
if not tool:
return { "message": { "type": "error", "text" : messages.NO_TOOL } }
@@ -72,4 +74,287 @@
for output in output_datasets:
outputs.append( output.get_api_value() )
return rval
-
\ No newline at end of file
+
+ #
+ # -- Helper methods --
+ #
+
+ def _run_tool( self, trans, tool_id, target_dataset_id, **kwargs ):
+ """
+ Run a tool. This method serves as a general purpose way to run tools asynchronously.
+ """
+
+ #
+ # Set target history (the history that tool will use for outputs) using
+ # target dataset. If user owns dataset, put new data in original
+ # dataset's history; if user does not own dataset (and hence is accessing
+ # dataset via sharing), put new data in user's current history.
+ #
+ target_dataset = self.get_dataset( trans, target_dataset_id, check_ownership=False, check_accessible=True )
+ if target_dataset.history.user == trans.user:
+ target_history = target_dataset.history
+ else:
+ target_history = trans.get_history( create=True )
+
+ # HACK: tools require unencoded parameters but kwargs are typically
+ # encoded, so try decoding all parameter values.
+ for key, value in kwargs.items():
+ try:
+ value = trans.security.decode_id( value )
+ kwargs[ key ] = value
+ except:
+ pass
+
+ #
+ # Execute tool.
+ #
+ tool = trans.app.toolbox.get_tool( tool_id )
+ if not tool:
+ return messages.NO_TOOL
+
+ # HACK: add run button so that tool.handle_input will run tool.
+ kwargs['runtool_btn'] = 'Execute'
+ params = util.Params( kwargs, sanitize = False )
+ template, vars = tool.handle_input( trans, params.__dict__, history=target_history )
+
+ # TODO: check for errors and ensure that output dataset is available.
+ output_datasets = vars[ 'out_data' ].values()
+ return self.add_track_async( trans, output_datasets[0].id )
+
+
+ def _rerun_tool( self, trans, payload, **kwargs ):
+ """
+ Rerun a tool to produce a new output dataset that corresponds to a
+ dataset that a user is currently viewing.
+ """
+
+ #
+ # TODO: refactor to use same code as run_tool.
+ #
+
+ # Run tool on region if region is specificied.
+ run_on_regions = False
+ regions = from_json_string( payload.get( 'regions', None ) )
+ print regions, payload
+ if regions:
+ if isinstance( regions, dict ):
+ # Regions is a single region.
+ regions = [ GenomeRegion.from_dict( regions ) ]
+ elif isinstance( regions, list ):
+ # There is a list of regions.
+ regions = [ GenomeRegion.from_dict( r ) for r in regions ]
+ run_on_regions = True
+
+ # Dataset check.
+ original_dataset = self.get_dataset( trans, payload[ 'target_dataset_id' ], check_ownership=False, check_accessible=True )
+ msg = self.check_dataset_state( trans, original_dataset )
+ if msg:
+ return to_json_string( msg )
+
+ #
+ # Set tool parameters--except non-hidden dataset parameters--using combination of
+ # job's previous parameters and incoming parameters. Incoming parameters
+ # have priority.
+ #
+ original_job = get_dataset_job( original_dataset )
+ tool = trans.app.toolbox.get_tool( original_job.tool_id )
+ if not tool:
+ return messages.NO_TOOL
+ tool_params = dict( [ ( p.name, p.value ) for p in original_job.parameters ] )
+ # TODO: need to handle updates to conditional parameters; conditional
+ # params are stored in dicts (and dicts within dicts).
+ tool_params.update( dict( [ ( key, value ) for key, value in kwargs.items() if key in tool.inputs ] ) )
+ tool_params = tool.params_from_strings( tool_params, self.app )
+
+ #
+ # If running tool on region, convert input datasets (create indices) so
+ # that can regions of data can be quickly extracted.
+ #
+ messages_list = []
+ if run_on_regions:
+ for jida in original_job.input_datasets:
+ input_dataset = jida.dataset
+ if get_data_provider( original_dataset=input_dataset ):
+ # Can index dataset.
+ track_type, data_sources = input_dataset.datatype.get_track_type()
+ # Convert to datasource that provides 'data' because we need to
+ # extract the original data.
+ data_source = data_sources[ 'data' ]
+ msg = self.convert_dataset( trans, input_dataset, data_source )
+ if msg is not None:
+ messages_list.append( msg )
+
+ # Return any messages generated during conversions.
+ return_message = get_highest_priority_msg( messages_list )
+ if return_message:
+ return to_json_string( return_message )
+
+ #
+ # Set target history (the history that tool will use for inputs/outputs).
+ # If user owns dataset, put new data in original dataset's history; if
+ # user does not own dataset (and hence is accessing dataset via sharing),
+ # put new data in user's current history.
+ #
+ if original_dataset.history.user == trans.user:
+ target_history = original_dataset.history
+ else:
+ target_history = trans.get_history( create=True )
+ hda_permissions = trans.app.security_agent.history_get_default_permissions( target_history )
+
+ def set_param_value( param_dict, param_name, param_value ):
+ """
+ Set new parameter value in a tool's parameter dictionary.
+ """
+
+ # Recursive function to set param value.
+ def set_value( param_dict, group_name, group_index, param_name, param_value ):
+ if group_name in param_dict:
+ param_dict[ group_name ][ group_index ][ param_name ] = param_value
+ return True
+ elif param_name in param_dict:
+ param_dict[ param_name ] = param_value
+ return True
+ else:
+ # Recursive search.
+ return_val = False
+ for name, value in param_dict.items():
+ if isinstance( value, dict ):
+ return_val = set_value( value, group_name, group_index, param_name, param_value)
+ if return_val:
+ return return_val
+ return False
+
+ # Parse parameter name if necessary.
+ if param_name.find( "|" ) == -1:
+ # Non-grouping parameter.
+ group_name = group_index = None
+ else:
+ # Grouping parameter.
+ group, param_name = param_name.split( "|" )
+ index = group.rfind( "_" )
+ group_name = group[ :index ]
+ group_index = int( group[ index + 1: ] )
+
+ return set_value( param_dict, group_name, group_index, param_name, param_value )
+
+ # Set parameters based tool's trackster config.
+ params_set = {}
+ for action in tool.trackster_conf.actions:
+ success = False
+ for joda in original_job.output_datasets:
+ if joda.name == action.output_name:
+ set_param_value( tool_params, action.name, joda.dataset )
+ params_set[ action.name ] = True
+ success = True
+ break
+
+ if not success:
+ return messages.ERROR
+
+ #
+ # Set input datasets for tool. If running on regions, extract and use subset
+ # when possible.
+ #
+ regions_str = ",".join( [ str( r ) for r in regions ] )
+ for jida in original_job.input_datasets:
+ # If param set previously by config actions, do nothing.
+ if jida.name in params_set:
+ continue
+
+ input_dataset = jida.dataset
+ if input_dataset is None: #optional dataset and dataset wasn't selected
+ tool_params[ jida.name ] = None
+ elif run_on_regions and hasattr( input_dataset.datatype, 'get_track_type' ):
+ # Dataset is indexed and hence a subset can be extracted and used
+ # as input.
+
+ # Look for subset.
+ subset_dataset_association = trans.sa_session.query( trans.app.model.HistoryDatasetAssociationSubset ) \
+ .filter_by( hda=input_dataset, location=regions_str ) \
+ .first()
+ if subset_dataset_association:
+ # Data subset exists.
+ subset_dataset = subset_dataset_association.subset
+ else:
+ # Need to create subset.
+ track_type, data_sources = input_dataset.datatype.get_track_type()
+ data_source = data_sources[ 'data' ]
+ converted_dataset = input_dataset.get_converted_dataset( trans, data_source )
+ deps = input_dataset.get_converted_dataset_deps( trans, data_source )
+
+ # Create new HDA for input dataset's subset.
+ new_dataset = trans.app.model.HistoryDatasetAssociation( extension=input_dataset.ext, \
+ dbkey=input_dataset.dbkey, \
+ create_dataset=True, \
+ sa_session=trans.sa_session,
+ name="Subset [%s] of data %i" % \
+ ( regions_str, input_dataset.hid ),
+ visible=False )
+ target_history.add_dataset( new_dataset )
+ trans.sa_session.add( new_dataset )
+ trans.app.security_agent.set_all_dataset_permissions( new_dataset.dataset, hda_permissions )
+
+ # Write subset of data to new dataset
+ data_provider_class = get_data_provider( original_dataset=input_dataset )
+ data_provider = data_provider_class( original_dataset=input_dataset,
+ converted_dataset=converted_dataset,
+ dependencies=deps )
+ trans.app.object_store.create( new_dataset.dataset )
+ data_provider.write_data_to_file( regions, new_dataset.file_name )
+
+ # TODO: (a) size not working; (b) need to set peek.
+ new_dataset.set_size()
+ new_dataset.info = "Data subset for trackster"
+ new_dataset.set_dataset_state( trans.app.model.Dataset.states.OK )
+
+ # Set metadata.
+ # TODO: set meta internally if dataset is small enough?
+ if trans.app.config.set_metadata_externally:
+ trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool,
+ trans, incoming = { 'input1':new_dataset },
+ overwrite=False, job_params={ "source" : "trackster" } )
+ else:
+ message = 'Attributes updated'
+ new_dataset.set_meta()
+ new_dataset.datatype.after_setting_metadata( new_dataset )
+
+ # Add HDA subset association.
+ subset_association = trans.app.model.HistoryDatasetAssociationSubset( hda=input_dataset, subset=new_dataset, location=regions_str )
+ trans.sa_session.add( subset_association )
+
+ subset_dataset = new_dataset
+
+ trans.sa_session.flush()
+
+ # Add dataset to tool's parameters.
+ if not set_param_value( tool_params, jida.name, subset_dataset ):
+ return to_json_string( { "error" : True, "message" : "error setting parameter %s" % jida.name } )
+
+ #
+ # Execute tool and handle outputs.
+ #
+ try:
+ subset_job, subset_job_outputs = tool.execute( trans, incoming=tool_params,
+ history=target_history,
+ job_params={ "source" : "trackster" } )
+ except Exception, e:
+ # Lots of things can go wrong when trying to execute tool.
+ return to_json_string( { "error" : True, "message" : e.__class__.__name__ + ": " + str(e) } )
+ if run_on_regions:
+ for output in subset_job_outputs.values():
+ output.visible = False
+ trans.sa_session.flush()
+
+ #
+ # Return new track that corresponds to the original dataset.
+ #
+ output_name = None
+ for joda in original_job.output_datasets:
+ if joda.dataset == original_dataset:
+ output_name = joda.name
+ break
+ for joda in subset_job.output_datasets:
+ if joda.name == output_name:
+ output_dataset = joda.dataset
+
+ return output_dataset.get_api_value()
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -181,6 +181,37 @@
# -- Mixins for working with Galaxy objects. --
#
+# Message strings returned to browser
+messages = Bunch(
+ PENDING = "pending",
+ NO_DATA = "no data",
+ NO_CHROMOSOME = "no chromosome",
+ NO_CONVERTER = "no converter",
+ NO_TOOL = "no tool",
+ DATA = "data",
+ ERROR = "error",
+ OK = "ok"
+)
+
+def get_highest_priority_msg( message_list ):
+ """
+ Returns highest priority message from a list of messages.
+ """
+ return_message = None
+
+ # For now, priority is: job error (dict), no converter, pending.
+ for message in message_list:
+ if message is not None:
+ if isinstance(message, dict):
+ return_message = message
+ break
+ elif message == messages.NO_CONVERTER:
+ return_message = message
+ elif return_message == None and message == messages.PENDING:
+ return_message = message
+ return return_message
+
+
class SharableItemSecurityMixin:
""" Mixin for handling security for sharable items. """
def security_check( self, trans, item, check_ownership=False, check_accessible=False ):
@@ -201,8 +232,9 @@
raise ItemAccessibilityException( "%s is not accessible to the current user" % item.__class__.__name__, type='error' )
return item
-class UsesHistoryMixinDatasetAssociationMixin:
+class UsesHistoryDatasetAssociationMixin:
""" Mixin for controllers that use HistoryDatasetAssociation objects. """
+
def get_dataset( self, trans, dataset_id, check_ownership=True, check_accessible=False ):
""" Get an HDA object by id. """
# DEPRECATION: We still support unencoded ids for backward compatibility
@@ -232,6 +264,7 @@
else:
error( "You are not allowed to access this dataset" )
return data
+
def get_history_dataset_association( self, trans, history, dataset_id, check_ownership=True, check_accessible=False ):
"""Get a HistoryDatasetAssociation from the database by id, verifying ownership."""
self.security_check( trans, history, check_ownership=check_ownership, check_accessible=check_accessible )
@@ -244,6 +277,7 @@
else:
error( "You are not allowed to access this dataset" )
return hda
+
def get_data( self, dataset, preview=True ):
""" Gets a dataset's data. """
# Get data from file, truncating if necessary.
@@ -258,6 +292,46 @@
dataset_data = open( dataset.file_name ).read(max_peek_size)
truncated = False
return truncated, dataset_data
+
+ def check_dataset_state( self, trans, dataset ):
+ """
+ Returns a message if dataset is not ready to be used in visualization.
+ """
+ if not dataset:
+ return messages.NO_DATA
+ if dataset.state == trans.app.model.Job.states.ERROR:
+ return messages.ERROR
+ if dataset.state != trans.app.model.Job.states.OK:
+ return messages.PENDING
+ return None
+
+ def convert_dataset( self, trans, dataset, target_type ):
+ """
+ Converts a dataset to the target_type and returns a message indicating
+ status of the conversion. None is returned to indicate that dataset
+ was converted successfully.
+ """
+
+ # Get converted dataset; this will start the conversion if necessary.
+ try:
+ converted_dataset = dataset.get_converted_dataset( trans, target_type )
+ except NoConverterException:
+ return messages.NO_CONVERTER
+ except ConverterDependencyException, dep_error:
+ return { 'kind': messages.ERROR, 'message': dep_error.value }
+
+ # Check dataset state and return any messages.
+ msg = None
+ if converted_dataset and converted_dataset.state == trans.app.model.Dataset.states.ERROR:
+ job_id = trans.sa_session.query( trans.app.model.JobToOutputDatasetAssociation ) \
+ .filter_by( dataset_id=converted_dataset.id ).first().job_id
+ job = trans.sa_session.query( trans.app.model.Job ).get( job_id )
+ msg = { 'kind': messages.ERROR, 'message': job.stderr }
+ elif not converted_dataset or converted_dataset.state != trans.app.model.Dataset.states.OK:
+ msg = messages.PENDING
+
+ return msg
+
class UsesLibraryMixin:
def get_library( self, trans, id, check_ownership=False, check_accessible=True ):
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/web/controllers/dataset.py
--- a/lib/galaxy/web/controllers/dataset.py
+++ b/lib/galaxy/web/controllers/dataset.py
@@ -150,7 +150,7 @@
.filter( model.History.deleted==False ) \
.filter( self.model_class.visible==True )
-class DatasetInterface( BaseUIController, UsesAnnotations, UsesHistoryMixin, UsesHistoryMixinDatasetAssociationMixin, UsesItemRatings ):
+class DatasetInterface( BaseUIController, UsesAnnotations, UsesHistoryMixin, UsesHistoryDatasetAssociationMixin, UsesItemRatings ):
stored_list_grid = HistoryDatasetAssociationListGrid()
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/web/controllers/page.py
--- a/lib/galaxy/web/controllers/page.py
+++ b/lib/galaxy/web/controllers/page.py
@@ -273,7 +273,7 @@
_BaseHTMLProcessor.unknown_endtag( self, tag )
class PageController( BaseUIController, SharableMixin, UsesAnnotations, UsesHistoryMixin,
- UsesStoredWorkflowMixin, UsesHistoryMixinDatasetAssociationMixin, UsesVisualizationMixin, UsesItemRatings ):
+ UsesStoredWorkflowMixin, UsesHistoryDatasetAssociationMixin, UsesVisualizationMixin, UsesItemRatings ):
_page_list = PageListGrid()
_all_published_list = PageAllPublishedGrid()
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py
+++ b/lib/galaxy/web/controllers/tracks.py
@@ -18,17 +18,6 @@
from galaxy.visualization.genomes import decode_dbkey, Genomes
from galaxy.visualization.tracks.visual_analytics import get_tool_def, get_dataset_job
-# Message strings returned to browser
-messages = Bunch(
- PENDING = "pending",
- NO_DATA = "no data",
- NO_CHROMOSOME = "no chromosome",
- NO_CONVERTER = "no converter",
- NO_TOOL = "no tool",
- DATA = "data",
- ERROR = "error",
- OK = "ok"
-)
class NameColumn( grids.TextColumn ):
def get_value( self, trans, grid, history ):
@@ -163,7 +152,7 @@
def apply_query_filter( self, trans, query, **kwargs ):
return query.filter( self.model_class.user_id == trans.user.id )
-class TracksController( BaseUIController, UsesVisualizationMixin, UsesHistoryMixinDatasetAssociationMixin, SharableMixin ):
+class TracksController( BaseUIController, UsesVisualizationMixin, UsesHistoryDatasetAssociationMixin, SharableMixin ):
"""
Controller for track browser interface. Handles building a new browser from
datasets in the current history, and display of the resulting browser.
@@ -488,281 +477,7 @@
@web.expose
def list_tracks( self, trans, **kwargs ):
return self.tracks_grid( trans, **kwargs )
-
- @web.expose
- def run_tool( self, trans, tool_id, target_dataset_id, **kwargs ):
- """
- Run a tool. This method serves as a general purpose way to run tools asynchronously.
- """
-
- #
- # Set target history (the history that tool will use for outputs) using
- # target dataset. If user owns dataset, put new data in original
- # dataset's history; if user does not own dataset (and hence is accessing
- # dataset via sharing), put new data in user's current history.
- #
- target_dataset = self.get_dataset( trans, target_dataset_id, check_ownership=False, check_accessible=True )
- if target_dataset.history.user == trans.user:
- target_history = target_dataset.history
- else:
- target_history = trans.get_history( create=True )
-
- # HACK: tools require unencoded parameters but kwargs are typically
- # encoded, so try decoding all parameter values.
- for key, value in kwargs.items():
- try:
- value = trans.security.decode_id( value )
- kwargs[ key ] = value
- except:
- pass
-
- #
- # Execute tool.
- #
- tool = trans.app.toolbox.get_tool( tool_id )
- if not tool:
- return messages.NO_TOOL
-
- # HACK: add run button so that tool.handle_input will run tool.
- kwargs['runtool_btn'] = 'Execute'
- params = util.Params( kwargs, sanitize = False )
- template, vars = tool.handle_input( trans, params.__dict__, history=target_history )
-
- # TODO: check for errors and ensure that output dataset is available.
- output_datasets = vars[ 'out_data' ].values()
- return self.add_track_async( trans, output_datasets[0].id )
-
- @web.expose
- def rerun_tool( self, trans, dataset_id, tool_id, chrom=None, low=None, high=None, **kwargs ):
- """
- Rerun a tool to produce a new output dataset that corresponds to a
- dataset that a user is currently viewing.
- """
-
- #
- # TODO: refactor to use same code as run_tool.
- #
-
- # Run tool on region if region is specificied.
- run_on_region = False
- if chrom and low and high:
- run_on_region = True
- low, high = int( low ), int( high )
-
- # Dataset check.
- original_dataset = self.get_dataset( trans, dataset_id, check_ownership=False, check_accessible=True )
- msg = self._check_dataset_state( trans, original_dataset )
- if msg:
- return to_json_string( msg )
-
- #
- # Set tool parameters--except non-hidden dataset parameters--using combination of
- # job's previous parameters and incoming parameters. Incoming parameters
- # have priority.
- #
- original_job = get_dataset_job( original_dataset )
- tool = trans.app.toolbox.get_tool( original_job.tool_id )
- if not tool:
- return messages.NO_TOOL
- tool_params = dict( [ ( p.name, p.value ) for p in original_job.parameters ] )
- # TODO: need to handle updates to conditional parameters; conditional
- # params are stored in dicts (and dicts within dicts).
- tool_params.update( dict( [ ( key, value ) for key, value in kwargs.items() if key in tool.inputs ] ) )
- tool_params = tool.params_from_strings( tool_params, self.app )
-
- #
- # If running tool on region, convert input datasets (create indices) so
- # that can regions of data can be quickly extracted.
- #
- messages_list = []
- if run_on_region:
- for jida in original_job.input_datasets:
- input_dataset = jida.dataset
- if get_data_provider( original_dataset=input_dataset ):
- # Can index dataset.
- track_type, data_sources = input_dataset.datatype.get_track_type()
- # Convert to datasource that provides 'data' because we need to
- # extract the original data.
- data_source = data_sources[ 'data' ]
- msg = self._convert_dataset( trans, input_dataset, data_source )
- if msg is not None:
- messages_list.append( msg )
-
- # Return any messages generated during conversions.
- return_message = _get_highest_priority_msg( messages_list )
- if return_message:
- return to_json_string( return_message )
-
- #
- # Set target history (the history that tool will use for inputs/outputs).
- # If user owns dataset, put new data in original dataset's history; if
- # user does not own dataset (and hence is accessing dataset via sharing),
- # put new data in user's current history.
- #
- if original_dataset.history.user == trans.user:
- target_history = original_dataset.history
- else:
- target_history = trans.get_history( create=True )
- hda_permissions = trans.app.security_agent.history_get_default_permissions( target_history )
-
- def set_param_value( param_dict, param_name, param_value ):
- """
- Set new parameter value in a tool's parameter dictionary.
- """
-
- # Recursive function to set param value.
- def set_value( param_dict, group_name, group_index, param_name, param_value ):
- if group_name in param_dict:
- param_dict[ group_name ][ group_index ][ param_name ] = param_value
- return True
- elif param_name in param_dict:
- param_dict[ param_name ] = param_value
- return True
- else:
- # Recursive search.
- return_val = False
- for name, value in param_dict.items():
- if isinstance( value, dict ):
- return_val = set_value( value, group_name, group_index, param_name, param_value)
- if return_val:
- return return_val
- return False
-
- # Parse parameter name if necessary.
- if param_name.find( "|" ) == -1:
- # Non-grouping parameter.
- group_name = group_index = None
- else:
- # Grouping parameter.
- group, param_name = param_name.split( "|" )
- index = group.rfind( "_" )
- group_name = group[ :index ]
- group_index = int( group[ index + 1: ] )
-
- return set_value( param_dict, group_name, group_index, param_name, param_value )
-
- # Set parameters based tool's trackster config.
- params_set = {}
- for action in tool.trackster_conf.actions:
- success = False
- for joda in original_job.output_datasets:
- if joda.name == action.output_name:
- set_param_value( tool_params, action.name, joda.dataset )
- params_set[ action.name ] = True
- success = True
- break
- if not success:
- return messages.ERROR
-
- #
- # Set input datasets for tool. If running on region, extract and use subset
- # when possible.
- #
- location = "%s:%i-%i" % ( chrom, low, high )
- for jida in original_job.input_datasets:
- # If param set previously by config actions, do nothing.
- if jida.name in params_set:
- continue
-
- input_dataset = jida.dataset
- if input_dataset is None: #optional dataset and dataset wasn't selected
- tool_params[ jida.name ] = None
- elif run_on_region and hasattr( input_dataset.datatype, 'get_track_type' ):
- # Dataset is indexed and hence a subset can be extracted and used
- # as input.
-
- # Look for subset.
- subset_dataset_association = trans.sa_session.query( trans.app.model.HistoryDatasetAssociationSubset ) \
- .filter_by( hda=input_dataset, location=location ) \
- .first()
- if subset_dataset_association:
- # Data subset exists.
- subset_dataset = subset_dataset_association.subset
- else:
- # Need to create subset.
- track_type, data_sources = input_dataset.datatype.get_track_type()
- data_source = data_sources[ 'data' ]
- converted_dataset = input_dataset.get_converted_dataset( trans, data_source )
- deps = input_dataset.get_converted_dataset_deps( trans, data_source )
-
- # Create new HDA for input dataset's subset.
- new_dataset = trans.app.model.HistoryDatasetAssociation( extension=input_dataset.ext, \
- dbkey=input_dataset.dbkey, \
- create_dataset=True, \
- sa_session=trans.sa_session,
- name="Subset [%s] of data %i" % \
- ( location, input_dataset.hid ),
- visible=False )
- target_history.add_dataset( new_dataset )
- trans.sa_session.add( new_dataset )
- trans.app.security_agent.set_all_dataset_permissions( new_dataset.dataset, hda_permissions )
-
- # Write subset of data to new dataset
- data_provider_class = get_data_provider( original_dataset=input_dataset )
- data_provider = data_provider_class( original_dataset=input_dataset,
- converted_dataset=converted_dataset,
- dependencies=deps )
- trans.app.object_store.create( new_dataset.dataset )
- data_provider.write_data_to_file( chrom, low, high, new_dataset.file_name )
-
- # TODO: (a) size not working; (b) need to set peek.
- new_dataset.set_size()
- new_dataset.info = "Data subset for trackster"
- new_dataset.set_dataset_state( trans.app.model.Dataset.states.OK )
-
- # Set metadata.
- # TODO: set meta internally if dataset is small enough?
- if trans.app.config.set_metadata_externally:
- trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool,
- trans, incoming = { 'input1':new_dataset },
- overwrite=False, job_params={ "source" : "trackster" } )
- else:
- message = 'Attributes updated'
- new_dataset.set_meta()
- new_dataset.datatype.after_setting_metadata( new_dataset )
-
- # Add HDA subset association.
- subset_association = trans.app.model.HistoryDatasetAssociationSubset( hda=input_dataset, subset=new_dataset, location=location )
- trans.sa_session.add( subset_association )
-
- subset_dataset = new_dataset
-
- trans.sa_session.flush()
-
- # Add dataset to tool's parameters.
- if not set_param_value( tool_params, jida.name, subset_dataset ):
- return to_json_string( { "error" : True, "message" : "error setting parameter %s" % jida.name } )
-
- #
- # Execute tool and handle outputs.
- #
- try:
- subset_job, subset_job_outputs = tool.execute( trans, incoming=tool_params,
- history=target_history,
- job_params={ "source" : "trackster" } )
- except Exception, e:
- # Lots of things can go wrong when trying to execute tool.
- return to_json_string( { "error" : True, "message" : e.__class__.__name__ + ": " + str(e) } )
- if run_on_region:
- for output in subset_job_outputs.values():
- output.visible = False
- trans.sa_session.flush()
-
- #
- # Return new track that corresponds to the original dataset.
- #
- output_name = None
- for joda in original_job.output_datasets:
- if joda.dataset == original_dataset:
- output_name = joda.name
- break
- for joda in subset_job.output_datasets:
- if joda.name == output_name:
- output_dataset = joda.dataset
-
- return self.add_track_async( trans, output_dataset.id )
-
@web.expose
@web.require_login( "use Galaxy visualizations", use_panels=True )
def paramamonster( self, trans, hda_ldda, dataset_id ):
@@ -799,18 +514,6 @@
# Helper methods.
# -----------------
- def _check_dataset_state( self, trans, dataset ):
- """
- Returns a message if dataset is not ready to be used in visualization.
- """
- if not dataset:
- return messages.NO_DATA
- if dataset.state == trans.app.model.Job.states.ERROR:
- return messages.ERROR
- if dataset.state != trans.app.model.Job.states.OK:
- return messages.PENDING
- return None
-
def _get_datasources( self, trans, dataset ):
"""
Returns datasources for dataset; if datasources are not available
@@ -833,56 +536,10 @@
data_sources_dict[ source_type ] = { "name" : data_source, "message": msg }
return data_sources_dict
-
- def _convert_dataset( self, trans, dataset, target_type ):
- """
- Converts a dataset to the target_type and returns a message indicating
- status of the conversion. None is returned to indicate that dataset
- was converted successfully.
- """
-
- # Get converted dataset; this will start the conversion if necessary.
- try:
- converted_dataset = dataset.get_converted_dataset( trans, target_type )
- except NoConverterException:
- return messages.NO_CONVERTER
- except ConverterDependencyException, dep_error:
- return { 'kind': messages.ERROR, 'message': dep_error.value }
-
- # Check dataset state and return any messages.
- msg = None
- if converted_dataset and converted_dataset.state == model.Dataset.states.ERROR:
- job_id = trans.sa_session.query( trans.app.model.JobToOutputDatasetAssociation ) \
- .filter_by( dataset_id=converted_dataset.id ).first().job_id
- job = trans.sa_session.query( trans.app.model.Job ).get( job_id )
- msg = { 'kind': messages.ERROR, 'message': job.stderr }
- elif not converted_dataset or converted_dataset.state != model.Dataset.states.OK:
- msg = messages.PENDING
-
- return msg
-
+
def _get_dataset( self, trans, hda_ldda, dataset_id ):
""" Returns either HDA or LDDA for hda/ldda and id combination. """
if hda_ldda == "hda":
return self.get_dataset( trans, dataset_id, check_ownership=False, check_accessible=True )
else:
- return trans.sa_session.query( trans.app.model.LibraryDatasetDatasetAssociation ).get( trans.security.decode_id( dataset_id ) )
-
-
-def _get_highest_priority_msg( message_list ):
- """
- Returns highest priority message from a list of messages.
- """
- return_message = None
-
- # For now, priority is: job error (dict), no converter, pending.
- for message in message_list:
- if message is not None:
- if isinstance(message, dict):
- return_message = message
- break
- elif message == messages.NO_CONVERTER:
- return_message = message
- elif return_message == None and message == messages.PENDING:
- return_message = message
- return return_message
+ return trans.sa_session.query( trans.app.model.LibraryDatasetDatasetAssociation ).get( trans.security.decode_id( dataset_id ) )
\ No newline at end of file
diff -r b2eabe39a70f676b8cb3b90a656501804547fd87 -r 4fbd05095ca70adf740ed635451c0ec876635f50 lib/galaxy/web/controllers/visualization.py
--- a/lib/galaxy/web/controllers/visualization.py
+++ b/lib/galaxy/web/controllers/visualization.py
@@ -69,7 +69,7 @@
class VisualizationController( BaseUIController, SharableMixin, UsesAnnotations,
- UsesHistoryMixinDatasetAssociationMixin, UsesVisualizationMixin,
+ UsesHistoryDatasetAssociationMixin, UsesVisualizationMixin,
UsesItemRatings ):
_user_list_grid = VisualizationListGrid()
_published_list_grid = VisualizationAllPublishedGrid()
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/b2eabe39a70f/
changeset: b2eabe39a70f
user: greg
date: 2012-06-12 21:15:08
summary: Enhance the tool's DependencyManager to handle information stored in tool_dependency database records when finding tool dependencies installed with tool shed repositories. Many miscellaneous tool shed dependency fixes are included with this change set. Several fixes for uninstalling and reinstalling tool shed repositories and associated tool dependencies are also included.
affected #: 9 files
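As a quick illustration (not from the changeset itself), the move_directory_files()/move_file() helpers factored into common_util.py in the diff below can be exercised directly. The import path follows the file path in the diff and assumes a Galaxy source tree on PYTHONPATH; all paths here are made up for the example.

import os, tempfile
from galaxy.tool_shed.tool_dependencies import common_util

# Build a throwaway layout resembling an unpacked package.
work_dir = tempfile.mkdtemp()
build_dir = os.path.join( work_dir, 'pkg-1.0' )
os.makedirs( os.path.join( build_dir, 'bin' ) )
open( os.path.join( build_dir, 'bin', 'example_binary' ), 'w' ).close()

# Move everything under pkg-1.0/bin into <install_dir>/bin, creating it if needed.
install_dir = os.path.join( work_dir, 'install' )
common_util.move_directory_files( current_dir=build_dir,
                                  source_dir='bin',
                                  destination_dir=os.path.join( install_dir, 'bin' ) )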
diff -r b762062399b3cab0a4139dc3fcd33f30945e49ac -r b2eabe39a70f676b8cb3b90a656501804547fd87 lib/galaxy/tool_shed/install_manager.py
--- a/lib/galaxy/tool_shed/install_manager.py
+++ b/lib/galaxy/tool_shed/install_manager.py
@@ -120,7 +120,7 @@
if not is_displayed:
is_displayed = True
return is_displayed, tool_sections
- def handle_repository_contents( self, repository_clone_url, relative_install_dir, repository_elem, repository_name, description, changeset_revision,
+ def handle_repository_contents( self, repository_clone_url, relative_install_dir, repository_elem, repository_name, description, installed_changeset_revision,
ctx_rev, install_dependencies ):
# Generate the metadata for the installed tool shed repository, among other things. It is critical that the installed repository is
# updated to the desired changeset_revision before metadata is set because the process for setting metadata uses the repository files on disk.
@@ -141,11 +141,11 @@
# Add a new record to the tool_shed_repository table if one doesn't already exist. If one exists but is marked
# deleted, undelete it. It is critical that this happens before the call to add_to_tool_panel() below because
# tools will not be properly loaded if the repository is marked deleted.
- print "Adding new row (or updating an existing row) for repository '%s' in the tool_shed_repository table." % repository_name
+ print "Adding new row (or updating an existing row) for repository '%s' in the tool_shed_repository table." % repository_name
tool_shed_repository = create_or_update_tool_shed_repository( self.app,
repository_name,
description,
- changeset_revision,
+ installed_changeset_revision,
ctx_rev,
repository_clone_url,
metadata_dict,
@@ -158,7 +158,7 @@
# Handle missing data table entries for tool parameters that are dynamically generated select lists.
repository_tools_tups = handle_missing_data_table_entry( self.app,
tool_shed_repository,
- changeset_revision,
+ installed_changeset_revision,
self.tool_path,
repository_tools_tups,
work_dir )
@@ -171,12 +171,12 @@
tool_dependencies_config = get_config_from_repository( self.app,
'tool_dependencies.xml',
tool_shed_repository,
- changeset_revision,
+ installed_changeset_revision,
work_dir )
# Install tool dependencies.
status, message = handle_tool_dependencies( app=self.app,
tool_shed_repository=tool_shed_repository,
- installed_changeset_revision=changeset_revision,
+ installed_changeset_revision=installed_changeset_revision,
tool_dependencies_config=tool_dependencies_config )
if status != 'ok' and message:
print 'The following error occurred from the InstallManager while installing tool dependencies:'
@@ -184,7 +184,7 @@
add_to_tool_panel( self.app,
repository_name,
repository_clone_url,
- changeset_revision,
+ installed_changeset_revision,
repository_tools_tups,
self.repository_owner,
self.migrated_tools_config,
@@ -199,7 +199,7 @@
datatypes_config = get_config_from_repository( self.app,
'datatypes_conf.xml',
tool_shed_repository,
- changeset_revision,
+ installed_changeset_revision,
work_dir )
# Load proprietary data types required by tools. The value of override is not important here since the Galaxy server will be started
# after this installation completes.
@@ -209,7 +209,7 @@
repository_dict = create_repository_dict_for_proprietary_datatypes( tool_shed=self.tool_shed,
name=repository_name,
owner=self.repository_owner,
- installed_changeset_revision=changeset_revision,
+ installed_changeset_revision=installed_changeset_revision,
tool_dicts=metadata_dict.get( 'tools', [] ),
converter_path=converter_path,
display_path=display_path )
@@ -228,29 +228,29 @@
# Install a single repository, loading contained tools into the tool panel.
name = repository_elem.get( 'name' )
description = repository_elem.get( 'description' )
- changeset_revision = repository_elem.get( 'changeset_revision' )
+ installed_changeset_revision = repository_elem.get( 'changeset_revision' )
# Install path is of the form: <tool path>/<tool shed>/repos/<repository owner>/<repository name>/<installed changeset revision>
- clone_dir = os.path.join( self.tool_path, self.tool_shed, 'repos', self.repository_owner, name, changeset_revision )
+ clone_dir = os.path.join( self.tool_path, self.tool_shed, 'repos', self.repository_owner, name, installed_changeset_revision )
if self.__isinstalled( clone_dir ):
print "Skipping automatic install of repository '", name, "' because it has already been installed in location ", clone_dir
else:
tool_shed_url = self.__get_url_from_tool_shed( self.tool_shed )
repository_clone_url = os.path.join( tool_shed_url, 'repos', self.repository_owner, name )
relative_install_dir = os.path.join( clone_dir, name )
- ctx_rev = get_ctx_rev( tool_shed_url, name, self.repository_owner, changeset_revision )
+ ctx_rev = get_ctx_rev( tool_shed_url, name, self.repository_owner, installed_changeset_revision )
clone_repository( repository_clone_url, os.path.abspath( relative_install_dir ), ctx_rev )
tool_shed_repository, metadata_dict = self.handle_repository_contents( repository_clone_url,
relative_install_dir,
repository_elem,
name,
description,
- changeset_revision,
+ installed_changeset_revision,
ctx_rev,
install_dependencies )
if 'tools' in metadata_dict:
# Get the tool_versions from the tool shed for each tool in the installed change set.
url = '%s/repository/get_tool_versions?name=%s&owner=%s&changeset_revision=%s&webapp=galaxy&no_reset=true' % \
- ( tool_shed_url, tool_shed_repository.name, self.repository_owner, changeset_revision )
+ ( tool_shed_url, tool_shed_repository.name, self.repository_owner, installed_changeset_revision )
response = urllib2.urlopen( url )
text = response.read()
response.close()
diff -r b762062399b3cab0a4139dc3fcd33f30945e49ac -r b2eabe39a70f676b8cb3b90a656501804547fd87 lib/galaxy/tool_shed/tool_dependencies/common_util.py
--- a/lib/galaxy/tool_shed/tool_dependencies/common_util.py
+++ b/lib/galaxy/tool_shed/tool_dependencies/common_util.py
@@ -1,6 +1,10 @@
-import os, tarfile, urllib2
+import os, shutil, tarfile, urllib2
from galaxy.datatypes.checkers import *
+DIRECTORY_BUILD_COMMAND_NAMES = [ 'change_directory' ]
+MOVE_BUILD_COMMAND_NAMES = [ 'move_directory_files', 'move_file' ]
+ALL_BUILD_COMMAND_NAMES = DIRECTORY_BUILD_COMMAND_NAMES + MOVE_BUILD_COMMAND_NAMES
+
def extract_tar( file_name, file_path ):
if isgzip( file_name ) or isbz2( file_name ):
# Open for reading with transparent compression.
@@ -17,6 +21,21 @@
return tarfile.is_tarfile( file_path )
def iszip( file_path ):
return check_zip( file_path )
+def move_directory_files( current_dir, source_dir, destination_dir ):
+ source_directory = os.path.abspath( os.path.join( current_dir, source_dir ) )
+ destination_directory = os.path.join( destination_dir )
+ if not os.path.isdir( destination_directory ):
+ os.makedirs( destination_directory )
+ for file_name in os.listdir( source_directory ):
+ source_file = os.path.join( source_directory, file_name )
+ destination_file = os.path.join( destination_directory, file_name )
+ shutil.move( source_file, destination_file )
+def move_file( current_dir, source, destination_dir ):
+ source_file = os.path.abspath( os.path.join( current_dir, source ) )
+ destination_directory = os.path.join( destination_dir )
+ if not os.path.isdir( destination_directory ):
+ os.makedirs( destination_directory )
+ shutil.move( source_file, destination_directory )
def tar_extraction_directory( file_path, file_name ):
file_name = file_name.strip()
extensions = [ '.tar.gz', '.tgz', '.tar.bz2', '.zip' ]
diff -r b762062399b3cab0a4139dc3fcd33f30945e49ac -r b2eabe39a70f676b8cb3b90a656501804547fd87 lib/galaxy/tool_shed/tool_dependencies/fabric_util.py
--- a/lib/galaxy/tool_shed/tool_dependencies/fabric_util.py
+++ b/lib/galaxy/tool_shed/tool_dependencies/fabric_util.py
@@ -1,7 +1,7 @@
# For Python 2.5
from __future__ import with_statement
-import os, shutil
+import os, shutil, tempfile
from contextlib import contextmanager
import common_util
@@ -13,10 +13,6 @@
from fabric.api import env, lcd, local, settings
-DIRECTORY_BUILD_COMMAND_NAMES = [ 'change_directory' ]
-MOVE_BUILD_COMMAND_NAMES = [ 'move_directory_files', 'move_file' ]
-ALL_BUILD_COMMAND_NAMES = DIRECTORY_BUILD_COMMAND_NAMES + MOVE_BUILD_COMMAND_NAMES
-
def check_fabric_version():
version = env.version
if int( version.split( "." )[ 0 ] ) < 1:
@@ -32,18 +28,12 @@
return env
@contextmanager
def make_tmp_dir():
- tmp_dir = local( 'echo $TMPDIR' ).strip()
- if not tmp_dir:
- home_dir = local( 'echo $HOME' )
- tmp_dir = os.path.join( home_dir, 'tmp' )
- work_dir = os.path.join( tmp_dir, 'deploy_tmp' )
- if not os.path.exists( work_dir ):
- local( 'mkdir -p %s' % work_dir )
+ work_dir = tempfile.mkdtemp()
yield work_dir
if os.path.exists( work_dir ):
local( 'rm -rf %s' % work_dir )
def handle_post_build_processing( tool_dependency_dir, install_dir, package_name=None ):
- cmd = "echo 'PATH=%s/bin:$PATH' > %s/env.sh;chmod +x %s/env.sh" % ( install_dir, install_dir, install_dir )
+ cmd = "echo 'PATH=%s/bin:$PATH; export PATH' > %s/env.sh;chmod +x %s/env.sh" % ( install_dir, install_dir, install_dir )
message = ''
output = local( cmd, capture=True )
log_results( cmd, output, os.path.join( install_dir, 'env_sh.log' ) )
@@ -94,7 +84,7 @@
build_command_items = build_command_key.split( 'v^v^v' )
build_command_name = build_command_items[ 0 ]
build_command = build_command_items[ 1 ]
- elif build_command_key in ALL_BUILD_COMMAND_NAMES:
+ elif build_command_key in common_util.ALL_BUILD_COMMAND_NAMES:
build_command_name = build_command_key
else:
build_command_name = None
@@ -103,16 +93,13 @@
current_dir = os.path.join( current_dir, build_command )
lcd( current_dir )
elif build_command_name == 'move_directory_files':
- source_directory = os.path.abspath( os.path.join( current_dir, build_command_dict[ 'source_directory' ] ) )
- destination_directory = build_command_dict[ 'destination_directory' ]
- for file_name in os.listdir( source_directory ):
- source_file = os.path.join( source_directory, file_name )
- destination_file = os.path.join( destination_directory, file_name )
- shutil.move( source_file, destination_file )
+ common_util.move_directory_files( current_dir=current_dir,
+ source_dir=os.path.join( build_command_dict[ 'source_directory' ] ),
+ destination_dir=os.path.join( build_command_dict[ 'destination_directory' ] ) )
elif build_command_name == 'move_file':
- source_file = os.path.abspath( os.path.join( current_dir, build_command_dict[ 'source' ] ) )
- destination = build_command_dict[ 'destination' ]
- shutil.move( source_file, destination )
+ common_util.move_file( current_dir=current_dir,
+ source=os.path.join( build_command_dict[ 'source' ] ),
+ destination_dir=os.path.join( build_command_dict[ 'destination' ] ) )
else:
build_command = build_command_key
with settings( warn_only=True ):
diff -r b762062399b3cab0a4139dc3fcd33f30945e49ac -r b2eabe39a70f676b8cb3b90a656501804547fd87 lib/galaxy/tool_shed/tool_dependencies/install_util.py
--- a/lib/galaxy/tool_shed/tool_dependencies/install_util.py
+++ b/lib/galaxy/tool_shed/tool_dependencies/install_util.py
@@ -1,4 +1,5 @@
import sys, os, subprocess, tempfile
+from common_util import *
from fabric_util import *
from galaxy.tool_shed.encoding_util import *
from galaxy.model.orm import *
@@ -11,16 +12,32 @@
from elementtree.ElementTree import Element, SubElement
def create_or_update_tool_dependency( app, tool_shed_repository, changeset_revision, name, version, type ):
+ """
+ This method is called from Galaxy (never the tool shed) when a new tool_shed_repository is being installed or when an ininstalled repository is
+ being reinstalled.
+ """
+ # First see if a tool_dependency record exists for the received changeset_revision.
sa_session = app.model.context.current
tool_dependency = get_tool_dependency_by_shed_changeset_revision( app, tool_shed_repository, name, version, type, changeset_revision )
if tool_dependency:
tool_dependency.uninstalled = False
else:
- tool_dependency = app.model.ToolDependency( tool_shed_repository_id=tool_shed_repository.id,
- installed_changeset_revision=changeset_revision,
- name=name,
- version=version,
- type=type )
+ # Check the tool_shed_repository's set of tool_depnedency records for any that are marked uninstalled. If one is found, set uninstalled to
+ # False and update the value of installed_changeset_revision.
+ found = False
+ for tool_dependency in tool_shed_repository.tool_dependencies:
+ if tool_dependency.name == name and tool_dependency.version == version and tool_dependency.type == type and tool_dependency.uninstalled:
+ found = True
+ tool_dependency.uninstalled = False
+ tool_dependency.installed_changeset_revision = changeset_revision
+ break
+ if not found:
+ # Create a new tool_dependency record for the tool_shed_repository.
+ tool_dependency = app.model.ToolDependency( tool_shed_repository_id=tool_shed_repository.id,
+ installed_changeset_revision=changeset_revision,
+ name=name,
+ version=version,
+ type=type )
sa_session.add( tool_dependency )
sa_session.flush()
return tool_dependency
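The intent of the change above is to reuse a matching ToolDependency record that was previously marked uninstalled, and only create a new record otherwise. A stripped-down sketch of that decision, with plain Python callables standing in for the SQLAlchemy model:

def reuse_or_create_dependency( repository, name, version, type, changeset_revision, create_record ):
    # Prefer resurrecting a matching record that was previously marked uninstalled.
    for dependency in repository.tool_dependencies:
        if ( dependency.name, dependency.version, dependency.type ) == ( name, version, type ) and dependency.uninstalled:
            dependency.uninstalled = False
            dependency.installed_changeset_revision = changeset_revision
            return dependency
    # Otherwise create a brand new record; create_record stands in for the ToolDependency constructor.
    return create_record( name=name, version=version, type=type,
                          installed_changeset_revision=changeset_revision )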
diff -r b762062399b3cab0a4139dc3fcd33f30945e49ac -r b2eabe39a70f676b8cb3b90a656501804547fd87 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -794,6 +794,20 @@
if tool_version:
return tool_version.get_version_ids( self.app )
return []
+ @property
+ def installed_tool_dependencies( self ):
+ # If this tool is included in an installed tool shed repository and tool dependencies were installed along with the
+ # tool shed repository, then this method will return the repository's ToolDependency records.
+ if self.app.config.use_tool_dependencies:
+ if self.tool_shed:
+ tool_shed_repository = get_tool_shed_repository_by_shed_name_owner_changeset_revision( self.app,
+ self.tool_shed,
+ self.repository_name,
+ self.repository_owner,
+ self.installed_changeset_revision )
+ if tool_shed_repository:
+ return tool_shed_repository.tool_dependencies
+ return None
def __get_job_run_config( self, run_configs, key, job_params=None ):
# Look through runners/handlers to find one with matching parameters.
available_configs = []
@@ -1113,7 +1127,7 @@
for stdio_elem in ( root.findall( 'stdio' ) ):
self.parse_stdio_exit_codes( stdio_elem )
self.parse_stdio_regexes( stdio_elem )
- except Exception as e:
+ except Exception, e:
log.error( "Exception in parse_stdio! " + str(sys.exc_info()) )
def parse_stdio_exit_codes( self, stdio_elem ):
@@ -1185,7 +1199,7 @@
log.warning( "Tool exit_code range %s will match on "
+ "all exit codes" % code_range )
self.stdio_exit_codes.append( exit_code )
- except Exception as e:
+ except Exception, e:
log.error( "Exception in parse_stdio_exit_codes! "
+ str(sys.exc_info()) )
trace = sys.exc_info()[2]
@@ -1244,7 +1258,7 @@
regex.stdout_match = True
regex.stderr_match = True
self.stdio_regexes.append( regex )
- except Exception as e:
+ except Exception, e:
log.error( "Exception in parse_stdio_exit_codes! "
+ str(sys.exc_info()) )
trace = sys.exc_info()[2]
@@ -1270,7 +1284,7 @@
return_level = "warning"
elif ( re.search( "fatal", err_level, re.IGNORECASE ) ):
return_level = "fatal"
- except Exception as e:
+ except Exception, e:
log.error( "Exception in parse_error_level "
+ str(sys.exc_info() ) )
trace = sys.exc_info()[2]
@@ -2323,9 +2337,12 @@
# TODO: currently only supporting requirements of type package,
# need to implement some mechanism for mapping other types
# back to packages
- log.debug( "Dependency %s", requirement.name )
+ log.debug( "Building dependency shell command for dependency '%s'", requirement.name )
if requirement.type == 'package':
- script_file, base_path, version = self.app.toolbox.dependency_manager.find_dep( requirement.name, requirement.version )
+ script_file, base_path, version = self.app.toolbox.dependency_manager.find_dep( name=requirement.name,
+ version=requirement.version,
+ type=requirement.type,
+ installed_tool_dependencies=self.installed_tool_dependencies )
if script_file is None and base_path is None:
log.warn( "Failed to resolve dependency on '%s', ignoring", requirement.name )
elif script_file is None:
diff -r b762062399b3cab0a4139dc3fcd33f30945e49ac -r b2eabe39a70f676b8cb3b90a656501804547fd87 lib/galaxy/tools/deps/__init__.py
--- a/lib/galaxy/tools/deps/__init__.py
+++ b/lib/galaxy/tools/deps/__init__.py
@@ -30,7 +30,7 @@
if not os.path.isdir( base_path ):
log.warn( "Path '%s' is not directory, ignoring", base_path )
self.base_paths.append( os.path.abspath( base_path ) )
- def find_dep( self, name, version=None ):
+ def find_dep( self, name, version=None, type='package', installed_tool_dependencies=None ):
"""
Attempt to find a dependency named `name` at version `version`. If
version is None, return the "default" version as determined using a
@@ -40,10 +40,24 @@
if version is None:
return self._find_dep_default( name )
else:
- return self._find_dep_versioned( name, version )
- def _find_dep_versioned( self, name, version ):
+ return self._find_dep_versioned( name, version, installed_tool_dependencies=installed_tool_dependencies )
+ def _find_dep_versioned( self, name, version, type='package', installed_tool_dependencies=None ):
+ installed_dependency = None
+ if installed_tool_dependencies:
+ for installed_dependency in installed_tool_dependencies:
+ if not installed_dependency.uninstalled:
+ if installed_dependency.name == name and installed_dependency.version == version and installed_dependency.type == type:
+ break
for base_path in self.base_paths:
- path = os.path.join( base_path, name, version )
+ if installed_dependency:
+ tool_shed_repository = installed_dependency.tool_shed_repository
+ path = os.path.join( base_path,
+ name, version,
+ tool_shed_repository.owner,
+ tool_shed_repository.name,
+ installed_dependency.installed_changeset_revision )
+ else:
+ path = os.path.join( base_path, name, version )
script = os.path.join( path, 'env.sh' )
if os.path.exists( script ):
return script, path, version
@@ -51,7 +65,7 @@
return None, path, version
else:
return None, None, None
- def _find_dep_default( self, name ):
+ def _find_dep_default( self, name, type='package' ):
version = None
for base_path in self.base_paths:
path = os.path.join( base_path, name, 'default' )
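The effect of the change above is a second, tool-shed-aware directory layout alongside the original flat one: a dependency installed with a tool shed repository lives under <name>/<version>/<owner>/<repository name>/<installed changeset revision>, and find_dep looks for an env.sh file in whichever directory applies. A rough illustration with made-up values (the base path, owner, repository and revision below are placeholders, not real data):

import os

base_path = '/galaxy/tool_dependencies'   # example value for app.config.tool_dependency_dir
name, version = 'samtools', '0.1.18'

# Flat layout, used when no installed tool shed dependency matches.
flat_path = os.path.join( base_path, name, version )

# Tool shed layout, used when a matching installed ToolDependency record is found.
owner, repository, installed_changeset_revision = 'devteam', 'package_samtools', 'abc123def456'
shed_path = os.path.join( base_path, name, version, owner, repository, installed_changeset_revision )

for path in ( flat_path, shed_path ):
    print( os.path.join( path, 'env.sh' ) )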
diff -r b762062399b3cab0a4139dc3fcd33f30945e49ac -r b2eabe39a70f676b8cb3b90a656501804547fd87 lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -7,6 +7,7 @@
from galaxy.util.json import *
from galaxy.tools.search import ToolBoxSearch
from galaxy.tool_shed.tool_dependencies.install_util import install_package
+from galaxy.tool_shed.encoding_util import *
from galaxy.model.orm import *
from galaxy import eggs
@@ -310,20 +311,24 @@
tool_dicts=tool_dicts,
converter_path=converter_path,
display_path=display_path )
-def create_or_update_tool_shed_repository( app, name, description, changeset_revision, ctx_rev, repository_clone_url, metadata_dict,
- owner='', dist_to_shed=False ):
+def create_or_update_tool_shed_repository( app, name, description, installed_changeset_revision, ctx_rev, repository_clone_url, metadata_dict,
+ current_changeset_revision=None, owner='', dist_to_shed=False ):
# The received value for dist_to_shed will be True if the InstallManager is installing a repository that contains tools or datatypes that used
# to be in the Galaxy distribution, but have been moved to the main Galaxy tool shed.
- sa_session = app.model.context.current
- tmp_url = clean_repository_clone_url( repository_clone_url )
- tool_shed = tmp_url.split( 'repos' )[ 0 ].rstrip( '/' )
+ if current_changeset_revision is None:
+ # The current_changeset_revision is not passed when a repository is being installed for the first time. If a previously installed repository
+ # was later uninstalled, this value should be the changeset revision to which the repository had been updated just prior to being
+ # uninstalled.
+ current_changeset_revision = installed_changeset_revision
+ sa_session = app.model.context.current
+ tool_shed = get_tool_shed_from_clone_url( repository_clone_url )
if not owner:
- owner = get_repository_owner( tmp_url )
+ owner = get_repository_owner_from_clone_url( repository_clone_url )
includes_datatypes = 'datatypes' in metadata_dict
- tool_shed_repository = get_repository_by_shed_name_owner_changeset_revision( app, tool_shed, name, owner, changeset_revision )
+ tool_shed_repository = get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( app, tool_shed, name, owner, installed_changeset_revision )
if tool_shed_repository:
tool_shed_repository.description = description
- tool_shed_repository.changeset_revision = changeset_revision
+ tool_shed_repository.changeset_revision = current_changeset_revision
tool_shed_repository.ctx_rev = ctx_rev
tool_shed_repository.metadata = metadata_dict
tool_shed_repository.includes_datatypes = includes_datatypes
@@ -335,7 +340,7 @@
description=description,
owner=owner,
installed_changeset_revision=changeset_revision,
- changeset_revision=changeset_revision,
+ changeset_revision=current_changeset_revision,
ctx_rev=ctx_rev,
metadata=metadata_dict,
includes_datatypes=includes_datatypes,
@@ -876,23 +881,16 @@
fh.close()
return tmp_filename
return None
-def get_repository_by_shed_name_owner_changeset_revision( app, tool_shed, name, owner, changeset_revision ):
- sa_session = app.model.context.current
- if tool_shed.find( '//' ) > 0:
- tool_shed = tool_shed.split( '//' )[1]
- tool_shed = tool_shed.rstrip( '/' )
- return sa_session.query( app.model.ToolShedRepository ) \
- .filter( and_( app.model.ToolShedRepository.table.c.tool_shed == tool_shed,
- app.model.ToolShedRepository.table.c.name == name,
- app.model.ToolShedRepository.table.c.owner == owner,
- app.model.ToolShedRepository.table.c.changeset_revision == changeset_revision ) ) \
- .first()
def get_repository_owner( cleaned_repository_url ):
items = cleaned_repository_url.split( 'repos' )
repo_path = items[ 1 ]
if repo_path.startswith( '/' ):
repo_path = repo_path.replace( '/', '', 1 )
return repo_path.lstrip( '/' ).split( '/' )[ 0 ]
+def get_repository_owner_from_clone_url( repository_clone_url ):
+ tmp_url = clean_repository_clone_url( repository_clone_url )
+ tool_shed = tmp_url.split( 'repos' )[ 0 ].rstrip( '/' )
+ return get_repository_owner( tmp_url )
def get_repository_tools_tups( app, metadata_dict ):
repository_tools_tups = []
if 'tools' in metadata_dict:
@@ -988,6 +986,33 @@
relative_install_dir = os.path.join( tool_path, partial_install_dir )
return tool_path, relative_install_dir
return None, None
+def get_tool_shed_from_clone_url( repository_clone_url ):
+ tmp_url = clean_repository_clone_url( repository_clone_url )
+ return tmp_url.split( 'repos' )[ 0 ].rstrip( '/' )
+def get_tool_shed_repository_by_shed_name_owner_changeset_revision( app, tool_shed, name, owner, changeset_revision ):
+ # This method is used only in Galaxy, not the tool shed.
+ sa_session = app.model.context.current
+ if tool_shed.find( '//' ) > 0:
+ tool_shed = tool_shed.split( '//' )[1]
+ tool_shed = tool_shed.rstrip( '/' )
+ return sa_session.query( app.model.ToolShedRepository ) \
+ .filter( and_( app.model.ToolShedRepository.table.c.tool_shed == tool_shed,
+ app.model.ToolShedRepository.table.c.name == name,
+ app.model.ToolShedRepository.table.c.owner == owner,
+ app.model.ToolShedRepository.table.c.changeset_revision == changeset_revision ) ) \
+ .first()
+def get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( app, tool_shed, name, owner, installed_changeset_revision ):
+ # This method is used only in Galaxy, not the tool shed.
+ sa_session = app.model.context.current
+ if tool_shed.find( '//' ) > 0:
+ tool_shed = tool_shed.split( '//' )[1]
+ tool_shed = tool_shed.rstrip( '/' )
+ return sa_session.query( app.model.ToolShedRepository ) \
+ .filter( and_( app.model.ToolShedRepository.table.c.tool_shed == tool_shed,
+ app.model.ToolShedRepository.table.c.name == name,
+ app.model.ToolShedRepository.table.c.owner == owner,
+ app.model.ToolShedRepository.table.c.installed_changeset_revision == installed_changeset_revision ) ) \
+ .first()
def get_tool_version( app, tool_id ):
sa_session = app.model.context.current
return sa_session.query( app.model.ToolVersion ) \
@@ -1000,6 +1025,24 @@
.filter( and_( app.model.ToolVersionAssociation.table.c.parent_id == parent_tool_version.id,
app.model.ToolVersionAssociation.table.c.tool_id == tool_version.id ) ) \
.first()
+def get_update_to_changeset_revision_and_ctx_rev( trans, repository ):
+ """Return the changeset revision hash to which the repository can be updated."""
+ tool_shed_url = get_url_from_repository_tool_shed( trans.app, repository )
+ url = '%s/repository/get_changeset_revision_and_ctx_rev?name=%s&owner=%s&changeset_revision=%s&no_reset=true' % \
+ ( tool_shed_url, repository.name, repository.owner, repository.installed_changeset_revision )
+ try:
+ response = urllib2.urlopen( url )
+ encoded_update_dict = response.read()
+ if encoded_update_dict:
+ update_dict = tool_shed_decode( encoded_update_dict )
+ changeset_revision = update_dict[ 'changeset_revision' ]
+ ctx_rev = update_dict[ 'ctx_rev' ]
+ response.close()
+ except Exception, e:
+ log.debug( "Error getting change set revision for update from the tool shed for repository '%s': %s" % ( repository.name, str( e ) ) )
+ changeset_revision = None
+ ctx_rev = None
+ return changeset_revision, ctx_rev
def get_url_from_repository_tool_shed( app, repository ):
"""
The stored value of repository.tool_shed is something like: toolshed.g2.bx.psu.edu. We need the URL to this tool shed, which is
@@ -1015,7 +1058,8 @@
def handle_missing_data_table_entry( app, repository, changeset_revision, tool_path, repository_tools_tups, dir ):
"""
Inspect each tool to see if any have input parameters that are dynamically generated select lists that require entries in the
- tool_data_table_conf.xml file. This method is called only from Galaxy (not the tool shed) when a repository is being installed.
+ tool_data_table_conf.xml file. This method is called only from Galaxy (not the tool shed) when a repository is being installed
+ or reinstalled.
"""
missing_data_table_entry = False
for index, repository_tools_tup in enumerate( repository_tools_tups ):
@@ -1079,10 +1123,10 @@
return error, message
def handle_tool_dependencies( app, tool_shed_repository, installed_changeset_revision, tool_dependencies_config ):
"""
- Install and build tool dependencies defined in the tool_dependencies_config. This config's tag sets can refer to installation
- methods in Galaxy's tool_dependencies module or to proprietary fabric scripts contained in the repository. Future enhancements
- to handling tool dependencies may provide installation processes in addition to fabric based processes. The dependencies will be
- installed in:
+ Install and build tool dependencies defined in the tool_dependencies_config. This config's tag sets can currently refer to installation
+ methods in Galaxy's tool_dependencies module. In the future, proprietary fabric scripts contained in the repository will be supported.
+ Future enhancements to handling tool dependencies may provide installation processes in addition to fabric based processes. The dependencies
+ will be installed in:
~/<app.config.tool_dependency_dir>/<package_name>/<package_version>/<repository_owner>/<repository_name>/<installed_changeset_revision>
"""
status = 'ok'
@@ -1160,8 +1204,9 @@
def load_installed_display_applications( installed_repository_dict, deactivate=False ):
# Load or deactivate proprietary datatype display applications
app.datatypes_registry.load_display_applications( installed_repository_dict=installed_repository_dict, deactivate=deactivate )
-def load_repository_contents( trans, repository_name, description, owner, changeset_revision, ctx_rev, tool_path, repository_clone_url,
- relative_install_dir, tool_shed=None, tool_section=None, shed_tool_conf=None, install_tool_dependencies=False ):
+def load_repository_contents( trans, repository_name, description, owner, installed_changeset_revision, current_changeset_revision, ctx_rev,
+ tool_path, repository_clone_url, relative_install_dir, tool_shed=None, tool_section=None, shed_tool_conf=None,
+ install_tool_dependencies=False ):
"""
Generate the metadata for the installed tool shed repository, among other things. This method is called from Galaxy (never the tool shed)
when an admin is installing a new repository or reinstalling an uninstalled repository.
@@ -1174,10 +1219,12 @@
tool_shed_repository = create_or_update_tool_shed_repository( trans.app,
repository_name,
description,
- changeset_revision,
+ installed_changeset_revision,
ctx_rev,
repository_clone_url,
metadata_dict,
+ current_changeset_revision=current_changeset_revision,
+ owner='',
dist_to_shed=False )
if 'tools' in metadata_dict:
tool_panel_dict = generate_tool_panel_dict_for_new_install( metadata_dict[ 'tools' ], tool_section )
@@ -1187,7 +1234,7 @@
work_dir = make_tmp_directory()
repository_tools_tups = handle_missing_data_table_entry( trans.app,
tool_shed_repository,
- changeset_revision,
+ current_changeset_revision,
tool_path,
repository_tools_tups,
work_dir )
@@ -1201,12 +1248,14 @@
tool_dependencies_config = get_config_from_repository( trans.app,
'tool_dependencies.xml',
tool_shed_repository,
- changeset_revision,
+ current_changeset_revision,
work_dir )
- # Install dependencies for repository tools.
+ # Install dependencies for repository tools. The tool_dependency.installed_changeset_revision value will be the value of
+ # tool_shed_repository.changeset_revision (this method's current_changeset_revision). This approach will allow for different
+ # versions of the same tool_dependency to be installed for associated versions of tools included in the installed repository.
status, message = handle_tool_dependencies( app=trans.app,
tool_shed_repository=tool_shed_repository,
- installed_changeset_revision=changeset_revision,
+ installed_changeset_revision=current_changeset_revision,
tool_dependencies_config=tool_dependencies_config )
if status != 'ok' and message:
print 'The following error occurred from load_repository_contents while installing tool dependencies:'
@@ -1214,7 +1263,7 @@
add_to_tool_panel( app=trans.app,
repository_name=repository_name,
repository_clone_url=repository_clone_url,
- changeset_revision=changeset_revision,
+ changeset_revision=current_changeset_revision,
repository_tools_tups=repository_tools_tups,
owner=owner,
shed_tool_conf=shed_tool_conf,
@@ -1229,7 +1278,7 @@
datatypes_config = get_config_from_repository( trans.app,
'datatypes_conf.xml',
tool_shed_repository,
- changeset_revision,
+ current_changeset_revision,
work_dir )
# Load data types required by tools.
converter_path, display_path = alter_config_and_load_prorietary_datatypes( trans.app, datatypes_config, relative_install_dir, override=False )
@@ -1238,7 +1287,7 @@
repository_dict = create_repository_dict_for_proprietary_datatypes( tool_shed=tool_shed,
name=repository_name,
owner=owner,
- installed_changeset_revision=changeset_revision,
+ installed_changeset_revision=installed_changeset_revision,
tool_dicts=metadata_dict.get( 'tools', [] ),
converter_path=converter_path,
display_path=display_path )
diff -r b762062399b3cab0a4139dc3fcd33f30945e49ac -r b2eabe39a70f676b8cb3b90a656501804547fd87 lib/galaxy/web/controllers/admin_toolshed.py
--- a/lib/galaxy/web/controllers/admin_toolshed.py
+++ b/lib/galaxy/web/controllers/admin_toolshed.py
@@ -469,7 +469,8 @@
repository_name=name,
description=description,
owner=owner,
- changeset_revision=changeset_revision,
+ installed_changeset_revision=changeset_revision,
+ current_changeset_revision=changeset_revision,
ctx_rev=ctx_rev,
tool_path=tool_path,
repository_clone_url=repository_clone_url,
@@ -606,7 +607,8 @@
def reinstall_repository( self, trans, **kwd ):
message = kwd.get( 'message', '' )
status = kwd.get( 'status', 'done' )
- repository = get_repository( trans, kwd[ 'id' ] )
+ repository_id = kwd[ 'id' ]
+ repository = get_repository( trans, repository_id )
no_changes = kwd.get( 'no_changes', '' )
no_changes_checked = CheckboxField.is_checked( no_changes )
install_tool_dependencies = CheckboxField.is_checked( kwd.get( 'install_tool_dependencies', '' ) )
@@ -621,6 +623,12 @@
else:
ctx_rev = repository.ctx_rev
clone_repository( repository_clone_url, os.path.abspath( relative_install_dir ), ctx_rev )
+ # Since we're reinstalling the repository, we need to find the latest changeset revision to which it can be updated.
+ current_changeset_revision, current_ctx_rev = get_update_to_changeset_revision_and_ctx_rev( trans, repository )
+ if current_ctx_rev != ctx_rev:
+ repo = hg.repository( get_configured_ui(), path=os.path.abspath( relative_install_dir ) )
+ pull_repository( repo, repository_clone_url, current_changeset_revision )
+ update_repository( repo, ctx_rev=current_ctx_rev )
tool_section = None
if repository.includes_tools:
# Get the location in the tool panel in which each tool was originally loaded.
@@ -681,7 +689,8 @@
repository_name=repository.name,
description=repository.description,
owner=repository.owner,
- changeset_revision=repository.installed_changeset_revision,
+ installed_changeset_revision=repository.installed_changeset_revision,
+ current_changeset_revision=current_changeset_revision,
ctx_rev=ctx_rev,
tool_path=tool_path,
repository_clone_url=repository_clone_url,
@@ -691,6 +700,7 @@
shed_tool_conf=shed_tool_conf,
install_tool_dependencies=install_tool_dependencies )
if error_message:
+ # We'll only have an error_message if there was a problem installing tool dependencies.
message += error_message
status = 'error'
repository.uninstalled = False
@@ -787,7 +797,7 @@
changeset_revision = params.get( 'changeset_revision', None )
latest_changeset_revision = params.get( 'latest_changeset_revision', None )
latest_ctx_rev = params.get( 'latest_ctx_rev', None )
- repository = get_repository_by_shed_name_owner_changeset_revision( trans.app, tool_shed_url, name, owner, changeset_revision )
+ repository = get_tool_shed_repository_by_shed_name_owner_changeset_revision( trans.app, tool_shed_url, name, owner, changeset_revision )
if changeset_revision and latest_changeset_revision and latest_ctx_rev:
if changeset_revision == latest_changeset_revision:
message = "The installed repository named '%s' is current, there are no updates available. " % name
diff -r b762062399b3cab0a4139dc3fcd33f30945e49ac -r b2eabe39a70f676b8cb3b90a656501804547fd87 lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -648,6 +648,10 @@
elif not update_to_changeset_hash and changeset_hash == changeset_revision:
# We've found the changeset in the changelog for which we need to get the next update.
update_to_changeset_hash = changeset_hash
+ if from_update_manager:
+ if latest_changeset_revision == changeset_revision:
+ return no_update
+ return update
url += str( latest_changeset_revision )
url += '&latest_ctx_rev=%s' % str( update_to_ctx.rev() )
return trans.response.send_redirect( url )
@@ -1111,6 +1115,57 @@
return to_json_string( tool_version_dicts )
return ''
@web.expose
+ def get_changeset_revision_and_ctx_rev( self, trans, **kwd ):
+ """Handle a request from a local Galaxy instance to retrieve the changeset revision hash to which an installed repository can be updated."""
+ params = util.Params( kwd )
+ message = util.restore_text( params.get( 'message', '' ) )
+ status = params.get( 'status', 'done' )
+ galaxy_url = kwd.get( 'galaxy_url', '' )
+ name = params.get( 'name', None )
+ owner = params.get( 'owner', None )
+ changeset_revision = params.get( 'changeset_revision', None )
+ repository = get_repository_by_name_and_owner( trans, name, owner )
+ repo_dir = repository.repo_path
+ repo = hg.repository( get_configured_ui(), repo_dir )
+ # Default to the received changeset revision and ctx_rev.
+ update_to_ctx = get_changectx_for_changeset( repo, changeset_revision )
+ latest_changeset_revision = changeset_revision
+ update_dict = dict( changeset_revision=str( update_to_ctx ), ctx_rev=str( update_to_ctx.rev() ) )
+ if changeset_revision == repository.tip:
+ # If changeset_revision is the repository tip, there are no additional updates.
+ return tool_shed_encode( update_dict )
+ else:
+ repository_metadata = get_repository_metadata_by_changeset_revision( trans,
+ trans.security.encode_id( repository.id ),
+ changeset_revision )
+ if repository_metadata:
+ # If changeset_revision is in the repository_metadata table for this repository, there are no additional updates.
+ return tool_shed_encode( update_dict )
+ else:
+ # The changeset_revision column in the repository_metadata table has been updated with a new changeset_revision value since the
+ # repository was installed. We need to find the changeset_revision to which we need to update.
+ update_to_changeset_hash = None
+ for changeset in repo.changelog:
+ changeset_hash = str( repo.changectx( changeset ) )
+ ctx = get_changectx_for_changeset( repo, changeset_hash )
+ if update_to_changeset_hash:
+ if get_repository_metadata_by_changeset_revision( trans, trans.security.encode_id( repository.id ), changeset_hash ):
+ # We found a RepositoryMetadata record.
+ if changeset_hash == repository.tip:
+ # The current ctx is the repository tip, so use it.
+ update_to_ctx = get_changectx_for_changeset( repo, changeset_hash )
+ latest_changeset_revision = changeset_hash
+ else:
+ update_to_ctx = get_changectx_for_changeset( repo, update_to_changeset_hash )
+ latest_changeset_revision = update_to_changeset_hash
+ break
+ elif not update_to_changeset_hash and changeset_hash == changeset_revision:
+ # We've found the changeset in the changelog for which we need to get the next update.
+ update_to_changeset_hash = changeset_hash
+ update_dict[ 'changeset_revision' ] = str( latest_changeset_revision )
+ update_dict[ 'ctx_rev' ] = str( update_to_ctx.rev() )
+ return tool_shed_encode( update_dict )
+ @web.expose
def help( self, trans, **kwd ):
params = util.Params( kwd )
message = util.restore_text( params.get( 'message', '' ) )
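Taken together with get_update_to_changeset_revision_and_ctx_rev in shed_util.py above, the update check is a simple request/response exchange: Galaxy hits this controller, and the tool shed answers with an encoded dictionary carrying changeset_revision and ctx_rev. A minimal sketch of that round trip; encode and decode below are stand-ins for tool_shed_encode and tool_shed_decode (from galaxy.tool_shed.encoding_util), whose wire format is not reproduced in this diff:

import json

def encode( value ):
    # Stand-in for tool_shed_encode(); only meant to show the shape of the payload.
    return json.dumps( value )

def decode( value ):
    # Stand-in for tool_shed_decode().
    return json.loads( value )

# Tool shed side: report the revision an installed repository can be updated to (values are hypothetical).
encoded = encode( dict( changeset_revision='abc123def456', ctx_rev='27' ) )

# Galaxy side: decode the response and pull out the two fields.
update_dict = decode( encoded )
print( '%s %s' % ( update_dict[ 'changeset_revision' ], update_dict[ 'ctx_rev' ] ) )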
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Migration script to alter the tool_dependency.version database table column type to Text.
by Bitbucket 12 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/b762062399b3/
changeset: b762062399b3
user: greg
date: 2012-06-12 21:09:39
summary: Migration script to alter the tool_dependency.version database table column type to Text.
affected #: 2 files
diff -r 9a3ab5009e8e70596a07283e3611661410f92c4d -r b762062399b3cab0a4139dc3fcd33f30945e49ac lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py
+++ b/lib/galaxy/model/mapping.py
@@ -397,7 +397,7 @@
Column( "tool_shed_repository_id", Integer, ForeignKey( "tool_shed_repository.id" ), index=True, nullable=False ),
Column( "installed_changeset_revision", TrimmedString( 255 ) ),
Column( "name", TrimmedString( 255 ) ),
- Column( "version", TrimmedString( 40 ) ),
+ Column( "version", Text ),
Column( "type", TrimmedString( 40 ) ),
Column( "uninstalled", Boolean, default=False ) )
diff -r 9a3ab5009e8e70596a07283e3611661410f92c4d -r b762062399b3cab0a4139dc3fcd33f30945e49ac lib/galaxy/model/migrate/versions/0100_alter_tool_dependency_table_version_column.py
--- /dev/null
+++ b/lib/galaxy/model/migrate/versions/0100_alter_tool_dependency_table_version_column.py
@@ -0,0 +1,53 @@
+"""
+Migration script to alter the type of the tool_dependency.version column from TrimmedString(40) to Text.
+"""
+
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from migrate import *
+from migrate.changeset import *
+
+import datetime
+now = datetime.datetime.utcnow
+# Need our custom types, but don't import anything else from model
+from galaxy.model.custom_types import *
+
+import sys, logging
+log = logging.getLogger( __name__ )
+log.setLevel(logging.DEBUG)
+handler = logging.StreamHandler( sys.stdout )
+format = "%(name)s %(levelname)s %(asctime)s %(message)s"
+formatter = logging.Formatter( format )
+handler.setFormatter( formatter )
+log.addHandler( handler )
+
+metadata = MetaData( migrate_engine )
+db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
+
+def upgrade():
+ print __doc__
+ metadata.reflect()
+ ToolDependency_table = Table( "tool_dependency", metadata, autoload=True )
+ # Change the tool_dependency table's version column from TrimmedString to Text.
+ if migrate_engine.name == 'postgres':
+ cmd = "ALTER TABLE tool_dependency ALTER COLUMN version TYPE Text;"
+ elif migrate_engine.name == 'mysql':
+ cmd = "ALTER TABLE tool_dependency MODIFY COLUMN version Text;"
+ else:
+ # We don't have to do anything for sqlite tables. From the sqlite documentation at http://sqlite.org/datatype3.html:
+ # 1.0 Storage Classes and Datatypes
+ # Each value stored in an SQLite database (or manipulated by the database engine) has one of the following storage classes:
+ # NULL. The value is a NULL value.
+ # INTEGER. The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.
+ # REAL. The value is a floating point value, stored as an 8-byte IEEE floating point number.
+ # TEXT. The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE).
+ # BLOB. The value is a blob of data, stored exactly as it was input.
+ cmd = None
+ if cmd:
+ try:
+ db_session.execute( cmd )
+ except Exception, e:
+ log.debug( "Altering tool_dependency.version column from TrimmedString(40) to Text failed: %s" % str( e ) )
+def downgrade():
+ # Not necessary to change column type Text to TrimmedString(40).
+ pass
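One way to confirm the migration took effect is to reflect the table and inspect the column type with SQLAlchemy. A small sketch, assuming a PostgreSQL database and a placeholder connection URL (the bind-on-construction style matches the SQLAlchemy version of this era):

from sqlalchemy import MetaData, create_engine

engine = create_engine( 'postgresql://galaxy@localhost/galaxy' )   # placeholder URL
metadata = MetaData( bind=engine )
metadata.reflect( only=[ 'tool_dependency' ] )
# Should report TEXT after the upgrade; on SQLite the declared type is unchanged, as noted above.
print( metadata.tables[ 'tool_dependency' ].columns[ 'version' ].type )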
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: dan: Allow OpenID to work when require_login is set to True.
by Bitbucket 12 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/9a3ab5009e8e/
changeset: 9a3ab5009e8e
user: dan
date: 2012-06-12 20:12:46
summary: Allow OpenID to work when require_login is set to True.
affected #: 1 file
diff -r 36c540d788c3842b37c4ab5e522afd612ea8bd80 -r 9a3ab5009e8e70596a07283e3611661410f92c4d lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py
+++ b/lib/galaxy/web/framework/__init__.py
@@ -435,6 +435,9 @@
url_for( controller='user', action='manage_user_info' ),
url_for( controller='user', action='set_default_permissions' ),
url_for( controller='user', action='reset_password' ),
+ url_for( controller='user', action='openid_auth' ),
+ url_for( controller='user', action='openid_process' ),
+ url_for( controller='user', action='openid_associate' ),
url_for( controller='library', action='browse' ),
url_for( controller='history', action='list' ),
url_for( controller='dataset', action='list' )
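The fix adds the three OpenID controller actions to the set of URLs that stay reachable when require_login sends everyone else to the login page; without them, an anonymous user could never complete the OpenID handshake. A simplified sketch of that kind of whitelist check (the function and paths below are illustrative, not Galaxy's actual internals):

def must_log_in( request_path, allowed_paths ):
    # Redirect to the login page unless the requested path is explicitly whitelisted.
    return request_path not in allowed_paths

allowed_paths = [ '/user/login', '/user/openid_auth', '/user/openid_process', '/user/openid_associate' ]
print( must_log_in( '/user/openid_auth', allowed_paths ) )   # False: the OpenID endpoints stay reachable
print( must_log_in( '/tool_runner', allowed_paths ) )        # True: other pages still require a login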
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Have Bowtie2 wrapper produce BAM rather than SAM output.
by Bitbucket 11 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/36c540d788c3/
changeset: 36c540d788c3
user: jgoecks
date: 2012-06-11 21:52:36
summary: Have Bowtie2 wrapper produce BAM rather than SAM output.
affected #: 2 files
diff -r 17302ca3be4e7b3d987733ea6e61982f9520996d -r 36c540d788c3842b37c4ab5e522afd612ea8bd80 tools/sr_mapping/bowtie2_wrapper.py
--- a/tools/sr_mapping/bowtie2_wrapper.py
+++ b/tools/sr_mapping/bowtie2_wrapper.py
@@ -65,7 +65,7 @@
index_path = options.index_path
# Build bowtie command.
- cmd = 'bowtie2 %s -x %s %s -S %s'
+ cmd = 'bowtie2 %s -x %s %s | samtools view -Sb - > %s'
# Set up reads.
if options.single_paired == 'paired':
diff -r 17302ca3be4e7b3d987733ea6e61982f9520996d -r 36c540d788c3842b37c4ab5e522afd612ea8bd80 tools/sr_mapping/bowtie2_wrapper.xml
--- a/tools/sr_mapping/bowtie2_wrapper.xml
+++ b/tools/sr_mapping/bowtie2_wrapper.xml
@@ -112,7 +112,7 @@
</inputs>
<outputs>
- <data format="sam" name="output" label="${tool.name} on ${on_string}: mapped reads">
+ <data format="bam" name="output" label="${tool.name} on ${on_string}: mapped reads">
<actions>
<conditional name="refGenomeSource.genomeSource">
<when value="indexed">
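With this change the wrapper streams bowtie2's SAM output straight through samtools, so only a BAM file is written. A stand-alone sketch of the same pipeline, assuming bowtie2 and samtools are on the PATH and using placeholder file names:

import subprocess

index = 'genome_index'        # placeholder bowtie2 index basename
reads = 'reads.fastq'         # placeholder single-end reads
output = 'mapped_reads.bam'   # placeholder BAM output

cmd = 'bowtie2 -x %s -U %s | samtools view -Sb - > %s' % ( index, reads, output )
# shell=True so the pipe and redirection are handled by the shell, as in the wrapper's command line.
subprocess.check_call( cmd, shell=True )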
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Put myExperiment URL in config's Beta features location.
by Bitbucket 11 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/17302ca3be4e/
changeset: 17302ca3be4e
user: jgoecks
date: 2012-06-11 21:22:51
summary: Put myExperiment URL in config's Beta features location.
affected #: 1 file
diff -r 0f32c2fc37cdc2959c86d2fdb0debe9a4c66282b -r 17302ca3be4e7b3d987733ea6e61982f9520996d universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample
+++ b/universe_wsgi.ini.sample
@@ -527,6 +527,9 @@
# be used for each "Set at Runtime" input, independent of others in the Workflow
#enable_unique_workflow_defaults = False
+# The URL to the myExperiment instance being used (omit scheme but include port)
+#myexperiment_url = www.myexperiment.org:80
+
# Enable Galaxy's "Upload via FTP" interface. You'll need to install and
# configure an FTP server (we've used ProFTPd since it can use Galaxy's
# database for authentication) and set the following two options.
@@ -681,9 +684,6 @@
#pbs_stage_path =
#pbs_dataset_server =
-# The URL to the myExperiment instance being used (omit scheme but include port)
-myexperiment_url = www.myexperiment.org:80
-
# This option allows users to see the full path of datasets via the "View
# Details" option in the history. Administrators can always see this.
#expose_dataset_path = False
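Since the option is now shipped commented out, code that reads it needs a fallback. A small sketch of reading such a setting with ConfigParser and defaulting when it is absent; Galaxy's own config machinery is not shown here, and the section name is assumed to be app:main as in the sample file:

import ConfigParser

parser = ConfigParser.SafeConfigParser()
parser.read( 'universe_wsgi.ini' )
if parser.has_option( 'app:main', 'myexperiment_url' ):
    myexperiment_url = parser.get( 'app:main', 'myexperiment_url' )
else:
    myexperiment_url = 'www.myexperiment.org:80'   # default used when the option is commented out
print( myexperiment_url )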
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/66395a9d870f/
changeset: 66395a9d870f
user: jmchilton
date: 2012-06-06 03:35:33
summary: First attempt at updated dynamic job runners.
affected #: 3 files
diff -r 1890cb0d1cfbb3ef5a09affcdd18d2b8acf7d811 -r 66395a9d870fbe48660dbf75cc0a59264bb862f6 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -21,6 +21,7 @@
from galaxy.util.expressions import ExpressionContext
from galaxy.jobs.actions.post import ActionBox
from galaxy.exceptions import ObjectInvalid
+from galaxy.jobs.mapper import JobRunnerMapper
log = logging.getLogger( __name__ )
@@ -80,6 +81,7 @@
self.tool_provided_job_metadata = None
# Wrapper holding the info required to restore and clean up from files used for setting metadata externally
self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job )
+ self.job_runner_mapper = JobRunnerMapper( self )
self.params = None
if job.params:
self.params = from_json_string( job.params )
@@ -88,7 +90,8 @@
self.__galaxy_system_pwent = None
def get_job_runner( self ):
- return self.tool.get_job_runner( self.params )
+ job_runner = self.job_runner_mapper.get_job_runner( self.params )
+ return job_runner
def get_job( self ):
return self.sa_session.query( model.Job ).get( self.job_id )
diff -r 1890cb0d1cfbb3ef5a09affcdd18d2b8acf7d811 -r 66395a9d870fbe48660dbf75cc0a59264bb862f6 lib/galaxy/jobs/mapper.py
--- /dev/null
+++ b/lib/galaxy/jobs/mapper.py
@@ -0,0 +1,82 @@
+import inspect, sys
+
+import galaxy.jobs.rules
+
+DYNAMIC_RUNNER_PREFIX = "dynamic:///"
+
+class JobRunnerMapper( object ):
+
+ def __init__( self, job_wrapper ):
+ self.job_wrapper = job_wrapper
+
+ def __invoke_expand_function( self, expand_function ):
+ function_arg_names = inspect.getargspec( expand_function ).args
+
+ possible_args = { "job_id" : self.job_wrapper.job_id,
+ "tool" : self.job_wrapper.tool,
+ "tool_id" : self.job_wrapper.tool.id,
+ "job_wrapper" : self.job_wrapper,
+ "app" : self.job_wrapper.app }
+
+ actual_args = {}
+
+ # Populate needed args
+ for possible_arg_name in possible_args:
+ if possible_arg_name in function_arg_names:
+ actual_args[ possible_arg_name ] = possible_args[ possible_arg_name ]
+
+ # Don't hit the DB to load the job object is not needed
+ if "job" in function_arg_names or "user" in function_arg_names or "user_email" in function_arg_names:
+ job = self.job_wrapper.get_job()
+ history = job.history
+ user = history and history.user
+ user_email = user and str(user.email)
+
+ if "job" in function_arg_names:
+ actual_args[ "job" ] = job
+
+ if "user" in function_arg_names:
+ actual_args[ "user" ] = user
+
+ if "user_email" in function_arg_names:
+ actual_args[ "user_email" ] = user_email
+
+ return expand_function( **actual_args )
+
+ def __determine_expand_function_name( self, option_parts ):
+ # default look for function with same name as tool, unless one specified
+ expand_function_name = self.job_wrapper.tool.id
+ if len( option_parts ) > 1:
+ expand_function_name = option_parts[ 1 ]
+ return expand_function_name
+
+ def __get_expand_function( self, expand_function_name ):
+ rules_module = sys.modules[ "galaxy.jobs.rules" ]
+ if hasattr( rules_module, expand_function_name ):
+ expand_function = getattr( rules_module, expand_function_name )
+ return expand_function
+ else:
+ raise Exception( "Dynamic job runner cannot find function to expand job runner type - %s" % expand_function_name )
+
+ def __expand_dynamic_job_runner( self, options_str ):
+ option_parts = options_str.split( '/' )
+ expand_type = option_parts[ 0 ]
+ if expand_type == "python":
+ expand_function_name = self.__determine_expand_function_name( option_parts )
+ expand_function = self.__get_expand_function( expand_function_name )
+ return self.__invoke_expand_function( expand_function )
+ else:
+ raise Exception( "Unhandled dynamic job runner type specified - %s" % expand_type )
+
+ def __cache_job_runner( self, params ):
+ raw_job_runner = self.job_wrapper.tool.get_job_runner( params )
+ if raw_job_runner.startswith( DYNAMIC_RUNNER_PREFIX ):
+ job_runner = self.__expand_dynamic_job_runner( raw_job_runner[ len( DYNAMIC_RUNNER_PREFIX ) : ] )
+ else:
+ job_runner = raw_job_runner
+ self.cached_job_runner = job_runner
+
+ def get_job_runner( self, params ):
+ if not hasattr( self, 'cached_job_runner' ):
+ self.__cache_job_runner( params )
+ return self.cached_job_runner
diff -r 1890cb0d1cfbb3ef5a09affcdd18d2b8acf7d811 -r 66395a9d870fbe48660dbf75cc0a59264bb862f6 lib/galaxy/jobs/rules.py
--- /dev/null
+++ b/lib/galaxy/jobs/rules.py
@@ -0,0 +1,9 @@
+import logging
+
+log = logging.getLogger( __name__ )
+
+# Add functions to dynamically map job descriptions to job runners in
+# this file. These functions can optionally take in any of the
+# following arguments - job_wrapper, app, user_email, job, tool,
+# email, tool_id, and job_id.
+
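With this commit a tool's runner URL can be set to something like dynamic:///python (optionally dynamic:///python/<function_name>), and JobRunnerMapper resolves it by calling a function from this rules module, passing only the arguments the function declares. A hypothetical rule of the kind this file is meant to hold; the runner strings returned below are examples, not required values:

def bowtie2( user_email, tool_id ):
    # By default the mapper looks for a function named after the tool id, so this
    # rule would apply to the bowtie2 tool. Route one (made-up) group of users to a
    # dedicated cluster queue and everyone else to the local runner.
    if user_email and user_email.endswith( '@bigdata.example.org' ):
        return 'pbs:///bigmem'
    return 'local:///'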
https://bitbucket.org/galaxy/galaxy-central/changeset/f54b848298d4/
changeset: f54b848298d4
user: jmchilton
date: 2012-06-10 06:00:16
summary: Rework dynamic job runner config so that instead of using a rules.py file
for storing rules, they should be placed in
lib/galaxy/jobs/rules/. The rule submodules are searched in
lexicographic order, allowing for hierarchical configuration overrides
(e.g. naming schemes like: 000_galaxy_rules.py, 100_site_rules.py,
200_instance_rules.py)
affected #: 3 files
diff -r 66395a9d870fbe48660dbf75cc0a59264bb862f6 -r f54b848298d4d78d6fbcc85e9887960f21b5c239 lib/galaxy/jobs/mapper.py
--- a/lib/galaxy/jobs/mapper.py
+++ b/lib/galaxy/jobs/mapper.py
@@ -1,13 +1,50 @@
-import inspect, sys
+import logging
+import inspect
+import os
+
+log = logging.getLogger( __name__ )
import galaxy.jobs.rules
DYNAMIC_RUNNER_PREFIX = "dynamic:///"
class JobRunnerMapper( object ):
-
+ """
+ This class is responsible for managing the mapping of jobs
+ (in the form of job_wrappers) to job runner strings.
+ """
+
def __init__( self, job_wrapper ):
self.job_wrapper = job_wrapper
+ self.rule_modules = self.__get_rule_modules( )
+
+ def __get_rule_modules( self ):
+ unsorted_module_names = self.__get_rule_module_names( )
+ ## Load modules in reverse order to allow hierarchical overrides
+ ## i.e. 000_galaxy_rules.py, 100_site_rules.py, 200_instance_rules.py
+ module_names = sorted( unsorted_module_names, reverse=True )
+ modules = []
+ for rule_module_name in module_names:
+ try:
+ module = __import__( rule_module_name )
+ for comp in rule_module_name.split( "." )[1:]:
+ module = getattr( module, comp )
+ modules.append( module )
+ except BaseException, exception:
+ exception_str = str( exception )
+ message = "%s rule module could not be loaded: %s" % ( rule_module_name, exception_str )
+ log.debug( message )
+ continue
+ return modules
+
+ def __get_rule_module_names( self ):
+ rules_dir = galaxy.jobs.rules.__path__[0]
+ names = []
+ for fname in os.listdir( rules_dir ):
+ if not( fname.startswith( "_" ) ) and fname.endswith( ".py" ):
+ rule_module_name = "galaxy.jobs.rules.%s" % fname[:-len(".py")]
+ names.append( rule_module_name )
+ return names
def __invoke_expand_function( self, expand_function ):
function_arg_names = inspect.getargspec( expand_function ).args
@@ -15,7 +52,7 @@
possible_args = { "job_id" : self.job_wrapper.job_id,
"tool" : self.job_wrapper.tool,
"tool_id" : self.job_wrapper.tool.id,
- "job_wrapper" : self.job_wrapper,
+ "job_wrapper" : self.job_wrapper,
"app" : self.job_wrapper.app }
actual_args = {}
@@ -25,7 +62,7 @@
if possible_arg_name in function_arg_names:
actual_args[ possible_arg_name ] = possible_args[ possible_arg_name ]
- # Don't hit the DB to load the job object is not needed
+ # Don't hit the DB to load the job object if not needed
if "job" in function_arg_names or "user" in function_arg_names or "user_email" in function_arg_names:
job = self.job_wrapper.get_job()
history = job.history
@@ -51,13 +88,21 @@
return expand_function_name
def __get_expand_function( self, expand_function_name ):
- rules_module = sys.modules[ "galaxy.jobs.rules" ]
- if hasattr( rules_module, expand_function_name ):
- expand_function = getattr( rules_module, expand_function_name )
+ matching_rule_module = self.__last_rule_module_with_function( expand_function_name )
+ if matching_rule_module:
+ expand_function = getattr( matching_rule_module, expand_function_name )
return expand_function
else:
raise Exception( "Dynamic job runner cannot find function to expand job runner type - %s" % expand_function_name )
-
+
+ def __last_rule_module_with_function( self, function_name ):
+ # self.rule_modules is sorted in reverse order, so find the first
+ # module with the function
+ for rule_module in self.rule_modules:
+ if hasattr( rule_module, function_name ):
+ return rule_module
+ return None
+
def __expand_dynamic_job_runner( self, options_str ):
option_parts = options_str.split( '/' )
expand_type = option_parts[ 0 ]
@@ -77,6 +122,9 @@
self.cached_job_runner = job_runner
def get_job_runner( self, params ):
+ """
+ Cache the job_runner string to avoid recalculation.
+ """
if not hasattr( self, 'cached_job_runner' ):
self.__cache_job_runner( params )
return self.cached_job_runner
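Because the module names are sorted in reverse and __last_rule_module_with_function returns the first module that defines the requested function, a higher-numbered rules file shadows a lower-numbered one. For example, with the naming scheme from the commit message (contents illustrative; the two defs below stand for two separate files):

# lib/galaxy/jobs/rules/000_galaxy_rules.py -- shipped default rule
def tophat2( tool_id ):
    return 'local:///'

# lib/galaxy/jobs/rules/200_instance_rules.py -- site override; 200_... sorts after
# 000_..., and the reverse-sorted search finds it first, so this definition wins.
def tophat2( tool_id ):
    return 'pbs:///long_jobs'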
diff -r 66395a9d870fbe48660dbf75cc0a59264bb862f6 -r f54b848298d4d78d6fbcc85e9887960f21b5c239 lib/galaxy/jobs/rules.py
--- a/lib/galaxy/jobs/rules.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import logging
-
-log = logging.getLogger( __name__ )
-
-# Add functions to dynamically map job descriptions to job runners in
-# this file. These functions can optionally take in any of the
-# following arguments - job_wrapper, app, user_email, job, tool,
-# email, tool_id, and job_id.
-
https://bitbucket.org/galaxy/galaxy-central/changeset/0f32c2fc37cd/
changeset: 0f32c2fc37cd
user: natefoo
date: 2012-06-11 16:26:12
summary: Merged in jmchilton/umn-galaxy-central (pull request #47)
affected #: 3 files
diff -r 1df26d9240bb4e115cdd8cd019673c56c8e4ad67 -r 0f32c2fc37cdc2959c86d2fdb0debe9a4c66282b lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -21,6 +21,7 @@
from galaxy.util.expressions import ExpressionContext
from galaxy.jobs.actions.post import ActionBox
from galaxy.exceptions import ObjectInvalid
+from galaxy.jobs.mapper import JobRunnerMapper
log = logging.getLogger( __name__ )
@@ -80,6 +81,7 @@
self.tool_provided_job_metadata = None
# Wrapper holding the info required to restore and clean up from files used for setting metadata externally
self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job )
+ self.job_runner_mapper = JobRunnerMapper( self )
self.params = None
if job.params:
self.params = from_json_string( job.params )
@@ -88,7 +90,8 @@
self.__galaxy_system_pwent = None
def get_job_runner( self ):
- return self.tool.get_job_runner( self.params )
+ job_runner = self.job_runner_mapper.get_job_runner( self.params )
+ return job_runner
def get_job( self ):
return self.sa_session.query( model.Job ).get( self.job_id )
diff -r 1df26d9240bb4e115cdd8cd019673c56c8e4ad67 -r 0f32c2fc37cdc2959c86d2fdb0debe9a4c66282b lib/galaxy/jobs/mapper.py
--- /dev/null
+++ b/lib/galaxy/jobs/mapper.py
@@ -0,0 +1,130 @@
+import logging
+import inspect
+import os
+
+log = logging.getLogger( __name__ )
+
+import galaxy.jobs.rules
+
+DYNAMIC_RUNNER_PREFIX = "dynamic:///"
+
+class JobRunnerMapper( object ):
+ """
+ This class is responsible for managing the mapping of jobs
+ (in the form of job_wrappers) to job runner strings.
+ """
+
+ def __init__( self, job_wrapper ):
+ self.job_wrapper = job_wrapper
+ self.rule_modules = self.__get_rule_modules( )
+
+ def __get_rule_modules( self ):
+ unsorted_module_names = self.__get_rule_module_names( )
+ ## Load modules in reverse order to allow hierarchical overrides
+ ## i.e. 000_galaxy_rules.py, 100_site_rules.py, 200_instance_rules.py
+ module_names = sorted( unsorted_module_names, reverse=True )
+ modules = []
+ for rule_module_name in module_names:
+ try:
+ module = __import__( rule_module_name )
+ for comp in rule_module_name.split( "." )[1:]:
+ module = getattr( module, comp )
+ modules.append( module )
+ except BaseException, exception:
+ exception_str = str( exception )
+ message = "%s rule module could not be loaded: %s" % ( rule_module_name, exception_str )
+ log.debug( message )
+ continue
+ return modules
+
+ def __get_rule_module_names( self ):
+ rules_dir = galaxy.jobs.rules.__path__[0]
+ names = []
+ for fname in os.listdir( rules_dir ):
+ if not( fname.startswith( "_" ) ) and fname.endswith( ".py" ):
+ rule_module_name = "galaxy.jobs.rules.%s" % fname[:-len(".py")]
+ names.append( rule_module_name )
+ return names
+
+ def __invoke_expand_function( self, expand_function ):
+ function_arg_names = inspect.getargspec( expand_function ).args
+
+ possible_args = { "job_id" : self.job_wrapper.job_id,
+ "tool" : self.job_wrapper.tool,
+ "tool_id" : self.job_wrapper.tool.id,
+ "job_wrapper" : self.job_wrapper,
+ "app" : self.job_wrapper.app }
+
+ actual_args = {}
+
+ # Populate needed args
+ for possible_arg_name in possible_args:
+ if possible_arg_name in function_arg_names:
+ actual_args[ possible_arg_name ] = possible_args[ possible_arg_name ]
+
+ # Don't hit the DB to load the job object if not needed
+ if "job" in function_arg_names or "user" in function_arg_names or "user_email" in function_arg_names:
+ job = self.job_wrapper.get_job()
+ history = job.history
+ user = history and history.user
+ user_email = user and str(user.email)
+
+ if "job" in function_arg_names:
+ actual_args[ "job" ] = job
+
+ if "user" in function_arg_names:
+ actual_args[ "user" ] = user
+
+ if "user_email" in function_arg_names:
+ actual_args[ "user_email" ] = user_email
+
+ return expand_function( **actual_args )
+
+ def __determine_expand_function_name( self, option_parts ):
+ # default look for function with same name as tool, unless one specified
+ expand_function_name = self.job_wrapper.tool.id
+ if len( option_parts ) > 1:
+ expand_function_name = option_parts[ 1 ]
+ return expand_function_name
+
+ def __get_expand_function( self, expand_function_name ):
+ matching_rule_module = self.__last_rule_module_with_function( expand_function_name )
+ if matching_rule_module:
+ expand_function = getattr( matching_rule_module, expand_function_name )
+ return expand_function
+ else:
+ raise Exception( "Dynamic job runner cannot find function to expand job runner type - %s" % expand_function_name )
+
+ def __last_rule_module_with_function( self, function_name ):
+ # self.rule_modules is sorted in reverse order, so find the first
+ # module with the function
+ for rule_module in self.rule_modules:
+ if hasattr( rule_module, function_name ):
+ return rule_module
+ return None
+
+ def __expand_dynamic_job_runner( self, options_str ):
+ option_parts = options_str.split( '/' )
+ expand_type = option_parts[ 0 ]
+ if expand_type == "python":
+ expand_function_name = self.__determine_expand_function_name( option_parts )
+ expand_function = self.__get_expand_function( expand_function_name )
+ return self.__invoke_expand_function( expand_function )
+ else:
+ raise Exception( "Unhandled dynamic job runner type specified - %s" % expand_type )
+
+ def __cache_job_runner( self, params ):
+ raw_job_runner = self.job_wrapper.tool.get_job_runner( params )
+ if raw_job_runner.startswith( DYNAMIC_RUNNER_PREFIX ):
+ job_runner = self.__expand_dynamic_job_runner( raw_job_runner[ len( DYNAMIC_RUNNER_PREFIX ) : ] )
+ else:
+ job_runner = raw_job_runner
+ self.cached_job_runner = job_runner
+
+ def get_job_runner( self, params ):
+ """
+ Cache the job_runner string to avoid recalculation.
+ """
+ if not hasattr( self, 'cached_job_runner' ):
+ self.__cache_job_runner( params )
+ return self.cached_job_runner
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Handle Tophat2 fusion-ignore-chromosome parameter correctly.
by Bitbucket 11 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/1df26d9240bb/
changeset: 1df26d9240bb
user: jgoecks
date: 2012-06-11 14:49:31
summary: Handle Tophat2 fusion-ignore-chromosome parameter correctly.
affected #: 2 files
diff -r 0edd1d65d746a806de155d3d37428ac84b4265ff -r 1df26d9240bb4e115cdd8cd019673c56c8e4ad67 tools/ngs_rna/tophat2_wrapper.py
--- a/tools/ngs_rna/tophat2_wrapper.py
+++ b/tools/ngs_rna/tophat2_wrapper.py
@@ -200,10 +200,12 @@
# Fusion search options.
if options.fusion_search:
- opts += ' --fusion-search --fusion-anchor-length %i --fusion-min-dist %i --fusion-read-mismatches %i --fusion-multireads %i --fusion-multipairs %i --fusion-ignore-chromosomes %s' % \
+ opts += ' --fusion-search --fusion-anchor-length %i --fusion-min-dist %i --fusion-read-mismatches %i --fusion-multireads %i --fusion-multipairs %i' % \
( int( options.fusion_anchor_length ), int( options.fusion_min_dist ),
int( options.fusion_read_mismatches ), int( options.fusion_multireads ),
- int( options.fusion_multipairs ), options.fusion_ignore_chromosomes )
+ int( options.fusion_multipairs ) )
+ if options.fusion_ignore_chromosomes:
+ opts += ' --fusion-ignore-chromosomes %s' % options.fusion_ignore_chromosomes
# Bowtie2 options.
if options.b2_very_fast:
diff -r 0edd1d65d746a806de155d3d37428ac84b4265ff -r 1df26d9240bb4e115cdd8cd019673c56c8e4ad67 tools/ngs_rna/tophat2_wrapper.xml
--- a/tools/ngs_rna/tophat2_wrapper.xml
+++ b/tools/ngs_rna/tophat2_wrapper.xml
@@ -103,7 +103,7 @@
--fusion-read-mismatches $params.fusion_search.read_mismatches
--fusion-multireads $params.fusion_search.multireads
--fusion-multipairs $params.fusion_search.multipairs
- --fusion-ignore-chromosomes $params.fusion_search.ignore_chromosomes
+ --fusion-ignore-chromosomes "$params.fusion_search.ignore_chromosomes"
#end if
#if $params.bowtie2_settings.b2_settings == "Yes":
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Backbone-ify server-state deferred and use when running tools in Trackster.
by Bitbucket 08 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/0edd1d65d746/
changeset: 0edd1d65d746
user: jgoecks
date: 2012-06-08 21:36:02
summary: Backbone-ify server-state deferred and use when running tools in Trackster.
affected #: 3 files
diff -r be2577bfdbac49309f962b0b9a48ab11bac6159b -r 0edd1d65d746a806de155d3d37428ac84b4265ff static/scripts/viz/paramamonster.js
--- a/static/scripts/viz/paramamonster.js
+++ b/static/scripts/viz/paramamonster.js
@@ -244,8 +244,17 @@
.attr("class", "node")
.attr("transform", function(d) { return "translate(" + d.y + "," + d.x + ")"; });
+ // Set up behavior when node is clicked.
node.on("click", function(d, i) {
- console.log(d, i);
+ console.log(d, i);
+
+ // Gather: (a) dataset of interest; (b) region(s) of interest and (c) sets of parameters based on node clicked.
+
+ // Run job by submitting parameters + dataset as job inputs; get dataset ids as result.
+
+ // Create tracks for all resulting dataset ids.
+
+ // Display tiles for region(s) of interest.
});
node.append("circle")
diff -r be2577bfdbac49309f962b0b9a48ab11bac6159b -r 0edd1d65d746a806de155d3d37428ac84b4265ff static/scripts/viz/trackster.js
--- a/static/scripts/viz/trackster.js
+++ b/static/scripts/viz/trackster.js
@@ -25,28 +25,6 @@
};
/**
- * Provides support for server-state based deferred. Server is repeatedly polled, and when
- * condition is met, deferred is resolved.
- */
-var server_state_deferred = function(url, url_params, interval, success_fn) {
- var deferred = $.Deferred(),
- go = function() {
- $.getJSON(url, url_params, function(result) {
- if (success_fn(result)) {
- // Result is good, so resolve.
- deferred.resolve(result);
- }
- else {
- // Result not good, try again.
- setTimeout(go, interval);
- }
- });
- };
- go();
- return deferred;
-};
-
-/**
* Find browser's requestAnimationFrame method or fallback on a setTimeout
*/
var requestAnimationFrame = (function(){
@@ -1950,7 +1928,10 @@
this.run(url_params, new_track,
// Success callback.
function(track_data) {
- new_track.dataset_id = track_data.dataset_id;
+ new_track.set_dataset(new Dataset({
+ id: track_data.dataset_id,
+ hda_ldda: track_data.hda_ldda
+ }));
new_track.tiles_div.text("Running job.");
new_track.init();
}
@@ -1960,36 +1941,36 @@
* Run tool using a set of URL params and a success callback.
*/
run: function(url_params, new_track, success_callback) {
- // Add tool params to URL params.
- $.extend(url_params, this.get_param_values_dict());
+ // Run tool.
+ var ss_deferred = new ServerStateDeferred({
+ url: rerun_tool_url,
+ url_params: $.extend(url_params, this.get_param_values_dict()),
+ interval: 2000,
+ success_fn: function(response) {
+ return response !== "pending";
+ }
+ });
- // Run tool.
- // TODO: rewrite to use server state deferred.
- var json_run_tool = function() {
- $.getJSON(rerun_tool_url, url_params, function(response) {
- if (response === "no converter") {
- // No converter available for input datasets, so cannot run tool.
- new_track.container_div.addClass("error");
- new_track.content_div.text(DATA_NOCONVERTER);
- }
- else if (response.error) {
- // General error.
- new_track.container_div.addClass("error");
- new_track.content_div.text(DATA_CANNOT_RUN_TOOL + response.message);
- }
- else if (response === "pending") {
- // Converting/indexing input datasets; show message and try again.
- new_track.container_div.addClass("pending");
- new_track.content_div.text("Converting input data so that it can be used quickly with tool.");
- setTimeout(json_run_tool, 2000);
- }
- else {
- // Job submitted and running.
- success_callback(response);
- }
- });
- };
- json_run_tool();
+ // Start with this status message.
+ //new_track.container_div.addClass("pending");
+ //new_track.content_div.text("Converting input data so that it can be used quickly with tool.");
+
+ $.when(ss_deferred.go()).then(function(response) {
+ if (response === "no converter") {
+ // No converter available for input datasets, so cannot run tool.
+ new_track.container_div.addClass("error");
+ new_track.content_div.text(DATA_NOCONVERTER);
+ }
+ else if (response.error) {
+ // General error.
+ new_track.container_div.addClass("error");
+ new_track.content_div.text(DATA_CANNOT_RUN_TOOL + response.message);
+ }
+ else {
+ // Job submitted and running.
+ success_callback(response);
+ }
+ });
}
});
@@ -3765,7 +3746,7 @@
init_for_tool_data: function() {
// Set up track to fetch initial data from raw data URL when the dataset--not the converted datasets--
// is ready.
- this.data_url = raw_data_url;
+ this.data_manager.set('data_url', raw_data_url);
this.data_query_wait = 1000;
this.dataset_check_url = dataset_state_url;
@@ -3786,15 +3767,16 @@
self.data_query_wait = DEFAULT_DATA_QUERY_WAIT;
// Reset data URL when dataset indexing has completed/when not pending.
- $.when(
+ var ss_deferred = new ServerStateDeferred({
+ url: self.dataset_state_url,
+ url_params: {dataset_id : self.dataset_id, hda_ldda: self.hda_ldda},
+ interval: self.data_query_wait,
// Set up deferred to check dataset state until it is not pending.
- server_state_deferred(self.dataset_state_url,
- {dataset_id : self.dataset_id, hda_ldda: self.hda_ldda},
- self.data_query_wait,
- function(result) { return result !== "pending" })
- ).then(function() {
+ success_fn: function(result) { return result !== "pending" }
+ });
+ $.when(ss_deferred.go()).then(function() {
// Dataset is indexed, so use default data URL.
- self.data_url = default_data_url;
+ self.data_manager.set('data_url', default_data_url);
});
// Reset post-draw actions function.
@@ -4371,6 +4353,12 @@
this.set_painter_from_config();
};
extend(FeatureTrack.prototype, Drawable.prototype, TiledTrack.prototype, {
+ set_dataset: function(dataset) {
+ this.dataset_id = dataset.get('id');
+ this.hda_ldda = dataset.get('hda_ldda');
+ this.data_manager.set('dataset', dataset);
+ },
+
set_painter_from_config: function() {
if ( this.config.values['connector_style'] === 'arcs' ) {
this.painter = painters.ArcLinkedFeaturePainter;
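[Note: a minimal usage sketch of the new set_dataset helper; the feature_track instance and the id values below are hypothetical and not part of this commit.]
// Attach a rerun result to an existing feature track via the new helper:
var dataset = new Dataset({ id: "hypothetical_dataset_id", hda_ldda: "hda" });
feature_track.set_dataset(dataset);  // updates dataset_id, hda_ldda, and the track's data_manager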
diff -r be2577bfdbac49309f962b0b9a48ab11bac6159b -r 0edd1d65d746a806de155d3d37428ac84b4265ff static/scripts/viz/visualization.js
--- a/static/scripts/viz/visualization.js
+++ b/static/scripts/viz/visualization.js
@@ -9,6 +9,43 @@
// --------- Models ---------
/**
+ * Implementation of a server-state based deferred. Server is repeatedly polled, and when
+ * condition is met, deferred is resolved.
+ */
+var ServerStateDeferred = Backbone.Model.extend({
+ defaults: {
+ url: null,
+ url_params: {},
+ interval: 1000,
+ success_fn: function(result) { return true; }
+ },
+
+ /**
+ * Returns a deferred that resolves when success function returns true.
+ */
+ go: function() {
+ var deferred = $.Deferred(),
+ self = this,
+ success_fn = self.get('success_fn'),
+ interval = self.get('interval'),
+ _go = function() {
+ $.getJSON(self.get('url'), self.get('url_params'), function(result) {
+ if (success_fn(result)) {
+ // Result is good, so resolve.
+ deferred.resolve(result);
+ }
+ else {
+ // Result not good, try again.
+ setTimeout(_go, interval);
+ }
+ });
+ };
+ _go();
+ return deferred;
+ }
+});
+
+/**
* Generic cache that handles key/value pairs.
*/
var Cache = Backbone.Model.extend({
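[Note: a minimal usage sketch of the ServerStateDeferred model added above; the polling endpoint and parameters below are hypothetical and not part of this commit. The pattern is: poll until the server stops reporting "pending", then act on the final response.]
var state_deferred = new ServerStateDeferred({
    url: "/api/datasets/state",                  // hypothetical polling endpoint
    url_params: { dataset_id: "abc123" },        // hypothetical dataset id
    interval: 2000,
    success_fn: function(result) { return result !== "pending"; }
});
$.when(state_deferred.go()).then(function(result) {
    // Condition met; result is the last server response.
    console.log("dataset ready:", result);
});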
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Add min, max insert size parameters to bowtie2 wrapper.
by Bitbucket 08 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/be2577bfdbac/
changeset: be2577bfdbac
user: jgoecks
date: 2012-06-08 20:03:41
summary: Add min, max insert size parameters to bowtie2 wrapper.
affected #: 2 files
diff -r 6f15c9e850ab60fa027f0eccd445aad69c45eef9 -r be2577bfdbac49309f962b0b9a48ab11bac6159b tools/sr_mapping/bowtie2_wrapper.py
--- a/tools/sr_mapping/bowtie2_wrapper.py
+++ b/tools/sr_mapping/bowtie2_wrapper.py
@@ -18,6 +18,8 @@
parser.add_option( '-1', '--input1', dest='input1', help='The (forward or single-end) reads file in Sanger FASTQ format' )
parser.add_option( '-2', '--input2', dest='input2', help='The reverse reads file in Sanger FASTQ format' )
parser.add_option( '', '--single-paired', dest='single_paired', help='' )
+ parser.add_option( '-I', '--minins', dest='min_insert' )
+ parser.add_option( '-X', '--maxins', dest='max_insert' )
parser.add_option( '', '--settings', dest='settings', help='' )
parser.add_option( '', '--end-to-end', dest='end_to_end', action="store_true" )
parser.add_option( '', '--local', dest='local', action="store_true" )
@@ -73,6 +75,11 @@
# Set up options.
opts = '-p %s' % ( options.num_threads )
+ if options.single_paired == 'paired':
+ if options.min_insert:
+ opts += ' -I %s' % options.min_insert
+ if options.max_insert:
+ opts += ' -X %s' % options.max_insert
if options.settings == 'preSet':
pass
else:
diff -r 6f15c9e850ab60fa027f0eccd445aad69c45eef9 -r be2577bfdbac49309f962b0b9a48ab11bac6159b tools/sr_mapping/bowtie2_wrapper.xml
--- a/tools/sr_mapping/bowtie2_wrapper.xml
+++ b/tools/sr_mapping/bowtie2_wrapper.xml
@@ -30,6 +30,8 @@
## Second input only if input is paired-end.
#if $singlePaired.sPaired == "paired"
--input2=$singlePaired.input2
+ -I $singlePaired.minInsert
+ -X $singlePaired.maxInsert
#end if
## Set params.
@@ -56,6 +58,8 @@
<param format="fastqsanger" name="input1" type="data" label="FASTQ file" help="Nucleotide-space: Must have Sanger-scaled quality values with ASCII offset 33" /><param format="fastqsanger" name="input2" type="data" label="FASTQ file" help="Nucleotide-space: Must have Sanger-scaled quality values with ASCII offset 33" /><!-- TODO: paired-end specific parameters. -->
+ <param name="minInsert" type="integer" value="0" label="Minimum insert size for valid paired-end alignments" />
+ <param name="maxInsert" type="integer" value="250" label="Maximum insert size for valid paired-end alignments" /></when></conditional><conditional name="refGenomeSource">
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.