3 new commits in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/commits/2588b7b42ecd/
Changeset:   2588b7b42ecd
User:        jmchilton
Date:        2014-01-13 15:58:22
Summary:     Non-exhaustive tools related PEP-8 fixes.

Fixes for api/tools.py, tools/__init__.py, and
tools/parameters/{__init__.py,basic.py}.

Affected #:  4 files

diff -r e40e2965ac65f3a52778838e2a6879aadaf18add -r 2588b7b42ecdd9998c878f70420c75543bd5219d lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -20,7 +20,7 @@
 from math import isinf
 from galaxy import eggs
-eggs.require( "MarkupSafe" ) #MarkupSafe must load before mako
+eggs.require( "MarkupSafe" )  # MarkupSafe must load before mako
 eggs.require( "Mako" )
 eggs.require( "elementtree" )
 eggs.require( "Paste" )
@@ -69,11 +69,13 @@
 
 log = logging.getLogger( __name__ )
 
-WORKFLOW_PARAMETER_REGULAR_EXPRESSION = re.compile( '''\$\{.+?\}''' )
+WORKFLOW_PARAMETER_REGULAR_EXPRESSION = re.compile( '''\$\{.+?\}''' )
+
 
 class ToolNotFoundException( Exception ):
     pass
 
+
 def to_dict_helper( obj, kwargs ):
     """ Helper function that provides the appropriate kwargs to to_dict an object. """
@@ -83,6 +85,7 @@
     return obj.to_dict( **kwargs )
 
 
+
 class ToolBox( object, Dictifiable ):
     """Container for a collection of tools"""
@@ -421,6 +424,7 @@
         return self.app.install_model.context.query( self.app.install_model.ToolVersion ) \
                                              .filter( self.app.install_model.ToolVersion.table.c.tool_id == tool_id ) \
                                              .first()
+
     def __get_tool_shed_repository( self, tool_shed, name, owner, installed_changeset_revision ):
         return self.app.install_model.context.query( self.app.install_model.ToolShedRepository ) \
                                              .filter( and_( self.app.install_model.ToolShedRepository.table.c.tool_shed == tool_shed,
@@ -730,8 +734,8 @@
         # Produce panel.
         rval = []
         kwargs = dict(
-            trans = trans,
-            link_details = True
+            trans=trans,
+            link_details=True
         )
         for elt in panel_elts:
             rval.append( to_dict_helper( elt, kwargs ) )
@@ -798,7 +802,6 @@
         return None
 
 
-
 class ToolSection( object, Dictifiable ):
     """
     A group of tools with similar type/purpose that will be displayed as a
@@ -828,8 +831,8 @@
         section_dict = super( ToolSection, self ).to_dict()
         section_elts = []
         kwargs = dict(
-            trans = trans,
-            link_details = link_details
+            trans=trans,
+            link_details=link_details
         )
         for elt in self.elems.values():
             section_elts.append( to_dict_helper( elt, kwargs ) )
@@ -837,6 +840,7 @@
 
         return section_dict
 
+
 class ToolSectionLabel( object, Dictifiable ):
     """
     A label for a set of tools that can be displayed above groups of tools
@@ -850,6 +854,7 @@
         self.id = elem.get( "id" )
         self.version = elem.get( "version" ) or ''
 
+
 class DefaultToolState( object ):
     """
     Keeps track of the state of a users interaction with a tool between
@@ -860,6 +865,7 @@
         self.page = 0
         self.rerun_remap_job_id = None
         self.inputs = None
+
     def encode( self, tool, app, secure=True ):
         """
         Convert the data to a string
@@ -877,6 +883,7 @@
             return "%s:%s" % ( a, b )
         else:
             return value
+
     def decode( self, value, tool, app, secure=True ):
         """
         Restore the state from a string
@@ -896,6 +903,7 @@
         self.rerun_remap_job_id = None
         self.inputs = params_from_strings( tool.inputs, values, app, ignore_errors=True )
 
+
 class ToolOutput( object, Dictifiable ):
     """
     Represents an output datasets produced by a tool. For backward
@@ -907,7 +915,7 @@
     dict_collection_visible_keys = ( 'name', 'format', 'label', 'hidden' )
 
     def __init__( self, name, format=None, format_source=None, metadata_source=None,
-                  parent=None, label=None, filters = None, actions = None, hidden=False ):
+                  parent=None, label=None, filters=None, actions=None, hidden=False ):
         self.name = name
         self.format = format
         self.format_source = format_source
@@ -993,16 +1001,19 @@
         # Parse XML element containing configuration
         self.parse( root, guid=guid )
         self.external_runJob_script = app.config.drmaa_external_runjob_script
+
     @property
     def sa_session( self ):
         """Returns a SQLAlchemy session"""
         return self.app.model.context
+
     @property
     def tool_version( self ):
         """Return a ToolVersion if one exists for our id"""
         return self.app.install_model.context.query( self.app.install_model.ToolVersion ) \
                                              .filter( self.app.install_model.ToolVersion.table.c.tool_id == self.id ) \
                                              .first()
+
     @property
     def tool_versions( self ):
         # If we have versions, return them.
@@ -1010,6 +1021,7 @@
         if tool_version:
             return tool_version.get_versions( self.app )
         return []
+
     @property
     def tool_version_ids( self ):
         # If we have versions, return a list of their tool_ids.
@@ -1017,6 +1029,7 @@
         if tool_version:
             return tool_version.get_version_ids( self.app )
         return []
+
     @property
     def tool_shed_repository( self ):
         # If this tool is included in an installed tool shed repository, return it.
@@ -1075,7 +1088,7 @@
         :returns: galaxy.jobs.JobDestination -- The destination definition and runner parameters.
         """
         return self.app.job_config.get_destination(self.__get_job_tool_configuration(job_params=job_params).destination)
-    
+
     def get_panel_section( self ):
         for key, item in self.app.toolbox.integrated_tool_panel.items():
             if item:
@@ -1099,7 +1112,7 @@
         # Get the (user visible) name of the tool
         self.name = root.get( "name" )
         if not self.name:
-            raise Exception, "Missing tool 'name'"
+            raise Exception( "Missing tool 'name'" )
         # Get the UNIQUE id for the tool
         self.old_id = root.get( "id" )
         if guid is None:
@@ -1107,7 +1120,7 @@
         else:
             self.id = guid
         if not self.id:
-            raise Exception, "Missing tool 'id'"
+            raise Exception( "Missing tool 'id'" )
         self.version = root.get( "version" )
         if not self.version:
             # For backward compatibility, some tools may not have versions yet.
@@ -1127,7 +1140,7 @@
         # Command line (template). Optional for tools that do not invoke a local program
         command = root.find("command")
         if command is not None and command.text is not None:
-            self.command = command.text.lstrip() # get rid of leading whitespace
+            self.command = command.text.lstrip()  # get rid of leading whitespace
             # Must pre-pend this AFTER processing the cheetah command template
             self.interpreter = command.get( "interpreter", None )
         else:
@@ -1169,14 +1182,15 @@
             self_ids = [ self.id.lower() ]
             if self.old_id != self.id:
                 # Handle toolshed guids
-                self_ids = [ self.id.lower(), self.id.lower().rsplit('/',1)[0], self.old_id.lower() ]
+                self_ids = [ self.id.lower(), self.id.lower().rsplit('/', 1)[0], self.old_id.lower() ]
             self.all_ids = self_ids
             # In the toolshed context, there is no job config.
             if 'job_config' in dir(self.app):
                 self.job_tool_configurations = self.app.job_config.get_job_tool_configurations(self_ids)
         # Is this a 'hidden' tool (hidden in tool menu)
         self.hidden = xml_text(root, "hidden")
-        if self.hidden: self.hidden = string_as_bool(self.hidden)
+        if self.hidden:
+            self.hidden = string_as_bool(self.hidden)
         # Load any tool specific code (optional) Edit: INS 5/29/2007,
         # allow code files to have access to the individual tool's
         # "module" if it has one. Allows us to reuse code files, etc.
@@ -1186,7 +1200,7 @@
             for hook_elem in code_elem.findall("hook"):
                 for key, value in hook_elem.items():
                     # map hook to function
-                    self.hook_map[key]=value
+                    self.hook_map[key] = value
             file_name = code_elem.get("file")
             code_path = os.path.join( self.tool_dir, file_name )
             execfile( code_path, self.code_namespace )
@@ -1308,7 +1322,7 @@
         elif len( enctypes ) == 1:
             self.enctype = enctypes.pop()
         else:
-            raise Exception, "Conflicting required enctypes: %s" % str( enctypes )
+            raise Exception( "Conflicting required enctypes: %s" % str( enctypes ) )
         # Check if the tool either has no parameters or only hidden (and
         # thus hardcoded) FIXME: hidden parameters aren't
         # parameters at all really, and should be passed in a different
@@ -1318,6 +1332,7 @@
             if not isinstance( param, ( HiddenToolParameter, BaseURLToolParameter ) ):
                 self.input_required = True
                 break
+
     def parse_help( self, root ):
         """
         Parse the help text for the tool. Formatted in reStructuredText, but
@@ -1365,6 +1380,7 @@
             # Pad out help pages to match npages ... could this be done better?
             while len( self.help_by_page ) < self.npages:
                 self.help_by_page.append( self.help )
+
     def parse_outputs( self, root ):
         """
         Parse <outputs> elements and fill in self.outputs (keyed by name)
@@ -1601,6 +1617,7 @@
         else:
             display = None
         return display, inputs
+
     def parse_input_elem( self, parent_elem, enctypes, context=None ):
         """
         Parse a parent element whose children are inputs -- these could be
@@ -1653,7 +1670,7 @@
                 input_elem = elem.find( "param" )
                 assert input_elem is not None, "<conditional> must have a child <param>"
                 group.test_param = self.parse_param_elem( input_elem, enctypes, context )
-                possible_cases = list( group.test_param.legal_values ) #store possible cases, undefined whens will have no inputs
+                possible_cases = list( group.test_param.legal_values )  # store possible cases, undefined whens will have no inputs
                 # Must refresh when test_param changes
                 group.test_param.refresh_on_change = True
                 # And a set of possible cases
@@ -1694,6 +1711,7 @@
                     param.ref_input = context[ param.data_ref ]
                 self.input_params.append( param )
         return rval
+
     def parse_param_elem( self, input_elem, enctypes, context ):
         """
         Parse a single "<param>" element and return a ToolParameter instance.
@@ -1777,12 +1795,14 @@
                 raise Exception( "'get_param_html_map' only supported for simple paramters" )
             rval[key] = param.get_html( trans, other_values=other_values )
         return rval
+
     def get_param( self, key ):
         """
         Returns the parameter named `key` or None if there is no such
         parameter.
         """
         return self.inputs.get( key, None )
+
     def get_hook(self, name):
         """
         Returns an object from the code file referenced by `code_namespace`
@@ -1795,6 +1815,7 @@
             elif name in self.code_namespace:
                 return self.code_namespace[name]
         return None
+
     def visit_inputs( self, value, callback ):
         """
         Call the function `callback` on each parameter of this tool. Visits
@@ -1811,6 +1832,7 @@
                 callback( "", input, value[input.name] )
             else:
                 input.visit_inputs( "", value[input.name], callback )
+
     def handle_input( self, trans, incoming, history=None, old_errors=None, process_state='update', source='html' ):
         """
         Process incoming parameters for this tool from the dict `incoming`,
@@ -1955,6 +1977,7 @@
                 [ self.find_fieldstorage( y ) for y in x.values() ]
         elif type( x ) is types.ListType:
             [ self.find_fieldstorage( y ) for y in x ]
+
     def handle_interrupted( self, trans, inputs ):
         """
         Upon handling inputs, if it appears that we have received an incomplete
@@ -2320,6 +2343,7 @@
                     errors[ input.name ] = error
                 state[ input.name ] = value
         return errors
+
     @property
     def params_with_missing_data_table_entry( self ):
         """
@@ -2333,6 +2357,7 @@
                 if options and options.missing_tool_data_table_name and input_param not in params:
                     params.append( input_param )
         return params
+
     @property
     def params_with_missing_index_file( self ):
         """
@@ -2346,6 +2371,7 @@
                 if options and options.missing_index_file and input_param not in params:
                     params.append( input_param )
         return params
+
     def get_static_param_values( self, trans ):
         """
         Returns a map of parameter names and values if the tool does not
@@ -2361,6 +2387,7 @@
             else:
                 raise Exception( "Unexpected parameter type" )
         return args
+
     def execute( self, trans, incoming={}, set_output_hid=True, history=None, **kwargs ):
         """
         Execute the tool using parameter values in `incoming`. This just
@@ -2369,10 +2396,13 @@
         when run will build the tool's outputs, e.g. `DefaultToolAction`.
         """
         return self.tool_action.execute( self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs )
+
     def params_to_strings( self, params, app ):
         return params_to_strings( self.inputs, params, app )
+
     def params_from_strings( self, params, app, ignore_errors=False ):
         return params_from_strings( self.inputs, params, app, ignore_errors )
+
     def check_and_update_param_values( self, values, trans, update_values=True, allow_workflow_parameters=False ):
         """
         Check that all parameters have values, and fill in with default
@@ -2382,6 +2412,7 @@
         messages = {}
         self.check_and_update_param_values_helper( self.inputs, values, trans, messages, update_values=update_values, allow_workflow_parameters=allow_workflow_parameters )
         return messages
+
     def check_and_update_param_values_helper( self, inputs, values, trans, messages, context=None, prefix="", update_values=True, allow_workflow_parameters=False ):
         """
         Recursive helper for `check_and_update_param_values_helper`
@@ -2438,6 +2469,7 @@
                     messages[ input.name ] = "Value no longer valid for '%s%s', replaced with default" % ( prefix, input.label )
                     if update_values:
                         values[ input.name ] = input.get_initial_value( trans, context )
+
     def handle_unvalidated_param_values( self, input_values, app ):
         """
         Find any instances of `UnvalidatedValue` within input_values and
@@ -2448,6 +2480,7 @@
         if not self.check_values:
             return
         self.handle_unvalidated_param_values_helper( self.inputs, input_values, app )
+
     def handle_unvalidated_param_values_helper( self, inputs, input_values, app, context=None, prefix="" ):
         """
         Recursive helper for `handle_unvalidated_param_values`
@@ -2488,6 +2521,7 @@
                         % ( prefix, input.label, e )
                     raise LateValidationError( message )
                 input_values[ input.name ] = value
+
     def handle_job_failure_exception( self, e ):
         """
         Called by job.fail when an exception is generated to allow generation
@@ -2499,6 +2533,7 @@
         if isinstance( e, LateValidationError ):
             message = e.message
         return message
+
     def build_param_dict( self, incoming, input_datasets, output_datasets, output_paths, job_working_directory ):
         """
         Build the dictionary of parameters for substituting into the command
@@ -2754,6 +2789,7 @@
         # Remove newlines
         redirect_url_params = redirect_url_params.replace( "\n", " " ).replace( "\r", " " )
         return redirect_url_params
+
     def parse_redirect_url( self, data, param_dict ):
         """
         Parse the REDIRECT_URL tool param. Tools that send data to an external
@@ -2793,6 +2829,7 @@
             USERNAME = 'Anonymous'
         redirect_url += "&USERNAME=%s" % USERNAME
         return redirect_url
+
     def call_hook( self, hook_name, *args, **kwargs ):
         """
         Call the custom code hook function identified by 'hook_name' if any,
@@ -2808,15 +2845,19 @@
                 original_message = e.args[0]
                 e.args = ( "Error in '%s' hook '%s', original message: %s" % ( self.name, hook_name, original_message ), )
             raise
+
     def exec_before_job( self, app, inp_data, out_data, param_dict={} ):
         pass
-    def exec_after_process( self, app, inp_data, out_data, param_dict, job = None ):
+
+    def exec_after_process( self, app, inp_data, out_data, param_dict, job=None ):
         pass
-    def job_failed( self, job_wrapper, message, exception = False ):
+
+    def job_failed( self, job_wrapper, message, exception=False ):
         """
         Called when a job has failed
         """
         pass
+
     def collect_associated_files( self, output, job_working_directory ):
         """
         Find extra files in the job working directory and move them into
@@ -2835,10 +2876,11 @@
                     for f in files:
                         self.app.object_store.update_from_file(hda.dataset,
                             extra_dir=extra_dir,
-                            alt_name = f,
-                            file_name = os.path.join(root, f),
-                            create = True,
-                            preserve_symlinks = True )
+                            alt_name=f,
+                            file_name=os.path.join(root, f),
+                            create=True,
+                            preserve_symlinks=True
+                        )
                 # Clean up after being handled by object store.
                 # FIXME: If the object (e.g., S3) becomes async, this will
                 # cause issues so add it to the object store functionality?
@@ -2848,6 +2890,7 @@
             except Exception, e:
                 log.debug( "Error in collect_associated_files: %s" % ( e ) )
                 continue
+
     def collect_child_datasets( self, output, job_working_directory ):
         """
         Look for child dataset files, create HDA and attach to parent.
@@ -2858,9 +2901,9 @@
         for name, outdata in output.items():
             filenames = []
             if 'new_file_path' in self.app.config.collect_outputs_from:
-                filenames.extend( glob.glob(os.path.join(self.app.config.new_file_path,"child_%i_*" % outdata.id) ) )
+                filenames.extend( glob.glob(os.path.join(self.app.config.new_file_path, "child_%i_*" % outdata.id) ) )
             if 'job_working_directory' in self.app.config.collect_outputs_from:
-                filenames.extend( glob.glob(os.path.join(job_working_directory,"child_%i_*" % outdata.id) ) )
+                filenames.extend( glob.glob(os.path.join(job_working_directory, "child_%i_*" % outdata.id) ) )
             for filename in filenames:
                 if not name in children:
                     children[name] = {}
@@ -2909,12 +2952,14 @@
                 # Need to update all associated output hdas, i.e. history was
                 # shared with job running
                 for dataset in outdata.dataset.history_associations:
-                    if outdata == dataset: continue
+                    if outdata == dataset:
+                        continue
                     # Create new child dataset
-                    child_data = child_dataset.copy( parent_id = dataset.id )
+                    child_data = child_dataset.copy( parent_id=dataset.id )
                     self.sa_session.add( child_data )
                     self.sa_session.flush()
         return children
+
     def collect_primary_datasets( self, output, job_working_directory ):
         """
         Find any additional datasets generated by a tool and attach (for
@@ -2937,9 +2982,9 @@
         for name, outdata in output.items():
             filenames = []
             if 'new_file_path' in self.app.config.collect_outputs_from:
-                filenames.extend( glob.glob(os.path.join(self.app.config.new_file_path,"primary_%i_*" % outdata.id) ) )
+                filenames.extend( glob.glob(os.path.join(self.app.config.new_file_path, "primary_%i_*" % outdata.id) ) )
             if 'job_working_directory' in self.app.config.collect_outputs_from:
-                filenames.extend( glob.glob(os.path.join(job_working_directory,"primary_%i_*" % outdata.id) ) )
+                filenames.extend( glob.glob(os.path.join(job_working_directory, "primary_%i_*" % outdata.id) ) )
             for filename in filenames:
                 if not name in primary_datasets:
                     primary_datasets[name] = {}
@@ -2948,8 +2993,10 @@
                 parent_id = int(fields.pop(0))
                 designation = fields.pop(0)
                 visible = fields.pop(0).lower()
-                if visible == "visible": visible = True
-                else: visible = False
+                if visible == "visible":
+                    visible = True
+                else:
+                    visible = False
                 ext = fields.pop(0).lower()
                 dbkey = outdata.dbkey
                 if fields:
@@ -2999,7 +3046,8 @@
                 # Need to update all associated output hdas, i.e. history was
                 # shared with job running
                 for dataset in outdata.dataset.history_associations:
-                    if outdata == dataset: continue
+                    if outdata == dataset:
+                        continue
                     new_data = primary_data.copy()
                     dataset.history.add( new_data )
                     self.sa_session.add( new_data )
@@ -3029,7 +3077,7 @@
         if io_details:
             tool_dict[ 'inputs' ] = [ input.to_dict( trans ) for input in self.inputs.values() ]
             tool_dict[ 'outputs' ] = [ output.to_dict() for output in self.outputs.values() ]
-        
+
         tool_dict[ 'panel_section_id' ], tool_dict[ 'panel_section_name' ] = self.get_panel_section()
 
         return tool_dict
@@ -3044,6 +3092,7 @@
     JSONified within the contents of an output dataset
     """
     tool_type = 'output_parameter_json'
+
     def _prepare_json_list( self, param_list ):
         rval = []
         for value in param_list:
@@ -3054,6 +3103,7 @@
             else:
                 rval.append( str( value ) )
         return rval
+
     def _prepare_json_param_dict( self, param_dict ):
         rval = {}
         for key, value in param_dict.iteritems():
@@ -3064,11 +3114,12 @@
             else:
                 rval[ key ] = str( value )
         return rval
+
     def exec_before_job( self, app, inp_data, out_data, param_dict=None ):
         if param_dict is None:
             param_dict = {}
         json_params = {}
-        json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict ) #it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
+        json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict )  # it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
         json_params[ 'output_data' ] = []
         json_params[ 'job_config' ] = dict( GALAXY_DATATYPES_CONF_FILE=param_dict.get( 'GALAXY_DATATYPES_CONF_FILE' ), GALAXY_ROOT_DIR=param_dict.get( 'GALAXY_ROOT_DIR' ), TOOL_PROVIDED_JOB_METADATA_FILE=jobs.TOOL_PROVIDED_JOB_METADATA_FILE )
         json_filename = None
@@ -3078,12 +3129,12 @@
             #allow multiple files to be created
             file_name = str( wrapped_data )
             extra_files_path = str( wrapped_data.files_path )
-            data_dict = dict( out_data_name = out_name,
-                              ext = data.ext,
-                              dataset_id = data.dataset.id,
-                              hda_id = data.id,
-                              file_name = file_name,
-                              extra_files_path = extra_files_path )
+            data_dict = dict( out_data_name=out_name,
+                              ext=data.ext,
+                              dataset_id=data.dataset.id,
+                              hda_id=data.id,
+                              file_name=file_name,
+                              extra_files_path=extra_files_path )
             json_params[ 'output_data' ].append( data_dict )
             if json_filename is None:
                 json_filename = file_name
@@ -3091,6 +3142,7 @@
         out.write( json.dumps( json_params ) )
         out.close()
 
+
 class DataSourceTool( OutputParameterJSONTool ):
     """
     Alternate implementation of Tool for data_source tools -- those that
@@ -3101,11 +3153,13 @@
 
     def _build_GALAXY_URL_parameter( self ):
         return ToolParameter.build( self, ElementTree.XML( '<param name="GALAXY_URL" type="baseurl" value="/tool_runner?tool_id=%s" />' % self.id ) )
+
     def parse_inputs( self, root ):
         super( DataSourceTool, self ).parse_inputs( root )
         if 'GALAXY_URL' not in self.inputs:
             self.inputs[ 'GALAXY_URL' ] = self._build_GALAXY_URL_parameter()
             self.inputs_by_page[0][ 'GALAXY_URL' ] = self.inputs[ 'GALAXY_URL' ]
+
     def exec_before_job( self, app, inp_data, out_data, param_dict=None ):
         if param_dict is None:
             param_dict = {}
@@ -3115,7 +3169,7 @@
         name = param_dict.get( 'name' )
 
         json_params = {}
-        json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict ) #it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
+        json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict )  # it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
         json_params[ 'output_data' ] = []
         json_params[ 'job_config' ] = dict( GALAXY_DATATYPES_CONF_FILE=param_dict.get( 'GALAXY_DATATYPES_CONF_FILE' ), GALAXY_ROOT_DIR=param_dict.get( 'GALAXY_ROOT_DIR' ), TOOL_PROVIDED_JOB_METADATA_FILE=jobs.TOOL_PROVIDED_JOB_METADATA_FILE )
         json_filename = None
@@ -3138,12 +3192,12 @@
             data.extension = cur_data_type
             file_name = str( wrapped_data )
             extra_files_path = str( wrapped_data.files_path )
-            data_dict = dict( out_data_name = out_name,
-                              ext = data.ext,
-                              dataset_id = data.dataset.id,
-                              hda_id = data.id,
-                              file_name = file_name,
-                              extra_files_path = extra_files_path )
+            data_dict = dict( out_data_name=out_name,
+                              ext=data.ext,
+                              dataset_id=data.dataset.id,
+                              hda_id=data.id,
+                              file_name=file_name,
+                              extra_files_path=extra_files_path )
             json_params[ 'output_data' ].append( data_dict )
             if json_filename is None:
                 json_filename = file_name
@@ -3151,15 +3205,18 @@
         out.write( json.dumps( json_params ) )
         out.close()
 
+
 class AsyncDataSourceTool( DataSourceTool ):
     tool_type = 'data_source_async'
 
     def _build_GALAXY_URL_parameter( self ):
         return ToolParameter.build( self, ElementTree.XML( '<param name="GALAXY_URL" type="baseurl" value="/async/%s" />' % self.id ) )
 
+
 class DataDestinationTool( Tool ):
     tool_type = 'data_destination'
 
+
 class SetMetadataTool( Tool ):
     """
     Tool implementation for special tool that sets metadata on an existing
@@ -3167,8 +3224,8 @@
     """
     tool_type = 'set_metadata'
     requires_setting_metadata = False
-    
-    def exec_after_process( self, app, inp_data, out_data, param_dict, job = None ):
+
+    def exec_after_process( self, app, inp_data, out_data, param_dict, job=None ):
         for name, dataset in inp_data.iteritems():
             external_metadata = JobExternalOutputMetadataWrapper( job )
             if external_metadata.external_metadata_set_successfully( dataset, app.model.context ):
@@ -3191,23 +3248,28 @@
             dataset.set_peek()
             self.sa_session.add( dataset )
             self.sa_session.flush()
-    def job_failed( self, job_wrapper, message, exception = False ):
+
+    def job_failed( self, job_wrapper, message, exception=False ):
         job = job_wrapper.sa_session.query( model.Job ).get( job_wrapper.job_id )
         if job:
             inp_data = {}
            for dataset_assoc in job.input_datasets:
                inp_data[dataset_assoc.name] = dataset_assoc.dataset
-            return self.exec_after_process( job_wrapper.app, inp_data, {}, job_wrapper.get_param_dict(), job = job )
+            return self.exec_after_process( job_wrapper.app, inp_data, {}, job_wrapper.get_param_dict(), job=job )
+
 
 class ExportHistoryTool( Tool ):
     tool_type = 'export_history'
 
+
 class ImportHistoryTool( Tool ):
     tool_type = 'import_history'
 
+
 class GenomeIndexTool( Tool ):
     tool_type = 'index_genome'
 
+
 class DataManagerTool( OutputParameterJSONTool ):
     tool_type = 'manage_data'
     default_tool_action = DataManagerToolAction
@@ -3218,9 +3280,9 @@
         if self.data_manager_id is None:
             self.data_manager_id = self.id
 
-    def exec_after_process( self, app, inp_data, out_data, param_dict, job = None, **kwds ):
+    def exec_after_process( self, app, inp_data, out_data, param_dict, job=None, **kwds ):
         #run original exec_after_process
-        super( DataManagerTool, self ).exec_after_process( app, inp_data, out_data, param_dict, job = job, **kwds )
+        super( DataManagerTool, self ).exec_after_process( app, inp_data, out_data, param_dict, job=job, **kwds )
         #process results of tool
         if job and job.state == job.states.ERROR:
             return
@@ -3267,8 +3329,8 @@
 for tool_class in [ Tool, DataDestinationTool, SetMetadataTool, DataSourceTool, AsyncDataSourceTool, DataManagerTool ]:
     tool_types[ tool_class.tool_type ] = tool_class
 
+
 # ---- Utility classes to be factored out -----------------------------------
-
 class TracksterConfig:
     """ Trackster configuration encapsulation. """
@@ -3282,6 +3344,7 @@
             actions.append( SetParamAction.parse( action_elt ) )
         return TracksterConfig( actions )
 
+
 class SetParamAction:
     """ Set parameter action. """
@@ -3294,10 +3357,12 @@
         """ Parse action from element. """
         return SetParamAction( elt.get( "name" ), elt.get( "output_name" ) )
 
+
 class BadValue( object ):
     def __init__( self, value ):
         self.value = value
 
+
 class ToolStdioRegex( object ):
     """
     This is a container for the <stdio> element's regex subelement.
@@ -3313,6 +3378,7 @@
         self.error_level = "fatal"
         self.desc = ""
 
+
 class ToolStdioExitCode( object ):
     """
     This is a container for the <stdio> element's <exit_code> subelement.
@@ -3532,6 +3598,6 @@
     else:
         return incoming.get( key, default )
 
+
 class InterruptedUpload( Exception ):
     pass
-

diff -r e40e2965ac65f3a52778838e2a6879aadaf18add -r 2588b7b42ecdd9998c878f70420c75543bd5219d lib/galaxy/tools/parameters/__init__.py
--- a/lib/galaxy/tools/parameters/__init__.py
+++ b/lib/galaxy/tools/parameters/__init__.py
@@ -6,6 +6,7 @@
 from grouping import *
 from galaxy.util.json import *
 
+
 def visit_input_values( inputs, input_values, callback, name_prefix="", label_prefix="" ):
     """
     Given a tools parameter definition (`inputs`) and a specific set of
@@ -35,11 +36,12 @@
         else:
             new_value = callback( input,
                                   input_values[input.name],
-                                  prefixed_name = name_prefix + input.name,
-                                  prefixed_label = label_prefix + input.label )
+                                  prefixed_name=name_prefix + input.name,
+                                  prefixed_label=label_prefix + input.label )
             if new_value:
                 input_values[input.name] = new_value
 
+
 def check_param( trans, param, incoming_value, param_values, source='html' ):
     """
     Check the value of a single parameter `param`. The value in
@@ -62,12 +64,13 @@
             # Then do any further validation on the value
             param.validate( filtered_value, trans.history )
         elif value is None and isinstance( param, SelectToolParameter ):
-           # An empty select list or column list
-           param.validate( value, trans.history )
+            # An empty select list or column list
+            param.validate( value, trans.history )
     except ValueError, e:
         error = str( e )
     return value, error
 
+
 def params_to_strings( params, param_values, app ):
     """
     Convert a dictionary of parameter values to a dictionary of strings
@@ -83,6 +86,7 @@
             rval[ key ] = str( to_json_string( value ) )
     return rval
 
+
 def params_from_strings( params, param_values, app, ignore_errors=False ):
     """
     Convert a dictionary of strings as produced by `params_to_strings`
@@ -98,6 +102,7 @@
             rval[ key ] = value
     return rval
 
+
 def params_to_incoming( incoming, inputs, input_values, app, name_prefix="" ):
     """
     Given a tool's parameter definition (`inputs`) and a specific set of
@@ -119,4 +124,3 @@
                 params_to_incoming( incoming, input.cases[current].inputs, values, app, new_name_prefix )
         else:
             incoming[ name_prefix + input.name ] = input.to_html_value( input_values.get( input.name ), app )
-

diff -r e40e2965ac65f3a52778838e2a6879aadaf18add -r 2588b7b42ecdd9998c878f70420c75543bd5219d lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1441,10 +1441,11 @@
                 for opt in option['options']:
                     recurse_option( option_list, opt )
             rval = []
-            recurse_option( rval, get_base_option( value, self.get_options( other_values = other_values ) ) )
+            recurse_option( rval, get_base_option( value, self.get_options( other_values=other_values ) ) )
             return rval or [value]
 
-        if value is None: return "None"
+        if value is None:
+            return "None"
         rval = []
         if self.hierarchy == "exact":
             rval = value
@@ -1483,8 +1484,9 @@
                     if value == option['value']:
                         return option['name']
                     rval = get_option_display( value, option['options'] )
-                    if rval: return rval
-            return None #not found
+                    if rval:
+                        return rval
+            return None  # not found
 
         if isinstance( value, UnvalidatedValue ):
             suffix = "\n(value not yet validated)"
@@ -1524,12 +1526,12 @@
         options = []
         try:
             options = self.get_options( trans, {} )
-        except KeyError, key_err:
+        except KeyError:
             # will sometimes error if self.is_dynamic and self.filtered
             # bc we dont/cant fill out other_values above ({})
             pass
 
-        d[ 'options' ] = options;
+        d[ 'options' ] = options
         return d
@@ -1762,7 +1764,7 @@
             if v:
                 if v.deleted:
                     raise ValueError( "The previously selected dataset has been previously deleted" )
-                if v.dataset.state in [galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED ]:
+                if v.dataset.state in [ galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED ]:
                     raise ValueError( "The previously selected dataset has entered an unusable state" )
         return rval
@@ -1794,7 +1796,8 @@
         return app.model.context.query( app.model.HistoryDatasetAssociation ).get( int( value ) )
 
     def to_param_dict_string( self, value, other_values={} ):
-        if value is None: return "None"
+        if value is None:
+            return "None"
         return value.file_name
 
     def value_to_display_text( self, value, app ):
@@ -1828,12 +1831,13 @@
         if self.tool is None or self.tool.has_multiple_pages or not hasattr( trans, 'workflow_building_mode' ) or trans.workflow_building_mode:
             return False
         if other_values is None:
-            return True # we don't know other values, so we can't check, assume ok
+            return True  # we don't know other values, so we can't check, assume ok
         converter_safe = [True]
-        def visitor( prefix, input, value, parent = None ):
+
+        def visitor( prefix, input, value, parent=None ):
             if isinstance( input, SelectToolParameter ) and self.name in input.get_dependencies():
                 if input.is_dynamic and ( input.dynamic_options or ( not input.dynamic_options and not input.options ) or not input.options.converter_safe ):
-                    converter_safe[0] = False #This option does not allow for conversion, i.e. uses contents of dataset file to generate options
+                    converter_safe[0] = False  # This option does not allow for conversion, i.e. uses contents of dataset file to generate options
         self.tool.visit_inputs( other_values, visitor )
         return False not in converter_safe
@@ -1960,21 +1964,24 @@
 #        self.html = form_builder.HiddenField( self.name, trans.history.id ).get_html()
 #        return self.html
 
-parameter_types = dict( text = TextToolParameter,
-                        integer = IntegerToolParameter,
-                        float = FloatToolParameter,
-                        boolean = BooleanToolParameter,
-                        genomebuild = GenomeBuildParameter,
-                        select = SelectToolParameter,
-                        data_column = ColumnListParameter,
-                        hidden = HiddenToolParameter,
-                        hidden_data = HiddenDataToolParameter,
-                        baseurl = BaseURLToolParameter,
-                        file = FileToolParameter,
-                        ftpfile = FTPFileToolParameter,
-                        data = DataToolParameter,
-                        library_data = LibraryDatasetToolParameter,
-                        drill_down = DrillDownSelectToolParameter )
+parameter_types = dict(
+    text=TextToolParameter,
+    integer=IntegerToolParameter,
+    float=FloatToolParameter,
+    boolean=BooleanToolParameter,
+    genomebuild=GenomeBuildParameter,
+    select=SelectToolParameter,
+    data_column=ColumnListParameter,
+    hidden=HiddenToolParameter,
+    hidden_data=HiddenDataToolParameter,
+    baseurl=BaseURLToolParameter,
+    file=FileToolParameter,
+    ftpfile=FTPFileToolParameter,
+    data=DataToolParameter,
+    library_data=LibraryDatasetToolParameter,
+    drill_down=DrillDownSelectToolParameter
+)
+
 
 class UnvalidatedValue( object ):
     """
@@ -1993,4 +2000,3 @@
     runtime.
     """
     pass
-

diff -r e40e2965ac65f3a52778838e2a6879aadaf18add -r 2588b7b42ecdd9998c878f70420c75543bd5219d lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -1,9 +1,11 @@
 import urllib
 
 from galaxy import web, util
-from galaxy.web.base.controller import BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesVisualizationMixin, UsesHistoryMixin
+from galaxy.web.base.controller import BaseAPIController
+from galaxy.web.base.controller import UsesVisualizationMixin
+from galaxy.web.base.controller import UsesHistoryMixin
 from galaxy.visualization.genomes import GenomeRegion
-from galaxy.util.json import to_json_string, from_json_string
+from galaxy.util.json import to_json_string
 from galaxy.visualization.data_providers.genome import *
 
 import logging
@@ -47,7 +49,7 @@
         GET /api/tools/{tool_id}
         Returns tool information, including parameters and inputs.
         """
-        io_details = util.string_as_bool( kwd.get( 'io_details', False ) )
+        io_details = util.string_as_bool( kwd.get( 'io_details', False ) )
         link_details = util.string_as_bool( kwd.get( 'link_details', False ) )
         try:
             id = urllib.unquote_plus( id )
@@ -96,7 +98,7 @@
         for k, v in payload.iteritems():
             if k.startswith("files_") or k.startswith("__files_"):
                 inputs[k] = v
-        
+
         #for inputs that are coming from the Library, copy them into the history
         input_patch = {}
         for k, v in inputs.iteritems():
@@ -112,7 +114,7 @@
         inputs['runtool_btn'] = 'Execute'
         # TODO: encode data ids and decode ids.
         # TODO: handle dbkeys
-        params = util.Params( inputs, sanitize = False )
+        params = util.Params( inputs, sanitize=False )
         # process_state will be 'populate' or 'update'. When no tool
         # state is specified in input - it will be 'populate', and
         # tool will fully expand repeat and conditionals when building
@@ -140,8 +142,8 @@
         for output_name, output in output_datasets:
             output_dict = output.to_dict()
             #add the output name back into the output data structure
-            #so it's possible to figure out which newly created elements
-            #correspond with which tool file outputs
+            #so it's possible to figure out which newly created elements
+            #correspond with which tool file outputs
             output_dict['output_name'] = output_name
             outputs.append( trans.security.encode_dict_ids( output_dict ) )
         return rval
@@ -185,14 +187,13 @@
         # HACK: add run button so that tool.handle_input will run tool.
         kwargs['runtool_btn'] = 'Execute'
-        params = util.Params( kwargs, sanitize = False )
+        params = util.Params( kwargs, sanitize=False )
         template, vars = tool.handle_input( trans, params.__dict__, history=target_history )
 
         # TODO: check for errors and ensure that output dataset is available.
         output_datasets = vars[ 'out_data' ].values()
         return self.add_track_async( trans, output_datasets[0].id )
 
-
     def _rerun_tool( self, trans, payload, **kwargs ):
         """
         Rerun a tool to produce a new output dataset that corresponds to a
@@ -363,7 +364,7 @@
                     continue
 
                 input_dataset = jida.dataset
-                if input_dataset is None: #optional dataset and dataset wasn't selected
+                if input_dataset is None:  # optional dataset and dataset wasn't selected
                     tool_params[ jida.name ] = None
                 elif run_on_regions and 'data' in input_dataset.datatype.data_sources:
                     # Dataset is indexed and hence a subset can be extracted and used
@@ -407,7 +408,7 @@
                         # Set metadata.
                         # TODO: set meta internally if dataset is small enough?
                         trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool,
-                                                                                                     trans, incoming = { 'input1':new_dataset },
+                                                                                                     trans, incoming={ 'input1': new_dataset },
                                                                                                      overwrite=False, job_params={ "source" : "trackster" } )
                         # Add HDA subset association.
                         subset_association = trans.app.model.HistoryDatasetAssociationSubset( hda=input_dataset, subset=new_dataset, location=regions_str )
@@ -450,5 +451,5 @@
 
         dataset_dict = output_dataset.to_dict()
         dataset_dict[ 'id' ] = trans.security.encode_id( dataset_dict[ 'id' ] )
-        dataset_dict[ 'track_config' ] = self.get_new_track_config( trans, output_dataset );
+        dataset_dict[ 'track_config' ] = self.get_new_track_config( trans, output_dataset )
         return dataset_dict

https://bitbucket.org/galaxy/galaxy-central/commits/14ab802d0209/
Changeset:   14ab802d0209
User:        jmchilton
Date:        2014-01-13 15:58:22
Summary:     Initial work on tool execution unit tests.

Going to be doing some more work on tool state stuff so it will be good
to have a way to test that.

This also brings in test/unit/tools_support.py from Pull Request #287
(would be overkill for just these tests, but it is useful for future
tests coming down the pipe.)

Affected #:  3 files

diff -r 2588b7b42ecdd9998c878f70420c75543bd5219d -r 14ab802d020997e1acfdce191d912b702624b96f test/unit/tools/test_execution.py
--- /dev/null
+++ b/test/unit/tools/test_execution.py
@@ -0,0 +1,104 @@
+""" Test Tool execution and state handling logic.
+""" +import os + +from unittest import TestCase + +import galaxy.model +from galaxy.tools import Tool +from galaxy.tools import DefaultToolState +from galaxy.util import parse_xml +from galaxy.util import string_to_object +from galaxy.util import object_to_string +from galaxy.util.odict import odict +from tools_support import UsesApp + +EXAMPLE_TOOL_CONTENTS = '''<tool id="test_tool" name="Test Tool"> + <command>echo "$text" < $out1</command> + <inputs> + <param type="text" name="param1" value="" /> + </inputs> + <outputs> + <output name="out1" format="data" /> + </outputs> +</tool>''' + + +class ToolExecutionTestCase( TestCase, UsesApp ): + + def setUp(self): + self.setup_app() + self.app.job_config["get_job_tool_configurations"] = lambda ids: None + self.app.config.drmaa_external_runjob_script = "" + self.app.config.tool_secret = "testsecret" + self.trans = MockTrans( self.app ) + self.tool_action = MockAction( self.trans ) + self.tool_file = os.path.join( self.test_directory, "tool.xml" ) + + def tearDown(self): + self.tear_down_app() + + def test_state_new( self ): + self.__write_tool( EXAMPLE_TOOL_CONTENTS ) + self.__setup_tool( ) + template, template_vars = self.tool.handle_input( + trans=self.trans, + incoming=dict( param1="moo" ) + # no runtool_btn, just rerenders the form mako with tool + # state populated. + ) + assert template == "tool_form.mako" + assert not template_vars[ "errors" ] + state = template_vars[ "tool_state" ] + assert state.inputs[ "param1" ] == "moo" + + def test_execute( self ): + self.__write_tool( EXAMPLE_TOOL_CONTENTS ) + self.__setup_tool( ) + template, template_vars = self.tool.handle_input( + trans=self.trans, + incoming=dict( param1="moo", runtool_btn="dummy" ) + ) + assert template == "tool_executed.mako" + + def __setup_tool( self ): + tree = parse_xml( self.tool_file ) + self.tool = Tool( self.tool_file, tree.getroot(), self.app ) + self.tool.tool_action = self.tool_action + + def __write_tool( self, contents ): + open( self.tool_file, "w" ).write( contents ) + + def __string_to_state( self, state_string ): + encoded_state = string_to_object( state_string ) + state = DefaultToolState() + state.decode( encoded_state, self.tool, self.app ) + return state + + def __inputs_to_state( self, inputs ): + tool_state = DefaultToolState() + tool_state.inputs = inputs + return tool_state + + def __inputs_to_state_string( self, inputs ): + tool_state = self.__inputs_to_state( inputs ) + return object_to_string( tool_state.encode( self.tool, self.app ) ) + + +class MockAction( object ): + + def __init__( self, expected_trans ): + self.expected_trans = expected_trans + self.execution_call_args = [] + + def execute( self, tool, trans, **kwds ): + assert self.expected_trans == trans + self.execution_call_args.append( kwds ) + return None, odict(dict(out1="1")) + + +class MockTrans( object ): + + def __init__( self, app ): + self.app = app + self.history = galaxy.model.History() diff -r 2588b7b42ecdd9998c878f70420c75543bd5219d -r 14ab802d020997e1acfdce191d912b702624b96f test/unit/tools_support.py --- /dev/null +++ b/test/unit/tools_support.py @@ -0,0 +1,51 @@ +""" Module contains test fixtures meant to aide in the testing of jobs and +tool evaluation. Such extensive "fixtures" are something of an anti-pattern +so use of this should be limitted to tests of very 'extensive' classes. 
+""" + +import os.path +import tempfile +import shutil + +from galaxy.util.bunch import Bunch +from galaxy.model import mapping + + +class UsesApp( object ): + + def setup_app( self ): + # The following line is needed in order to create + # HistoryDatasetAssociations - ideally the model classes would be + # usable without the ORM infrastructure in place. + mapping.init( "/tmp", "sqlite:///:memory:", create_tables=True ) + self.test_directory = tempfile.mkdtemp() + self.app = MockApp(self.test_directory) + + def tear_down_app( self ): + shutil.rmtree( self.test_directory ) + + +class MockApp( object ): + + def __init__( self, test_directory ): + + self.datatypes_registry = Bunch( + integrated_datatypes_configs='/galaxy/integrated_datatypes_configs.xml', + get_datatype_by_extension=lambda ext: Bunch(), + ) + + self.config = Bunch( + outputs_to_working_directory=False, + new_file_path=os.path.join(test_directory, "new_files"), + tool_data_path=os.path.join(test_directory, "tools"), + root=os.path.join(test_directory, "galaxy"), + admin_users="mary@example.com", + ) + + # Setup some attributes for downstream extension by specific tests. + self.job_config = Bunch() + self.model = Bunch() + self.toolbox = None + self.object_store = None + +__all__ = [ UsesApp ] https://bitbucket.org/galaxy/galaxy-central/commits/c250b1cfc60f/ Changeset: c250b1cfc60f User: jmchilton Date: 2014-01-13 15:58:22 Summary: Initial work on tools API functional tests. Just tests some simple indexing and tool execution. Tons more one can and should test. Affected #: 1 file diff -r 14ab802d020997e1acfdce191d912b702624b96f -r c250b1cfc60fe2b56ddcf18e2cf2543631d4c26f test/functional/api/test_tools.py --- /dev/null +++ b/test/functional/api/test_tools.py @@ -0,0 +1,102 @@ +# Test tools API. +from itertools import chain +from json import dumps +import time + +from base import api +from operator import itemgetter + + +class ToolsTestCase( api.ApiTestCase ): + + def test_index( self ): + index = self._get( "tools" ) + tools_index = index.json() + # In panels by default, so flatten out sections... + tools = list( chain( *map( itemgetter( "elems" ), tools_index ) ) ) + tool_ids = map( itemgetter( "id" ), tools ) + assert "upload1" in tool_ids + assert "cat1" in tool_ids + + def test_no_panel_index( self ): + index = self._get( "tools", data=dict(in_panel="false") ) + tools_index = index.json() + # No need to flatten out sections, with in_panel=False, only tools are + # returned. 
+        tool_ids = map( itemgetter( "id" ), tools_index )
+        assert "upload1" in tool_ids
+        assert "cat1" in tool_ids
+
+    def test_upload1_paste( self ):
+        history_id = self._new_history()
+        payload = self._upload_payload( history_id, 'Hello World' )
+        create_response = self._post( "tools", data=payload )
+        self._assert_has_keys( create_response.json(), 'outputs' )
+
+    def test_run_cat1( self ):
+        history_id = self._new_history()
+        new_dataset = self._new_dataset( history_id )
+        dataset_id = new_dataset[ 'id' ]
+        payload = self._run_tool_payload(
+            tool_id='cat1',
+            inputs=dict(
+                input1=dict(
+                    src='hda',
+                    id=dataset_id
+                ),
+            ),
+            history_id=history_id,
+        )
+        create_response = self._post( "tools", data=payload )
+        self._assert_status_code_is( create_response, 200 )
+        self._assert_has_keys( create_response.json(), 'outputs' )
+        self._wait_for_history( history_id, assert_ok=True )
+
+    def _new_dataset( self, history_id, content='TestData123', **kwds ):
+        payload = self._upload_payload( history_id, content, **kwds )
+        run_response = self._post( "tools", data=payload )
+        self._assert_status_code_is( run_response, 200 )
+        return run_response.json()["outputs"][0]
+
+    def _wait_for_history( self, history_id, assert_ok=False ):
+        while True:
+            history_details_response = self._get( "histories/%s" % history_id )
+            self._assert_status_code_is( history_details_response, 200 )
+            history_state = history_details_response.json()[ "state" ]
+            if history_state not in [ "running", "queued" ]:
+                break
+            time.sleep( .1 )
+        if assert_ok:
+            self.assertEquals( history_state, 'ok' )
+
+    def _new_history( self, **kwds ):
+        name = kwds.get( "name", "API Test History" )
+        create_history_response = self._post( "histories", data=dict( name=name ) )
+        self._assert_status_code_is( create_history_response, 200 )
+        history_id = create_history_response.json()[ "id" ]
+        return history_id
+
+    def _upload_payload( self, history_id, content, **kwds ):
+        name = kwds.get( "name", "Test Dataset" )
+        dbkey = kwds.get( "dbkey", "?" )
+        file_type = kwds.get( "file_type", 'txt' )
+        upload_params = {
+            'files_0|NAME': name,
+            'files_0|url_paste': content,
+            'dbkey': dbkey,
+            'file_type': file_type,
+        }
+        return self._run_tool_payload(
+            tool_id='upload1',
+            inputs=upload_params,
+            history_id=history_id,
+            upload_type='upload_dataset'
+        )
+
+    def _run_tool_payload( self, tool_id, inputs, history_id, **kwds ):
+        return dict(
+            tool_id=tool_id,
+            inputs=dumps(inputs),
+            history_id=history_id,
+            **kwds
+        )

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
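P.S. For anyone who wants to exercise the new tools API by hand rather than
through the test framework, below is a minimal standalone sketch of the same
POST /api/tools request that test_run_cat1 above performs. The base URL, API
key, and encoded ids are hypothetical placeholders for illustration; they are
not values taken from these commits:

    # Minimal sketch of the POST /api/tools call exercised by test_run_cat1.
    # The Galaxy URL, API key, and encoded ids below are hypothetical
    # placeholders, not part of the commits above.
    import json
    import requests

    BASE_URL = "http://localhost:8080/api"  # assumed local Galaxy instance
    API_KEY = "0123456789abcdef"            # assumed user API key

    # Same payload shape _run_tool_payload builds: the tool inputs travel
    # as a JSON-encoded string under the "inputs" key.
    payload = {
        "tool_id": "cat1",
        "history_id": "ebfb8f50c6abde6d",  # an encoded history id (placeholder)
        "inputs": json.dumps({
            "input1": {"src": "hda", "id": "f2db41e1fa331b3e"},  # encoded HDA id (placeholder)
        }),
    }
    response = requests.post("%s/tools" % BASE_URL, params={"key": API_KEY}, data=payload)
    response.raise_for_status()
    # Each created output corresponds to one entry under "outputs".
    print([output["id"] for output in response.json()["outputs"]])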