9 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/f58a9199e992/
Changeset:   f58a9199e992
User:        jmchilton
Date:        2014-08-10 20:05:31
Summary:     Fix failing test test/unit/workflows/test_extract_summary.py.
Affected #:  1 file
diff -r de2e33813275b89a04e7de4e95b21f6d2e100d4e -r f58a9199e9922c3bef27240aa613a9a3c5bae67e test/unit/workflows/test_extract_summary.py
--- a/test/unit/workflows/test_extract_summary.py
+++ b/test/unit/workflows/test_extract_summary.py
@@ -71,6 +71,10 @@
     def __init__( self ):
        self.active_datasets = []

+    @property
+    def active_contents( self ):
+        return self.active_datasets
+

class MockTrans( object ):

@@ -87,6 +91,7 @@
        self.id = 123
        self.state = state
        self.copied_from_history_dataset_association = None
+        self.history_content_type = "dataset"
        if job is not UNDEFINED_JOB:
            if not job:
                job = model.Job()
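For readers skimming the diff above, here is roughly what the updated test doubles amount to once the change is applied. This is a hedged sketch: only active_contents and history_content_type come from the commit itself; the class names and the simplified constructors are illustrative, not copied from the test file.

class MockHistory( object ):
    """ Hypothetical stand-in for galaxy.model.History (name illustrative). """

    def __init__( self ):
        self.active_datasets = []

    @property
    def active_contents( self ):
        # The real History exposes active_contents (datasets plus dataset
        # collections); the mock only mirrors the attribute so code under
        # test that switched to active_contents keeps working.
        return self.active_datasets


class MockHda( object ):
    """ Hypothetical stand-in for an HDA with just the attributes read here. """

    def __init__( self, state="ok" ):
        self.id = 123
        self.state = state
        self.copied_from_history_dataset_association = None
        # New in this commit: callers can now ask whether a history content
        # item is a plain dataset or a dataset collection.
        self.history_content_type = "dataset"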
https://bitbucket.org/galaxy/galaxy-central/commits/f1a1c52f8495/
Changeset:   f1a1c52f8495
User:        jmchilton
Date:        2014-08-10 20:05:31
Summary:     Move galaxy.dataset_collections.util to galaxy.managers.collections_util to prevent a circular dependency error.

Still some problems with histories and hda managers when loading from unit tests:
======================================================================
ERROR: Failure: ImportError (cannot import name histories)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/home/john/workspace/lwr/.venv/local/lib/python2.7/site-packages/nose/loader.py", line 414, in loadTestsFromName
    addr.filename, addr.module)
  File "/home/john/workspace/lwr/.venv/local/lib/python2.7/site-packages/nose/importer.py", line 47, in importFromPath
    return self.importFromDir(dir_path, fqname)
  File "/home/john/workspace/lwr/.venv/local/lib/python2.7/site-packages/nose/importer.py", line 94, in importFromDir
    mod = load_module(part_fqname, fh, filename, desc)
  File "/home/john/workspace/galaxy-central/test/unit/workflows/test_workflow_run_request.py", line 4, in <module>
    from galaxy.workflow import run_request
  File "/home/john/workspace/galaxy-central/lib/galaxy/workflow/run_request.py", line 4, in <module>
    import galaxy.managers.histories
  File "/home/john/workspace/galaxy-central/lib/galaxy/managers/histories.py", line 12, in <module>
    import galaxy.managers.hdas
  File "/home/john/workspace/galaxy-central/lib/galaxy/managers/hdas.py", line 11, in <module>
    from galaxy.managers import histories as history_manager
ImportError: cannot import name histories

Affected #:  9 files
diff -r f58a9199e9922c3bef27240aa613a9a3c5bae67e -r f1a1c52f8495d1035d1c0a5e21b710974db0908e lib/galaxy/dataset_collections/__init__.py
--- a/lib/galaxy/dataset_collections/__init__.py
+++ b/lib/galaxy/dataset_collections/__init__.py
@@ -1,7 +1,6 @@
from .registry import DatasetCollectionTypesRegistry
from .matching import MatchingCollections
from .type_description import CollectionTypeDescriptionFactory
-from .util import validate_input_element_identifiers

from galaxy import model
@@ -14,7 +13,7 @@
    UsesTagsMixin,
)
from galaxy.managers import hdas  # TODO: Refactor all mixin use into managers.
-
+from galaxy.managers.collections_util import validate_input_element_identifiers
from galaxy.util import validation
from galaxy.util import odict
diff -r f58a9199e9922c3bef27240aa613a9a3c5bae67e -r f1a1c52f8495d1035d1c0a5e21b710974db0908e lib/galaxy/dataset_collections/util.py --- a/lib/galaxy/dataset_collections/util.py +++ /dev/null @@ -1,95 +0,0 @@ -from galaxy import exceptions -from galaxy import web -from galaxy import model - -import logging -log = logging.getLogger( __name__ ) - -ERROR_MESSAGE_UNKNOWN_SRC = "Unknown dataset source (src) %s." -ERROR_MESSAGE_NO_NESTED_IDENTIFIERS = "Dataset source new_collection requires nested element_identifiers for new collection." -ERROR_MESSAGE_NO_NAME = "Cannot load invalid dataset identifier - missing name - %s" -ERROR_MESSAGE_NO_COLLECTION_TYPE = "No collection_type define for nested collection %s." -ERROR_MESSAGE_INVALID_PARAMETER_FOUND = "Found invalid parameter %s in element identifier description %s." -ERROR_MESSAGE_DUPLICATED_IDENTIFIER_FOUND = "Found duplicated element identifier name %s." - - -def api_payload_to_create_params( payload ): - """ - Cleanup API payload to pass into dataset_collections. - """ - required_parameters = [ "collection_type", "element_identifiers" ] - missing_parameters = [ p for p in required_parameters if p not in payload ] - if missing_parameters: - message = "Missing required parameters %s" % missing_parameters - raise exceptions.ObjectAttributeMissingException( message ) - - params = dict( - collection_type=payload.get( "collection_type" ), - element_identifiers=payload.get( "element_identifiers" ), - name=payload.get( "name", None ), - ) - - return params - - -def validate_input_element_identifiers( element_identifiers ): - """ Scan through the list of element identifiers supplied by the API consumer - and verify the structure is valid. - """ - log.debug( "Validating %d element identifiers for collection creation." 
% len( element_identifiers ) ) - identifier_names = set() - for element_identifier in element_identifiers: - if "__object__" in element_identifier: - message = ERROR_MESSAGE_INVALID_PARAMETER_FOUND % ( "__model_object__", element_identifier ) - raise exceptions.RequestParameterInvalidException( message ) - if "name" not in element_identifier: - message = ERROR_MESSAGE_NO_NAME % element_identifier - raise exceptions.RequestParameterInvalidException( message ) - name = element_identifier[ "name" ] - if name in identifier_names: - message = ERROR_MESSAGE_DUPLICATED_IDENTIFIER_FOUND % name - raise exceptions.RequestParameterInvalidException( message ) - else: - identifier_names.add( name ) - src = element_identifier.get( "src", "hda" ) - if src not in [ "hda", "hdca", "ldda", "new_collection" ]: - message = ERROR_MESSAGE_UNKNOWN_SRC % src - raise exceptions.RequestParameterInvalidException( message ) - if src == "new_collection": - if "element_identifiers" not in element_identifier: - message = ERROR_MESSAGE_NO_NESTED_IDENTIFIERS - raise exceptions.RequestParameterInvalidException( ERROR_MESSAGE_NO_NESTED_IDENTIFIERS ) - if "collection_type" not in element_identifier: - message = ERROR_MESSAGE_NO_COLLECTION_TYPE % element_identifier - raise exceptions.RequestParameterInvalidException( message ) - validate_input_element_identifiers( element_identifier[ "element_identifiers" ] ) - - -def dictify_dataset_collection_instance( dataset_colleciton_instance, parent, security, view="element" ): - dict_value = dataset_colleciton_instance.to_dict( view=view ) - encoded_id = security.encode_id( dataset_colleciton_instance.id ) - if isinstance( parent, model.History ): - encoded_history_id = security.encode_id( parent.id ) - dict_value[ 'url' ] = web.url_for( 'history_content_typed', history_id=encoded_history_id, id=encoded_id, type="dataset_collection" ) - elif isinstance( parent, model.LibraryFolder ): - encoded_library_id = security.encode_id( parent.library.id ) - encoded_folder_id = security.encode_id( parent.id ) - # TODO: Work in progress - this end-point is not right yet... - dict_value[ 'url' ] = web.url_for( 'library_content', library_id=encoded_library_id, id=encoded_id, folder_id=encoded_folder_id ) - if view == "element": - dict_value[ 'elements' ] = map( dictify_element, dataset_colleciton_instance.collection.elements ) - security.encode_all_ids( dict_value, recursive=True ) # TODO: Use Kyle's recusrive formulation of this. - return dict_value - - -def dictify_element( element ): - dictified = element.to_dict( view="element" ) - object_detials = element.element_object.to_dict() - if element.child_collection: - # Recursively yield elements for each nested collection... - object_detials[ "elements" ] = map( dictify_element, element.child_collection.elements ) - - dictified[ "object" ] = object_detials - return dictified - -__all__ = [ api_payload_to_create_params, dictify_dataset_collection_instance ]
diff -r f58a9199e9922c3bef27240aa613a9a3c5bae67e -r f1a1c52f8495d1035d1c0a5e21b710974db0908e lib/galaxy/managers/collections.py
--- /dev/null
+++ b/lib/galaxy/managers/collections.py
@@ -0,0 +1,1 @@
+""" Future home of dataset collections managers. """
diff -r f58a9199e9922c3bef27240aa613a9a3c5bae67e -r f1a1c52f8495d1035d1c0a5e21b710974db0908e lib/galaxy/managers/collections_util.py --- /dev/null +++ b/lib/galaxy/managers/collections_util.py @@ -0,0 +1,95 @@ +from galaxy import exceptions +from galaxy import web +from galaxy import model + +import logging +log = logging.getLogger( __name__ ) + +ERROR_MESSAGE_UNKNOWN_SRC = "Unknown dataset source (src) %s." +ERROR_MESSAGE_NO_NESTED_IDENTIFIERS = "Dataset source new_collection requires nested element_identifiers for new collection." +ERROR_MESSAGE_NO_NAME = "Cannot load invalid dataset identifier - missing name - %s" +ERROR_MESSAGE_NO_COLLECTION_TYPE = "No collection_type define for nested collection %s." +ERROR_MESSAGE_INVALID_PARAMETER_FOUND = "Found invalid parameter %s in element identifier description %s." +ERROR_MESSAGE_DUPLICATED_IDENTIFIER_FOUND = "Found duplicated element identifier name %s." + + +def api_payload_to_create_params( payload ): + """ + Cleanup API payload to pass into dataset_collections. + """ + required_parameters = [ "collection_type", "element_identifiers" ] + missing_parameters = [ p for p in required_parameters if p not in payload ] + if missing_parameters: + message = "Missing required parameters %s" % missing_parameters + raise exceptions.ObjectAttributeMissingException( message ) + + params = dict( + collection_type=payload.get( "collection_type" ), + element_identifiers=payload.get( "element_identifiers" ), + name=payload.get( "name", None ), + ) + + return params + + +def validate_input_element_identifiers( element_identifiers ): + """ Scan through the list of element identifiers supplied by the API consumer + and verify the structure is valid. + """ + log.debug( "Validating %d element identifiers for collection creation." 
% len( element_identifiers ) ) + identifier_names = set() + for element_identifier in element_identifiers: + if "__object__" in element_identifier: + message = ERROR_MESSAGE_INVALID_PARAMETER_FOUND % ( "__model_object__", element_identifier ) + raise exceptions.RequestParameterInvalidException( message ) + if "name" not in element_identifier: + message = ERROR_MESSAGE_NO_NAME % element_identifier + raise exceptions.RequestParameterInvalidException( message ) + name = element_identifier[ "name" ] + if name in identifier_names: + message = ERROR_MESSAGE_DUPLICATED_IDENTIFIER_FOUND % name + raise exceptions.RequestParameterInvalidException( message ) + else: + identifier_names.add( name ) + src = element_identifier.get( "src", "hda" ) + if src not in [ "hda", "hdca", "ldda", "new_collection" ]: + message = ERROR_MESSAGE_UNKNOWN_SRC % src + raise exceptions.RequestParameterInvalidException( message ) + if src == "new_collection": + if "element_identifiers" not in element_identifier: + message = ERROR_MESSAGE_NO_NESTED_IDENTIFIERS + raise exceptions.RequestParameterInvalidException( ERROR_MESSAGE_NO_NESTED_IDENTIFIERS ) + if "collection_type" not in element_identifier: + message = ERROR_MESSAGE_NO_COLLECTION_TYPE % element_identifier + raise exceptions.RequestParameterInvalidException( message ) + validate_input_element_identifiers( element_identifier[ "element_identifiers" ] ) + + +def dictify_dataset_collection_instance( dataset_colleciton_instance, parent, security, view="element" ): + dict_value = dataset_colleciton_instance.to_dict( view=view ) + encoded_id = security.encode_id( dataset_colleciton_instance.id ) + if isinstance( parent, model.History ): + encoded_history_id = security.encode_id( parent.id ) + dict_value[ 'url' ] = web.url_for( 'history_content_typed', history_id=encoded_history_id, id=encoded_id, type="dataset_collection" ) + elif isinstance( parent, model.LibraryFolder ): + encoded_library_id = security.encode_id( parent.library.id ) + encoded_folder_id = security.encode_id( parent.id ) + # TODO: Work in progress - this end-point is not right yet... + dict_value[ 'url' ] = web.url_for( 'library_content', library_id=encoded_library_id, id=encoded_id, folder_id=encoded_folder_id ) + if view == "element": + dict_value[ 'elements' ] = map( dictify_element, dataset_colleciton_instance.collection.elements ) + security.encode_all_ids( dict_value, recursive=True ) # TODO: Use Kyle's recusrive formulation of this. + return dict_value + + +def dictify_element( element ): + dictified = element.to_dict( view="element" ) + object_detials = element.element_object.to_dict() + if element.child_collection: + # Recursively yield elements for each nested collection... + object_detials[ "elements" ] = map( dictify_element, element.child_collection.elements ) + + dictified[ "object" ] = object_detials + return dictified + +__all__ = [ api_payload_to_create_params, dictify_dataset_collection_instance ]
diff -r f58a9199e9922c3bef27240aa613a9a3c5bae67e -r f1a1c52f8495d1035d1c0a5e21b710974db0908e lib/galaxy/managers/histories.py
--- a/lib/galaxy/managers/histories.py
+++ b/lib/galaxy/managers/histories.py
@@ -10,9 +10,9 @@
from galaxy.managers import base as manager_base
import galaxy.managers.hdas
+import galaxy.managers.collections_util

import galaxy.web
-import galaxy.dataset_collections.util

import logging
log = logging.getLogger( __name__ )
@@ -141,7 +141,7 @@
    history and an array of serialized history contents respectively.
    """
    hda_mgr = galaxy.managers.hdas.HDAManager()
-    collection_dictifier = galaxy.dataset_collections.util.dictify_dataset_collection_instance
+    collection_dictifier = galaxy.managers.collections_util.dictify_dataset_collection_instance

    history_dictionary = {}
    contents_dictionaries = []
diff -r f58a9199e9922c3bef27240aa613a9a3c5bae67e -r f1a1c52f8495d1035d1c0a5e21b710974db0908e lib/galaxy/webapps/galaxy/api/dataset_collections.py
--- a/lib/galaxy/webapps/galaxy/api/dataset_collections.py
+++ b/lib/galaxy/webapps/galaxy/api/dataset_collections.py
@@ -4,8 +4,7 @@
from galaxy.web.base.controller import UsesHistoryMixin
from galaxy.web.base.controller import UsesLibraryMixinItems

-from galaxy.dataset_collections.util import api_payload_to_create_params
-from galaxy.dataset_collections.util import dictify_dataset_collection_instance
+from galaxy.managers.collections_util import api_payload_to_create_params, dictify_dataset_collection_instance

from logging import getLogger
log = getLogger( __name__ )
diff -r f58a9199e9922c3bef27240aa613a9a3c5bae67e -r f1a1c52f8495d1035d1c0a5e21b710974db0908e lib/galaxy/webapps/galaxy/api/history_contents.py
--- a/lib/galaxy/webapps/galaxy/api/history_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/history_contents.py
@@ -16,12 +16,9 @@
from galaxy.web.base.controller import UsesLibraryMixinItems
from galaxy.web.base.controller import UsesTagsMixin

-from galaxy.dataset_collections.util import api_payload_to_create_params
-from galaxy.dataset_collections.util import dictify_dataset_collection_instance
-
-
from galaxy.managers import histories
from galaxy.managers import hdas
+from galaxy.managers.collections_util import api_payload_to_create_params, dictify_dataset_collection_instance

import logging
log = logging.getLogger( __name__ )
diff -r f58a9199e9922c3bef27240aa613a9a3c5bae67e -r f1a1c52f8495d1035d1c0a5e21b710974db0908e lib/galaxy/webapps/galaxy/api/library_contents.py
--- a/lib/galaxy/webapps/galaxy/api/library_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/library_contents.py
@@ -5,11 +5,10 @@
from galaxy import web
from galaxy import exceptions
from galaxy.web import _future_expose_api as expose_api
-from galaxy.dataset_collections.util import api_payload_to_create_params
-from galaxy.dataset_collections.util import dictify_dataset_collection_instance
from galaxy.web.base.controller import BaseAPIController, UsesLibraryMixin, UsesLibraryMixinItems
from galaxy.web.base.controller import UsesHistoryDatasetAssociationMixin
from galaxy.web.base.controller import HTTPBadRequest, url_for
+from galaxy.managers.collections_util import api_payload_to_create_params, dictify_dataset_collection_instance
from galaxy.model import ExtendedMetadata, ExtendedMetadataIndex
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
diff -r f58a9199e9922c3bef27240aa613a9a3c5bae67e -r f1a1c52f8495d1035d1c0a5e21b710974db0908e lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -9,7 +9,8 @@
from galaxy.visualization.genomes import GenomeRegion
from galaxy.util.json import to_json_string
from galaxy.visualization.data_providers.genome import *
-from galaxy.dataset_collections.util import dictify_dataset_collection_instance
+
+from galaxy.managers.collections_util import dictify_dataset_collection_instance

import logging
log = logging.getLogger( __name__ )
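As a rough illustration of the contract of the relocated helpers, validate_input_element_identifiers accepts nested structures like the first one below and rejects the second. The element names and encoded ids are invented, and running this assumes a Galaxy checkout on the Python path.

from galaxy import exceptions
from galaxy.managers.collections_util import validate_input_element_identifiers

# A flat element plus a nested collection; names and ids are invented.
ok_identifiers = [
    { "name": "sample1", "src": "hda", "id": "abc123" },
    {
        "name": "replicates",
        "src": "new_collection",
        "collection_type": "list",
        "element_identifiers": [
            { "name": "rep1", "src": "hda", "id": "def456" },
        ],
    },
]
validate_input_element_identifiers( ok_identifiers )  # returns without raising

# Duplicate names, a missing name, or an unknown src all raise
# RequestParameterInvalidException.
try:
    validate_input_element_identifiers( [
        { "name": "sample1", "src": "hda" },
        { "name": "sample1", "src": "hda" },
    ] )
except exceptions.RequestParameterInvalidException:
    pass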
https://bitbucket.org/galaxy/galaxy-central/commits/b17747153f1a/
Changeset:   b17747153f1a
User:        jmchilton
Date:        2014-08-10 20:05:31
Summary:     Extract all the workflow run API param handling out...

This should allow for the reuse of all of this in asynchronous workflow running and make unit testing of all the different paths easier (though that is not included in this commit - the API functional test coverage for this code is pretty decent at this point).

Affected #:  2 files
diff -r f1a1c52f8495d1035d1c0a5e21b710974db0908e -r b17747153f1a55292a03392cf2c9f5a6c69c5460 lib/galaxy/webapps/galaxy/api/workflows.py --- a/lib/galaxy/webapps/galaxy/api/workflows.py +++ b/lib/galaxy/webapps/galaxy/api/workflows.py @@ -15,7 +15,7 @@ from galaxy.web.base.controller import SharableMixin from galaxy.workflow.extract import extract_workflow from galaxy.workflow.run import invoke -from galaxy.workflow.run import WorkflowRunConfig +from galaxy.workflow.run_request import build_workflow_run_config
log = logging.getLogger(__name__)
@@ -212,105 +212,12 @@ message = "Invalid workflow_id specified." raise exceptions.RequestParameterInvalidException( message )
- # Pull other parameters out of payload. - param_map = payload.get( 'parameters', {} ) - inputs = payload.get( 'inputs', None ) - inputs_by = payload.get( 'inputs_by', None ) - if inputs is None: - # Default to legacy behavior - read ds_map and reference steps - # by unencoded step id (a raw database id). - inputs = payload.get( 'ds_map', {} ) - inputs_by = inputs_by or 'step_id' - else: - inputs = inputs or {} - # New default is to reference steps by index of workflow step - # which is intrinsic to the workflow and independent of the state - # of Galaxy at the time of workflow import. - inputs_by = inputs_by or 'step_index' - - valid_inputs_by = [ 'step_id', 'step_index', 'name' ] - if inputs_by not in valid_inputs_by: - trans.response.status = 403 - error_message_template = "Invalid inputs_by specified '%s' must be one of %s" - error_message = error_message_template % ( inputs_by, valid_inputs_by ) - raise ValueError( error_message ) - - add_to_history = 'no_add_to_history' not in payload - history_param = payload.get('history', '') - # Get workflow + accessibility check. stored_workflow = self.__get_stored_accessible_workflow( trans, workflow_id ) workflow = stored_workflow.latest_workflow
- # Sanity checks. - if len( workflow.steps ) == 0: - raise exceptions.MessageException( "Workflow cannot be run because it does not have any steps" ) - if workflow.has_cycles: - raise exceptions.MessageException( "Workflow cannot be run because it contains cycles" ) - if workflow.has_errors: - message = "Workflow cannot be run because of validation errors in some steps" - raise exceptions.MessageException( message ) - - # Get target history. - if history_param.startswith('hist_id='): - # Passing an existing history to use. - encoded_history_id = history_param[ 8: ] - history_id = self.__decode_id( trans, encoded_history_id, model_type="history" ) - history = self.history_manager.get( trans, history_id, check_ownership=True ) - else: - # Send workflow outputs to new history. - history = self.app.model.History(name=history_param, user=trans.user) - trans.sa_session.add(history) - trans.sa_session.flush() - - # Set workflow inputs. - for k in inputs: - try: - if inputs[k]['src'] == 'ldda': - ldda = trans.sa_session.query(self.app.model.LibraryDatasetDatasetAssociation).get( - trans.security.decode_id(inputs[k]['id'])) - assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ) - content = ldda.to_history_dataset_association(history, add_to_history=add_to_history) - elif inputs[k]['src'] == 'ld': - ldda = trans.sa_session.query(self.app.model.LibraryDataset).get( - trans.security.decode_id(inputs[k]['id'])).library_dataset_dataset_association - assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ) - content = ldda.to_history_dataset_association(history, add_to_history=add_to_history) - elif inputs[k]['src'] == 'hda': - # Get dataset handle, add to dict and history if necessary - content = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get( - trans.security.decode_id(inputs[k]['id'])) - assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), content.dataset ) - elif inputs[k]['src'] == 'hdca': - content = self.app.dataset_collections_service.get_dataset_collection_instance( - trans, - 'history', - inputs[k]['id'] - ) - else: - message = "Unknown workflow input source '%s' specified." % inputs[k]['src'] - raise exceptions.RequestParameterInvalidException( message ) - if add_to_history and content.history != history: - content = content.copy() - if isinstance( content, self.app.model.HistoryDatasetAssociation ): - history.add_dataset( content ) - else: - history.add_dataset_collection( content ) - inputs[k]['hda'] = content # TODO: rename key to 'content', prescreen input ensure not populated explicitly - except AssertionError: - message = "Invalid workflow input '%s' specified" % inputs[k]['id'] - raise exceptions.ItemAccessibilityException( message ) - - # Run each step, connecting outputs to inputs - replacement_dict = payload.get('replacement_params', {}) - - run_config = WorkflowRunConfig( - target_history=history, - replacement_dict=replacement_dict, - inputs=inputs, - inputs_by=inputs_by, - param_map=param_map, - ) + run_config = build_workflow_run_config( trans, workflow, payload ) + history = run_config.target_history
# invoke may throw MessageExceptions on tool erors, failure # to match up inputs, etc... @@ -525,8 +432,8 @@ return stored_workflow
def __get_stored_workflow( self, trans, workflow_id ): + workflow_id = self.__decode_id( trans, workflow_id ) try: - workflow_id = self.__decode_id( trans, workflow_id ) query = trans.sa_session.query( trans.app.model.StoredWorkflow ) stored_workflow = query.get( workflow_id ) except Exception:
diff -r f1a1c52f8495d1035d1c0a5e21b710974db0908e -r b17747153f1a55292a03392cf2c9f5a6c69c5460 lib/galaxy/workflow/run_request.py --- /dev/null +++ b/lib/galaxy/workflow/run_request.py @@ -0,0 +1,114 @@ +from galaxy import exceptions + +from galaxy.managers import histories +from galaxy.workflow.run import WorkflowRunConfig + + +def build_workflow_run_config( trans, workflow, payload ): + app = trans.app + history_manager = histories.HistoryManager() + + # Pull other parameters out of payload. + param_map = payload.get( 'parameters', {} ) + inputs = payload.get( 'inputs', None ) + inputs_by = payload.get( 'inputs_by', None ) + if inputs is None: + # Default to legacy behavior - read ds_map and reference steps + # by unencoded step id (a raw database id). + inputs = payload.get( 'ds_map', {} ) + inputs_by = inputs_by or 'step_id' + else: + inputs = inputs or {} + # New default is to reference steps by index of workflow step + # which is intrinsic to the workflow and independent of the state + # of Galaxy at the time of workflow import. + inputs_by = inputs_by or 'step_index' + + valid_inputs_by = [ 'step_id', 'step_index', 'name' ] + if inputs_by not in valid_inputs_by: + trans.response.status = 403 + error_message_template = "Invalid inputs_by specified '%s' must be one of %s" + error_message = error_message_template % ( inputs_by, valid_inputs_by ) + raise ValueError( error_message ) + + add_to_history = 'no_add_to_history' not in payload + history_param = payload.get('history', '') + + # Sanity checks. + if len( workflow.steps ) == 0: + raise exceptions.MessageException( "Workflow cannot be run because it does not have any steps" ) + if workflow.has_cycles: + raise exceptions.MessageException( "Workflow cannot be run because it contains cycles" ) + if workflow.has_errors: + message = "Workflow cannot be run because of validation errors in some steps" + raise exceptions.MessageException( message ) + + # Get target history. + if history_param.startswith('hist_id='): + # Passing an existing history to use. + encoded_history_id = history_param[ 8: ] + history_id = __decode_id( trans, encoded_history_id, model_type="history" ) + history = history_manager.get( trans, history_id, check_ownership=True ) + else: + # Send workflow outputs to new history. + history = app.model.History(name=history_param, user=trans.user) + trans.sa_session.add(history) + trans.sa_session.flush() + + # Set workflow inputs. 
+ for k in inputs: + try: + if inputs[k]['src'] == 'ldda': + ldda = trans.sa_session.query(app.model.LibraryDatasetDatasetAssociation).get( + trans.security.decode_id(inputs[k]['id'])) + assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ) + content = ldda.to_history_dataset_association(history, add_to_history=add_to_history) + elif inputs[k]['src'] == 'ld': + ldda = trans.sa_session.query(app.model.LibraryDataset).get( + trans.security.decode_id(inputs[k]['id'])).library_dataset_dataset_association + assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ) + content = ldda.to_history_dataset_association(history, add_to_history=add_to_history) + elif inputs[k]['src'] == 'hda': + # Get dataset handle, add to dict and history if necessary + content = trans.sa_session.query(app.model.HistoryDatasetAssociation).get( + trans.security.decode_id(inputs[k]['id'])) + assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), content.dataset ) + elif inputs[k]['src'] == 'hdca': + content = app.dataset_collections_service.get_dataset_collection_instance( + trans, + 'history', + inputs[k]['id'] + ) + else: + message = "Unknown workflow input source '%s' specified." % inputs[k]['src'] + raise exceptions.RequestParameterInvalidException( message ) + if add_to_history and content.history != history: + content = content.copy() + if isinstance( content, app.model.HistoryDatasetAssociation ): + history.add_dataset( content ) + else: + history.add_dataset_collection( content ) + inputs[k]['hda'] = content # TODO: rename key to 'content', prescreen input ensure not populated explicitly + except AssertionError: + message = "Invalid workflow input '%s' specified" % inputs[k]['id'] + raise exceptions.ItemAccessibilityException( message ) + + # Run each step, connecting outputs to inputs + replacement_dict = payload.get('replacement_params', {}) + + run_config = WorkflowRunConfig( + target_history=history, + replacement_dict=replacement_dict, + inputs=inputs, + inputs_by=inputs_by, + param_map=param_map, + ) + return run_config + + +def __decode_id( trans, workflow_id, model_type="workflow" ): + try: + return trans.security.decode_id( workflow_id ) + except Exception: + message = "Malformed %s id ( %s ) specified, unable to decode" % ( model_type, workflow_id ) + raise exceptions.MalformedId( message )
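For context on what the extracted function has to normalize, these are the two payload shapes the run API continues to accept; both end up going through build_workflow_run_config( trans, workflow, payload ) in the controller. Every id, history name, tool id, and parameter value below is invented for illustration only.

# Legacy form: steps addressed by raw database id via 'ds_map'
# (inputs_by defaults to 'step_id').
legacy_payload = {
    "history": "hist_id=f2db41e1fa331b3e",         # reuse an existing history
    "ds_map": {
        "7": { "src": "hda", "id": "2891970512fa2d5a" },
    },
    "replacement_params": { "output_name": "run 1" },
}

# Newer form: steps addressed by order index via 'inputs'
# (inputs_by defaults to 'step_index').
new_payload = {
    "history": "API workflow run",                 # name for a brand new history
    "inputs": {
        "0": { "src": "hdca", "id": "33b43b4e7093c91f" },
    },
    "parameters": { "some_tool_id": { "some_param": "some_value" } },
}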
https://bitbucket.org/galaxy/galaxy-central/commits/041779f857a1/
Changeset:   041779f857a1
User:        jmchilton
Date:        2014-08-10 20:05:31
Summary:     Hack to get around circular dependency between histories and hdas manager modules.

======================================================================
ERROR: Failure: ImportError (cannot import name histories)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/home/john/workspace/lwr/.venv/local/lib/python2.7/site-packages/nose/loader.py", line 414, in loadTestsFromName
    addr.filename, addr.module)
  File "/home/john/workspace/lwr/.venv/local/lib/python2.7/site-packages/nose/importer.py", line 47, in importFromPath
    return self.importFromDir(dir_path, fqname)
  File "/home/john/workspace/lwr/.venv/local/lib/python2.7/site-packages/nose/importer.py", line 94, in importFromDir
    mod = load_module(part_fqname, fh, filename, desc)
  File "/home/john/workspace/galaxy-central/test/unit/workflows/test_workflow_run_request.py", line 4, in <module>
    from galaxy.workflow import run_request
  File "/home/john/workspace/galaxy-central/lib/galaxy/workflow/run_request.py", line 4, in <module>
    import galaxy.managers.histories
  File "/home/john/workspace/galaxy-central/lib/galaxy/managers/histories.py", line 12, in <module>
    import galaxy.managers.hdas
  File "/home/john/workspace/galaxy-central/lib/galaxy/managers/hdas.py", line 11, in <module>
    from galaxy.managers import histories as history_manager
ImportError: cannot import name histories

Affected #:  1 file
diff -r b17747153f1a55292a03392cf2c9f5a6c69c5460 -r 041779f857a178092e216a628b62d3c04a4c8977 lib/galaxy/managers/histories.py
--- a/lib/galaxy/managers/histories.py
+++ b/lib/galaxy/managers/histories.py
@@ -9,7 +9,6 @@
from galaxy.model import orm

from galaxy.managers import base as manager_base
-import galaxy.managers.hdas
import galaxy.managers.collections_util

import galaxy.web
@@ -140,6 +139,8 @@
    Returns a dictionary containing ``history`` and ``contents``, serialized
    history and an array of serialized history contents respectively.
    """
+    # import here prevents problems related to circular dependecy between histories and hdas managers.
+    import galaxy.managers.hdas
    hda_mgr = galaxy.managers.hdas.HDAManager()
    collection_dictifier = galaxy.managers.collections_util.dictify_dataset_collection_instance
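The hack is the standard deferred-import trick: keep one of two mutually importing modules from needing the other at load time. A minimal two-file sketch of the pattern follows; the module and function names are made up, not the real Galaxy ones.

# hdas_like.py -- made-up module standing in for galaxy.managers.hdas;
# its module-level "import histories_like" is what creates the cycle.

# histories_like.py -- standing in for galaxy.managers.histories.
def get_history_dict( history ):
    # A module-level "import hdas_like" here would complete the cycle and
    # reproduce "ImportError: cannot import name histories". Importing inside
    # the function defers the lookup until both modules have finished loading.
    import hdas_like
    hda_mgr = hdas_like.HDAManager()
    # ... use hda_mgr to serialize history.active_contents ...
    return {}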
https://bitbucket.org/galaxy/galaxy-central/commits/7772ff282e95/
Changeset:   7772ff282e95
User:        jmchilton
Date:        2014-08-10 20:05:31
Summary:     Cleanup repeated dictionary access in build_workflow_run_config, better error handling.
Affected #:  1 file
diff -r 041779f857a178092e216a628b62d3c04a4c8977 -r 7772ff282e95516a4d39ef0a87afeabb8b123d23 lib/galaxy/workflow/run_request.py --- a/lib/galaxy/workflow/run_request.py +++ b/lib/galaxy/workflow/run_request.py @@ -56,31 +56,39 @@ trans.sa_session.flush()
# Set workflow inputs. - for k in inputs: + for input_dict in inputs.itervalues(): + if 'src' not in input_dict: + message = "Not input source type defined for input '%s'." % input_dict + raise exceptions.RequestParameterInvalidException( message ) + if 'id' not in input_dict: + message = "Not input id defined for input '%s'." % input_dict + raise exceptions.RequestParameterInvalidException( message ) + input_source = input_dict['src'] + input_id = input_dict['id'] try: - if inputs[k]['src'] == 'ldda': + if input_source == 'ldda': ldda = trans.sa_session.query(app.model.LibraryDatasetDatasetAssociation).get( - trans.security.decode_id(inputs[k]['id'])) + trans.security.decode_id(input_id)) assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ) content = ldda.to_history_dataset_association(history, add_to_history=add_to_history) - elif inputs[k]['src'] == 'ld': + elif input_source == 'ld': ldda = trans.sa_session.query(app.model.LibraryDataset).get( - trans.security.decode_id(inputs[k]['id'])).library_dataset_dataset_association + trans.security.decode_id(input_id)).library_dataset_dataset_association assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ) content = ldda.to_history_dataset_association(history, add_to_history=add_to_history) - elif inputs[k]['src'] == 'hda': + elif input_source == 'hda': # Get dataset handle, add to dict and history if necessary content = trans.sa_session.query(app.model.HistoryDatasetAssociation).get( - trans.security.decode_id(inputs[k]['id'])) + trans.security.decode_id(input_id)) assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), content.dataset ) - elif inputs[k]['src'] == 'hdca': + elif input_source == 'hdca': content = app.dataset_collections_service.get_dataset_collection_instance( trans, 'history', - inputs[k]['id'] + input_id ) else: - message = "Unknown workflow input source '%s' specified." % inputs[k]['src'] + message = "Unknown workflow input source '%s' specified." % input_source raise exceptions.RequestParameterInvalidException( message ) if add_to_history and content.history != history: content = content.copy() @@ -88,9 +96,9 @@ history.add_dataset( content ) else: history.add_dataset_collection( content ) - inputs[k]['hda'] = content # TODO: rename key to 'content', prescreen input ensure not populated explicitly + input_dict['hda'] = content # TODO: rename key to 'content', prescreen input ensure not populated explicitly except AssertionError: - message = "Invalid workflow input '%s' specified" % inputs[k]['id'] + message = "Invalid workflow input '%s' specified" % input_id raise exceptions.ItemAccessibilityException( message )
# Run each step, connecting outputs to inputs
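With the checks added here, malformed entries fail fast with RequestParameterInvalidException instead of surfacing later as a KeyError inside the resolution loop. For instance, both entries in this hypothetical inputs dict (ids invented) would now be rejected before any dataset is looked up:

bad_inputs = {
    "0": { "id": "2891970512fa2d5a" },   # rejected: no 'src' key
    "1": { "src": "hda" },               # rejected: no 'id' key
}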
https://bitbucket.org/galaxy/galaxy-central/commits/84caa9526b5f/
Changeset:   84caa9526b5f
User:        jmchilton
Date:        2014-08-10 20:05:31
Summary:     Change 'hda' input name in workflow inputs dict to 'content'...

... 'content' better reflects that this can be either an HDA or an HDCA.

Affected #:  2 files
diff -r 7772ff282e95516a4d39ef0a87afeabb8b123d23 -r 84caa9526b5fe407718082dcdd7c8b65cb210175 lib/galaxy/workflow/run.py --- a/lib/galaxy/workflow/run.py +++ b/lib/galaxy/workflow/run.py @@ -224,7 +224,7 @@ else: raise Exception("Unknown history content encountered") if self.inputs: - outputs[ step.id ][ 'output' ] = self.inputs_by_step_id[ step.id ][ 'hda' ] + outputs[ step.id ][ 'output' ] = self.inputs_by_step_id[ step.id ][ 'content' ]
return job
diff -r 7772ff282e95516a4d39ef0a87afeabb8b123d23 -r 84caa9526b5fe407718082dcdd7c8b65cb210175 lib/galaxy/workflow/run_request.py --- a/lib/galaxy/workflow/run_request.py +++ b/lib/galaxy/workflow/run_request.py @@ -63,6 +63,9 @@ if 'id' not in input_dict: message = "Not input id defined for input '%s'." % input_dict raise exceptions.RequestParameterInvalidException( message ) + if 'content' in input_dict: + message = "Input cannot specify explicit 'content' attribute %s'." % input_dict + raise exceptions.RequestParameterInvalidException( message ) input_source = input_dict['src'] input_id = input_dict['id'] try: @@ -96,7 +99,7 @@ history.add_dataset( content ) else: history.add_dataset_collection( content ) - input_dict['hda'] = content # TODO: rename key to 'content', prescreen input ensure not populated explicitly + input_dict['content'] = content except AssertionError: message = "Invalid workflow input '%s' specified" % input_id raise exceptions.ItemAccessibilityException( message )
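Concretely, the per-input dict built up by build_workflow_run_config changes shape: the resolved dataset or collection that used to be stashed under 'hda' now lives under 'content', and supplying 'content' yourself is rejected up front. A before/after sketch with an invented id and a placeholder value:

resolved = "<resolved HDA or HDCA>"   # placeholder for the model object

input_before = { "src": "hdca", "id": "f2db41e1fa331b3e", "hda": resolved }
input_after  = { "src": "hdca", "id": "f2db41e1fa331b3e", "content": resolved }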
https://bitbucket.org/galaxy/galaxy-central/commits/483ceff2d2b6/
Changeset:   483ceff2d2b6
User:        jmchilton
Date:        2014-08-10 20:05:31
Summary:     Normalize workflow step parameters earlier...

... this will allow linking to actual steps (referential integrity) if/when we store workflow requests in the database.

Also unit tests.

Affected #:  4 files
diff -r 84caa9526b5fe407718082dcdd7c8b65cb210175 -r 483ceff2d2b6f41432cd16f4dbf539355c3f4dde lib/galaxy/workflow/run.py --- a/lib/galaxy/workflow/run.py +++ b/lib/galaxy/workflow/run.py @@ -41,9 +41,9 @@ that step ('name'). :type inputs_by: str
- :param param_map: Override tool and/or step parameters (see documentation on - _update_step_parameters below). - :type param_map: + :param param_map: Override step parameters - should be dict with step id keys and + tool param name-value dicts as values. + :type param_map: dict """
def __init__( self, target_history, replacement_dict, copy_inputs_to_history=False, inputs={}, inputs_by='step_id', param_map={} ): @@ -304,36 +304,9 @@ self.inputs_by_step_id[ step.id ] = self.inputs[ key ]
-def _update_step_parameters(step, param_map): - """ - Update ``step`` parameters based on the user-provided ``param_map`` dict. - - ``param_map`` should be structured as follows:: - - PARAM_MAP = {STEP_ID: PARAM_DICT, ...} - PARAM_DICT = {NAME: VALUE, ...} - - For backwards compatibility, the following (deprecated) format is - also supported for ``param_map``:: - - PARAM_MAP = {TOOL_ID: PARAM_DICT, ...} - - in which case PARAM_DICT affects all steps with the given tool id. - If both by-tool-id and by-step-id specifications are used, the - latter takes precedence. - - Finally (again, for backwards compatibility), PARAM_DICT can also - be specified as:: - - PARAM_DICT = {'param': NAME, 'value': VALUE} - - Note that this format allows only one parameter to be set per step. - """ - param_dict = param_map.get(step.tool_id, {}).copy() - param_dict.update(param_map.get(str(step.id), {})) +def _update_step_parameters(step, normalized_param_map): + param_dict = normalized_param_map.get(step.id, {}) if param_dict: - if 'param' in param_dict and 'value' in param_dict: - param_dict[param_dict['param']] = param_dict['value'] step.state.inputs.update(param_dict)
diff -r 84caa9526b5fe407718082dcdd7c8b65cb210175 -r 483ceff2d2b6f41432cd16f4dbf539355c3f4dde lib/galaxy/workflow/run_request.py --- a/lib/galaxy/workflow/run_request.py +++ b/lib/galaxy/workflow/run_request.py @@ -4,12 +4,62 @@ from galaxy.workflow.run import WorkflowRunConfig
+def normalize_step_parameters(steps, param_map): + """ Take a complex param_map that can reference parameters by + step_id in the new flexible way or in the old one-parameter + per tep fashion or by tool id and normalize the parameters so + everything is referenced by a numeric step id. + """ + normalized_param_map = {} + for step in steps: + param_dict = _step_parameters(step, param_map) + if param_dict: + normalized_param_map[step.id] = param_dict + return normalized_param_map + + +def _step_parameters(step, param_map): + """ + Update ``step`` parameters based on the user-provided ``param_map`` dict. + + ``param_map`` should be structured as follows:: + + PARAM_MAP = {STEP_ID: PARAM_DICT, ...} + PARAM_DICT = {NAME: VALUE, ...} + + For backwards compatibility, the following (deprecated) format is + also supported for ``param_map``:: + + PARAM_MAP = {TOOL_ID: PARAM_DICT, ...} + + in which case PARAM_DICT affects all steps with the given tool id. + If both by-tool-id and by-step-id specifications are used, the + latter takes precedence. + + Finally (again, for backwards compatibility), PARAM_DICT can also + be specified as:: + + PARAM_DICT = {'param': NAME, 'value': VALUE} + + Note that this format allows only one parameter to be set per step. + """ + param_dict = param_map.get(step.tool_id, {}).copy() + param_dict.update(param_map.get(str(step.id), {})) + if param_dict: + if 'param' in param_dict and 'value' in param_dict: + param_dict[param_dict['param']] = param_dict['value'] + del param_dict[ 'param' ] + del param_dict[ 'value' ] + return param_dict + + def build_workflow_run_config( trans, workflow, payload ): app = trans.app history_manager = histories.HistoryManager()
# Pull other parameters out of payload. param_map = payload.get( 'parameters', {} ) + param_map = normalize_step_parameters( workflow.steps, param_map ) inputs = payload.get( 'inputs', None ) inputs_by = payload.get( 'inputs_by', None ) if inputs is None:
diff -r 84caa9526b5fe407718082dcdd7c8b65cb210175 -r 483ceff2d2b6f41432cd16f4dbf539355c3f4dde test/unit/workflows/test_run_parameters.py --- /dev/null +++ b/test/unit/workflows/test_run_parameters.py @@ -0,0 +1,84 @@ +from .workflow_support import MockTrans + +from galaxy import model +from galaxy.workflow.run_request import normalize_step_parameters + + +def test_normalize_parameters_empty(): + normalized_params = __normalize_parameters_against_fixture( {} ) + assert normalized_params == {} + + +def test_normalize_parameters_by_tool(): + normalized_params = __normalize_parameters_against_fixture( { + 'cat1': { 'foo': 'bar' } + } ) + # Tool specified parameters are expanded out. + assert normalized_params[ 3 ] == { 'foo': 'bar' } + assert normalized_params[ 4 ] == { 'foo': 'bar' } + assert len( normalized_params.keys() ) == 2 + + +def test_step_parameters(): + normalized_params = __normalize_parameters_against_fixture( { + '1': { 'foo': 'bar' } + } ) + assert normalized_params[ 1 ] == { 'foo': 'bar' } + assert len( normalized_params.keys() ) == 1 + + +def test_step_parameters_legacy(): + normalized_params = __normalize_parameters_against_fixture( { + '1': { 'param': 'foo', 'value': 'bar' } + } ) + assert normalized_params[ 1 ] == { 'foo': 'bar' }, normalized_params + assert len( normalized_params.keys() ) == 1 + + +def __normalize_parameters_against_fixture( params ): + trans = MockTrans() + workflow = __workflow_fixure( trans ) + normalized_params = normalize_step_parameters( workflow.steps, params ) + return normalized_params + + +def __workflow_fixure( trans ): + user = model.User( + email="testworkflow_params@bx.psu.edu", + password="pass" + ) + stored_workflow = model.StoredWorkflow() + stored_workflow.user = user + workflow = model.Workflow() + workflow.stored_workflow = stored_workflow + + def add_step( **kwds ): + workflow_step = model.WorkflowStep() + for key, value in kwds.iteritems(): + setattr(workflow_step, key, value) + workflow.steps.append( workflow_step ) + + trans.app.model.context.add( + workflow, + ) + + add_step( + type="input", + order_index=0, + ) + add_step( + type="input", + order_index=1, + ) + add_step( + type="tool", + tool_id="cat1", + order_index=2, + ) + add_step( + type="tool", + tool_id="cat1", + order_index=4, + ) + trans.app.model.context.flush() + return workflow
diff -r 84caa9526b5fe407718082dcdd7c8b65cb210175 -r 483ceff2d2b6f41432cd16f4dbf539355c3f4dde test/unit/workflows/workflow_support.py --- /dev/null +++ b/test/unit/workflows/workflow_support.py @@ -0,0 +1,19 @@ +from galaxy.util import bunch +from galaxy.model import mapping + + +class MockTrans( object ): + + def __init__( self ): + self.app = TestApp() + + +class TestApp( object ): + + def __init__( self ): + self.config = bunch.Bunch( ) + self.model = mapping.init( + "/tmp", + "sqlite:///:memory:", + create_tables=True + )
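To make the new normalization concrete, here is a nearly self-contained sketch. It assumes a Galaxy checkout on sys.path; the step ids and tool id are invented, and FakeStep carries only the two attributes normalize_step_parameters reads.

from collections import namedtuple

from galaxy.workflow.run_request import normalize_step_parameters

# Minimal stand-in for WorkflowStep: the normalizer only reads id and tool_id.
FakeStep = namedtuple( "FakeStep", [ "id", "tool_id" ] )
steps = [ FakeStep( id=4, tool_id="cat1" ), FakeStep( id=5, tool_id="cat1" ) ]

# By tool id: applies to every step running that tool.
print( normalize_step_parameters( steps, { "cat1": { "foo": "bar" } } ) )
# {4: {'foo': 'bar'}, 5: {'foo': 'bar'}}

# By step id (string key): applies to that one step only.
print( normalize_step_parameters( steps, { "5": { "foo": "bar" } } ) )
# {5: {'foo': 'bar'}}

# Deprecated single-parameter form, still normalized to the same shape.
print( normalize_step_parameters( steps, { "5": { "param": "foo", "value": "bar" } } ) )
# {5: {'foo': 'bar'}}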
https://bitbucket.org/galaxy/galaxy-central/commits/9664ed5cbb9f/
Changeset:   9664ed5cbb9f
User:        jmchilton
Date:        2014-08-10 20:05:31
Summary:     Normalize workflow input dataset dict earlier...

... this will allow linking to actual steps (referential integrity) if/when we store workflow requests in the database, and lets malformed requests fail faster.

Also more unit tests.

Affected #:  3 files
diff -r 483ceff2d2b6f41432cd16f4dbf539355c3f4dde -r 9664ed5cbb9fa654da1972772b5a3065450d2a15 lib/galaxy/workflow/run.py --- a/lib/galaxy/workflow/run.py +++ b/lib/galaxy/workflow/run.py @@ -46,12 +46,11 @@ :type param_map: dict """
- def __init__( self, target_history, replacement_dict, copy_inputs_to_history=False, inputs={}, inputs_by='step_id', param_map={} ): + def __init__( self, target_history, replacement_dict, copy_inputs_to_history=False, inputs={}, param_map={} ): self.target_history = target_history self.replacement_dict = replacement_dict self.copy_inputs_to_history = copy_inputs_to_history self.inputs = inputs - self.inputs_by = inputs_by self.param_map = param_map
@@ -73,9 +72,7 @@ self.target_history = workflow_run_config.target_history self.replacement_dict = workflow_run_config.replacement_dict self.copy_inputs_to_history = workflow_run_config.copy_inputs_to_history - self.inputs = workflow_run_config.inputs - self.inputs_by = workflow_run_config.inputs_by - self.inputs_by_step_id = {} + self.inputs_by_step_id = workflow_run_config.inputs self.param_map = workflow_run_config.param_map
self.outputs = odict() @@ -223,8 +220,8 @@ outputs[ step.id ][ 'input_ds_copy' ] = new_hdca else: raise Exception("Unknown history content encountered") - if self.inputs: - outputs[ step.id ][ 'output' ] = self.inputs_by_step_id[ step.id ][ 'content' ] + if self.inputs_by_step_id: + outputs[ step.id ][ 'output' ] = self.inputs_by_step_id[ step.id ]
return job
@@ -289,20 +286,6 @@ step.module = modules.module_factory.from_workflow_step( self.trans, step ) step.state = step.module.get_runtime_state()
- # This is an input step. Make sure we have an available input. - if step.type in [ 'data_input', 'data_collection_input' ]: - if self.inputs_by == "step_id": - key = str( step.id ) - elif self.inputs_by == "name": - key = step.tool_inputs.get( 'name', None ) - else: - key = str( step.order_index ) - if key not in self.inputs: - message = "Workflow cannot be run because an expected input step '%s' has no input dataset." % step.id - raise exceptions.MessageException( message ) - else: - self.inputs_by_step_id[ step.id ] = self.inputs[ key ] -
def _update_step_parameters(step, normalized_param_map): param_dict = normalized_param_map.get(step.id, {})
diff -r 483ceff2d2b6f41432cd16f4dbf539355c3f4dde -r 9664ed5cbb9fa654da1972772b5a3065450d2a15 lib/galaxy/workflow/run_request.py --- a/lib/galaxy/workflow/run_request.py +++ b/lib/galaxy/workflow/run_request.py @@ -3,6 +3,32 @@ from galaxy.managers import histories from galaxy.workflow.run import WorkflowRunConfig
+INPUT_STEP_TYPES = [ 'data_input', 'data_collection_input' ] + + +def normalize_inputs(steps, inputs, inputs_by): + normalized_inputs = {} + for step in steps: + if step.type not in INPUT_STEP_TYPES: + continue + + if inputs_by == "step_id": + inputs_key = str( step.id ) + elif inputs_by == "step_index": + inputs_key = str( step.order_index ) + elif inputs_by == "name": + inputs_key = step.tool_inputs.get( 'name', None ) + else: + message = "Workflow cannot be run because unexpected inputs_by value specified." + raise exceptions.MessageException( message ) + if inputs_key not in inputs: + message = "Workflow cannot be run because an expected input step '%s' has no input dataset." % step.id + raise exceptions.MessageException( message ) + + normalized_inputs[ step.id ] = inputs[ inputs_key ][ 'content' ] + + return normalized_inputs +
def normalize_step_parameters(steps, param_map): """ Take a complex param_map that can reference parameters by @@ -74,13 +100,6 @@ # of Galaxy at the time of workflow import. inputs_by = inputs_by or 'step_index'
- valid_inputs_by = [ 'step_id', 'step_index', 'name' ] - if inputs_by not in valid_inputs_by: - trans.response.status = 403 - error_message_template = "Invalid inputs_by specified '%s' must be one of %s" - error_message = error_message_template % ( inputs_by, valid_inputs_by ) - raise ValueError( error_message ) - add_to_history = 'no_add_to_history' not in payload history_param = payload.get('history', '')
@@ -154,14 +173,15 @@ message = "Invalid workflow input '%s' specified" % input_id raise exceptions.ItemAccessibilityException( message )
+ normalized_inputs = normalize_inputs( workflow.steps, inputs, inputs_by ) + # Run each step, connecting outputs to inputs replacement_dict = payload.get('replacement_params', {})
run_config = WorkflowRunConfig( target_history=history, replacement_dict=replacement_dict, - inputs=inputs, - inputs_by=inputs_by, + inputs=normalized_inputs, param_map=param_map, ) return run_config
diff -r 483ceff2d2b6f41432cd16f4dbf539355c3f4dde -r 9664ed5cbb9fa654da1972772b5a3065450d2a15 test/unit/workflows/test_run_parameters.py --- a/test/unit/workflows/test_run_parameters.py +++ b/test/unit/workflows/test_run_parameters.py @@ -2,6 +2,9 @@
from galaxy import model from galaxy.workflow.run_request import normalize_step_parameters +from galaxy.workflow.run_request import normalize_inputs + +STEP_ID_OFFSET = 4 # Offset a little so ids and order index are different.
def test_normalize_parameters_empty(): @@ -14,34 +17,87 @@ 'cat1': { 'foo': 'bar' } } ) # Tool specified parameters are expanded out. - assert normalized_params[ 3 ] == { 'foo': 'bar' } - assert normalized_params[ 4 ] == { 'foo': 'bar' } + assert normalized_params[ STEP_ID_OFFSET + 3 ] == { 'foo': 'bar' } + assert normalized_params[ STEP_ID_OFFSET + 4 ] == { 'foo': 'bar' } assert len( normalized_params.keys() ) == 2
def test_step_parameters(): normalized_params = __normalize_parameters_against_fixture( { - '1': { 'foo': 'bar' } + str( STEP_ID_OFFSET + 1 ): { 'foo': 'bar' } } ) - assert normalized_params[ 1 ] == { 'foo': 'bar' } + assert normalized_params[ STEP_ID_OFFSET + 1 ] == { 'foo': 'bar' } assert len( normalized_params.keys() ) == 1
def test_step_parameters_legacy(): normalized_params = __normalize_parameters_against_fixture( { - '1': { 'param': 'foo', 'value': 'bar' } + str( STEP_ID_OFFSET + 1 ): { 'param': 'foo', 'value': 'bar' } } ) - assert normalized_params[ 1 ] == { 'foo': 'bar' }, normalized_params + assert normalized_params[ STEP_ID_OFFSET + 1 ] == { 'foo': 'bar' }, normalized_params assert len( normalized_params.keys() ) == 1
+def test_inputs_by_step_id(): + input1 = __new_input() + input2 = __new_input() + normalized_inputs = __normalize_inputs_against_fixture( { + str( STEP_ID_OFFSET + 1 ): input1, + str( STEP_ID_OFFSET + 2 ): input2 + }, inputs_by="step_id" ) + assert normalized_inputs[ STEP_ID_OFFSET + 1 ] == input1[ 'content' ] + assert normalized_inputs[ STEP_ID_OFFSET + 2 ] == input2[ 'content' ] + + +def test_inputs_by_step_index(): + input1 = __new_input() + input2 = __new_input() + normalized_inputs = __normalize_inputs_against_fixture( { + str( 0 ): input1, + str( 1 ): input2 + }, inputs_by="step_index" ) + assert normalized_inputs[ STEP_ID_OFFSET + 1 ] == input1[ 'content' ] + assert normalized_inputs[ STEP_ID_OFFSET + 2 ] == input2[ 'content' ] + + +def test_inputs_by_name(): + input1 = __new_input() + input2 = __new_input() + normalized_inputs = __normalize_inputs_against_fixture( { + "input1": input1, + "input2": input2 + }, inputs_by="name" ) + print normalized_inputs + assert normalized_inputs[ STEP_ID_OFFSET + 1 ] == input1[ 'content' ] + assert normalized_inputs[ STEP_ID_OFFSET + 2 ] == input2[ 'content' ] + + def __normalize_parameters_against_fixture( params ): trans = MockTrans() + # Create a throw away workflow so step ids and order_index + # are different for actual fixture. + __workflow_fixure( trans ) + workflow = __workflow_fixure( trans ) normalized_params = normalize_step_parameters( workflow.steps, params ) return normalized_params
+def __normalize_inputs_against_fixture( inputs, inputs_by ): + trans = MockTrans() + # Create a throw away workflow so step ids and order_index + # are different for actual fixture. + __workflow_fixure( trans ) + + workflow = __workflow_fixure( trans ) + normalized_inputs = normalize_inputs( workflow.steps, inputs, inputs_by ) + return normalized_inputs + + +def __new_input( ): + return dict( content=model.HistoryDatasetAssociation() ) + + def __workflow_fixure( trans ): user = model.User( email="testworkflow_params@bx.psu.edu", @@ -63,12 +119,14 @@ )
add_step( - type="input", + type="data_input", order_index=0, + tool_inputs={"name": "input1"} ) add_step( - type="input", + type="data_input", order_index=1, + tool_inputs={"name": "input2"} ) add_step( type="tool", @@ -81,4 +139,8 @@ order_index=4, ) trans.app.model.context.flush() - return workflow + # Expunge and reload to ensure step state is as expected from database. + workflow_id = workflow.id + trans.app.model.context.expunge_all() + + return trans.app.model.context.query( model.Workflow ).get( workflow_id )
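As a companion to the parameter normalization, here is a hedged sketch of how the same pair of input steps is addressed under each inputs_by mode; the step ids, order indices, names, and placeholder content values are invented. In every case normalize_inputs returns a dict keyed by the real step ids.

# The 'content' values are already resolved HDAs/HDCAs by the time
# normalize_inputs runs; plain strings stand in for them here.
hda1, hda2 = "<resolved HDA 1>", "<resolved HDA 2>"

# Two input steps with database ids 5 and 6, order indices 0 and 1,
# named "input1" and "input2" in the workflow.
by_step_id    = { "5": { "content": hda1 }, "6": { "content": hda2 } }
by_step_index = { "0": { "content": hda1 }, "1": { "content": hda2 } }
by_name       = { "input1": { "content": hda1 }, "input2": { "content": hda2 } }

# normalize_inputs( workflow.steps, by_step_id,    "step_id" )
# normalize_inputs( workflow.steps, by_step_index, "step_index" )
# normalize_inputs( workflow.steps, by_name,       "name" )
# each return { 5: hda1, 6: hda2 }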
https://bitbucket.org/galaxy/galaxy-central/commits/bdc4017c2e7e/
Changeset:   bdc4017c2e7e
User:        jmchilton
Date:        2014-08-10 20:05:31
Summary:     Move WorkflowRunConfig so run_request doesn't need to import run.

This is a more logical organization of these modules.

Affected #:  2 files
diff -r 9664ed5cbb9fa654da1972772b5a3065450d2a15 -r bdc4017c2e7e9ecb5dfa3d36798f535402ec80aa lib/galaxy/workflow/run.py --- a/lib/galaxy/workflow/run.py +++ b/lib/galaxy/workflow/run.py @@ -13,47 +13,12 @@ from galaxy.tools.execute import execute from galaxy.util.odict import odict from galaxy.workflow import modules +from galaxy.workflow.run_request import WorkflowRunConfig
import logging log = logging.getLogger( __name__ )
-class WorkflowRunConfig( object ): - """ Wrapper around all the ways a workflow execution can be parameterized. - - :param target_history: History to execute workflow in. - :type target_history: galaxy.model.History. - - :param replacement_dict: Workflow level parameters used for renaming post - job actions. - :type replacement_dict: dict - - :param copy_inputs_to_history: Should input data parameters be copied to - target_history. (Defaults to False) - :type copy_inputs_to_history: bool - - :param inputs: Map from step ids to dict's containing HDA for these steps. - :type inputs: dict - - :param inputs_by: How inputs maps to inputs (datasets/collections) to workflows - steps - by unencoded database id ('step_id'), index in workflow - 'step_index' (independent of database), or by input name for - that step ('name'). - :type inputs_by: str - - :param param_map: Override step parameters - should be dict with step id keys and - tool param name-value dicts as values. - :type param_map: dict - """ - - def __init__( self, target_history, replacement_dict, copy_inputs_to_history=False, inputs={}, param_map={} ): - self.target_history = target_history - self.replacement_dict = replacement_dict - self.copy_inputs_to_history = copy_inputs_to_history - self.inputs = inputs - self.param_map = param_map - - def invoke( trans, workflow, workflow_run_config ): """ Run the supplied workflow in the supplied target_history. """
diff -r 9664ed5cbb9fa654da1972772b5a3065450d2a15 -r bdc4017c2e7e9ecb5dfa3d36798f535402ec80aa lib/galaxy/workflow/run_request.py --- a/lib/galaxy/workflow/run_request.py +++ b/lib/galaxy/workflow/run_request.py @@ -1,11 +1,46 @@ from galaxy import exceptions
from galaxy.managers import histories -from galaxy.workflow.run import WorkflowRunConfig
INPUT_STEP_TYPES = [ 'data_input', 'data_collection_input' ]
+class WorkflowRunConfig( object ): + """ Wrapper around all the ways a workflow execution can be parameterized. + + :param target_history: History to execute workflow in. + :type target_history: galaxy.model.History. + + :param replacement_dict: Workflow level parameters used for renaming post + job actions. + :type replacement_dict: dict + + :param copy_inputs_to_history: Should input data parameters be copied to + target_history. (Defaults to False) + :type copy_inputs_to_history: bool + + :param inputs: Map from step ids to dict's containing HDA for these steps. + :type inputs: dict + + :param inputs_by: How inputs maps to inputs (datasets/collections) to workflows + steps - by unencoded database id ('step_id'), index in workflow + 'step_index' (independent of database), or by input name for + that step ('name'). + :type inputs_by: str + + :param param_map: Override step parameters - should be dict with step id keys and + tool param name-value dicts as values. + :type param_map: dict + """ + + def __init__( self, target_history, replacement_dict, copy_inputs_to_history=False, inputs={}, param_map={} ): + self.target_history = target_history + self.replacement_dict = replacement_dict + self.copy_inputs_to_history = copy_inputs_to_history + self.inputs = inputs + self.param_map = param_map + + def normalize_inputs(steps, inputs, inputs_by): normalized_inputs = {} for step in steps:
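The net effect of this last move is a one-way import relationship: run_request owns WorkflowRunConfig and the request parsing, and run consumes it. A hedged sketch of how a caller is expected to wire the two together; trans, workflow, and payload come from the API layer and are not constructed here.

from galaxy.workflow.run_request import build_workflow_run_config
from galaxy.workflow.run import invoke

def run_workflow( trans, workflow, payload ):
    # run_request parses and normalizes the request...
    run_config = build_workflow_run_config( trans, workflow, payload )
    # ...and run executes it; run imports run_request, never the reverse.
    return invoke( trans, workflow, run_config )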
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.
galaxy-commits@lists.galaxyproject.org