5 new commits in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/commits/64a258380859/ Changeset: 64a258380859 User: jmchilton Date: 2014-07-29 01:18:39 Summary: Easier to use input mapping when running workflows. The old 'ds_map' parameter remains in place for backward compatibility and with the same behavior. A new parameter 'inputs' can now be specified instead, however, and its keys correspond to the steps' 'order_index' instead of the raw unencoded database ids 'ds_map' uses. This variant has the nice property that this map can be constructed without prior knowledge of how Galaxy will assign ids during import - hence it is easier to use and more portable. Additionally, 'inputs' is more flexible and can revert to the old behavior by specifying a new parameter 'inputs_by' as 'step_id'. 'inputs_by' can also be 'name' - this is even more human-friendly because it will assign the ids based on data input names (this is what I intend to use mostly, but I have not made it the default because not all workflows will have data inputs with distinct names). Finally, the new name 'inputs' will be more appropriate once data collection inputs can be explicitly mapped via the API. Affected #: 3 files diff -r 1fb11ee1db887886672a4b74982b9971420f67a4 -r 64a25838085978c250a0486b516bb2c8e7c3b850 lib/galaxy/webapps/galaxy/api/workflows.py --- a/lib/galaxy/webapps/galaxy/api/workflows.py +++ b/lib/galaxy/webapps/galaxy/api/workflows.py @@ -194,7 +194,25 @@ # Pull other parameters out of payload. param_map = payload.get( 'parameters', {} ) - ds_map = payload.get( 'ds_map', {} ) + inputs = payload.get( 'inputs', None ) + inputs_by = payload.get( 'inputs_by', None ) + if inputs is None: + # Default to legacy behavior - read ds_map and reference steps + # by unencoded step id (a raw database id). + inputs = payload.get( 'ds_map', {} ) + inputs_by = inputs_by or 'step_id' + else: + inputs = inputs or {} + # New default is to reference steps by index of workflow step + # which is intrinstic to the workflow and independent of the state + # of Galaxy at the time of workflow import. + inputs_by = inputs_by or 'step_index' + + valid_inputs_by = [ 'step_id', 'step_index', 'name' ] + if inputs_by not in valid_inputs_by: + trans.response.status = 403 + return "Invalid inputs_by specified '%s' must be one of %s" + add_to_history = 'no_add_to_history' not in payload history_param = payload.get('history', '') @@ -234,33 +252,33 @@ trans.sa_session.flush() # Set workflow inputs.
- for k in ds_map: + for k in inputs: try: - if ds_map[k]['src'] == 'ldda': + if inputs[k]['src'] == 'ldda': ldda = trans.sa_session.query(self.app.model.LibraryDatasetDatasetAssociation).get( - trans.security.decode_id(ds_map[k]['id'])) + trans.security.decode_id(inputs[k]['id'])) assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ) hda = ldda.to_history_dataset_association(history, add_to_history=add_to_history) - elif ds_map[k]['src'] == 'ld': + elif inputs[k]['src'] == 'ld': ldda = trans.sa_session.query(self.app.model.LibraryDataset).get( - trans.security.decode_id(ds_map[k]['id'])).library_dataset_dataset_association + trans.security.decode_id(inputs[k]['id'])).library_dataset_dataset_association assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ) hda = ldda.to_history_dataset_association(history, add_to_history=add_to_history) - elif ds_map[k]['src'] == 'hda': + elif inputs[k]['src'] == 'hda': # Get dataset handle, add to dict and history if necessary hda = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get( - trans.security.decode_id(ds_map[k]['id'])) + trans.security.decode_id(inputs[k]['id'])) assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), hda.dataset ) else: trans.response.status = 400 - return "Unknown dataset source '%s' specified." % ds_map[k]['src'] + return "Unknown dataset source '%s' specified." % inputs[k]['src'] if add_to_history and hda.history != history: hda = hda.copy() history.add_dataset(hda) - ds_map[k]['hda'] = hda + inputs[k]['hda'] = hda except AssertionError: trans.response.status = 400 - return "Invalid Dataset '%s' Specified" % ds_map[k]['id'] + return "Invalid Dataset '%s' Specified" % inputs[k]['id'] # Run each step, connecting outputs to inputs replacement_dict = payload.get('replacement_params', {}) @@ -268,7 +286,8 @@ run_config = WorkflowRunConfig( target_history=history, replacement_dict=replacement_dict, - ds_map=ds_map, + inputs=inputs, + inputs_by=inputs_by, param_map=param_map, ) diff -r 1fb11ee1db887886672a4b74982b9971420f67a4 -r 64a25838085978c250a0486b516bb2c8e7c3b850 lib/galaxy/workflow/run.py --- a/lib/galaxy/workflow/run.py +++ b/lib/galaxy/workflow/run.py @@ -32,19 +32,26 @@ target_history. (Defaults to False) :type copy_inputs_to_history: bool - :param ds_map: Map from step ids to dict's containing HDA for these steps. - :type ds_map: dict + :param inputs: Map from step ids to dict's containing HDA for these steps. + :type inputs: dict + + :param inputs_by: How inputs maps to inputs (datasets/collections) to workflows + steps - by unencoded database id ('step_id'), index in workflow + 'step_index' (independent of database), or by input name for + that step ('name'). + :type inputs_by: str :param param_map: Override tool and/or step parameters (see documentation on _update_step_parameters below). 
:type param_map: """ - def __init__( self, target_history, replacement_dict, copy_inputs_to_history=False, ds_map={}, param_map={} ): + def __init__( self, target_history, replacement_dict, copy_inputs_to_history=False, inputs={}, inputs_by='step_id', param_map={} ): self.target_history = target_history self.replacement_dict = replacement_dict self.copy_inputs_to_history = copy_inputs_to_history - self.ds_map = ds_map + self.inputs = inputs + self.inputs_by = inputs_by self.param_map = param_map @@ -66,7 +73,9 @@ self.target_history = workflow_run_config.target_history self.replacement_dict = workflow_run_config.replacement_dict self.copy_inputs_to_history = workflow_run_config.copy_inputs_to_history - self.ds_map = workflow_run_config.ds_map + self.inputs = workflow_run_config.inputs + self.inputs_by = workflow_run_config.inputs_by + self.inputs_by_step_id = {} self.param_map = workflow_run_config.param_map self.outputs = odict() @@ -200,7 +209,7 @@ outputs[ step.id ] = out_data # Web controller may set copy_inputs_to_history, API controller always sets - # ds_map. + # inputs. if self.copy_inputs_to_history: for input_dataset_hda in out_data.values(): content_type = input_dataset_hda.history_content_type @@ -214,8 +223,8 @@ outputs[ step.id ][ 'input_ds_copy' ] = new_hdca else: raise Exception("Unknown history content encountered") - if self.ds_map: - outputs[ step.id ][ 'output' ] = self.ds_map[ str( step.id ) ][ 'hda' ] + if self.inputs: + outputs[ step.id ][ 'output' ] = self.inputs_by_step_id[ step.id ][ 'hda' ] return job @@ -277,14 +286,23 @@ message = "Workflow cannot be run because of step upgrade messages: %s" % step.upgrade_messages raise exceptions.MessageException( message ) else: - # This is an input step. Make sure we have an available input. - if step.type == 'data_input' and str( step.id ) not in self.ds_map: - message = "Workflow cannot be run because an expected input step '%s' has no input dataset." % step.id - raise exceptions.MessageException( message ) - step.module = modules.module_factory.from_workflow_step( self.trans, step ) step.state = step.module.get_runtime_state() + # This is an input step. Make sure we have an available input. + if step.type == 'data_input': + if self.inputs_by == "step_id": + key = str( step.id ) + elif self.inputs_by == "name": + key = step.tool_inputs.get( 'name', None ) + else: + key = str( step.order_index ) + if key not in self.inputs: + message = "Workflow cannot be run because an expected input step '%s' has no input dataset." 
% step.id + raise exceptions.MessageException( message ) + else: + self.inputs_by_step_id[ step.id ] = self.inputs[ key ] + def _update_step_parameters(step, param_map): """ diff -r 1fb11ee1db887886672a4b74982b9971420f67a4 -r 64a25838085978c250a0486b516bb2c8e7c3b850 test/api/test_workflows.py --- a/test/api/test_workflows.py +++ b/test/api/test_workflows.py @@ -65,9 +65,20 @@ assert first_input[ "name" ] == "WorkflowInput1" @skip_without_tool( "cat1" ) + def test_run_workflow_by_index( self ): + self.__run_cat_workflow( inputs_by='step_index' ) + + @skip_without_tool( "cat1" ) + def test_run_workflow_by_name( self ): + self.__run_cat_workflow( inputs_by='name' ) + + @skip_without_tool( "cat1" ) def test_run_workflow( self ): + self.__run_cat_workflow( inputs_by='step_id' ) + + def __run_cat_workflow( self, inputs_by ): workflow = self.workflow_populator.load_workflow( name="test_for_run" ) - workflow_request, history_id = self._setup_workflow_run( workflow ) + workflow_request, history_id = self._setup_workflow_run( workflow, inputs_by=inputs_by ) # TODO: This should really be a post to workflows/<workflow_id>/run or # something like that. run_workflow_response = self._post( "workflows", data=workflow_request ) https://bitbucket.org/galaxy/galaxy-central/commits/8b991693bd07/ Changeset: 8b991693bd07 User: jmchilton Date: 2014-07-29 01:18:40 Summary: Fix typos caught by @simleo and @nsoranzo. Affected #: 1 file diff -r 64a25838085978c250a0486b516bb2c8e7c3b850 -r 8b991693bd07987bb69bbd2b7470c0c2b5b886ca lib/galaxy/webapps/galaxy/api/workflows.py --- a/lib/galaxy/webapps/galaxy/api/workflows.py +++ b/lib/galaxy/webapps/galaxy/api/workflows.py @@ -204,14 +204,16 @@ else: inputs = inputs or {} # New default is to reference steps by index of workflow step - # which is intrinstic to the workflow and independent of the state + # which is intrinsic to the workflow and independent of the state # of Galaxy at the time of workflow import. inputs_by = inputs_by or 'step_index' valid_inputs_by = [ 'step_id', 'step_index', 'name' ] if inputs_by not in valid_inputs_by: trans.response.status = 403 - return "Invalid inputs_by specified '%s' must be one of %s" + error_message_template = "Invalid inputs_by specified '%s' must be one of %s" + error_message = error_message_template % ( inputs_by, valid_inputs_by ) + raise ValueError( error_message ) add_to_history = 'no_add_to_history' not in payload history_param = payload.get('history', '') https://bitbucket.org/galaxy/galaxy-central/commits/6882f1947c7e/ Changeset: 6882f1947c7e User: jmchilton Date: 2014-07-29 01:18:40 Summary: Allow using data collection steps via workflow API. Implement API test for this and fixup test for previous commit related improved workflow run endpoint. 
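A minimal client sketch of what the new 'inputs'/'inputs_by' mapping from the first changeset and the collection support added here make possible. It assumes a reachable Galaxy instance, an API key passed as a 'key' request parameter, and the 'test_workflow_two_paired' workflow from this changeset already imported; the requests-based code, server URL, and all encoded ids are illustrative placeholders and are not part of these commits - only the 'inputs', 'inputs_by', 'history', and src values ('hda', 'ldda', 'ld', 'hdca') come from the changesets themselves.

    # Sketch only: run a workflow mapping inputs by data input step name.
    # Server URL, API key, and all encoded ids below are placeholders.
    import json
    import requests

    galaxy_url = "https://galaxy.example.org"   # hypothetical instance
    api_key = "YOUR_API_KEY"                     # hypothetical key

    payload = {
        "workflow_id": "f597429621d6eb2b",       # encoded workflow id (placeholder)
        "history": "hist_id=f2db41e1fa331b3e",   # same 'hist_id=' form the API tests use
        # Map inputs by the names of the input steps ('f1' and 'f2' in the
        # two-paired test workflow) rather than by unencoded step ids.
        "inputs_by": "name",
        "inputs": json.dumps({
            "f1": {"src": "hdca", "id": "33b43b4e7093c91f"},
            "f2": {"src": "hdca", "id": "2891970512fa2d5a"},
        }),
    }

    response = requests.post("%s/api/workflows" % galaxy_url,
                             data=payload, params={"key": api_key})
    response.raise_for_status()
    print(response.json())

A plain dataset input would be supplied the same way with src 'hda' (or 'ldda'/'ld' for library data), and the legacy 'ds_map' keyed by unencoded step id continues to work unchanged.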
Affected #: 6 files diff -r 8b991693bd07987bb69bbd2b7470c0c2b5b886ca -r 6882f1947c7eba4b68db77ff26bbd45bfb1125da lib/galaxy/webapps/galaxy/api/workflows.py --- a/lib/galaxy/webapps/galaxy/api/workflows.py +++ b/lib/galaxy/webapps/galaxy/api/workflows.py @@ -82,11 +82,17 @@ latest_workflow = stored_workflow.latest_workflow inputs = {} for step in latest_workflow.steps: - if step.type == 'data_input': + step_type = step.type + if step_type in ['data_input', 'data_collection_input']: if step.tool_inputs and "name" in step.tool_inputs: - inputs[step.id] = {'label': step.tool_inputs['name'], 'value': ""} + label = step.tool_inputs['name'] + elif step_type == "data_input": + label = "Input Dataset" + elif step_type == "data_collection_input": + label = "Input Dataset Collection" else: - inputs[step.id] = {'label': "Input Dataset", 'value': ""} + raise ValueError("Invalid step_type %s" % step_type) + inputs[step.id] = {'label': label, 'value': ""} else: pass # Eventually, allow regular tool parameters to be inserted and modified at runtime. @@ -258,26 +264,35 @@ try: if inputs[k]['src'] == 'ldda': ldda = trans.sa_session.query(self.app.model.LibraryDatasetDatasetAssociation).get( - trans.security.decode_id(inputs[k]['id'])) + trans.security.decode_id(inputs[k]['id'])) assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ) - hda = ldda.to_history_dataset_association(history, add_to_history=add_to_history) + content = ldda.to_history_dataset_association(history, add_to_history=add_to_history) elif inputs[k]['src'] == 'ld': ldda = trans.sa_session.query(self.app.model.LibraryDataset).get( - trans.security.decode_id(inputs[k]['id'])).library_dataset_dataset_association + trans.security.decode_id(inputs[k]['id'])).library_dataset_dataset_association assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ) - hda = ldda.to_history_dataset_association(history, add_to_history=add_to_history) + content = ldda.to_history_dataset_association(history, add_to_history=add_to_history) elif inputs[k]['src'] == 'hda': # Get dataset handle, add to dict and history if necessary - hda = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get( - trans.security.decode_id(inputs[k]['id'])) - assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), hda.dataset ) + content = trans.sa_session.query(self.app.model.HistoryDatasetAssociation).get( + trans.security.decode_id(inputs[k]['id'])) + assert trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), content.dataset ) + elif inputs[k]['src'] == 'hdca': + content = self.app.dataset_collections_service.get_dataset_collection_instance( + trans, + 'history', + inputs[k]['id'] + ) else: trans.response.status = 400 return "Unknown dataset source '%s' specified." 
% inputs[k]['src'] - if add_to_history and hda.history != history: - hda = hda.copy() - history.add_dataset(hda) - inputs[k]['hda'] = hda + if add_to_history and content.history != history: + content = content.copy() + if isinstance( content, self.app.model.HistoryDatasetAssociation ): + history.add_dataset( content ) + else: + history.add_dataset_collection( content ) + inputs[k]['hda'] = content # TODO: rename key to 'content', prescreen input ensure not populated explicitly except AssertionError: trans.response.status = 400 return "Invalid Dataset '%s' Specified" % inputs[k]['id'] diff -r 8b991693bd07987bb69bbd2b7470c0c2b5b886ca -r 6882f1947c7eba4b68db77ff26bbd45bfb1125da lib/galaxy/workflow/run.py --- a/lib/galaxy/workflow/run.py +++ b/lib/galaxy/workflow/run.py @@ -290,7 +290,7 @@ step.state = step.module.get_runtime_state() # This is an input step. Make sure we have an available input. - if step.type == 'data_input': + if step.type in [ 'data_input', 'data_collection_input' ]: if self.inputs_by == "step_id": key = str( step.id ) elif self.inputs_by == "name": diff -r 8b991693bd07987bb69bbd2b7470c0c2b5b886ca -r 6882f1947c7eba4b68db77ff26bbd45bfb1125da test/api/helpers.py --- a/test/api/helpers.py +++ b/test/api/helpers.py @@ -11,6 +11,8 @@ # Simple workflow that takes an input and filters with random lines twice in a # row - first grabbing 8 lines at random and then 6. workflow_random_x2_str = resource_string( __name__, "test_workflow_2.ga" ) +workflow_two_paired_str = resource_string( __name__, "test_workflow_two_paired.ga" ) + DEFAULT_HISTORY_TIMEOUT = 10 # Secs to wait on history to turn ok @@ -140,6 +142,9 @@ def load_random_x2_workflow( self, name ): return self.load_workflow( name, content=workflow_random_x2_str ) + def load_two_paired_workflow( self, name ): + return self.load_workflow( name, content=workflow_two_paired_str ) + def simple_workflow( self, name, **create_kwds ): workflow = self.load_workflow( name ) return self.create_workflow( workflow, **create_kwds ) diff -r 8b991693bd07987bb69bbd2b7470c0c2b5b886ca -r 6882f1947c7eba4b68db77ff26bbd45bfb1125da test/api/test_workflow_two_paired.ga --- /dev/null +++ b/test/api/test_workflow_two_paired.ga @@ -0,0 +1,116 @@ +{ + "a_galaxy_workflow": "true", + "annotation": "", + "format-version": "0.1", + "name": "MultipairTest223", + "steps": { + "0": { + "annotation": "", + "id": 0, + "input_connections": {}, + "inputs": [ + { + "description": "", + "name": "f1" + } + ], + "name": "Input dataset collection", + "outputs": [], + "position": { + "left": 302.3333435058594, + "top": 330 + }, + "tool_errors": null, + "tool_id": null, + "tool_state": "{\"collection_type\": \"paired\", \"name\": \"f1\"}", + "tool_version": null, + "type": "data_collection_input", + "user_outputs": [] + }, + "1": { + "annotation": "", + "id": 1, + "input_connections": {}, + "inputs": [ + { + "description": "", + "name": "f2" + } + ], + "name": "Input dataset collection", + "outputs": [], + "position": { + "left": 288.3333435058594, + "top": 446 + }, + "tool_errors": null, + "tool_id": null, + "tool_state": "{\"collection_type\": \"paired\", \"name\": \"f2\"}", + "tool_version": null, + "type": "data_collection_input", + "user_outputs": [] + }, + "2": { + "annotation": "", + "id": 2, + "input_connections": { + "kind|f1": { + "id": 0, + "output_name": "output" + }, + "kind|f2": { + "id": 1, + "output_name": "output" + } + }, + "inputs": [], + "name": "collection_two_paired", + "outputs": [ + { + "name": "out1", + "type": "txt" + } + ], + "position": { + 
"left": 782.3333740234375, + "top": 200 + }, + "post_job_actions": {}, + "tool_errors": null, + "tool_id": "collection_two_paired", + "tool_state": "{\"__page__\": 0, \"kind\": \"{\\\"f1\\\": null, \\\"f2\\\": null, \\\"collection_type\\\": \\\"paired\\\", \\\"__current_case__\\\": 0}\", \"__rerun_remap_job_id__\": null}", + "tool_version": "0.1.0", + "type": "tool", + "user_outputs": [] + }, + "3": { + "annotation": "", + "id": 3, + "input_connections": { + "cond1|input1": { + "id": 2, + "output_name": "out1" + } + }, + "inputs": [], + "name": "Concatenate datasets", + "outputs": [ + { + "name": "out_file1", + "type": "input" + } + ], + "position": { + "left": 1239.3333740234375, + "top": 108.97916793823242 + }, + "post_job_actions": {}, + "tool_errors": null, + "tool_id": "cat2", + "tool_state": "{\"__page__\": 0, \"__rerun_remap_job_id__\": null, \"cond1\": \"{\\\"datatype\\\": \\\"txt\\\", \\\"input1\\\": null, \\\"__current_case__\\\": 0}\"}", + "tool_version": "1.0.0", + "type": "tool", + "user_outputs": [] + } + } +} \ No newline at end of file diff -r 8b991693bd07987bb69bbd2b7470c0c2b5b886ca -r 6882f1947c7eba4b68db77ff26bbd45bfb1125da test/api/test_workflows.py --- a/test/api/test_workflows.py +++ b/test/api/test_workflows.py @@ -86,6 +86,25 @@ self.dataset_populator.wait_for_history( history_id, assert_ok=True ) @skip_without_tool( "cat1" ) + @skip_without_tool( "collection_two_paired" ) + def test_run_workflow_collection_params( self ): + workflow = self.workflow_populator.load_two_paired_workflow( name="test_for_run_two_paired" ) + workflow_id = self.workflow_populator.create_workflow( workflow ) + history_id = self.dataset_populator.new_history() + hdca1 = self.dataset_collection_populator.create_pair_in_history( history_id, contents=["1 2 3", "4 5 6"] ).json() + hdca2 = self.dataset_collection_populator.create_pair_in_history( history_id, contents=["7 8 9", "0 a b"] ).json() + self.dataset_populator.wait_for_history( history_id, assert_ok=True ) + label_map = { "f1": self._ds_entry( hdca1 ), "f2": self._ds_entry( hdca2 ) } + workflow_request = dict( + history="hist_id=%s" % history_id, + workflow_id=workflow_id, + ds_map=self._build_ds_map( workflow_id, label_map ), + ) + run_workflow_response = self._post( "workflows", data=workflow_request ) + self._assert_status_code_is( run_workflow_response, 200 ) + self.dataset_populator.wait_for_history( history_id, assert_ok=True ) + + @skip_without_tool( "cat1" ) def test_extract_from_history( self ): history_id = self.dataset_populator.new_history() # Run the simple test workflow and extract it back out from history @@ -137,7 +156,6 @@ input1 = tool_step[ "input_connections" ][ "input1" ] input2 = tool_step[ "input_connections" ][ "queries_0|input2" ] - print downloaded_workflow self.assertEquals( input_steps[ 0 ][ "id" ], input1[ "id" ] ) self.assertEquals( input_steps[ 1 ][ "id" ], input2[ "id" ] ) @@ -421,21 +439,34 @@ # renamed to 'the_new_name'. 
assert "the_new_name" in map( lambda hda: hda[ "name" ], contents ) - def _setup_workflow_run( self, workflow, history_id=None ): + def _setup_workflow_run( self, workflow, inputs_by='step_id', history_id=None ): uploaded_workflow_id = self.workflow_populator.create_workflow( workflow ) if not history_id: history_id = self.dataset_populator.new_history() hda1 = self.dataset_populator.new_dataset( history_id, content="1 2 3" ) hda2 = self.dataset_populator.new_dataset( history_id, content="4 5 6" ) + workflow_request = dict( + history="hist_id=%s" % history_id, + workflow_id=uploaded_workflow_id, + ) label_map = { 'WorkflowInput1': self._ds_entry(hda1), 'WorkflowInput2': self._ds_entry(hda2) } - workflow_request = dict( - history="hist_id=%s" % history_id, - workflow_id=uploaded_workflow_id, - ds_map=self._build_ds_map( uploaded_workflow_id, label_map ), - ) + if inputs_by == 'step_id': + ds_map = self._build_ds_map( uploaded_workflow_id, label_map ) + workflow_request[ "ds_map" ] = ds_map + elif inputs_by == "step_index": + index_map = { + '0': self._ds_entry(hda1), + '1': self._ds_entry(hda2) + } + workflow_request[ "inputs" ] = dumps( index_map ) + workflow_request[ "inputs_by" ] = 'step_index' + elif inputs_by == "name": + workflow_request[ "inputs" ] = dumps( label_map ) + workflow_request[ "inputs_by" ] = 'name' + return workflow_request, history_id def _build_ds_map( self, workflow_id, label_map ): @@ -471,7 +502,10 @@ return workflow_inputs def _ds_entry( self, hda ): - return dict( src="hda", id=hda[ "id" ] ) + src = 'hda' + if 'history_content_type' in hda and hda[ 'history_content_type' ] == "dataset_collection": + src = 'hdca' + return dict( src=src, id=hda[ "id" ] ) def _assert_user_has_workflow_with_name( self, name ): names = self.__workflow_names() diff -r 8b991693bd07987bb69bbd2b7470c0c2b5b886ca -r 6882f1947c7eba4b68db77ff26bbd45bfb1125da test/functional/tools/collection_two_paired.xml --- a/test/functional/tools/collection_two_paired.xml +++ b/test/functional/tools/collection_two_paired.xml @@ -16,12 +16,12 @@ <option value="list">List of Datasets</option></param><when value="paired"> - <param name="f1" type="data_collection" collection_type="paired" /> - <param name="f2" type="data_collection" collection_type="paired" /> + <param name="f1" type="data_collection" collection_type="paired" label="F1" /> + <param name="f2" type="data_collection" collection_type="paired" label="F2" /></when><when value="list"> - <param name="f1" type="data_collection" collection_type="list" /> - <param name="f2" type="data_collection" collection_type="list" /> + <param name="f1" type="data_collection" collection_type="list" label="F1" /> + <param name="f2" type="data_collection" collection_type="list" label="F2" /></when></conditional></inputs> https://bitbucket.org/galaxy/galaxy-central/commits/de3a9e1e7ba3/ Changeset: de3a9e1e7ba3 User: jmchilton Date: 2014-07-29 01:18:40 Summary: Fix unit tests broken with 6292ada. 
Affected #: 1 file diff -r 6882f1947c7eba4b68db77ff26bbd45bfb1125da -r de3a9e1e7ba3fd55d0d58b8d14efaef4d324bfa8 lib/galaxy/datatypes/sniff.py --- a/lib/galaxy/datatypes/sniff.py +++ b/lib/galaxy/datatypes/sniff.py @@ -97,7 +97,7 @@ >>> fname = get_test_fname('temp.txt') >>> file(fname, 'wt').write("1 2\\r3 4") - >>> convert_newlines(fname) + >>> convert_newlines(fname, tmp_prefix="gxtest", tmp_dir=tempfile.gettempdir()) (2, None) >>> file(fname).read() '1 2\\n3 4\\n' @@ -157,7 +157,7 @@ >>> fname = get_test_fname('temp.txt') >>> file(fname, 'wt').write("1 2\\r3 4") - >>> convert_newlines_sep2tabs(fname) + >>> convert_newlines_sep2tabs(fname, tmp_prefix="gxtest", tmp_dir=tempfile.gettempdir()) (2, None) >>> file(fname).read() '1\\t2\\n3\\t4\\n' https://bitbucket.org/galaxy/galaxy-central/commits/48eacfabffb9/ Changeset: 48eacfabffb9 User: jmchilton Date: 2014-07-29 01:18:40 Summary: Fix some basic citation unit tests missing from PR #440. Affected #: 1 file diff -r de3a9e1e7ba3fd55d0d58b8d14efaef4d324bfa8 -r 48eacfabffb9af10695beba2fbbb08d7eabc341d test/unit/tools/test_citations.py --- /dev/null +++ b/test/unit/tools/test_citations.py @@ -0,0 +1,56 @@ +import tempfile +from contextlib import contextmanager +from shutil import rmtree + +from galaxy.util import parse_xml_string +from galaxy.managers.citations import ( + parse_citation, + BibtexCitation, + CitationCollection, +) + +EXAMPLE_BIBTEX_CITATION = """<citation type="bibtex">@article{goecks2010galaxy, + title={Galaxy: a comprehensive approach for supporting accessible, reproducible, and transparent computational research in the life sciences}, + author={Goecks, Jeremy and Nekrutenko, Anton and Taylor, James and The Galaxy Team}, + journal={Genome Biol}, + volume={11}, + number={8}, + pages={R86}, + year={2010} +}</citation>""" + + +def test_parse_citation(): + xml_text = EXAMPLE_BIBTEX_CITATION + citation_elem = parse_xml_string(xml_text) + with temp_directory() as test_directory: + citation = parse_citation(citation_elem, test_directory, None) + bibtex = citation.to_bibtex() + assert "title={Galaxy" in bibtex + + +def test_citation_collection(): + citation_collection = CitationCollection() + assert len( citation_collection ) == 0 + cite1 = QuickBibtexCitation("@article{'test1'}") + cite1dup = QuickBibtexCitation("@article{'test1'}") + cite2 = QuickBibtexCitation("@article{'test2'}") + assert citation_collection.add(cite1) + assert not citation_collection.add(cite1dup) + assert citation_collection.add(cite2) + assert len( citation_collection ) == 2 + + +@contextmanager +def temp_directory(): + base_path = tempfile.mkdtemp() + try: + yield base_path + finally: + rmtree(base_path) + + +class QuickBibtexCitation( BibtexCitation ): + + def __init__( self, raw_bibtex ): + self._set_raw_bibtex( raw_bibtex ) Repository URL: https://bitbucket.org/galaxy/galaxy-central/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.