commit/galaxy-central: 5 new changesets
5 new commits in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/commits/b7d43f49b239/ Changeset: b7d43f49b239 User: jmchilton Date: 2014-05-08 23:12:07 Summary: Doc fix in Galaxy's tool shed repositories API. Affected #: 1 file diff -r 521fead562531f96564c0e08f2c3ac523a92c6e5 -r b7d43f49b239e99fdb65e4b049bc2b633aafff98 lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py --- a/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py +++ b/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py @@ -107,7 +107,7 @@ @web.expose_api def import_workflows( self, trans, **kwd ): """ - POST /api/tool_shed_repositories/import_workflow + POST /api/tool_shed_repositories/import_workflows Import all of the exported workflows contained in the specified installed tool shed repository into Galaxy. https://bitbucket.org/galaxy/galaxy-central/commits/7c302d65be90/ Changeset: 7c302d65be90 User: jmchilton Date: 2014-05-08 23:12:07 Summary: Reduce polling frequency of API tests, seems to help prevent sqlite locks. Affected #: 1 file diff -r b7d43f49b239e99fdb65e4b049bc2b633aafff98 -r 7c302d65be90f414cff9bf067cf426f5ea3b3207 test/api/helpers.py --- a/test/api/helpers.py +++ b/test/api/helpers.py @@ -312,7 +312,7 @@ def wait_on_state( state_func, assert_ok=False, timeout=5 ): - delta = .1 + delta = .25 iteration = 0 while True: if (delta * iteration) > timeout: https://bitbucket.org/galaxy/galaxy-central/commits/376d5e107a58/ Changeset: 376d5e107a58 User: jmchilton Date: 2014-05-08 23:12:07 Summary: Refactor API assertions for reuse outside explicit TestCase classes. 
Affected #: 2 files diff -r 7c302d65be90f414cff9bf067cf426f5ea3b3207 -r 376d5e107a584ac32e5f827b47cd962b79e4248c test/base/api.py --- a/test/base/api.py +++ b/test/base/api.py @@ -8,6 +8,11 @@ from .api_util import get_master_api_key from .api_util import get_user_api_key +from .api_asserts import ( + assert_status_code_is, + assert_has_keys, + assert_error_code_is, +) from urllib import urlencode @@ -73,25 +78,13 @@ return self.galaxy_interactor.post( *args, **kwds ) def _assert_status_code_is( self, response, expected_status_code ): - response_status_code = response.status_code - if expected_status_code != response_status_code: - try: - body = response.json() - except Exception: - body = "INVALID JSON RESPONSE <%s>" % response.content - assertion_message_template = "Request status code (%d) was not expected value %d. Body was %s" - assertion_message = assertion_message_template % ( response_status_code, expected_status_code, body ) - raise AssertionError( assertion_message ) + assert_status_code_is( response, expected_status_code ) def _assert_has_keys( self, response, *keys ): - for key in keys: - assert key in response, "Response [%s] does not contain key [%s]" % ( response, key ) + assert_has_keys( response, *keys ) def _assert_error_code_is( self, response, error_code ): - if hasattr( response, "json" ): - response = response.json() - self._assert_has_keys( response, "err_code" ) - self.assertEquals( response[ "err_code" ], int( error_code ) ) + assert_error_code_is( response, error_code ) def _random_key( self ): # Used for invalid request testing... return "1234567890123456" diff -r 7c302d65be90f414cff9bf067cf426f5ea3b3207 -r 376d5e107a584ac32e5f827b47cd962b79e4248c test/base/api_asserts.py --- /dev/null +++ b/test/base/api_asserts.py @@ -0,0 +1,30 @@ +""" Utility methods for making assertions about Galaxy API responses, etc... 
+""" +ASSERT_FAIL_ERROR_CODE = "Expected Galaxy error code %d, obtained %d" +ASSERT_FAIL_STATUS_CODE = "Request status code (%d) was not expected value %d. Body was %s" + + +def assert_status_code_is( response, expected_status_code ): + response_status_code = response.status_code + if expected_status_code != response_status_code: + try: + body = response.json() + except Exception: + body = "INVALID JSON RESPONSE <%s>" % response.content + assertion_message = ASSERT_FAIL_STATUS_CODE % ( response_status_code, expected_status_code, body ) + raise AssertionError( assertion_message ) + + +def assert_has_keys( response, *keys ): + for key in keys: + assert key in response, "Response [%s] does not contain key [%s]" % ( response, key ) + + +def assert_error_code_is( response, error_code ): + if hasattr( response, "json" ): + response = response.json() + assert_has_keys( response, "err_code" ) + err_code = response[ "err_code" ] + assert err_code == int( error_code ), ASSERT_FAIL_ERROR_CODE % ( err_code, int( error_code ) ) + +assert_has_key = assert_has_keys https://bitbucket.org/galaxy/galaxy-central/commits/a314a3464b08/ Changeset: a314a3464b08 User: jmchilton Date: 2014-05-08 23:12:07 Summary: Add simple collection mapping workflow extraction functional test. 
Affected #: 3 files diff -r 376d5e107a584ac32e5f827b47cd962b79e4248c -r a314a3464b0806c5f7cb92379ce0788260637ba4 lib/galaxy/workflow/extract.py --- a/lib/galaxy/workflow/extract.py +++ b/lib/galaxy/workflow/extract.py @@ -93,7 +93,9 @@ steps.append( step ) # Tool steps for job_id in job_ids: - assert job_id in jobs_by_id, "Attempt to create workflow with job not connected to current history" + if job_id not in jobs_by_id: + log.warn( "job_id %s not found in jobs_by_id %s" % ( job_id, jobs_by_id ) ) + raise AssertionError( "Attempt to create workflow with job not connected to current history" ) job = jobs_by_id[ job_id ] tool_inputs, associations = step_inputs( trans, job ) step = model.WorkflowStep() diff -r 376d5e107a584ac32e5f827b47cd962b79e4248c -r a314a3464b0806c5f7cb92379ce0788260637ba4 test/api/helpers.py --- a/test/api/helpers.py +++ b/test/api/helpers.py @@ -1,3 +1,4 @@ +from base import api_asserts from operator import itemgetter import time @@ -111,6 +112,12 @@ **kwds ) + def run_tool( self, tool_id, inputs, history_id, **kwds ): + payload = self.run_tool_payload( tool_id, inputs, history_id, **kwds ) + tool_response = self.galaxy_interactor.post( "tools", data=payload ) + api_asserts.assert_status_code_is( tool_response, 200 ) + return tool_response.json() + class WorkflowPopulator( object ): # Impulse is to make this a Mixin, but probably better as an object. 
diff -r 376d5e107a584ac32e5f827b47cd962b79e4248c -r a314a3464b0806c5f7cb92379ce0788260637ba4 test/api/test_workflows.py --- a/test/api/test_workflows.py +++ b/test/api/test_workflows.py @@ -115,35 +115,81 @@ inputs = { "f1": dict( src="hdca", id=hdca_id ) } - payload = self.dataset_populator.run_tool_payload( + run_output = self.dataset_populator.run_tool( tool_id="collection_paired_test", inputs=inputs, history_id=history_id, ) - tool_response = self._post( "tools", data=payload ) - self._assert_status_code_is( tool_response, 200 ) - job_id = tool_response.json()[ "jobs" ][ 0 ][ "id" ] + job_id = run_output[ "jobs" ][ 0 ][ "id" ] self.dataset_populator.wait_for_history( history_id, assert_ok=True ) - create_from_data = dict( + downloaded_workflow = self._extract_and_download_workflow( from_history_id=history_id, dataset_collection_ids=dumps( [ hdca[ "hid" ] ] ), job_ids=dumps( [ job_id ] ), workflow_name="test import from history", ) - create_workflow_response = self._post( "workflows", data=create_from_data ) + collection_steps = self._get_steps_of_type( downloaded_workflow, "data_collection_input", expected_len=1 ) + collection_step = collection_steps[ 0 ] + collection_step_state = loads( collection_step[ "tool_state" ] ) + self.assertEquals( collection_step_state[ "collection_type" ], u"paired" ) + + @skip_without_tool( "random_lines1" ) + def test_extract_mapping_workflow_from_history( self ): + history_id = self.dataset_populator.new_history() + hdca = self.dataset_collection_populator.create_pair_in_history( history_id, contents=["1 2 3\n4 5 6", "7 8 9\n10 11 10"] ).json() + hdca_id = hdca[ "id" ] + inputs1 = { + "input|__collection_multirun__": hdca_id, + "num_lines": 2 + } + run_output1 = self.dataset_populator.run_tool( + tool_id="random_lines1", + inputs=inputs1, + history_id=history_id, + ) + implicit_hdca1 = run_output1[ "implicit_collections" ][ 0 ] + job_id1 = run_output1[ "jobs" ][ 0 ][ "id" ] + self.dataset_populator.wait_for_history( history_id, 
assert_ok=True, timeout=20 ) + inputs2 = { + "input|__collection_multirun__": implicit_hdca1[ "id" ], + "num_lines": 1 + } + run_output2 = self.dataset_populator.run_tool( + tool_id="random_lines1", + inputs=inputs2, + history_id=history_id, + ) + # implicit_hdca2 = run_output2[ "implicit_collections" ][ 0 ] + job_id2 = run_output2[ "jobs" ][ 0 ][ "id" ] + self.dataset_populator.wait_for_history( history_id, assert_ok=True, timeout=20 ) + downloaded_workflow = self._extract_and_download_workflow( + from_history_id=history_id, + dataset_collection_ids=dumps( [ hdca[ "hid" ] ] ), + job_ids=dumps( [ job_id1, job_id2 ] ), + workflow_name="test import from mapping history", + ) + assert len( downloaded_workflow[ "steps" ] ) == 3 + collection_steps = self._get_steps_of_type( downloaded_workflow, "data_collection_input", expected_len=1 ) + collection_step = collection_steps[ 0 ] + collection_step_state = loads( collection_step[ "tool_state" ] ) + self.assertEquals( collection_step_state[ "collection_type" ], u"paired" ) + self._get_steps_of_type( downloaded_workflow, "tool", expected_len=2 ) + + def _extract_and_download_workflow( self, **extract_payload ): + create_workflow_response = self._post( "workflows", data=extract_payload ) self._assert_status_code_is( create_workflow_response, 200 ) - create_workflow_response.json()[ "id" ] new_workflow_id = create_workflow_response.json()[ "id" ] download_response = self._get( "workflows/%s/download" % new_workflow_id ) self._assert_status_code_is( download_response, 200 ) downloaded_workflow = download_response.json() - assert len( downloaded_workflow[ "steps" ] ) == 2 - collection_steps = [ s for s in downloaded_workflow[ "steps" ].values() if s[ "type" ] == "data_collection_input" ] - assert len( collection_steps ) == 1 - collection_step = collection_steps[ 0 ] - collection_step_state = loads( collection_step[ "tool_state" ] ) - self.assertEquals( collection_step_state[ "collection_type" ], u"paired" ) + return 
downloaded_workflow + + def _get_steps_of_type( self, downloaded_workflow, type, expected_len=None ): + steps = [ s for s in downloaded_workflow[ "steps" ].values() if s[ "type" ] == type ] + if expected_len is not None: + assert len( steps ) == expected_len + return steps @skip_without_tool( "random_lines1" ) def test_run_replace_params_by_tool( self ): https://bitbucket.org/galaxy/galaxy-central/commits/f2f32d8b8a06/ Changeset: f2f32d8b8a06 User: jmchilton Date: 2014-05-08 23:12:07 Summary: Bugfix: (For bf60fa7) Only initial mapping steps in map reduce workflows were being connected correctly. With expanded test case that checks such a connection. This takes care of subsequent connections by iteratively updating hid_to_output_pair correctly (... I think, test works with at least two). Affected #: 2 files diff -r a314a3464b0806c5f7cb92379ce0788260637ba4 -r f2f32d8b8a06ff370145360b5c75c90e2e2447a9 lib/galaxy/workflow/extract.py --- a/lib/galaxy/workflow/extract.py +++ b/lib/galaxy/workflow/extract.py @@ -123,7 +123,18 @@ steps_by_job_id[ job_id ] = step # Store created dataset hids for assoc in job.output_datasets: - hid_to_output_pair[ assoc.dataset.hid ] = ( step, assoc.name ) + if job in summary.implicit_map_jobs: + hid = None + for implicit_pair in jobs[ job ]: + query_assoc_name, dataset_collection = implicit_pair + if query_assoc_name == assoc.name: + hid = dataset_collection.hid + if hid is None: + log.warn("Failed to find matching implicit job.") + raise Exception( "Failed to extract job." 
) + else: + hid = assoc.dataset.hid + hid_to_output_pair[ hid ] = ( step, assoc.name ) return steps diff -r a314a3464b0806c5f7cb92379ce0788260637ba4 -r f2f32d8b8a06ff370145360b5c75c90e2e2447a9 test/api/test_workflows.py --- a/test/api/test_workflows.py +++ b/test/api/test_workflows.py @@ -173,7 +173,19 @@ collection_step = collection_steps[ 0 ] collection_step_state = loads( collection_step[ "tool_state" ] ) self.assertEquals( collection_step_state[ "collection_type" ], u"paired" ) - self._get_steps_of_type( downloaded_workflow, "tool", expected_len=2 ) + collect_step_idx = collection_step[ "id" ] + tool_steps = self._get_steps_of_type( downloaded_workflow, "tool", expected_len=2 ) + tool_step_idxs = [] + tool_input_step_idxs = [] + for tool_step in tool_steps: + self._assert_has_key( tool_step[ "input_connections" ], "input" ) + input_step_idx = tool_step[ "input_connections" ][ "input" ][ "id" ] + tool_step_idxs.append( tool_step[ "id" ] ) + tool_input_step_idxs.append( input_step_idx ) + + assert collect_step_idx not in tool_step_idxs + assert tool_input_step_idxs[ 0 ] == collect_step_idx + assert tool_input_step_idxs[ 1 ] == tool_step_idxs[ 0 ] def _extract_and_download_workflow( self, **extract_payload ): create_workflow_response = self._post( "workflows", data=extract_payload ) Repository URL: https://bitbucket.org/galaxy/galaxy-central/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.
participants (1)
-
commits-noreply@bitbucket.org