3 new commits in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/commits/ec858cad8c51/
Changeset:   ec858cad8c51
User:        jmchilton
Date:        2014-01-27 05:17:01
Summary:     PEP-8 fixes for workflow controllers.
Affected #:  2 files

diff -r a7ac443f9c9b7b5383284149db4bcc6a99e4b124 -r ec858cad8c51d5190237eaacb31c51841e2e2843 lib/galaxy/webapps/galaxy/api/workflows.py
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -18,7 +18,9 @@
 log = logging.getLogger(__name__)
 
+
 class WorkflowsAPIController(BaseAPIController, UsesStoredWorkflowMixin):
+
     @web.expose_api
     def index(self, trans, **kwd):
         """
@@ -34,10 +36,10 @@
         filter1 = ( trans.app.model.StoredWorkflow.user == trans.user )
         if show_published:
             filter1 = or_( filter1, ( trans.app.model.StoredWorkflow.published == True ) )
-        for wf in trans.sa_session.query(trans.app.model.StoredWorkflow).filter(
+        for wf in trans.sa_session.query( trans.app.model.StoredWorkflow ).filter(
                 filter1, trans.app.model.StoredWorkflow.table.c.deleted == False ).order_by(
-                desc(trans.app.model.StoredWorkflow.table.c.update_time)).all():
-            item = wf.to_dict(value_mapper={'id':trans.security.encode_id})
+                desc( trans.app.model.StoredWorkflow.table.c.update_time ) ).all():
+            item = wf.to_dict( value_mapper={ 'id': trans.security.encode_id } )
             encoded_id = trans.security.encode_id(wf.id)
             item['url'] = url_for('workflow', id=encoded_id)
             rval.append(item)
@@ -45,9 +47,9 @@
                 user=trans.user ).join( 'stored_workflow' ).filter(
                 trans.app.model.StoredWorkflow.deleted == False ).order_by(
                 desc( trans.app.model.StoredWorkflow.update_time ) ).all():
-            item = wf_sa.stored_workflow.to_dict(value_mapper={'id':trans.security.encode_id})
+            item = wf_sa.stored_workflow.to_dict( value_mapper={ 'id': trans.security.encode_id })
             encoded_id = trans.security.encode_id(wf_sa.stored_workflow.id)
-            item['url'] = url_for('workflow', id=encoded_id)
+            item['url'] = url_for( 'workflow', id=encoded_id )
             rval.append(item)
         return rval
@@ -73,16 +75,16 @@
             except:
                 trans.response.status = 400
                 return "That workflow does not exist."
-        item = stored_workflow.to_dict(view='element', value_mapper={'id':trans.security.encode_id})
+        item = stored_workflow.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
         item['url'] = url_for('workflow', id=workflow_id)
         latest_workflow = stored_workflow.latest_workflow
         inputs = {}
         for step in latest_workflow.steps:
             if step.type == 'data_input':
                 if step.tool_inputs and "name" in step.tool_inputs:
-                    inputs[step.id] = {'label':step.tool_inputs['name'], 'value':""}
+                    inputs[step.id] = {'label': step.tool_inputs['name'], 'value': ""}
                 else:
-                    inputs[step.id] = {'label':"Input Dataset", 'value':""}
+                    inputs[step.id] = {'label': "Input Dataset", 'value': ""}
             else:
                 pass
                 # Eventually, allow regular tool parameters to be inserted and modified at runtime.
@@ -220,12 +222,12 @@
                 if 'param' in param_dict and 'value' in param_dict:
                     param_dict[ param_dict['param'] ] = param_dict['value']
-                # Update step if there's no step id (i.e. all steps with tool are 
+                # Update step if there's no step id (i.e. all steps with tool are
                 # updated) or update if step ids match.
                 if not step_id or ( step_id and int( step_id ) == step.id ):
                     for name, value in param_dict.items():
                         step.state.inputs[ name ] = value
-            
+
             if step.tool_errors:
                 trans.response.status = 400
                 return "Workflow cannot be run because of validation errors in some steps: %s" % step_errors
@@ -251,6 +253,7 @@
             job = None
             if step.type == 'tool' or step.type is None:
                 tool = self.app.toolbox.get_tool( step.tool_id )
+
                 def callback( input, value, prefixed_name, prefixed_label ):
                     if isinstance( input, DataToolParameter ):
                         if prefixed_name in step.input_connections_by_name:
@@ -347,6 +350,7 @@
         """
         data = payload['workflow']
+
         workflow, missing_tool_tups = self._workflow_from_dict( trans, data, source="API" )
 
         # galaxy workflow newly created id
@@ -357,7 +361,7 @@
 
         # return list
         rval = []
-        item = workflow.to_dict(value_mapper={'id':trans.security.encode_id})
+        item = workflow.to_dict(value_mapper={'id': trans.security.encode_id})
         item['url'] = url_for('workflow', id=encoded_id)
         rval.append(item)
@@ -388,7 +392,7 @@
         elif stored_workflow.deleted:
             raise exceptions.MessageException( "You can't import this workflow because it has been deleted." )
         imported_workflow = self._import_shared_workflow( trans, stored_workflow )
-        item = imported_workflow.to_dict(value_mapper={'id':trans.security.encode_id})
+        item = imported_workflow.to_dict( value_mapper={ 'id': trans.security.encode_id } )
         encoded_id = trans.security.encode_id(imported_workflow.id)
         item['url'] = url_for('workflow', id=encoded_id)
         return item

diff -r a7ac443f9c9b7b5383284149db4bcc6a99e4b124 -r ec858cad8c51d5190237eaacb31c51841e2e2843 lib/galaxy/webapps/galaxy/controllers/workflow.py
--- a/lib/galaxy/webapps/galaxy/controllers/workflow.py
+++ b/lib/galaxy/webapps/galaxy/controllers/workflow.py
@@ -68,6 +68,7 @@
         grids.GridOperation( "Sharing", condition=( lambda item: not item.deleted ), async_compatible=False ),
         grids.GridOperation( "Delete", condition=( lambda item: item.deleted ), async_compatible=True ),
     ]
+
     def apply_query_filter( self, trans, query, **kwargs ):
         return query.filter_by( user=trans.user, deleted=False )
@@ -109,7 +110,7 @@
     def apply_query_filter( self, trans, query, **kwargs ):
         # A public workflow is published, has a slug, and is not deleted.
-        return query.filter( self.model_class.published==True ).filter( self.model_class.slug != None ).filter( self.model_class.deleted == False )
+        return query.filter( self.model_class.published == True ).filter( self.model_class.slug != None ).filter( self.model_class.deleted == False )
 
 
 # Simple SGML parser to get all content in a single tag.
@@ -183,8 +184,8 @@
             trans.sa_session.flush()
         return trans.fill_template( "workflow/list.mako",
-                                    workflows = workflows,
-                                    shared_by_others = shared_by_others )
+                                    workflows=workflows,
+                                    shared_by_others=shared_by_others )
 
     @web.expose
     @web.require_login( "use Galaxy workflows" )
@@ -205,8 +206,8 @@
             .order_by( desc( model.StoredWorkflow.table.c.update_time ) ) \
             .all()
         return trans.fill_template( "workflow/list_for_run.mako",
-                                    workflows = workflows,
-                                    shared_by_others = shared_by_others )
+                                    workflows=workflows,
+                                    shared_by_others=shared_by_others )
 
     @web.expose
     def list_published( self, trans, **kwargs ):
@@ -237,7 +238,7 @@
     def display(self, trans, stored_workflow):
         """ Base workflow display """
         if stored_workflow is None:
-            raise web.httpexceptions.HTTPNotFound() 
+            raise web.httpexceptions.HTTPNotFound()
         # Security check raises error if user cannot access workflow.
         self.security_check( trans, stored_workflow, False, True )
         # Get data for workflow's steps.
@@ -256,7 +257,7 @@
             user_item_rating = 0
         ave_item_rating, num_ratings = self.get_ave_item_rating_data( trans.sa_session, stored_workflow )
         return trans.fill_template_mako( "workflow/display.mako", item=stored_workflow, item_data=stored_workflow.latest_workflow.steps,
-                                         user_item_rating = user_item_rating, ave_item_rating=ave_item_rating, num_ratings=num_ratings )
+                                         user_item_rating=user_item_rating, ave_item_rating=ave_item_rating, num_ratings=num_ratings )
 
     @web.expose
     def get_item_content_async( self, trans, id ):
@@ -272,7 +273,7 @@
         stored.annotation = self.get_item_annotation_str( trans.sa_session, stored.user, stored )
         for step in stored.latest_workflow.steps:
             step.annotation = self.get_item_annotation_str( trans.sa_session, stored.user, step )
-        return trans.stream_template_mako( "/workflow/item_content.mako", item = stored, item_data = stored.latest_workflow.steps )
+        return trans.stream_template_mako( "/workflow/item_content.mako", item=stored, item_data=stored.latest_workflow.steps )
 
     @web.expose
     @web.require_login( "use Galaxy workflows" )
@@ -282,8 +283,8 @@
         stored = self.get_stored_workflow( trans, id )
         if email:
             other = trans.sa_session.query( model.User ) \
-                                    .filter( and_( model.User.table.c.email==email,
-                                                   model.User.table.c.deleted==False ) ) \
+                                    .filter( and_( model.User.table.c.email == email,
+                                                   model.User.table.c.deleted == False ) ) \
                                     .first()
             if not other:
                 mtype = "error"
@@ -306,8 +307,8 @@
             trans.set_message( "Workflow '%s' shared with user '%s'" % ( stored.name, other.email ) )
             return trans.response.send_redirect( url_for( controller='workflow', action='sharing', id=id ) )
         return trans.fill_template( "/ind_share_base.mako",
-                                    message = msg,
-                                    messagetype = mtype,
+                                    message=msg,
+                                    messagetype=mtype,
                                     item=stored,
                                     email=email,
                                     use_panels=use_panels )
@@ -414,7 +415,7 @@
             stored.latest_workflow.name = san_new_name
             trans.sa_session.flush()
             # For current workflows grid:
-            trans.set_message ( "Workflow renamed to '%s'." % new_name )
+            trans.set_message( "Workflow renamed to '%s'." % new_name )
             return self.list( trans )
             # For new workflows grid:
             #message = "Workflow renamed to '%s'." % new_name
@@ -468,7 +469,7 @@
         stored = self.get_stored_workflow( trans, id )
         # Only set if importable value would change; this prevents a change in the update_time unless attribute really changed.
-        importable = accessible in ['True', 'true', 't', 'T'];
+        importable = accessible in ['True', 'true', 't', 'T']
         if stored and stored.importable != importable:
             if importable:
                 self._make_item_accessible( trans.sa_session, stored )
@@ -527,7 +528,7 @@
         annotation_obj = self.get_item_annotation_obj( trans.sa_session, stored.user, stored )
         if annotation_obj:
             self.add_item_annotation( trans.sa_session, trans.get_user(), new_stored, annotation_obj.annotation )
-        new_stored.copy_tags_from(trans.user,stored)
+        new_stored.copy_tags_from(trans.user, stored)
         if not owner:
             new_stored.name += " shared by '%s'" % stored.user.email
         new_stored.user = user
@@ -617,7 +618,7 @@
             } )
             module.update_state( incoming )
 
-        if type=='tool':
+        if type == 'tool':
             return {
                 'tool_state': module.get_state(),
                 'data_inputs': module.get_data_inputs(),
@@ -702,10 +703,10 @@
                     'data_inputs': [],
                     'data_outputs': [],
                     'form_html': invalid_tool_form_html,
-                    'annotation' : annotation_str,
-                    'input_connections' : {},
-                    'post_job_actions' : {},
-                    'workflow_outputs' : []
+                    'annotation': annotation_str,
+                    'input_connections': {},
+                    'post_job_actions': {},
+                    'workflow_outputs': []
                 }
                 # Position
                 step_dict['position'] = step.position
@@ -741,10 +742,11 @@
             }
             # Connections
             input_connections = step.input_connections
-            multiple_input = {} # Boolean value indicating if this can be mutliple
+            multiple_input = {}  # Boolean value indicating if this can be mutliple
             if step.type is None or step.type == 'tool':
                 # Determine full (prefixed) names of valid input datasets
                 data_input_names = {}
+
                 def callback( input, value, prefixed_name, prefixed_label ):
                     if isinstance( input, DataToolParameter ):
                         data_input_names[ prefixed_name ] = True
@@ -756,9 +758,11 @@
                 # post_job_actions
                 pja_dict = {}
                 for pja in step.post_job_actions:
-                    pja_dict[pja.action_type+pja.output_name] = dict(action_type = pja.action_type,
-                                                                     output_name = pja.output_name,
-                                                                     action_arguments = pja.action_arguments)
+                    pja_dict[pja.action_type + pja.output_name] = dict(
+                        action_type=pja.action_type,
+                        output_name=pja.output_name,
+                        action_arguments=pja.action_arguments
+                    )
                 step_dict['post_job_actions'] = pja_dict
                 #workflow outputs
                 outputs = []
@@ -823,9 +827,9 @@
             step.position = step_dict['position']
             module = module_factory.from_dict( trans, step_dict )
             module.save_to_step( step )
-            if step_dict.has_key('workflow_outputs'):
+            if 'workflow_outputs' in step_dict:
                 for output_name in step_dict['workflow_outputs']:
-                    m = model.WorkflowOutput(workflow_step = step, output_name = output_name)
+                    m = model.WorkflowOutput(workflow_step=step, output_name=output_name)
                     trans.sa_session.add(m)
             if step.tool_errors:
                 # DBTODO Check for conditional inputs here.
@@ -842,7 +846,7 @@
             # Input connections
             for input_name, conns in step.temp_input_connections.iteritems():
                 if conns:
-                    conn_dicts = conns if isinstance(conns,list) else [conns]
+                    conn_dicts = conns if isinstance(conns, list) else [ conns ]
                     for conn_dict in conn_dicts:
                         conn = model.WorkflowStepConnection()
                         conn.input_step = step
@@ -1049,7 +1053,7 @@
                 # This may not be ideal...
                 workflow_data = uploaded_file.read()
             else:
-                message= 'You attempted to upload an empty file.'
+                message = 'You attempted to upload an empty file.'
                 status = 'error'
         if workflow_data:
             # Convert incoming workflow data from json
@@ -1154,13 +1158,14 @@
                 ext_to_class_name[k] = c.__module__ + "." + c.__name__
                 classes.append( c )
         class_to_classes = dict()
+
        def visit_bases( types, cls ):
            for base in cls.__bases__:
                if issubclass( base, Data ):
                    types.add( base.__module__ + "." + base.__name__ )
                visit_bases( types, base )
        for c in classes:
-           n = c.__module__ + "." + c.__name__
+           n = c.__module__ + "." + c.__name__
            types = set( [ n ] )
            visit_bases( types, c )
            class_to_classes[ n ] = dict( ( t, True ) for t in types )
@@ -1179,7 +1184,8 @@
                 "workflow/build_from_current_history.mako",
                 jobs=jobs,
                 warnings=warnings,
-                history=history )
+                history=history
+            )
         else:
             # Ensure job_ids and dataset_ids are lists (possibly empty)
             if job_ids is None:
@@ -1212,7 +1218,7 @@
             assert job_id in jobs_by_id, "Attempt to create workflow with job not connected to current history"
             job = jobs_by_id[ job_id ]
             tool = trans.app.toolbox.get_tool( job.tool_id )
-            param_values = job.get_param_values( trans.app, ignore_errors=True ) #If a tool was updated and e.g. had a text value changed to an integer, we don't want a traceback here
+            param_values = job.get_param_values( trans.app, ignore_errors=True )  # If a tool was updated and e.g. had a text value changed to an integer, we don't want a traceback here
             associations = cleanup_param_values( tool.inputs, param_values )
             step = model.WorkflowStep()
             step.type = 'tool'
@@ -1247,8 +1253,8 @@
         for i, steps_at_level in enumerate( levorder ):
             for j, index in enumerate( steps_at_level ):
                 step = steps[ index ]
-                step.position = dict( top = ( base_pos + 120 * j ),
-                                      left = ( base_pos + 220 * i ) )
+                step.position = dict( top=( base_pos + 120 * j ),
+                                      left=( base_pos + 220 * i ) )
         # Store it
         stored = model.StoredWorkflow()
         stored.user = user
@@ -1288,7 +1294,7 @@
         # has_errors = False
         saved_history = None
         if history_id is not None:
-            saved_history = trans.get_history();
+            saved_history = trans.get_history()
             try:
                 decoded_history_id = trans.security.decode_id( history_id )
                 history = trans.sa_session.query(trans.app.model.History).get(decoded_history_id)
@@ -1300,12 +1306,12 @@
                 error("Malformed history id ( %s ) specified, unable to decode." % str( history_id ))
             except:
                 error("That history does not exist.")
-        try: # use a try/finally block to restore the user's current history
+        try:  # use a try/finally block to restore the user's current history
            if kwargs:
                # If kwargs were provided, the states for each step should have
                # been POSTed
                # List to gather values for the template
-               invocations=[]
+               invocations = []
                for (kwargs, multi_input_keys) in _expand_multiple_inputs(kwargs, mode=multiple_input_mode):
                    for step in workflow.steps:
                        step.upgrade_messages = {}
@@ -1372,6 +1378,7 @@
                         job = None
                         if step.type == 'tool' or step.type is None:
                             tool = trans.app.toolbox.get_tool( step.tool_id )
+
                             # Connect up
                             def callback( input, value, prefixed_name, prefixed_label ):
                                 replacement = None
@@ -1465,7 +1472,7 @@
                     steps=[],
                     workflow=stored,
                     hide_fixed_params=hide_fixed_params,
-                    missing_tools = missing_tools)
+                    missing_tools=missing_tools)
         # Render the form
         stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
         return trans.fill_template(
@@ -1525,7 +1532,7 @@
                 else:
                     outputs.remove(existing_output.output_name)
             for outputname in outputs:
-                m = model.WorkflowOutput(workflow_step_id = int(step.id), output_name = outputname)
+                m = model.WorkflowOutput(workflow_step_id=int(step.id), output_name=outputname)
                 trans.sa_session.add(m)
         # Prepare each step
         trans.sa_session.flush()
@@ -1562,7 +1569,8 @@
             workflow=stored,
             has_upgrade_messages=has_upgrade_messages,
             errors=errors,
-            incoming=kwargs )
+            incoming=kwargs
+        )
 
     @web.expose
     def configure_menu( self, trans, workflow_ids=None ):
@@ -1619,8 +1627,8 @@
     in_pos = {}
     out_pos = {}
     margin = 5
-    line_px = 16 # how much spacing between input/outputs
-    widths = {} # store px width for boxes of each step
+    line_px = 16  # how much spacing between input/outputs
+    widths = {}  # store px width for boxes of each step
     max_width, max_x, max_y = 0, 0, 0
 
     for step in workflow.steps:
@@ -1651,7 +1659,7 @@
         y += 45
 
         for di in module.get_data_inputs():
-            cur_y = y+count*line_px
+            cur_y = y + count * line_px
             if step.order_index not in in_pos:
                 in_pos[step.order_index] = {}
             in_pos[step.order_index][di['name']] = (x, cur_y)
@@ -1659,12 +1667,11 @@
             count += 1
             max_len = max(max_len, len(di['label']))
-
         if len(module.get_data_inputs()) > 0:
             y += 15
 
         for do in module.get_data_outputs():
-            cur_y = y+count*line_px
+            cur_y = y + count * line_px
             if step.order_index not in out_pos:
                 out_pos[step.order_index] = {}
             out_pos[step.order_index][do['name']] = (x, cur_y)
@@ -1672,7 +1679,7 @@
             count += 1
             max_len = max(max_len, len(do['name']))
 
-        widths[step.order_index] = max_len*5.5
+        widths[step.order_index] = max_len * 5.5
         max_x = max(max_x, step.position['left'])
         max_y = max(max_y, step.position['top'])
         max_width = max(max_width, widths[step.order_index])
@@ -1680,7 +1687,7 @@
     for step_dict in data:
         width = widths[step_dict['id']]
         x, y = step_dict['position']['left'], step_dict['position']['top']
-        boxes.append( svgfig.Rect(x-margin, y, x+width-margin, y+30, fill="#EBD9B2").SVG() )
+        boxes.append( svgfig.Rect(x - margin, y, x + width - margin, y + 30, fill="#EBD9B2").SVG() )
         box_height = (len(step_dict['data_inputs']) + len(step_dict['data_outputs'])) * line_px + margin
 
         # Draw separator line
@@ -1709,8 +1716,8 @@
     return canvas
 
+
 ## ---- Utility methods -------------------------------------------------------
-
 def attach_ordered_steps( workflow, steps ):
     ordered_steps = order_workflow_steps( steps )
     if ordered_steps:
@@ -1722,6 +1729,7 @@
         workflow.has_cycles = True
         workflow.steps = steps
 
+
 def edgelist_for_workflow_steps( steps ):
     """
     Create a list of tuples representing edges between ``WorkflowSteps`` based
@@ -1735,6 +1743,7 @@
             edges.append( ( steps_to_index[conn.output_step], steps_to_index[conn.input_step] ) )
     return edges
 
+
 def order_workflow_steps( steps ):
     """
     Perform topological sort of the steps, return ordered or None
@@ -1744,7 +1753,7 @@
         if not step.position or not 'left' in step.position or not 'top' in step.position:
             position_data_available = False
     if position_data_available:
-        steps.sort(cmp=lambda s1,s2: cmp( math.sqrt(s1.position['left']**2 + s1.position['top']**2), math.sqrt(s2.position['left']**2 + s2.position['top']**2)))
+        steps.sort(cmp=lambda s1, s2: cmp( math.sqrt(s1.position['left'] ** 2 + s1.position['top'] ** 2), math.sqrt(s2.position['left'] ** 2 + s2.position['top'] ** 2)))
     try:
         edges = edgelist_for_workflow_steps( steps )
         node_order = topsort( edges )
@@ -1752,6 +1761,7 @@
     except CycleError:
         return None
 
+
 def order_workflow_steps_with_levels( steps ):
     try:
         return topsort_levels( edgelist_for_workflow_steps( steps ) )
@@ -1768,6 +1778,7 @@
         self.is_fake = True
         self.id = "fake_%s" % dataset.id
 
+
 def get_job_dict( trans ):
     """
     Return a dictionary of Job -> [ Dataset ] mappings, for all finished
@@ -1799,6 +1810,7 @@
             jobs[ job ] = [ ( assoc.name, dataset ) ]
     return jobs, warnings
 
+
 def cleanup_param_values( inputs, values ):
     """
     Remove 'Data' values from `param_values`, along with metadata cruft,
@@ -1809,6 +1821,7 @@
     if 'dbkey' in values:
         del values['dbkey']
     root_values = values
+
     # Recursively clean data inputs and dynamic selects
     def cleanup( prefix, inputs, values ):
         for key, input in inputs.items():
@@ -1821,7 +1834,7 @@
                 # HACK: Nested associations are not yet working, but we
                 # still need to clean them up so we can serialize
                 # if not( prefix ):
-                if tmp: #this is false for a non-set optional dataset
+                if tmp:  # this is false for a non-set optional dataset
                     if not isinstance(tmp, list):
                         associations.append( ( tmp.hid, prefix + key ) )
                     else:
@@ -1846,6 +1859,7 @@
     cleanup( "", inputs, values )
    return associations
 
+
 def _build_workflow_on_str(instance_ds_names):
     # Returns suffix for new histories based on multi input iteration
     num_multi_inputs = len(instance_ds_names)
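The hunks above are mechanical PEP-8 cleanups rather than behavior changes. As a compact illustration of the three most common fix patterns in this changeset (these lines are generic examples modeled on the diff, not taken from it verbatim):

    # Before: typical violations of the kinds fixed above.
    if type=='tool':                                       # E225: missing whitespace around operator
        item = wf.to_dict(value_mapper={'id':encode_id})   # E231: missing whitespace after ':'
    saved_history = trans.get_history();                   # E703: statement ends with a semicolon

    # After: the PEP-8-clean equivalents.
    if type == 'tool':
        item = wf.to_dict(value_mapper={'id': encode_id})
    saved_history = trans.get_history()
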
https://bitbucket.org/galaxy/galaxy-central/commits/d283386f6032/
Changeset:   d283386f6032
User:        jmchilton
Date:        2014-01-27 05:17:01
Summary:     API testing helpers to run methods as alternative users.

The previous run_as method has some potential limitations; testing permission/security behavior is more faithful when requests are actually issued with a normal alternative user's API key.

Affected #:  1 file

diff -r ec858cad8c51d5190237eaacb31c51841e2e2843 -r d283386f60325faafa4b8cb68593c59d32e768bf test/base/api.py
--- a/test/base/api.py
+++ b/test/base/api.py
@@ -1,3 +1,5 @@
+from contextlib import contextmanager
+
 # TODO: We don't need all of TwillTestCase, strip down to a common super class
 # shared by API and Twill test cases.
 from .twilltestcase import TwillTestCase
@@ -11,6 +13,7 @@
 
 TEST_USER = "user@bx.psu.edu"
+DEFAULT_OTHER_USER = "otheruser@bx.psu.edu"  # A second user for API testing.
 
 # TODO: Allow these to point at existing Galaxy instances.
@@ -39,6 +42,30 @@
         user = [ user for user in users if user["email"] == email ][0]
         return user
 
+    def _setup_user_get_key( self, email ):
+        self.galaxy_interactor.ensure_user_with_email( email )
+        users = self._get( "users", admin=True ).json()
+        user = [ user for user in users if user["email"] == email ][0]
+        return self._post( "users/%s/api_key" % user[ "id" ], admin=True ).json()
+
+    @contextmanager
+    def _different_user( self, email=DEFAULT_OTHER_USER ):
+        """ Use in test cases to switch get/post operations to act as new user,
+
+            with self._different_user( "other_user@bx.psu.edu" ):
+                self._get( "histories" )  # Gets other_user@bx.psu.edu histories.
+        """
+        original_api_key = self.user_api_key
+        original_interactor_key = self.galaxy_interactor.api_key
+        new_key = self._setup_user_get_key( email )
+        try:
+            self.user_api_key = new_key
+            self.galaxy_interactor.api_key = new_key
+            yield
+        finally:
+            self.user_api_key = original_api_key
+            self.galaxy_interactor.api_key = original_interactor_key
+
     def _get( self, *args, **kwds ):
         return self.galaxy_interactor.get( *args, **kwds )
@@ -51,7 +78,7 @@
     try:
         body = response.json()
     except Exception:
-        body = "INVALID JSON RESPONSE"
+        body = "INVALID JSON RESPONSE <%s>" % response.content
     assertion_message_template = "Request status code (%d) was not expected value %d. Body was %s"
     assertion_message = assertion_message_template % ( response_status_code, expected_status_code, body )
     raise AssertionError( assertion_message )
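To make the intended usage concrete, here is a minimal sketch of a test built on the new _different_user helper. The test name, the histories payload, and the expected 403 are illustrative assumptions, not part of this changeset:

    class ExampleApiTestCase( api.ApiTestCase ):

        def test_history_not_visible_to_other_user( self ):
            # Create a history while acting as the default test user.
            create_response = self._post( "histories", data=dict( name="private history" ) )
            self._assert_status_code_is( create_response, 200 )
            history_id = create_response.json()[ "id" ]
            # Inside the block, get/post calls use the alternative user's API key.
            with self._different_user( "otheruser@bx.psu.edu" ):
                show_response = self._get( "histories/%s" % history_id )
                # The second user should be denied access to the private history.
                self._assert_status_code_is( show_response, 403 )

Because the helper restores both self.user_api_key and the interactor's key in a finally block, any assertions after the with block run as the original user even if the block raises.
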
https://bitbucket.org/galaxy/galaxy-central/commits/bbd852d8ed7c/
Changeset:   bbd852d8ed7c
User:        jmchilton
Date:        2014-01-27 05:17:01
Summary:     Workflow API tests for deletion functionality.

Various refactoring to reduce code duplication (in this and subsequent changesets).

Affected #:  1 file

diff -r d283386f60325faafa4b8cb68593c59d32e768bf -r bbd852d8ed7c125844aac9f38a85d5246bbd845b test/functional/api/test_workflows.py
--- a/test/functional/api/test_workflows.py
+++ b/test/functional/api/test_workflows.py
@@ -5,6 +5,8 @@
 import time
 
 from .helpers import TestsDatasets
+from base.interactor import delete_request  # requests like delete
+
 
 workflow_str = resource_string( __name__, "test_workflow_1.ga" )
@@ -15,6 +17,23 @@
 # - Much more testing obviously, always more testing.
 class WorkflowsApiTestCase( api.ApiTestCase, TestsDatasets ):
 
+    def test_delete( self ):
+        workflow_id = self._simple_workflow( "test_delete" )
+        workflow_name = "test_delete (imported from API)"
+        self._assert_user_has_workflow_with_name( workflow_name )
+        workflow_url = self._api_url( "workflows/%s" % workflow_id, use_key=True )
+        delete_response = delete_request( workflow_url )
+        self._assert_status_code_is( delete_response, 200 )
+        # Make sure workflow is no longer in index by default.
+        assert workflow_name not in self.__workflow_names()
+
+    def test_other_cannot_delete( self ):
+        workflow_id = self._simple_workflow( "test_other_delete" )
+        with self._different_user():
+            workflow_url = self._api_url( "workflows/%s" % workflow_id, use_key=True )
+            delete_response = delete_request( workflow_url )
+            self._assert_status_code_is( delete_response, 403 )
+
     def test_index( self ):
         index_response = self._get( "workflows" )
         self._assert_status_code_is( index_response, 200 )
@@ -29,7 +48,7 @@
         self._assert_user_has_workflow_with_name( "test_import (imported from API)" )
 
     def test_export( self ):
-        uploaded_workflow_id = self._create_workflow( self._load_workflow( name="test_for_export" ) )
+        uploaded_workflow_id = self._simple_workflow( "test_for_export" )
         download_response = self._get( "workflows/%s/download" % uploaded_workflow_id )
         self._assert_status_code_is( download_response, 200 )
         downloaded_workflow = download_response.json()
@@ -100,9 +119,14 @@
     def _ds_entry( self, hda ):
         return dict( src="hda", id=hda[ "id" ] )
 
-    def _create_workflow( self, workflow ):
+    def _simple_workflow( self, name, **create_kwds ):
+        workflow = self._load_workflow( name )
+        return self._create_workflow( workflow, **create_kwds )
+
+    def _create_workflow( self, workflow, **create_kwds ):
         data = dict(
             workflow=dumps( workflow ),
+            **create_kwds
         )
         upload_response = self._post( "workflows/upload", data=data )
         self._assert_status_code_is( upload_response, 200 )
@@ -110,10 +134,14 @@
         return uploaded_workflow_id
 
     def _assert_user_has_workflow_with_name( self, name ):
+        names = self.__workflow_names()
+        assert name in names, "No workflows with name %s in users workflows <%s>" % ( name, names )
+
+    def __workflow_names( self ):
         index_response = self._get( "workflows" )
         self._assert_status_code_is( index_response, 200 )
         names = map( lambda w: w[ "name" ], index_response.json() )
-        assert name in names, "No workflows with name %s in users workflows <%s>" % ( name, names )
+        return names
 
     def _load_workflow( self, name, add_pja=False ):
         workflow = loads( workflow_str )

Repository URL: https://bitbucket.org/galaxy/galaxy-central/
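As a closing note, the DELETE endpoint exercised by these tests can be driven by any HTTP client, not only the test framework's delete_request helper. A minimal sketch using the requests library — the instance URL and API key below are placeholders, not values from the changeset:

    import requests

    GALAXY_URL = "http://localhost:8080"  # placeholder Galaxy instance URL
    API_KEY = "your-api-key-here"         # placeholder user API key

    def delete_workflow( workflow_id ):
        # Galaxy API routes accept the user's key as a query parameter.
        url = "%s/api/workflows/%s" % ( GALAXY_URL, workflow_id )
        response = requests.delete( url, params=dict( key=API_KEY ) )
        # A 403 here mirrors test_other_cannot_delete: only the owner may delete.
        response.raise_for_status()
        return response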