commit/galaxy-central: 44 new changesets
44 new commits in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/commits/fd2ae1a670a6/ Changeset: fd2ae1a670a6 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Refactor twill tool test driver to encode the hda id earlier and pass an encoded id to the display action in the root controller. Would definitely like to simplify or, better yet, remove root.display if it is not used anywhere else; requests should be coming in through the dataset controller. Affected #: 2 files diff -r 5471fa863712ba3f89ad9adceb194daf0baeafa2 -r fd2ae1a670a6caf79ad3e2baa3d3d4242bef27d6 lib/galaxy/webapps/galaxy/controllers/root.py --- a/lib/galaxy/webapps/galaxy/controllers/root.py +++ b/lib/galaxy/webapps/galaxy/controllers/root.py @@ -172,12 +172,16 @@ ## ---- Dataset display / editing ---------------------------------------- @web.expose - def display( self, trans, id=None, hid=None, tofile=None, toext=".txt", **kwd ): + def display( self, trans, id=None, hid=None, tofile=None, toext=".txt", encoded_id=None, **kwd ): """Returns data directly into the browser. Sets the mime-type according to the extension. + + Used by the twill tool test driver - used anywhere else? Would like to drop hid + argument and path if unneeded now. Likewise, would like to drop encoded_id=XXX + and use assume id is encoded (likely id wouldn't be coming in encoded if this + is used anywhere else though.) """ - #TODO: unused? #TODO: unencoded id if hid is not None: try: @@ -192,6 +196,8 @@ else: raise Exception( "No dataset with hid '%d'" % hid ) else: + if encoded_id and not id: + id = trans.security.decode_id( encoded_id ) try: data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id ) except: diff -r 5471fa863712ba3f89ad9adceb194daf0baeafa2 -r fd2ae1a670a6caf79ad3e2baa3d3d4242bef27d6 test/base/twilltestcase.py --- a/test/base/twilltestcase.py +++ b/test/base/twilltestcase.py @@ -826,8 +826,10 @@ if ext != test_ext: raise AssertionError( errmsg ) else: + hda_id = self.security.encode_id( elem.get( 'id' ) ) self.home() - self.visit_page( "display?hid=" + hid ) + # See not in controllers/root.py about encoded_id. + self.visit_page( "display?encoded_id=%s" % hda_id ) data = self.last_page() if attributes is not None and attributes.get( "assert_list", None ) is not None: try: @@ -918,7 +920,7 @@ if base_name is None: base_name = os.path.split(file_name)[-1] temp_name = self.makeTfname(fname=base_name) - self.visit_url( "%s/datasets/%s/display/%s" % ( self.url, self.security.encode_id( hda_id ), base_name ) ) + self.visit_url( "%s/datasets/%s/display/%s" % ( self.url, hda_id, base_name ) ) data = self.last_page() file( temp_name, 'wb' ).write( data ) if self.keepOutdir > '': https://bitbucket.org/galaxy/galaxy-central/commits/5c5eaf0c19da/ Changeset: 5c5eaf0c19da User: jmchilton Date: 2013-11-22 02:17:12 Summary: Refactor shared code in functional_tests.py into a method.
Affected #: 1 file diff -r fd2ae1a670a6caf79ad3e2baa3d3d4242bef27d6 -r 5c5eaf0c19da5b347d05a1f664e1fc394a680735 scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -415,6 +415,16 @@ os.environ[ 'GALAXY_TEST_SAVE' ] = galaxy_test_save # Pass in through script setenv, will leave a copy of ALL test validate files os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host + + def _run_functional_test( testing_shed_tools=None ): + functional.test_toolbox.toolbox = app.toolbox + functional.test_toolbox.build_tests( testing_shed_tools=testing_shed_tools ) + test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() ) + test_config.configure( sys.argv ) + result = run_tests( test_config ) + success = result.wasSuccessful() + return success + if testing_migrated_tools or testing_installed_tools: shed_tools_dict = {} if testing_migrated_tools: @@ -438,12 +448,7 @@ for installed_tool_panel_config in installed_tool_panel_configs: tool_configs.append( installed_tool_panel_config ) app.toolbox = tools.ToolBox( tool_configs, app.config.tool_path, app ) - functional.test_toolbox.toolbox = app.toolbox - functional.test_toolbox.build_tests( testing_shed_tools=True ) - test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() ) - test_config.configure( sys.argv ) - result = run_tests( test_config ) - success = result.wasSuccessful() + success = _run_functional_test( testing_shed_tools=True ) try: os.unlink( tmp_tool_panel_conf ) except: @@ -453,14 +458,9 @@ except: log.info( "Unable to remove file: %s" % galaxy_tool_shed_test_file ) else: - functional.test_toolbox.toolbox = app.toolbox - functional.test_toolbox.build_tests() if galaxy_test_file_dir: os.environ[ 'GALAXY_TEST_FILE_DIR' ] = galaxy_test_file_dir - test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() ) - test_config.configure( sys.argv ) - result = run_tests( test_config ) - success = result.wasSuccessful() + success = _run_functional_test( ) except: log.exception( "Failure running tests" ) https://bitbucket.org/galaxy/galaxy-central/commits/faf49c10ea03/ Changeset: faf49c10ea03 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Small simplification to test/functional/test_toolbox.py. Affected #: 1 file diff -r 5c5eaf0c19da5b347d05a1f664e1fc394a680735 -r faf49c10ea033cf7fcde67ebfd426d9e344ea396 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -12,10 +12,12 @@ class ToolTestCase( TwillTestCase ): """Abstract test case that runs tests based on a `galaxy.tools.test.ToolTest`""" - def do_it( self, testdef, shed_tool_id=None ): + def do_it( self, testdef ): """ Run through a tool test case. """ + shed_tool_id = self.shed_tool_id + self.__handle_test_def_errors( testdef ) latest_history = self.__setup_test_history() @@ -206,22 +208,21 @@ for i, tool_id in enumerate( toolbox.tools_by_id ): tool = toolbox.get_tool( tool_id ) if tool.tests: + shed_tool_id = None if not testing_shed_tools else tool.id # Create a new subclass of ToolTestCase, dynamically adding methods # named test_tool_XXX that run each test defined in the tool config. 
name = "TestForTool_" + tool.id.replace( ' ', '_' ) baseclasses = ( ToolTestCase, ) namespace = dict() for j, testdef in enumerate( tool.tests ): - def make_test_method( td, shed_tool_id=None ): + def make_test_method( td ): def test_tool( self ): - self.do_it( td, shed_tool_id=shed_tool_id ) + self.do_it( td ) return test_tool - if testing_shed_tools: - test_method = make_test_method( testdef, shed_tool_id=tool.id ) - else: - test_method = make_test_method( testdef ) + test_method = make_test_method( testdef ) test_method.__doc__ = "%s ( %s ) > %s" % ( tool.name, tool.id, testdef.name ) namespace[ 'test_tool_%06d' % j ] = test_method + namespace[ 'shed_tool_id' ] = shed_tool_id # The new.classobj function returns a new class object, with name name, derived # from baseclasses (which should be a tuple of classes) and with namespace dict. new_class_obj = new.classobj( name, baseclasses, namespace ) https://bitbucket.org/galaxy/galaxy-central/commits/65c2a41d4a33/ Changeset: 65c2a41d4a33 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Refactor test data input setup logic out of TestToolBox into ToolTestBuilder. Simplifying test_toolbox.py will hopefully make it easier to provide an alternative API driven option. Affected #: 2 files diff -r faf49c10ea033cf7fcde67ebfd426d9e344ea396 -r 65c2a41d4a336d2bf5cf40d8b980ecb2e99cb163 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -6,6 +6,9 @@ log = logging.getLogger( __name__ ) +DEFAULT_FTYPE = 'auto' +DEFAULT_DBKEY = 'hg17' + class ToolTestBuilder( object ): """ @@ -29,6 +32,31 @@ self.__parse_elem( test_elem, i ) + def test_data( self ): + """ + Iterator over metadata representing the required files for upload. + """ + for fname, extra in self.required_files: + data_dict = dict( + fname=fname, + metadata=extra.get( 'metadata', [] ), + composite_data=extra.get( 'composite_data', [] ), + ftype=extra.get( 'ftype', DEFAULT_FTYPE ), + dbkey=extra.get( 'dbkey', DEFAULT_DBKEY ), + ) + edit_attributes = extra.get( 'edit_attributes', [] ) + + #currently only renaming is supported + for edit_att in edit_attributes: + if edit_att.get( 'type', None ) == 'name': + new_name = edit_att.get( 'value', None ) + assert new_name, 'You must supply the new dataset name as the value tag of the edit_attributes tag' + data_dict['name'] = new_name + else: + raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) ) + + yield data_dict + def __parse_elem( self, test_elem, i ): # Composite datasets need a unique name: each test occurs in a fresh # history, but we'll keep it unique per set of tests - use i (test #) diff -r faf49c10ea033cf7fcde67ebfd426d9e344ea396 -r 65c2a41d4a336d2bf5cf40d8b980ecb2e99cb163 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -57,32 +57,20 @@ def __setup_test_data( self, testdef, shed_tool_id ): # Upload any needed files - for fname, extra in testdef.required_files: - metadata = extra.get( 'metadata', [] ) - composite_data = extra.get( 'composite_data', [] ) - self.upload_file( fname, - ftype=extra.get( 'ftype', 'auto' ), - dbkey=extra.get( 'dbkey', 'hg17' ), - metadata=metadata, - composite_data=composite_data, + for test_data in testdef.test_data(): + self.upload_file( test_data['fname'], + ftype=test_data['ftype'], + dbkey=test_data['dbkey'], + metadata=test_data['metadata'], + composite_data=test_data['composite_data'], shed_tool_id=shed_tool_id ) - - print "Uploaded file: ", fname, ", ftype: ", extra.get( 
'ftype', 'auto' ), ", extra: ", extra - #Post upload attribute editing - edit_attributes = extra.get( 'edit_attributes', [] ) - - #currently only renaming is supported - for edit_att in edit_attributes: - if edit_att.get( 'type', None ) == 'name': - new_name = edit_att.get( 'value', None ) - assert new_name, 'You must supply the new dataset name as the value tag of the edit_attributes tag' - hda_id = self.get_history_as_data_list()[-1].get( 'id' ) - try: - self.edit_hda_attribute_info( hda_id=str(hda_id), new_name=new_name ) - except: - print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, new_name) - else: - raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) ) + name = test_data.get('name', None) + if name: + hda_id = self.get_history_as_data_list()[-1].get( 'id' ) + try: + self.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name ) + except: + print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name) def __run_tool( self, testdef ): # We need to handle the case where we've uploaded a valid compressed file since the upload https://bitbucket.org/galaxy/galaxy-central/commits/3b4ed0725792/ Changeset: 3b4ed0725792 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Refactor data staging and tool running logic out of test/functional/test_toolbox.py. In a new twill specific interactor class, idea here is to proceed by implementing a matching API interactor class. Affected #: 1 file diff -r 65c2a41d4a336d2bf5cf40d8b980ecb2e99cb163 -r 3b4ed072579212764322cbdd8440ecbaad836ca9 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -20,16 +20,20 @@ self.__handle_test_def_errors( testdef ) - latest_history = self.__setup_test_history() + galaxy_interactor = GalaxyInteractorTwill( self ) - self.__setup_test_data( testdef, shed_tool_id ) + test_history = galaxy_interactor.new_history() - data_list = self.__run_tool( testdef ) + # Upload any needed files + for test_data in testdef.test_data(): + galaxy_interactor.stage_data( test_data, shed_tool_id ) + + data_list = galaxy_interactor.run_tool( testdef ) self.assertTrue( data_list ) self.__verify_outputs( testdef, shed_tool_id, data_list ) - self.__delete_history( latest_history ) + galaxy_interactor.delete_history( test_history ) def __handle_test_def_errors(self, testdef): # If the test generation had an error, raise @@ -39,81 +43,6 @@ else: raise Exception( "Test parse failure" ) - def __setup_test_history( self ): - # Start with a new history - self.logout() - self.login( email='test@bx.psu.edu' ) - admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email == 'test@bx.psu.edu' ).one() - self.new_history() - latest_history = sa_session.query( galaxy.model.History ) \ - .filter( and_( galaxy.model.History.table.c.deleted == False, - galaxy.model.History.table.c.user_id == admin_user.id ) ) \ - .order_by( desc( galaxy.model.History.table.c.create_time ) ) \ - .first() - assert latest_history is not None, "Problem retrieving latest_history from database" - if len( self.get_history_as_data_list() ) > 0: - raise AssertionError("ToolTestCase.do_it failed") - return latest_history - - def __setup_test_data( self, testdef, shed_tool_id ): - # Upload any needed files - for test_data in testdef.test_data(): - self.upload_file( test_data['fname'], - ftype=test_data['ftype'], - dbkey=test_data['dbkey'], - metadata=test_data['metadata'], - composite_data=test_data['composite_data'], - 
shed_tool_id=shed_tool_id ) - name = test_data.get('name', None) - if name: - hda_id = self.get_history_as_data_list()[-1].get( 'id' ) - try: - self.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name ) - except: - print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name) - - def __run_tool( self, testdef ): - # We need to handle the case where we've uploaded a valid compressed file since the upload - # tool will have uncompressed it on the fly. - all_inputs = {} - for name, value, _ in testdef.inputs: - all_inputs[ name ] = value - - # See if we have a grouping.Repeat element - repeat_name = None - for input_name, input_value in testdef.tool.inputs_by_page[0].items(): - if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: # default behavior is to test 1 repeat, for backwards compatibility - if not input_value.min: # If input_value.min == 1, the element is already on the page don't add new element. - repeat_name = input_name - break - - #check if we need to verify number of outputs created dynamically by tool - if testdef.tool.force_history_refresh: - job_finish_by_output_count = len( self.get_history_as_data_list() ) - else: - job_finish_by_output_count = False - - # Do the first page - page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[0], all_inputs) - - # Run the tool - self.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs ) - print "page_inputs (0)", page_inputs - # Do other pages if they exist - for i in range( 1, testdef.tool.npages ): - page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[i], all_inputs) - self.submit_form( **page_inputs ) - print "page_inputs (%i)" % i, page_inputs - - # Check the results ( handles single or multiple tool outputs ). Make sure to pass the correct hid. - # The output datasets from the tool should be in the same order as the testdef.outputs. - data_list = None - while data_list is None: - data_list = self.get_history_as_data_list() - if job_finish_by_output_count and len( testdef.outputs ) > ( len( data_list ) - job_finish_by_output_count ): - data_list = None - return data_list - def __verify_outputs( self, testdef, shed_tool_id, data_list ): maxseconds = testdef.maxseconds @@ -135,8 +64,87 @@ print >>sys.stderr, self.get_job_stderr( elem.get( 'id' ), format=True ) raise - def __delete_history( self, latest_history ): - self.delete_history( id=self.security.encode_id( latest_history.id ) ) + +class GalaxyInteractorTwill( object ): + + def __init__( self, twill_test_case ): + self.twill_test_case = twill_test_case + + def stage_data( self, test_data, shed_tool_id ): + self.twill_test_case.upload_file( test_data['fname'], + ftype=test_data['ftype'], + dbkey=test_data['dbkey'], + metadata=test_data['metadata'], + composite_data=test_data['composite_data'], + shed_tool_id=shed_tool_id ) + name = test_data.get('name', None) + if name: + hda_id = self.twill_test_case.get_history_as_data_list()[-1].get( 'id' ) + try: + self.twill_test_case.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name ) + except: + print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name) + + def run_tool( self, testdef ): + # We need to handle the case where we've uploaded a valid compressed file since the upload + # tool will have uncompressed it on the fly. 
+ all_inputs = {} + for name, value, _ in testdef.inputs: + all_inputs[ name ] = value + + # See if we have a grouping.Repeat element + repeat_name = None + for input_name, input_value in testdef.tool.inputs_by_page[0].items(): + if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: # default behavior is to test 1 repeat, for backwards compatibility + if not input_value.min: # If input_value.min == 1, the element is already on the page don't add new element. + repeat_name = input_name + break + + #check if we need to verify number of outputs created dynamically by tool + if testdef.tool.force_history_refresh: + job_finish_by_output_count = len( self.twill_test_case.get_history_as_data_list() ) + else: + job_finish_by_output_count = False + + # Do the first page + page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[0], all_inputs) + + # Run the tool + self.twill_test_case.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs ) + print "page_inputs (0)", page_inputs + # Do other pages if they exist + for i in range( 1, testdef.tool.npages ): + page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[i], all_inputs) + self.twill_test_case.submit_form( **page_inputs ) + print "page_inputs (%i)" % i, page_inputs + + # Check the results ( handles single or multiple tool outputs ). Make sure to pass the correct hid. + # The output datasets from the tool should be in the same order as the testdef.outputs. + data_list = None + while data_list is None: + data_list = self.twill_test_case.get_history_as_data_list() + if job_finish_by_output_count and len( testdef.outputs ) > ( len( data_list ) - job_finish_by_output_count ): + data_list = None + return data_list + + def new_history( self ): + # Start with a new history + self.twill_test_case.logout() + self.twill_test_case.login( email='test@bx.psu.edu' ) + admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email == 'test@bx.psu.edu' ).one() + self.twill_test_case.new_history() + latest_history = sa_session.query( galaxy.model.History ) \ + .filter( and_( galaxy.model.History.table.c.deleted == False, + galaxy.model.History.table.c.user_id == admin_user.id ) ) \ + .order_by( desc( galaxy.model.History.table.c.create_time ) ) \ + .first() + assert latest_history is not None, "Problem retrieving latest_history from database" + if len( self.twill_test_case.get_history_as_data_list() ) > 0: + raise AssertionError("ToolTestCase.do_it failed") + return latest_history + + def delete_history( self, latest_history ): + self.twill_test_case.delete_history( id=self.twill_test_case.security.encode_id( latest_history.id ) ) def __expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): expanded_inputs = {} https://bitbucket.org/galaxy/galaxy-central/commits/c31db7ed3f0a/ Changeset: c31db7ed3f0a User: jmchilton Date: 2013-11-22 02:17:12 Summary: Upload a tool test's inputs in parallel. Probably small optimization in most cases since tests generally have few inputs. 
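The pattern introduced here: stage_data_async kicks off an upload without blocking and hands back a callable that waits for that particular upload to finish, so the caller can start every upload before synchronizing once. A minimal self-contained sketch of the same idea - the threading is illustrative only; the Twill interactor instead submits the upload form with wait=False and returns a history-polling lambda, as in the diff below:

import threading

def stage_data_async( upload_fn ):
    # Start the upload without blocking; return a callable that blocks
    # until this particular upload has completed.
    worker = threading.Thread( target=upload_fn )
    worker.start()
    return worker.join

def stage_all( upload_fns ):
    # Fire off every upload first...
    upload_waits = [ stage_data_async( fn ) for fn in upload_fns ]
    # ...then synchronize once, mirroring the upload_waits loop in the diff below.
    for upload_wait in upload_waits:
        upload_wait()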
Affected #: 2 files diff -r 3b4ed072579212764322cbdd8440ecbaad836ca9 -r c31db7ed3f0a1970e45fc5fbe085e07d024cc30a test/base/twilltestcase.py --- a/test/base/twilltestcase.py +++ b/test/base/twilltestcase.py @@ -197,7 +197,7 @@ filename = os.path.join( *path ) file(filename, 'wt').write(buffer.getvalue()) - def upload_file( self, filename, ftype='auto', dbkey='unspecified (?)', space_to_tab=False, metadata=None, composite_data=None, shed_tool_id=None ): + def upload_file( self, filename, ftype='auto', dbkey='unspecified (?)', space_to_tab=False, metadata=None, composite_data=None, name=None, shed_tool_id=None, wait=True ): """ Uploads a file. If shed_tool_id has a value, we're testing tools migrated from the distribution to the tool shed, so the tool-data directory of test data files is contained in the installed tool shed repository. @@ -218,12 +218,19 @@ filename = self.get_filename( filename, shed_tool_id=shed_tool_id ) tc.formfile( "tool_form", "file_data", filename ) tc.fv( "tool_form", "space_to_tab", space_to_tab ) + if name: + # NAME is a hidden form element, so the following prop must + # set to use it. + tc.config("readonly_controls_writeable", 1) + tc.fv( "tool_form", "NAME", name ) tc.submit( "runtool_btn" ) self.home() except AssertionError, err: errmsg = "Uploading file resulted in the following exception. Make sure the file (%s) exists. " % filename errmsg += str( err ) raise AssertionError( errmsg ) + if not wait: + return # Make sure every history item has a valid hid hids = self.get_hids_in_history() for hid in hids: diff -r 3b4ed072579212764322cbdd8440ecbaad836ca9 -r c31db7ed3f0a1970e45fc5fbe085e07d024cc30a test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -25,8 +25,11 @@ test_history = galaxy_interactor.new_history() # Upload any needed files + upload_waits = [] for test_data in testdef.test_data(): - galaxy_interactor.stage_data( test_data, shed_tool_id ) + upload_waits.append( galaxy_interactor.stage_data_async( test_data, shed_tool_id ) ) + for upload_wait in upload_waits: + upload_wait() data_list = galaxy_interactor.run_tool( testdef ) self.assertTrue( data_list ) @@ -70,20 +73,24 @@ def __init__( self, twill_test_case ): self.twill_test_case = twill_test_case - def stage_data( self, test_data, shed_tool_id ): + def stage_data_async( self, test_data, shed_tool_id, async=True ): + name = test_data.get( 'name', None ) + if name: + async = False self.twill_test_case.upload_file( test_data['fname'], ftype=test_data['ftype'], dbkey=test_data['dbkey'], metadata=test_data['metadata'], composite_data=test_data['composite_data'], - shed_tool_id=shed_tool_id ) - name = test_data.get('name', None) + shed_tool_id=shed_tool_id, + wait=(not async) ) if name: hda_id = self.twill_test_case.get_history_as_data_list()[-1].get( 'id' ) try: self.twill_test_case.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name ) except: print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name) + return lambda: self.twill_test_case.wait() def run_tool( self, testdef ): # We need to handle the case where we've uploaded a valid compressed file since the upload https://bitbucket.org/galaxy/galaxy-central/commits/e2df40d1cc38/ Changeset: e2df40d1cc38 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Inject master API key into functional test Galaxy. Override or set external key with GALAXY_TEST_MASTER_API_KEY. 
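A master API key lets the test harness mint per-user keys over the API instead of driving the login form. The exchange (implemented as __get_user_key in changeset 15c6170f58c8 below) boils down to two or three calls; a sketch, with the email/password/username values taken from the diffs and the function name illustrative:

import requests

def user_api_key( api_url, master_key, email='test@bx.psu.edu' ):
    # Look the test user up with the master key, creating it if missing...
    users = requests.get( '%s/users' % api_url, params={ 'key': master_key } ).json()
    matches = [ user for user in users if user['email'] == email ]
    if matches:
        user_id = matches[0]['id']
    else:
        data = dict( email=email, password='testuser', username='admin-user', key=master_key )
        user_id = requests.post( '%s/users' % api_url, data=data ).json()['id']
    # ...then have the master key authorize a new API key for that user.
    return requests.post( '%s/users/%s/api_key' % ( api_url, user_id ), data={ 'key': master_key } ).json()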
Affected #: 2 files diff -r c31db7ed3f0a1970e45fc5fbe085e07d024cc30a -r e2df40d1cc38c08f6bb229c28f3a044a51013618 scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -62,6 +62,7 @@ default_galaxy_test_port_max = 9999 default_galaxy_locales = 'en' default_galaxy_test_file_dir = "test-data" +default_galaxy_master_key = "TEST123" migrated_tool_panel_config = 'migrated_tools_conf.xml' installed_tool_panel_configs = [ 'shed_tool_conf.xml' ] @@ -296,6 +297,7 @@ pass # ---- Build Application -------------------------------------------------- + master_api_key = os.environ.get( "GALAXY_TEST_MASTER_API_KEY", default_galaxy_master_key ) app = None if start_server: kwargs = dict( admin_users='test@bx.psu.edu', @@ -322,7 +324,9 @@ tool_parse_help=False, update_integrated_tool_panel=False, use_heartbeat=False, - user_library_import_dir=user_library_import_dir ) + user_library_import_dir=user_library_import_dir, + master_api_key=master_api_key, + ) if psu_production: kwargs[ 'global_conf' ] = None if not database_connection.startswith( 'sqlite://' ): @@ -418,7 +422,10 @@ def _run_functional_test( testing_shed_tools=None ): functional.test_toolbox.toolbox = app.toolbox - functional.test_toolbox.build_tests( testing_shed_tools=testing_shed_tools ) + functional.test_toolbox.build_tests( + testing_shed_tools=testing_shed_tools, + master_api_key=master_api_key, + ) test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() ) test_config.configure( sys.argv ) result = run_tests( test_config ) diff -r c31db7ed3f0a1970e45fc5fbe085e07d024cc30a -r e2df40d1cc38c08f6bb229c28f3a044a51013618 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -191,7 +191,7 @@ return expanded_inputs -def build_tests( testing_shed_tools=False ): +def build_tests( testing_shed_tools=False, master_api_key=None ): """ If the module level variable `toolbox` is set, generate `ToolTestCase` classes for all of its tests and put them into this modules globals() so @@ -226,6 +226,7 @@ test_method.__doc__ = "%s ( %s ) > %s" % ( tool.name, tool.id, testdef.name ) namespace[ 'test_tool_%06d' % j ] = test_method namespace[ 'shed_tool_id' ] = shed_tool_id + namespace[ 'master_api_key' ] = master_api_key # The new.classobj function returns a new class object, with name name, derived # from baseclasses (which should be a tuple of classes) and with namespace dict. new_class_obj = new.classobj( name, baseclasses, namespace ) https://bitbucket.org/galaxy/galaxy-central/commits/3f105f22fc57/ Changeset: 3f105f22fc57 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Tool functional tests - refactor reusable function for verifying output ... from verify_dataset_correctness. Introduce simple, extensible abstraction for fetching outputs for verification in twilltestcase.py. Will want to reuse the core verification of the displayed dataset in the API-driven case and will not need or be able to use the extra cruft related to checking UI, job state, etc... Also small twilltestcase code structure updates for Python 2.6+. Affected #: 1 file diff -r e2df40d1cc38c08f6bb229c28f3a044a51013618 -r 3f105f22fc5781d89421f5bcb6d9a0bd5825e9b0 test/base/twilltestcase.py --- a/test/base/twilltestcase.py +++ b/test/base/twilltestcase.py @@ -833,70 +833,83 @@ if ext != test_ext: raise AssertionError( errmsg ) else: + # See not in controllers/root.py about encoded_id.
hda_id = self.security.encode_id( elem.get( 'id' ) ) - self.home() - # See not in controllers/root.py about encoded_id. - self.visit_page( "display?encoded_id=%s" % hda_id ) + self.verify_hid( filename, hid=hid, hda_id=hda_id, attributes=attributes, shed_tool_id=shed_tool_id) + + def verify_hid( self, filename, hda_id, attributes, shed_tool_id, hid="", dataset_fetcher=None): + dataset_fetcher = dataset_fetcher or self.__default_dataset_fetcher() + data = dataset_fetcher( hda_id ) + if attributes is not None and attributes.get( "assert_list", None ) is not None: + try: + verify_assertions(data, attributes["assert_list"]) + except AssertionError, err: + errmsg = 'History item %s different than expected\n' % (hid) + errmsg += str( err ) + raise AssertionError( errmsg ) + if filename is not None: + local_name = self.get_filename( filename, shed_tool_id=shed_tool_id ) + temp_name = self.makeTfname(fname=filename) + file( temp_name, 'wb' ).write( data ) + + # if the server's env has GALAXY_TEST_SAVE, save the output file to that dir + if self.keepOutdir: + ofn = os.path.join( self.keepOutdir, os.path.basename( local_name ) ) + log.debug( 'keepoutdir: %s, ofn: %s', self.keepOutdir, ofn ) + try: + shutil.copy( temp_name, ofn ) + except Exception, exc: + error_log_msg = ( 'TwillTestCase could not save output file %s to %s: ' % ( temp_name, ofn ) ) + error_log_msg += str( exc ) + log.error( error_log_msg, exc_info=True ) + else: + log.debug('## GALAXY_TEST_SAVE=%s. saved %s' % ( self.keepOutdir, ofn ) ) + try: + if attributes is None: + attributes = {} + compare = attributes.get( 'compare', 'diff' ) + if attributes.get( 'ftype', None ) == 'bam': + local_fh, temp_name = self._bam_to_sam( local_name, temp_name ) + local_name = local_fh.name + extra_files = attributes.get( 'extra_files', None ) + if compare == 'diff': + self.files_diff( local_name, temp_name, attributes=attributes ) + elif compare == 're_match': + self.files_re_match( local_name, temp_name, attributes=attributes ) + elif compare == 're_match_multiline': + self.files_re_match_multiline( local_name, temp_name, attributes=attributes ) + elif compare == 'sim_size': + delta = attributes.get('delta', '100') + s1 = len(data) + s2 = os.path.getsize(local_name) + if abs(s1 - s2) > int(delta): + raise Exception( 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name, s1, local_name, s2, delta) ) + elif compare == "contains": + self.files_contains( local_name, temp_name, attributes=attributes ) + else: + raise Exception( 'Unimplemented Compare type: %s' % compare ) + if extra_files: + self.verify_extra_files_content( extra_files, hda_id, shed_tool_id=shed_tool_id, dataset_fetcher=dataset_fetcher ) + except AssertionError, err: + errmsg = 'History item %s different than expected, difference (using %s):\n' % ( hid, compare ) + errmsg += "( %s v. %s )\n" % ( local_name, temp_name ) + errmsg += str( err ) + raise AssertionError( errmsg ) + finally: + os.remove( temp_name ) + + def __default_dataset_fetcher( self ): + def fetcher( hda_id, filename=None ): + if filename is None: + page_url = "display?encoded_id=%s" % hda_id + self.home() # I assume this is not needed. 
+ else: + page_url = "datasets/%s/display/%s" % ( hda_id, filename ) + self.visit_page( page_url ) data = self.last_page() - if attributes is not None and attributes.get( "assert_list", None ) is not None: - try: - verify_assertions(data, attributes["assert_list"]) - except AssertionError, err: - errmsg = 'History item %s different than expected\n' % (hid) - errmsg += str( err ) - raise AssertionError( errmsg ) - if filename is not None: - local_name = self.get_filename( filename, shed_tool_id=shed_tool_id ) - temp_name = self.makeTfname(fname=filename) - file( temp_name, 'wb' ).write( data ) + return data - # if the server's env has GALAXY_TEST_SAVE, save the output file to that dir - if self.keepOutdir: - ofn = os.path.join( self.keepOutdir, os.path.basename( local_name ) ) - log.debug( 'keepoutdir: %s, ofn: %s', self.keepOutdir, ofn ) - try: - shutil.copy( temp_name, ofn ) - except Exception, exc: - error_log_msg = ( 'TwillTestCase could not save output file %s to %s: ' % ( temp_name, ofn ) ) - error_log_msg += str( exc ) - log.error( error_log_msg, exc_info=True ) - else: - log.debug('## GALAXY_TEST_SAVE=%s. saved %s' % ( self.keepOutdir, ofn ) ) - try: - # have to nest try-except in try-finally to handle 2.4 - try: - if attributes is None: - attributes = {} - compare = attributes.get( 'compare', 'diff' ) - if attributes.get( 'ftype', None ) == 'bam': - local_fh, temp_name = self._bam_to_sam( local_name, temp_name ) - local_name = local_fh.name - extra_files = attributes.get( 'extra_files', None ) - if compare == 'diff': - self.files_diff( local_name, temp_name, attributes=attributes ) - elif compare == 're_match': - self.files_re_match( local_name, temp_name, attributes=attributes ) - elif compare == 're_match_multiline': - self.files_re_match_multiline( local_name, temp_name, attributes=attributes ) - elif compare == 'sim_size': - delta = attributes.get('delta', '100') - s1 = len(data) - s2 = os.path.getsize(local_name) - if abs(s1 - s2) > int(delta): - raise Exception( 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name, s1, local_name, s2, delta) ) - elif compare == "contains": - self.files_contains( local_name, temp_name, attributes=attributes ) - else: - raise Exception( 'Unimplemented Compare type: %s' % compare ) - if extra_files: - self.verify_extra_files_content( extra_files, elem.get( 'id' ), shed_tool_id=shed_tool_id ) - except AssertionError, err: - errmsg = 'History item %s different than expected, difference (using %s):\n' % ( hid, compare ) - errmsg += "( %s v. 
%s )\n" % ( local_name, temp_name ) - errmsg += str( err ) - raise AssertionError( errmsg ) - finally: - os.remove( temp_name ) + return fetcher def _bam_to_sam( self, local_name, temp_name ): temp_local = tempfile.NamedTemporaryFile( suffix='.sam', prefix='local_bam_converted_to_sam_' ) @@ -909,7 +922,7 @@ os.remove( temp_name ) return temp_local, temp_temp - def verify_extra_files_content( self, extra_files, hda_id, shed_tool_id=None ): + def verify_extra_files_content( self, extra_files, hda_id, dataset_fetcher, shed_tool_id=None ): files_list = [] for extra_type, extra_value, extra_name, extra_attributes in extra_files: if extra_type == 'file': @@ -920,44 +933,42 @@ else: raise ValueError( 'unknown extra_files type: %s' % extra_type ) for filename, filepath, attributes in files_list: - self.verify_composite_datatype_file_content( filepath, hda_id, base_name=filename, attributes=attributes, shed_tool_id=shed_tool_id ) + self.verify_composite_datatype_file_content( filepath, hda_id, base_name=filename, attributes=attributes, dataset_fetcher=dataset_fetcher, shed_tool_id=shed_tool_id ) - def verify_composite_datatype_file_content( self, file_name, hda_id, base_name=None, attributes=None, shed_tool_id=None ): + def verify_composite_datatype_file_content( self, file_name, hda_id, base_name=None, attributes=None, dataset_fetcher=None, shed_tool_id=None ): + dataset_fetcher = dataset_fetcher or self.__default_dataset_fetcher() local_name = self.get_filename( file_name, shed_tool_id=shed_tool_id ) if base_name is None: base_name = os.path.split(file_name)[-1] temp_name = self.makeTfname(fname=base_name) - self.visit_url( "%s/datasets/%s/display/%s" % ( self.url, hda_id, base_name ) ) - data = self.last_page() + data = dataset_fetcher( hda_id, base_name ) file( temp_name, 'wb' ).write( data ) if self.keepOutdir > '': ofn = os.path.join(self.keepOutdir, base_name) shutil.copy(temp_name, ofn) log.debug('## GALAXY_TEST_SAVE=%s. 
saved %s' % (self.keepOutdir, ofn)) try: - # have to nest try-except in try-finally to handle 2.4 - try: - if attributes is None: - attributes = {} - compare = attributes.get( 'compare', 'diff' ) - if compare == 'diff': - self.files_diff( local_name, temp_name, attributes=attributes ) - elif compare == 're_match': - self.files_re_match( local_name, temp_name, attributes=attributes ) - elif compare == 're_match_multiline': - self.files_re_match_multiline( local_name, temp_name, attributes=attributes ) - elif compare == 'sim_size': - delta = attributes.get('delta', '100') - s1 = len(data) - s2 = os.path.getsize(local_name) - if abs(s1 - s2) > int(delta): - raise Exception( 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name, s1, local_name, s2, delta) ) - else: - raise Exception( 'Unimplemented Compare type: %s' % compare ) - except AssertionError, err: - errmsg = 'Composite file (%s) of History item %s different than expected, difference (using %s):\n' % ( base_name, hda_id, compare ) - errmsg += str( err ) - raise AssertionError( errmsg ) + if attributes is None: + attributes = {} + compare = attributes.get( 'compare', 'diff' ) + if compare == 'diff': + self.files_diff( local_name, temp_name, attributes=attributes ) + elif compare == 're_match': + self.files_re_match( local_name, temp_name, attributes=attributes ) + elif compare == 're_match_multiline': + self.files_re_match_multiline( local_name, temp_name, attributes=attributes ) + elif compare == 'sim_size': + delta = attributes.get('delta', '100') + s1 = len(data) + s2 = os.path.getsize(local_name) + if abs(s1 - s2) > int(delta): + raise Exception( 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name, s1, local_name, s2, delta) ) + else: + raise Exception( 'Unimplemented Compare type: %s' % compare ) + except AssertionError, err: + errmsg = 'Composite file (%s) of History item %s different than expected, difference (using %s):\n' % ( base_name, hda_id, compare ) + errmsg += str( err ) + raise AssertionError( errmsg ) finally: os.remove( temp_name ) https://bitbucket.org/galaxy/galaxy-central/commits/a35a8198b8fd/ Changeset: a35a8198b8fd User: jmchilton Date: 2013-11-22 02:17:12 Summary: Outline new API-based Galaxy interactor for functional tests. Add a mechanism to the tool test parser code to determine which Galaxy interactor should be used ('twill' and 'api' are the current options). This may be a stopgap to force usage of the API interactor until that becomes the default, or it may prove essential in the long term if there are certain tools that will always require Twill-specific functionality or if new browser-based JavaScript tests (e.g. with Selenium) are implemented. An interactor value can be specified at the tests level and/or at the level of individual test elements in the tool XML (using the 'interactor' attribute on either element). The current default interactor is 'twill'. The default interactor app-wide can be overridden with the GALAXY_TEST_DEFAULT_INTERACTOR environment variable.
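The resolution order described above, condensed into one function: the per-<test> attribute wins, then the per-<tests> attribute, then the GALAXY_TEST_DEFAULT_INTERACTOR environment variable, then the hard-coded default. A sketch only - the real logic is split between parse_tests_elem and __parse_elem in the diff below, and the elements here are standard xml.etree-style nodes with a get() accessor:

import os

DEFAULT_INTERACTOR = 'twill'

def resolve_interactor( tests_elem, test_elem ):
    # App-wide default, overridable via the environment...
    default = os.environ.get( 'GALAXY_TEST_DEFAULT_INTERACTOR', DEFAULT_INTERACTOR )
    # ...the <tests> element may override it for a whole tool...
    tests_default = tests_elem.get( 'interactor', default )
    # ...and an individual <test> element may override both.
    return test_elem.get( 'interactor', tests_default )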
Affected #: 3 files diff -r 3f105f22fc5781d89421f5bcb6d9a0bd5825e9b0 -r a35a8198b8fd1a0fdea1183ca299fa6ceb41d060 lib/galaxy/tools/__init__.py --- a/lib/galaxy/tools/__init__.py +++ b/lib/galaxy/tools/__init__.py @@ -52,7 +52,7 @@ from galaxy.tools.parameters.output import ToolOutputActionGroup from galaxy.tools.parameters.validation import LateValidationError from galaxy.tools.filters import FilterFactory -from galaxy.tools.test import ToolTestBuilder +from galaxy.tools.test import parse_tests_elem from galaxy.util import listify, parse_xml, rst_to_html, string_as_bool, string_to_object, xml_text, xml_to_string from galaxy.util.bunch import Bunch from galaxy.util.expressions import ExpressionContext @@ -1222,7 +1222,7 @@ tests_elem = root.find( "tests" ) if tests_elem: try: - self.parse_tests( tests_elem ) + self.tests = parse_tests_elem( self, tests_elem ) except: log.exception( "Failed to parse tool tests" ) else: @@ -1564,16 +1564,6 @@ log.error( "Traceback: %s" % trace_msg ) return return_level - def parse_tests( self, tests_elem ): - """ - Parse any "<test>" elements, create a `ToolTestBuilder` for each and - store in `self.tests`. - """ - self.tests = [] - for i, test_elem in enumerate( tests_elem.findall( 'test' ) ): - test = ToolTestBuilder( self, test_elem, i ) - self.tests.append( test ) - def parse_input_page( self, input_elem, enctypes ): """ Parse a page of inputs. This basically just calls 'parse_input_elem', diff -r 3f105f22fc5781d89421f5bcb6d9a0bd5825e9b0 -r a35a8198b8fd1a0fdea1183ca299fa6ceb41d060 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -1,3 +1,4 @@ +import os import os.path from parameters import basic from parameters import grouping @@ -8,6 +9,21 @@ DEFAULT_FTYPE = 'auto' DEFAULT_DBKEY = 'hg17' +DEFAULT_INTERACTOR = "twill" # Default mechanism test code uses for interacting with Galaxy instance. + + +def parse_tests_elem(tool, tests_elem): + """ + Build ToolTestBuilder objects for each "<test>" elements and + return default interactor (if any). + """ + default_interactor = os.environ.get( 'GALAXY_TEST_DEFAULT_INTERACTOR', DEFAULT_INTERACTOR ) + tests_default_interactor = tests_elem.get( 'interactor', default_interactor ) + tests = [] + for i, test_elem in enumerate( tests_elem.findall( 'test' ) ): + test = ToolTestBuilder( tool, test_elem, i, default_interactor=tests_default_interactor ) + tests.append( test ) + return tests class ToolTestBuilder( object ): @@ -17,7 +33,7 @@ doing dynamic tests in this way allows better integration) """ - def __init__( self, tool, test_elem, i ): + def __init__( self, tool, test_elem, i, default_interactor ): name = test_elem.get( 'name', 'Test-%d' % (i + 1) ) maxseconds = int( test_elem.get( 'maxseconds', '120' ) ) @@ -30,7 +46,7 @@ self.error = False self.exception = None - self.__parse_elem( test_elem, i ) + self.__parse_elem( test_elem, i, default_interactor ) def test_data( self ): """ @@ -57,12 +73,18 @@ yield data_dict - def __parse_elem( self, test_elem, i ): + def __parse_elem( self, test_elem, i, default_interactor ): # Composite datasets need a unique name: each test occurs in a fresh # history, but we'll keep it unique per set of tests - use i (test #) # and composite_data_names_counter (instance per test #) composite_data_names_counter = 0 try: + # Mechanism test code uses for interacting with Galaxy instance, + # until 'api' is the default switch this to API to use its new + # features. Once 'api' is the default set to 'twill' to use legacy + # features or workarounds. 
+ self.interactor = test_elem.get( 'interactor', default_interactor ) + for param_elem in test_elem.findall( "param" ): attrib = dict( param_elem.attrib ) if 'values' in attrib: diff -r 3f105f22fc5781d89421f5bcb6d9a0bd5825e9b0 -r a35a8198b8fd1a0fdea1183ca299fa6ceb41d060 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -1,6 +1,8 @@ import sys import new +import os from galaxy.tools.parameters import grouping +from galaxy.util import string_as_bool from base.twilltestcase import TwillTestCase import galaxy.model from galaxy.model.orm import and_, desc @@ -20,7 +22,7 @@ self.__handle_test_def_errors( testdef ) - galaxy_interactor = GalaxyInteractorTwill( self ) + galaxy_interactor = self.__galaxy_interactor( testdef ) test_history = galaxy_interactor.new_history() @@ -38,6 +40,11 @@ galaxy_interactor.delete_history( test_history ) + def __galaxy_interactor( self, testdef ): + interactor_key = testdef.interactor + interactor_class = GALAXY_INTERACTORS[ interactor_key ] + return interactor_class( self ) + def __handle_test_def_errors(self, testdef): # If the test generation had an error, raise if testdef.error: @@ -68,6 +75,25 @@ raise +class GalaxyInteractorApi( object ): + + def __init__( self, twill_test_case ): + self.twill_test_case = twill_test_case + self.master_api_key = twill_test_case.master_api_key + + def new_history( self ): + return None + + def stage_data_async( self, test_data, shed_tool_id, async=True ): + return lambda: True + + def run_tool( self, testdef ): + return [] + + def delete_history( self, history ): + return None + + class GalaxyInteractorTwill( object ): def __init__( self, twill_test_case ): @@ -231,3 +257,9 @@ # from baseclasses (which should be a tuple of classes) and with namespace dict. new_class_obj = new.classobj( name, baseclasses, namespace ) G[ name ] = new_class_obj + + +GALAXY_INTERACTORS = { + 'api': GalaxyInteractorApi, + 'twill': GalaxyInteractorTwill, +} https://bitbucket.org/galaxy/galaxy-central/commits/15c6170f58c8/ Changeset: 15c6170f58c8 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Twill-less tool tests - initial implementation. Implement GalaxyInteractorApi; it is working for simple tests. Likely lots left to do - conditionals, repeats, composite uploads, pages(?). Introduces a dependency on the Python requests package - a subsequent changeset will provide a urllib2-based fallback if requests is unavailable.
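All of the new interactor's traffic funnels through two thin wrappers that attach the API key to every request. A reduced sketch of that layer - endpoint paths and payload shapes follow the diff below; the class name is illustrative, and the stdlib json stands in for the simplejson import the changeset actually uses:

import requests
from json import dumps

class ApiClient( object ):

    def __init__( self, base_url, key ):
        self.api_url = '%s/api' % base_url.rstrip( '/' )
        self.key = key

    def get( self, path, **params ):
        # The key rides along as a query parameter on every GET...
        params['key'] = self.key
        return requests.get( '%s/%s' % ( self.api_url, path ), params=params )

    def post( self, path, data=None, files=None ):
        # ...and as a form field on every POST.
        data = dict( data or {}, key=self.key )
        return requests.post( '%s/%s' % ( self.api_url, path ), data=data, files=files )

Running a tool then reduces to a single POST mirroring __submit_tool below, e.g. client.post( 'tools', data=dict( history_id=history_id, tool_id='upload1', inputs=dumps( tool_input ) ) ).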
Affected #: 1 file diff -r a35a8198b8fd1a0fdea1183ca299fa6ceb41d060 -r 15c6170f58c87ed719efb37b62c9eab1c2956102 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -7,6 +7,8 @@ import galaxy.model from galaxy.model.orm import and_, desc from galaxy.model.mapping import context as sa_session +from simplejson import dumps +import requests toolbox = None @@ -29,14 +31,14 @@ # Upload any needed files upload_waits = [] for test_data in testdef.test_data(): - upload_waits.append( galaxy_interactor.stage_data_async( test_data, shed_tool_id ) ) + upload_waits.append( galaxy_interactor.stage_data_async( test_data, test_history, shed_tool_id ) ) for upload_wait in upload_waits: upload_wait() - data_list = galaxy_interactor.run_tool( testdef ) + data_list = galaxy_interactor.run_tool( testdef, test_history ) self.assertTrue( data_list ) - self.__verify_outputs( testdef, shed_tool_id, data_list ) + self.__verify_outputs( testdef, test_history, shed_tool_id, data_list, galaxy_interactor ) galaxy_interactor.delete_history( test_history ) @@ -53,53 +55,167 @@ else: raise Exception( "Test parse failure" ) - def __verify_outputs( self, testdef, shed_tool_id, data_list ): + def __verify_outputs( self, testdef, history, shed_tool_id, data_list, galaxy_interactor ): maxseconds = testdef.maxseconds - elem_index = 0 - len( testdef.outputs ) + output_index = 0 - len( testdef.outputs ) for output_tuple in testdef.outputs: # Get the correct hid - elem = data_list[ elem_index ] - self.assertTrue( elem is not None ) - self.__verify_output( output_tuple, shed_tool_id, elem, maxseconds=maxseconds ) - elem_index += 1 - - def __verify_output( self, output_tuple, shed_tool_id, elem, maxseconds ): + output_data = data_list[ output_index ] + self.assertTrue( output_data is not None ) name, outfile, attributes = output_tuple - elem_hid = elem.get( 'hid' ) - try: - self.verify_dataset_correctness( outfile, hid=elem_hid, attributes=attributes, shed_tool_id=shed_tool_id ) - except Exception: - print >>sys.stderr, self.get_job_stdout( elem.get( 'id' ), format=True ) - print >>sys.stderr, self.get_job_stderr( elem.get( 'id' ), format=True ) - raise + galaxy_interactor.verify_output( history, output_data, outfile, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds ) + output_index += 1 class GalaxyInteractorApi( object ): def __init__( self, twill_test_case ): self.twill_test_case = twill_test_case - self.master_api_key = twill_test_case.master_api_key + self.api_url = "%s/api" % twill_test_case.url.rstrip("/") + self.api_key = self.__get_user_key( twill_test_case.master_api_key ) + self.uploads = {} + + def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ): + hid = output_data.get( 'id' ) + try: + fetcher = self.__dataset_fetcher( history_id ) + self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id ) + except Exception: + ## TODO: Print this! + # print >>sys.stderr, self.twill_test_case.get_job_stdout( output_data.get( 'id' ), format=True ) + ## TODO: Print this! 
+ # print >>sys.stderr, self.twill_test_case.get_job_stderr( output_data.get( 'id' ), format=True ) + raise def new_history( self ): - return None + history_json = self.__post( "histories", {"name": "test_history"} ).json() + return history_json[ 'id' ] - def stage_data_async( self, test_data, shed_tool_id, async=True ): - return lambda: True + def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ): + fname = test_data[ 'fname' ] + file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id ) + name = test_data.get( 'name', None ) + if not name: + name = os.path.basename( file_name ) + tool_input = { + "file_type": test_data[ 'ftype' ], + "dbkey": test_data[ 'dbkey' ], # TODO: Handle it! Doesn't work if undefined, does seem to in Twill. + "files_0|NAME": name, + "files_0|type": "upload_dataset", + } + files = { + "files_0|file_data": open( file_name, 'rb') + } + submit_response = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files ).json() + dataset = submit_response["outputs"][0] + #raise Exception(str(dataset)) + hid = dataset['id'] + self.uploads[ fname ] = {"src": "hda", "id": hid} + return self.__wait_for_history( history_id ) - def run_tool( self, testdef ): - return [] + def run_tool( self, testdef, history_id ): + # We need to handle the case where we've uploaded a valid compressed file since the upload + # tool will have uncompressed it on the fly. + all_inputs = {} + for name, value, _ in testdef.inputs: + all_inputs[ name ] = value + + for key, value in all_inputs.iteritems(): + # TODO: Restrict this to param inputs. + if value in self.uploads: + all_inputs[key] = self.uploads[ value ] + + # TODO: Handle repeats. + # TODO: Handle pages. + # TODO: Handle force_history_refresh + datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=all_inputs ) + self.__wait_for_history( history_id )() # TODO: Remove and respect maxseconds! 
+ return datasets.json()[ 'outputs' ] + + def output_hid( self, output_data ): + return output_data[ 'id' ] def delete_history( self, history ): return None + def __wait_for_history( self, history_id ): + def wait(): + while True: + history_json = self.__get( "histories/%s" % history_id ).json() + state = history_json[ 'state' ] + if state == 'ok': + #raise Exception(str(self.__get( self.__get( "histories/%s/contents" % history_id ).json()[0]['url'] ).json() ) ) + #raise Exception(str(self.__get( self.__get( "histories/%s/contents" % history_id ).json()[0]['url'] ).json() ) ) + break + elif state == 'error': + raise Exception("History in error state.") + return wait + + def __submit_tool( self, history_id, tool_id, tool_input, extra_data={}, files=None ): + data = dict( + history_id=history_id, + tool_id=tool_id, + inputs=dumps( tool_input ), + **extra_data + ) + return self.__post( "tools", files=files, data=data ) + + def __get_user_key( self, admin_key ): + all_users = self.__get( 'users', key=admin_key ).json() + try: + test_user = [ user for user in all_users if user["email"] == 'test@bx.psu.edu' ][0] + except IndexError: + data = dict( + email='test@bx.psu.edu', + password='testuser', + username='admin-user', + ) + test_user = self.__post( 'users', data, key=admin_key ).json() + return self.__post( "users/%s/api_key" % test_user['id'], key=admin_key ).json() + + def __dataset_fetcher( self, history_id ): + def fetcher( hda_id, base_name=None ): + url = "histories/%s/contents/%s/display" % (history_id, hda_id) + if base_name: + url += "&filename=%s" % base_name + return self.__get( url ).text + + return fetcher + + def __post( self, path, data={}, files=None, key=None): + if not key: + key = self.api_key + data = data.copy() + data['key'] = key + return requests.post( "%s/%s" % (self.api_url, path), data=data, files=files ) + + def __get( self, path, data={}, key=None ): + if not key: + key = self.api_key + data = data.copy() + data['key'] = key + if path.startswith("/api"): + path = path[ len("/api"): ] + url = "%s/%s" % (self.api_url, path) + return requests.get( url, params=data ) + class GalaxyInteractorTwill( object ): def __init__( self, twill_test_case ): self.twill_test_case = twill_test_case - def stage_data_async( self, test_data, shed_tool_id, async=True ): + def verify_output( self, history, output_data, outfile, attributes, shed_tool_id, maxseconds ): + hid = output_data.get( 'hid' ) + try: + self.twill_test_case.verify_dataset_correctness( outfile, hid=hid, attributes=attributes, shed_tool_id=shed_tool_id ) + except Exception: + print >>sys.stderr, self.twill_test_case.get_job_stdout( output_data.get( 'id' ), format=True ) + print >>sys.stderr, self.twill_test_case.get_job_stderr( output_data.get( 'id' ), format=True ) + raise + + def stage_data_async( self, test_data, history, shed_tool_id, async=True ): name = test_data.get( 'name', None ) if name: async = False @@ -118,7 +234,7 @@ print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name) return lambda: self.twill_test_case.wait() - def run_tool( self, testdef ): + def run_tool( self, testdef, test_history ): # We need to handle the case where we've uploaded a valid compressed file since the upload # tool will have uncompressed it on the fly. 
all_inputs = {} @@ -179,6 +295,9 @@ def delete_history( self, latest_history ): self.twill_test_case.delete_history( id=self.twill_test_case.security.encode_id( latest_history.id ) ) + def output_hid( self, output_data ): + return output_data.get( 'hid' ) + def __expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): expanded_inputs = {} for key, value in tool_inputs.items(): https://bitbucket.org/galaxy/galaxy-central/commits/1411df5a5fcc/ Changeset: 1411df5a5fcc User: jmchilton Date: 2013-11-22 02:17:12 Summary: Twill-less tool tests - allow tests with GALAXY_TEST_EXTERNAL... ... if GALAXY_TEST_EXTERNAL is ever fixed. Affected #: 2 files diff -r 15c6170f58c87ed719efb37b62c9eab1c2956102 -r 1411df5a5fcc2686512b5560435279f305fc755a scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -63,6 +63,7 @@ default_galaxy_locales = 'en' default_galaxy_test_file_dir = "test-data" default_galaxy_master_key = "TEST123" +default_galaxy_user_key = None migrated_tool_panel_config = 'migrated_tools_conf.xml' installed_tool_panel_configs = [ 'shed_tool_conf.xml' ] @@ -425,6 +426,7 @@ functional.test_toolbox.build_tests( testing_shed_tools=testing_shed_tools, master_api_key=master_api_key, + user_api_key=os.environ.get( "GALAXY_TEST_USER_API_KEY", default_galaxy_user_key ), ) test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() ) test_config.configure( sys.argv ) diff -r 15c6170f58c87ed719efb37b62c9eab1c2956102 -r 1411df5a5fcc2686512b5560435279f305fc755a test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -73,7 +73,7 @@ def __init__( self, twill_test_case ): self.twill_test_case = twill_test_case self.api_url = "%s/api" % twill_test_case.url.rstrip("/") - self.api_key = self.__get_user_key( twill_test_case.master_api_key ) + self.api_key = self.__get_user_key( twill_test_case.user_api_key, twill_test_case.master_api_key ) self.uploads = {} def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ): @@ -161,7 +161,9 @@ ) return self.__post( "tools", files=files, data=data ) - def __get_user_key( self, admin_key ): + def __get_user_key( self, user_key, admin_key ): + if user_key: + return user_key all_users = self.__get( 'users', key=admin_key ).json() try: test_user = [ user for user in all_users if user["email"] == 'test@bx.psu.edu' ][0] @@ -336,7 +338,7 @@ return expanded_inputs -def build_tests( testing_shed_tools=False, master_api_key=None ): +def build_tests( testing_shed_tools=False, master_api_key=None, user_api_key=None ): """ If the module level variable `toolbox` is set, generate `ToolTestCase` classes for all of its tests and put them into this modules globals() so @@ -372,6 +374,7 @@ namespace[ 'test_tool_%06d' % j ] = test_method namespace[ 'shed_tool_id' ] = shed_tool_id namespace[ 'master_api_key' ] = master_api_key + namespace[ 'user_api_key' ] = user_api_key # The new.classobj function returns a new class object, with name name, derived # from baseclasses (which should be a tuple of classes) and with namespace dict. new_class_obj = new.classobj( name, baseclasses, namespace ) https://bitbucket.org/galaxy/galaxy-central/commits/2dcc0a0e3732/ Changeset: 2dcc0a0e3732 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Move test_toolbox's expand_grouping out into tool/test.py. Will be merged with related tree code already in test.py in a subsequent changeset. 
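What expand_grouping ultimately produces is a flat mapping whose keys encode the grouping path with '|' separators - the same names the tool form posts. A toy version of just the naming scheme, with plain dicts standing in for Galaxy's grouping objects:

def flatten( inputs, prefix='' ):
    # Collapse nested dicts into 'cond|param'-style keys.
    flat = {}
    for name, value in inputs.items():
        key = name if not prefix else '%s|%s' % ( prefix, name )
        if isinstance( value, dict ):
            flat.update( flatten( value, prefix=key ) )
        else:
            flat[key] = value
    return flat

For example, flatten( { 'ref': { 'source': 'history', 'input1': 'test.bed' } } ) yields { 'ref|source': 'history', 'ref|input1': 'test.bed' }.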
Affected #: 2 files diff -r 1411df5a5fcc2686512b5560435279f305fc755a -r 2dcc0a0e3732cf32b603279fdc443c6a2a9d9ae8 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -73,6 +73,43 @@ yield data_dict + def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): + expanded_inputs = {} + for key, value in tool_inputs.items(): + if isinstance( value, grouping.Conditional ): + if prefix: + new_prefix = "%s|%s" % ( prefix, value.name ) + else: + new_prefix = value.name + for i, case in enumerate( value.cases ): + if declared_inputs[ value.test_param.name ] == case.value: + if isinstance(case.value, str): + expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value.split( "," ) + else: + expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value + for input_name, input_value in case.inputs.items(): + expanded_inputs.update( self.expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) ) + elif isinstance( value, grouping.Repeat ): + for repeat_index in xrange( 0, 1 ): # need to allow for and figure out how many repeats we have + for r_name, r_value in value.inputs.iteritems(): + new_prefix = "%s_%d" % ( value.name, repeat_index ) + if prefix: + new_prefix = "%s|%s" % ( prefix, new_prefix ) + expanded_inputs.update( self.expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) ) + elif value.name not in declared_inputs: + print "%s not declared in tool test, will not change default value." % value.name + elif isinstance(declared_inputs[value.name], str): + if prefix: + expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name].split(",") + else: + expanded_inputs[value.name] = declared_inputs[value.name].split(",") + else: + if prefix: + expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name] + else: + expanded_inputs[value.name] = declared_inputs[value.name] + return expanded_inputs + def __parse_elem( self, test_elem, i, default_interactor ): # Composite datasets need a unique name: each test occurs in a fresh # history, but we'll keep it unique per set of tests - use i (test #) diff -r 1411df5a5fcc2686512b5560435279f305fc755a -r 2dcc0a0e3732cf32b603279fdc443c6a2a9d9ae8 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -258,14 +258,14 @@ job_finish_by_output_count = False # Do the first page - page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[0], all_inputs) + page_inputs = testdef.expand_grouping(testdef.tool.inputs_by_page[0], all_inputs) # Run the tool self.twill_test_case.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs ) print "page_inputs (0)", page_inputs # Do other pages if they exist for i in range( 1, testdef.tool.npages ): - page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[i], all_inputs) + page_inputs = testdef.expand_grouping(testdef.tool.inputs_by_page[i], all_inputs) self.twill_test_case.submit_form( **page_inputs ) print "page_inputs (%i)" % i, page_inputs @@ -300,43 +300,6 @@ def output_hid( self, output_data ): return output_data.get( 'hid' ) - def __expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): - expanded_inputs = {} - for key, value in tool_inputs.items(): - if isinstance( value, grouping.Conditional ): - if prefix: - new_prefix = "%s|%s" % ( prefix, value.name ) - else: - new_prefix = value.name - for i, case in enumerate( value.cases ): - if declared_inputs[ 
value.test_param.name ] == case.value: - if isinstance(case.value, str): - expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value.split( "," ) - else: - expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value - for input_name, input_value in case.inputs.items(): - expanded_inputs.update( self.__expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) ) - elif isinstance( value, grouping.Repeat ): - for repeat_index in xrange( 0, 1 ): # need to allow for and figure out how many repeats we have - for r_name, r_value in value.inputs.iteritems(): - new_prefix = "%s_%d" % ( value.name, repeat_index ) - if prefix: - new_prefix = "%s|%s" % ( prefix, new_prefix ) - expanded_inputs.update( self.__expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) ) - elif value.name not in declared_inputs: - print "%s not declared in tool test, will not change default value." % value.name - elif isinstance(declared_inputs[value.name], str): - if prefix: - expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name].split(",") - else: - expanded_inputs[value.name] = declared_inputs[value.name].split(",") - else: - if prefix: - expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name] - else: - expanded_inputs[value.name] = declared_inputs[value.name] - return expanded_inputs - def build_tests( testing_shed_tools=False, master_api_key=None, user_api_key=None ): """ https://bitbucket.org/galaxy/galaxy-central/commits/0435cbff42d1/ Changeset: 0435cbff42d1 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Simplify logic in tool test expand_grouping. Affected #: 1 file diff -r 2dcc0a0e3732cf32b603279fdc443c6a2a9d9ae8 -r 0435cbff42d144e90cf750851f3998b3217a4891 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -76,17 +76,13 @@ def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): expanded_inputs = {} for key, value in tool_inputs.items(): + expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name) if isinstance( value, grouping.Conditional ): - if prefix: - new_prefix = "%s|%s" % ( prefix, value.name ) - else: - new_prefix = value.name + new_prefix = expanded_key for i, case in enumerate( value.cases ): if declared_inputs[ value.test_param.name ] == case.value: - if isinstance(case.value, str): - expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value.split( "," ) - else: - expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value + expanded_value = self.__split_if_str(case.value) + expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value for input_name, input_value in case.inputs.items(): expanded_inputs.update( self.expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) ) elif isinstance( value, grouping.Repeat ): @@ -98,18 +94,17 @@ expanded_inputs.update( self.expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) ) elif value.name not in declared_inputs: print "%s not declared in tool test, will not change default value." 
% value.name - elif isinstance(declared_inputs[value.name], str): - if prefix: - expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name].split(",") - else: - expanded_inputs[value.name] = declared_inputs[value.name].split(",") else: - if prefix: - expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name] - else: - expanded_inputs[value.name] = declared_inputs[value.name] + value = self.__split_if_str(declared_inputs[value.name]) + expanded_inputs[expanded_key] = value return expanded_inputs + def __split_if_str( self, value ): + split = isinstance(value, str) + if split: + value = value.split(",") + return value + def __parse_elem( self, test_elem, i, default_interactor ): # Composite datasets need a unique name: each test occurs in a fresh # history, but we'll keep it unique per set of tests - use i (test #) https://bitbucket.org/galaxy/galaxy-central/commits/8b99c7bc5e11/ Changeset: 8b99c7bc5e11 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Tool functional tests - allow unspecified default conditional values. Fix should apply to both twill and API variants. For now just grabbing the first option in the select list; a subsequent changeset adds much richer options including handling of booleans and options labeled as default. Affected #: 1 file diff -r 0435cbff42d144e90cf750851f3998b3217a4891 -r 8b99c7bc5e11cd7c452c2ad86f951109b495271c lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -73,18 +73,29 @@ yield data_dict + def __matching_case( self, cond, declared_inputs ): + param = cond.test_param + declared_value = declared_inputs.get( param.name, None ) + for i, case in enumerate( cond.cases ): + if declared_value and (case.value == declared_value): + return case + if not declared_value: + # TODO: Default might not be top value, fix this. + return case + print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( param.name, declared_value ) + def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): expanded_inputs = {} for key, value in tool_inputs.items(): expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name) if isinstance( value, grouping.Conditional ): new_prefix = expanded_key - for i, case in enumerate( value.cases ): - if declared_inputs[ value.test_param.name ] == case.value: - expanded_value = self.__split_if_str(case.value) - expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value + case = self.__matching_case( value, declared_inputs ) + if case: + expanded_value = self.__split_if_str(case.value) + expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value + for input_name, input_value in case.inputs.items(): + expanded_inputs.update( self.expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) ) elif isinstance( value, grouping.Repeat ): for repeat_index in xrange( 0, 1 ): # need to allow for and figure out how many repeats we have for r_name, r_value in value.inputs.iteritems(): https://bitbucket.org/galaxy/galaxy-central/commits/3360087fcac9/ Changeset: 3360087fcac9 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Twill-less tool tests - handle repeats and conditionals. Expand 'inputs' tree when building tool inputs for API functional tests.
Code is in there for both flat inputs (the way the UI would submit them), and a nested tree structure. The nested tree structure will not work with the Galaxy API currently - but it is the way the API should work :). Affected #: 2 files diff -r 8b99c7bc5e11cd7c452c2ad86f951109b495271c -r 3360087fcac9cfecb7848c0b60a26b91c66d1c8b lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -73,6 +73,24 @@ yield data_dict + def to_dict( self, tool_inputs, declared_inputs ): + expanded_inputs = {} + for key, value in tool_inputs.items(): + if isinstance( value, grouping.Conditional ): + for i, case in enumerate( value.cases ): + if declared_inputs[ value.test_param.name ] == case.value: + pass # TODO + elif isinstance( value, grouping.Repeat ): + values = [] + for r_name, r_value in value.inputs.iteritems(): + values.append( self.to_dict( {r_name: r_value} , declared_inputs ) ) + expanded_inputs[ value.name ] = values + elif value.name not in declared_inputs: + print "%s not declared in tool test, will not change default value." % value.name + else: + expanded_inputs[ value.name ] = declared_inputs[value.name] + return expanded_inputs + def __matching_case( self, cond, declared_inputs ): param = cond.test_param declared_value = declared_inputs.get( param.name, None ) diff -r 8b99c7bc5e11cd7c452c2ad86f951109b495271c -r 3360087fcac9cfecb7848c0b60a26b91c66d1c8b test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -126,10 +126,30 @@ if value in self.uploads: all_inputs[key] = self.uploads[ value ] - # TODO: Handle repeats. - # TODO: Handle pages. - # TODO: Handle force_history_refresh - datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=all_inputs ) + # TODO: Handle pages? + # TODO: Handle force_history_refresh? + flat_inputs = True + if flat_inputs: + # Build up tool_input flately (e.g {"a_repeat_0|a_repeat_param" : "value1"}) + expanded_inputs = {} + expanded_inputs.update(testdef.expand_grouping(testdef.tool.inputs_by_page[0], all_inputs)) + for i in range( 1, testdef.tool.npages ): + expanded_inputs.update(testdef.expand_grouping(testdef.tool.inputs_by_page[i], all_inputs)) + + # # HACK: Flatten single-value lists. Required when using expand_grouping + for key, value in expanded_inputs.iteritems(): + if isinstance(value, list) and len(value) == 1: + expanded_inputs[key] = value[0] + tool_input = expanded_inputs + else: + # Build up tool_input as nested dictionary (e.g. {"a_repeat": [{"a_repeat_param" : "value1"}]}) + # Doesn't work with the tool API at this time. + tool_input = {} + tool_input.update(testdef.to_dict(testdef.tool.inputs_by_page[0], all_inputs)) + for i in range( 1, testdef.tool.npages ): + tool_input.update(testdef.to_dict(testdef.tool.inputs_by_page[i], all_inputs)) + + datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=tool_input ) self.__wait_for_history( history_id )() # TODO: Remove and respect maxseconds! return datasets.json()[ 'outputs' ] https://bitbucket.org/galaxy/galaxy-central/commits/47eeac9693d3/ Changeset: 47eeac9693d3 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Twill-less tool tests fix for nested input filenames. 
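To make the two tool_input shapes from 3360087fcac9 above concrete, here is a sketch of the payloads involved (the repeat and param names come from the comments in that diff; the encoded dataset id is an invented placeholder):

    # An uploaded dataset as referenced by the tool API.
    hda = {"src": "hda", "id": "2891970512fa2d5a"}  # id value is made up

    # Flat form - what expand_grouping produces and the tool API accepts today:
    flat_tool_input = {
        "input1": hda,
        "a_repeat_0|a_repeat_param": "value1",
    }

    # Nested form - what to_dict produces; the tree the API should
    # eventually accept:
    nested_tool_input = {
        "input1": hda,
        "a_repeat": [{"a_repeat_param": "value1"}],
    }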
Affected #: 1 file diff -r 3360087fcac9cfecb7848c0b60a26b91c66d1c8b -r 47eeac9693d38593a60bf43f1920a6417fca2e6a test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -111,7 +111,7 @@ dataset = submit_response["outputs"][0] #raise Exception(str(dataset)) hid = dataset['id'] - self.uploads[ fname ] = {"src": "hda", "id": hid} + self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = {"src": "hda", "id": hid} return self.__wait_for_history( history_id ) def run_tool( self, testdef, history_id ): https://bitbucket.org/galaxy/galaxy-central/commits/b21e913e21e9/ Changeset: b21e913e21e9 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Twill-less tool tests fix for tests with html outputs. For example gatk_analyze_covariates. Affected #: 1 file diff -r 47eeac9693d38593a60bf43f1920a6417fca2e6a -r b21e913e21e951a9d54de4b8e480334dabb70e2d test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -198,7 +198,7 @@ def __dataset_fetcher( self, history_id ): def fetcher( hda_id, base_name=None ): - url = "histories/%s/contents/%s/display" % (history_id, hda_id) + url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id) if base_name: url += "&filename=%s" % base_name return self.__get( url ).text https://bitbucket.org/galaxy/galaxy-central/commits/ff31838afb70/ Changeset: ff31838afb70 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Twill-less tool tests fix for binary data in results. Use requests' response.content instead of response.text for reading results. Fixes the following error 'UnicodeEncodeError: 'ascii' codec can't encode characters in position 1-3: ordinal not in range(128)' when dealing with ASN.1 binary files. Affected #: 1 file diff -r b21e913e21e951a9d54de4b8e480334dabb70e2d -r ff31838afb704f86e10ce4798f8d41e6dfcecb59 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -201,7 +201,7 @@ url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id) if base_name: url += "&filename=%s" % base_name - return self.__get( url ).text + return self.__get( url ).content return fetcher https://bitbucket.org/galaxy/galaxy-central/commits/deaa67411c92/ Changeset: deaa67411c92 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Twill-less tool tests - enhancements related to errors. - Report formatted standard error and standard output of tools, like the Twill interactor does. - Respect testdef.maxseconds to time out test cases. - Check history items - like the Twill interactor, do not verify 'error' datasets. - Properly raise exceptions with reported API error messages when running tools. Affected #: 3 files diff -r ff31838afb704f86e10ce4798f8d41e6dfcecb59 -r deaa67411c9212f8c24f3062e8c7710466118f34 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -10,6 +10,7 @@ DEFAULT_FTYPE = 'auto' DEFAULT_DBKEY = 'hg17' DEFAULT_INTERACTOR = "twill" # Default mechanism test code uses for interacting with Galaxy instance.
+DEFAULT_MAX_SECS = 120 def parse_tests_elem(tool, tests_elem): @@ -35,7 +36,7 @@ def __init__( self, tool, test_elem, i, default_interactor ): name = test_elem.get( 'name', 'Test-%d' % (i + 1) ) - maxseconds = int( test_elem.get( 'maxseconds', '120' ) ) + maxseconds = int( test_elem.get( 'maxseconds', DEFAULT_MAX_SECS ) ) self.tool = tool self.name = name diff -r ff31838afb704f86e10ce4798f8d41e6dfcecb59 -r deaa67411c9212f8c24f3062e8c7710466118f34 test/base/twilltestcase.py --- a/test/base/twilltestcase.py +++ b/test/base/twilltestcase.py @@ -616,6 +616,9 @@ self.visit_page( "datasets/%s/%s" % ( self.security.encode_id( hda_id ), stream ) ) output = self.last_page() + return self._format_stream( output, stream, format ) + + def _format_stream( self, output, stream, format ): if format: msg = "---------------------- >> begin tool %s << -----------------------\n" % stream msg += output + "\n" @@ -1409,13 +1412,16 @@ return True return False - def wait( self, maxseconds=120 ): + def wait( self, **kwds ): """Waits for the tools to finish""" + return self.wait_for(lambda: self.get_running_datasets(), **kwds) + + def wait_for(self, func, maxseconds=120): sleep_amount = 0.1 slept = 0 - self.home() while slept <= maxseconds: - if self.get_running_datasets(): + result = func() + if result: time.sleep( sleep_amount ) slept += sleep_amount sleep_amount *= 2 diff -r ff31838afb704f86e10ce4798f8d41e6dfcecb59 -r deaa67411c9212f8c24f3062e8c7710466118f34 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -64,7 +64,14 @@ output_data = data_list[ output_index ] self.assertTrue( output_data is not None ) name, outfile, attributes = output_tuple - galaxy_interactor.verify_output( history, output_data, outfile, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds ) + try: + galaxy_interactor.verify_output( history, output_data, outfile, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds ) + except Exception: + for stream in ['stdout', 'stderr']: + stream_output = galaxy_interactor.get_job_stream( history, output_data, stream=stream ) + print >>sys.stderr, self._format_stream( stream_output, stream=stream, format=True ) + raise + output_index += 1 @@ -77,16 +84,16 @@ self.uploads = {} def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ): + self.twill_test_case.wait_for( lambda: not self.__history_ready( history_id ), maxseconds=maxseconds) hid = output_data.get( 'id' ) - try: - fetcher = self.__dataset_fetcher( history_id ) - self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id ) - except Exception: - ## TODO: Print this! - # print >>sys.stderr, self.twill_test_case.get_job_stdout( output_data.get( 'id' ), format=True ) - ## TODO: Print this! - # print >>sys.stderr, self.twill_test_case.get_job_stderr( output_data.get( 'id' ), format=True ) - raise + fetcher = self.__dataset_fetcher( history_id ) + ## TODO: Twill version verifys dataset is 'ok' in here. 
+ self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id ) + + def get_job_stream( self, history_id, output_data, stream ): + hid = output_data.get( 'id' ) + data = self.__get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json() + return data.get( stream, '' ) def new_history( self ): history_json = self.__post( "histories", {"name": "test_history"} ).json() @@ -150,8 +157,11 @@ tool_input.update(testdef.to_dict(testdef.tool.inputs_by_page[i], all_inputs)) datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=tool_input ) - self.__wait_for_history( history_id )() # TODO: Remove and respect maxseconds! - return datasets.json()[ 'outputs' ] + datasets_object = datasets.json() + try: + return datasets_object[ 'outputs' ] + except KeyError: + raise Exception( datasets_object[ 'message' ] ) def output_hid( self, output_data ): return output_data[ 'id' ] @@ -161,17 +171,19 @@ def __wait_for_history( self, history_id ): def wait(): - while True: - history_json = self.__get( "histories/%s" % history_id ).json() - state = history_json[ 'state' ] - if state == 'ok': - #raise Exception(str(self.__get( self.__get( "histories/%s/contents" % history_id ).json()[0]['url'] ).json() ) ) - #raise Exception(str(self.__get( self.__get( "histories/%s/contents" % history_id ).json()[0]['url'] ).json() ) ) - break - elif state == 'error': - raise Exception("History in error state.") + while not self.__history_ready( history_id ): + pass return wait + def __history_ready( self, history_id ): + history_json = self.__get( "histories/%s" % history_id ).json() + state = history_json[ 'state' ] + if state == 'ok': + return True + elif state == 'error': + raise Exception("History in error state.") + return False + def __submit_tool( self, history_id, tool_id, tool_input, extra_data={}, files=None ): data = dict( history_id=history_id, @@ -230,12 +242,10 @@ def verify_output( self, history, output_data, outfile, attributes, shed_tool_id, maxseconds ): hid = output_data.get( 'hid' ) - try: - self.twill_test_case.verify_dataset_correctness( outfile, hid=hid, attributes=attributes, shed_tool_id=shed_tool_id ) - except Exception: - print >>sys.stderr, self.twill_test_case.get_job_stdout( output_data.get( 'id' ), format=True ) - print >>sys.stderr, self.twill_test_case.get_job_stderr( output_data.get( 'id' ), format=True ) - raise + self.twill_test_case.verify_dataset_correctness( outfile, hid=hid, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds ) + + def get_job_stream( self, history_id, output_data, stream ): + return self.twill_test_case._get_job_stream_output( output_data.get( 'id' ), stream=stream, format=False ) def stage_data_async( self, test_data, history, shed_tool_id, async=True ): name = test_data.get( 'name', None ) https://bitbucket.org/galaxy/galaxy-central/commits/f5add174debe/ Changeset: f5add174debe User: jmchilton Date: 2013-11-22 02:17:12 Summary: Add new option, tools, and data to the functional test framework to test/demonstrate the tool framework itself. Add a sample repeat tool demonstrating some weaknesses of the current twill-based framework.
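As an aside on the wait_for helper deaa67411c92 introduces above: it polls with exponential backoff until the supplied function reports nothing is still running. A standalone sketch of that loop (the final timeout behavior is assumed here, since the diff is truncated):

    import time

    def wait_for(func, maxseconds=120):
        # Poll func() with exponentially growing sleeps; func should return
        # a truthy value while work is still running.
        sleep_amount = 0.1
        slept = 0
        while slept <= maxseconds:
            if func():
                time.sleep(sleep_amount)
                slept += sleep_amount
                sleep_amount *= 2
            else:
                return
        raise AssertionError("Still running after %s seconds" % maxseconds)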
Affected #: 12 files diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 run_functional_tests.sh --- a/run_functional_tests.sh +++ b/run_functional_tests.sh @@ -13,6 +13,8 @@ echo "'run_functional_tests.sh -list' for listing all the tool ids" echo "'run_functional_tests.sh -toolshed' for running all the test scripts in the ./test/tool_shed/functional directory" echo "'run_functional_tests.sh -toolshed testscriptname' for running one test script named testscriptname in the .test/tool_shed/functional directory" + echo "'run_functional_tests.sh -framework' for running through example tool tests testing framework features in test/functional/tools" + echo "'run_functional_tests.sh -framework -id toolid' for testing one framework tool (in test/functional/tools/) with id 'toolid'" elif [ $1 = '-id' ]; then python ./scripts/functional_tests.py -v functional.test_toolbox:TestForTool_$2 --with-nosehtml --html-report-file run_functional_tests.html elif [ $1 = '-sid' ]; then @@ -46,6 +48,14 @@ else python ./test/tool_shed/functional_tests.py -v --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html $2 fi +elif [ $1 = '-framework' ]; then + if [ ! $2 ]; then + python ./scripts/functional_tests.py -v functional.test_toolbox --with-nosehtml --html-report-file run_functional_tests.html -framework + elif [ $2 = '-id' ]; then + python ./scripts/functional_tests.py -v functional.test_toolbox:TestForTool_$3 --with-nosehtml --html-report-file run_functional_tests.html -framework + else + echo "Invalid test option selected, if -framework first argument to $0, optional second argument must be -id followed a tool id." + fi else python ./scripts/functional_tests.py -v --with-nosehtml --html-report-file run_functional_tests.html $1 fi diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -178,6 +178,7 @@ os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales testing_migrated_tools = '-migrated' in sys.argv testing_installed_tools = '-installed' in sys.argv + datatypes_conf_override = None if testing_migrated_tools or testing_installed_tools: sys.argv.pop() @@ -197,8 +198,21 @@ # Exclude all files except test_toolbox.py. ignore_files = ( re.compile( r'^test_[adghlmsu]*' ), re.compile( r'^test_ta*' ) ) else: - tool_config_file = os.environ.get( 'GALAXY_TEST_TOOL_CONF', 'tool_conf.xml' ) - galaxy_test_file_dir = os.environ.get( 'GALAXY_TEST_FILE_DIR', default_galaxy_test_file_dir ) + framework_test = '-framework' in sys.argv # Run through suite of tests testing framework. + if framework_test: + sys.argv.pop() + framework_tool_dir = os.path.join('test', 'functional', 'tools') + tool_conf = os.path.join( framework_tool_dir, 'samples_tool_conf.xml' ) + datatypes_conf_override = os.path.join( framework_tool_dir, 'sample_datatypes_conf.xml' ) + test_dir = os.path.join( framework_tool_dir, 'test-data') + + tool_path = framework_tool_dir + else: + # Use tool_conf.xml toolbox. 
+ tool_conf = 'tool_conf.xml' + test_dir = default_galaxy_test_file_dir + tool_config_file = os.environ.get( 'GALAXY_TEST_TOOL_CONF', tool_conf ) + galaxy_test_file_dir = os.environ.get( 'GALAXY_TEST_FILE_DIR', test_dir ) if not os.path.isabs( galaxy_test_file_dir ): galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir ) library_import_dir = galaxy_test_file_dir @@ -338,6 +352,8 @@ if use_distributed_object_store: kwargs[ 'object_store' ] = 'distributed' kwargs[ 'distributed_object_store_config_file' ] = 'distributed_object_store_conf.xml.sample' + if datatypes_conf_override: + kwargs[ 'datatypes_config_file' ] = datatypes_conf_override # If the user has passed in a path for the .ini file, do not overwrite it. galaxy_config_file = os.environ.get( 'GALAXY_TEST_INI_FILE', None ) if not galaxy_config_file: diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/README.txt --- /dev/null +++ b/test/functional/tools/README.txt @@ -0,0 +1,3 @@ +This directory contains tools only useful for testing the tool test framework +and demonstrating it features. Run the driver script 'run_functional_tests.sh' +with '-installed' as first argument to run through these tests. diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/multi_repeats.xml --- /dev/null +++ b/test/functional/tools/multi_repeats.xml @@ -0,0 +1,39 @@ +<tool id="multirepeat" name="multirepeat"> + <description>tail-to-head</description> + <command> + cat $input1 #for $q in $queries# ${q.input2} #end for# #for $q in $more_queries# ${q.more_queries_input} #end for# > $out_file1 + </command> + <inputs> + <param name="input1" type="data" label="Concatenate Dataset"/> + <repeat name="queries" title="Dataset"> + <param name="input2" type="data" label="Select" /> + </repeat> + <repeat name="more_queries" title="Dataset"> + <param name="more_queries_input" type="data" label="Select" /> + </repeat> + </inputs> + <outputs> + <data name="out_file1" format="input" metadata_source="input1"/> + </outputs> + <tests> + <test> + <param name="input1" value="simple_line.txt"/> + <param name="input2" value="simple_line.txt"/> + <output name="out_file1" file="simple_line_x2.txt"/> + </test> + <test> + <param name="input1" value="simple_line.txt"/> + <param name="input2" value="simple_line.txt"/> + <param name="input2" value="simple_line.txt"/> + <output name="out_file1" file="simple_line_x3.txt"/> + </test> + <test> + <param name="input1" value="simple_line.txt"/> + <param name="input2" value="simple_line.txt"/> + <param name="input2" value="simple_line.txt"/> + <param name="more_queries_input" value="simple_line.txt"/> + <param name="more_queries_input" value="simple_line.txt"/> + <output name="out_file1" file="simple_line_x5.txt"/> + </test> + </tests> +</tool> diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/sample_datatypes_conf.xml --- /dev/null +++ b/test/functional/tools/sample_datatypes_conf.xml @@ -0,0 +1,8 @@ +<?xml version="1.0"?> +<datatypes> + <registration converters_path="lib/galaxy/datatypes/converters" display_path="display_applications"> + <datatype extension="velvet" type="galaxy.datatypes.assembly:Velvet" display_in_upload="true"/> + <datatype extension="txt" type="galaxy.datatypes.data:Text" display_in_upload="true"/> + <datatype extension="tabular" type="galaxy.datatypes.tabular:Tabular" display_in_upload="true"/> + </registration> 
+</datatypes> \ No newline at end of file diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/samples_tool_conf.xml --- /dev/null +++ b/test/functional/tools/samples_tool_conf.xml @@ -0,0 +1,5 @@ +<?xml version="1.0"?> +<toolbox> + <tool file="upload.xml"/> + <tool file="multi_repeats.xml"/> +</toolbox> \ No newline at end of file diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/test-data/simple_line.txt --- /dev/null +++ b/test/functional/tools/test-data/simple_line.txt @@ -0,0 +1,1 @@ +This is a line of text. diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/test-data/simple_line_x2.txt --- /dev/null +++ b/test/functional/tools/test-data/simple_line_x2.txt @@ -0,0 +1,2 @@ +This is a line of text. +This is a line of text. diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/test-data/simple_line_x3.txt --- /dev/null +++ b/test/functional/tools/test-data/simple_line_x3.txt @@ -0,0 +1,3 @@ +This is a line of text. +This is a line of text. +This is a line of text. diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/test-data/simple_line_x5.txt --- /dev/null +++ b/test/functional/tools/test-data/simple_line_x5.txt @@ -0,0 +1,5 @@ +This is a line of text. +This is a line of text. +This is a line of text. +This is a line of text. +This is a line of text. diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/upload.py --- /dev/null +++ b/test/functional/tools/upload.py @@ -0,0 +1,1 @@ +../../../tools/data_source/upload.py \ No newline at end of file diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/upload.xml --- /dev/null +++ b/test/functional/tools/upload.xml @@ -0,0 +1,1 @@ +../../../tools/data_source/upload.xml \ No newline at end of file https://bitbucket.org/galaxy/galaxy-central/commits/81b6aca75182/ Changeset: 81b6aca75182 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Twill-less tool tests - allow multiple param instances with the same name. Allows specifying multiple repeat instances. Affected #: 2 files diff -r f5add174debe1ca23aa21a42530c650ad6b1ad42 -r 81b6aca751826e85bbf58fadb21f96d370dd9f6e lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -103,6 +103,41 @@ return case print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( param.name, declared_value ) + def expand_multi_grouping( self, tool_inputs, declared_inputs, prefix='', index=0 ): + """ + Used by API, slight generalization of expand_grouping used by Twill based interactor. Still + not quite the context/tree based specification that should exist!
+ """ + expanded_inputs = {} + for key, value in tool_inputs.items(): + expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name) + if isinstance( value, grouping.Conditional ): + new_prefix = expanded_key + case = self.__matching_case( value, declared_inputs ) + if case: + expanded_value = self.__split_if_str(case.value) + expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value + for input_name, input_value in case.inputs.items(): + expanded_inputs.update( self.expand_multi_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ), index=index ) + elif isinstance( value, grouping.Repeat ): + repeat_index = 0 + any_children_matched = True + while any_children_matched: + any_children_matched = False + for r_name, r_value in value.inputs.iteritems(): + new_prefix = "%s_%d" % ( value.name, repeat_index ) + if prefix: + new_prefix = "%s|%s" % ( prefix, new_prefix ) + expanded_input = self.expand_multi_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix, index=repeat_index ) + if expanded_input: + any_children_matched = True + expanded_inputs.update( expanded_input ) + repeat_index += 1 + elif value.name in declared_inputs and len(declared_inputs[ value.name ]) > index: + value = self.__split_if_str( declared_inputs[ value.name ][ index ] ) + expanded_inputs[ expanded_key ] = value + return expanded_inputs + def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): expanded_inputs = {} for key, value in tool_inputs.items(): diff -r f5add174debe1ca23aa21a42530c650ad6b1ad42 -r 81b6aca751826e85bbf58fadb21f96d370dd9f6e test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -126,12 +126,14 @@ # tool will have uncompressed it on the fly. all_inputs = {} for name, value, _ in testdef.inputs: - all_inputs[ name ] = value - - for key, value in all_inputs.iteritems(): # TODO: Restrict this to param inputs. if value in self.uploads: - all_inputs[key] = self.uploads[ value ] + value = self.uploads[ value ] + + if name in all_inputs: + all_inputs[name].append( value ) + else: + all_inputs[name] = [ value ] # TODO: Handle pages? # TODO: Handle force_history_refresh? @@ -139,9 +141,9 @@ if flat_inputs: # Build up tool_input flately (e.g {"a_repeat_0|a_repeat_param" : "value1"}) expanded_inputs = {} - expanded_inputs.update(testdef.expand_grouping(testdef.tool.inputs_by_page[0], all_inputs)) + expanded_inputs.update(testdef.expand_multi_grouping(testdef.tool.inputs_by_page[0], all_inputs)) for i in range( 1, testdef.tool.npages ): - expanded_inputs.update(testdef.expand_grouping(testdef.tool.inputs_by_page[i], all_inputs)) + expanded_inputs.update(testdef.expand_multi_grouping(testdef.tool.inputs_by_page[i], all_inputs)) # # HACK: Flatten single-value lists. Required when using expand_grouping for key, value in expanded_inputs.iteritems(): https://bitbucket.org/galaxy/galaxy-central/commits/72c22a117bdd/ Changeset: 72c22a117bdd User: jmchilton Date: 2013-11-22 02:17:12 Summary: Tool functional tests - allow disambiguation of params in conditionals. Disambiguate by add prefix for parent (or any number of direct ancestors) with pipe (|). See included test case for an example. Should fix twill and API driven functional tests. 
Affected #: 3 files diff -r 81b6aca751826e85bbf58fadb21f96d370dd9f6e -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -3,6 +3,7 @@ from parameters import basic from parameters import grouping from galaxy.util import string_as_bool +from galaxy.util.bunch import Bunch import logging log = logging.getLogger( __name__ ) @@ -92,15 +93,20 @@ expanded_inputs[ value.name ] = declared_inputs[value.name] return expanded_inputs - def __matching_case( self, cond, declared_inputs ): + def __matching_case( self, cond, declared_inputs, prefix, index=None ): param = cond.test_param - declared_value = declared_inputs.get( param.name, None ) + declared_value = self.__declared_match( declared_inputs, param.name, prefix) + if index is not None: + declared_value = declared_value[index] for i, case in enumerate( cond.cases ): - if declared_value and (case.value == declared_value): + if declared_value is not None and (case.value == declared_value): return case - if not declared_value: + if declared_value is None: # TODO: Default might not be top value, fix this. + # TODO: Also may be boolean, got to look at checked. return case + else: + return Bunch(value=declared_value, inputs=Bunch(items=lambda: [])) print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( param.name, declared_value ) def expand_multi_grouping( self, tool_inputs, declared_inputs, prefix='', index=0 ): @@ -113,9 +119,9 @@ expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name) if isinstance( value, grouping.Conditional ): new_prefix = expanded_key - case = self.__matching_case( value, declared_inputs ) + case = self.__matching_case( value, declared_inputs, new_prefix, index=index ) if case: - expanded_value = self.__split_if_str(case.value) + expanded_value = self.__split_if_str( case.value ) expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value for input_name, input_value in case.inputs.items(): expanded_inputs.update( self.expand_multi_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ), index=index ) @@ -133,9 +139,11 @@ any_children_matched = True expanded_inputs.update( expanded_input ) repeat_index += 1 - elif value.name in declared_inputs and len(declared_inputs[ value.name ]) > index: - value = self.__split_if_str( declared_inputs[ value.name ][ index ] ) - expanded_inputs[ expanded_key ] = value + else: + declared_value = self.__declared_match( declared_inputs, value.name, prefix ) + if declared_value and len(declared_value) > index: + value = self.__split_if_str( declared_value[ index ] ) + expanded_inputs[ expanded_key ] = value return expanded_inputs def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): @@ -144,7 +152,7 @@ expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name) if isinstance( value, grouping.Conditional ): new_prefix = expanded_key - case = self.__matching_case( value, declared_inputs ) + case = self.__matching_case( value, declared_inputs, new_prefix ) if case: expanded_value = self.__split_if_str(case.value) expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value @@ -157,13 +165,26 @@ if prefix: new_prefix = "%s|%s" % ( prefix, new_prefix ) expanded_inputs.update( self.expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) ) - elif value.name not in declared_inputs: - print "%s not declared in tool test, will not change default value." 
% value.name else: - value = self.__split_if_str(declared_inputs[value.name]) - expanded_inputs[expanded_key] = value + declared_value = self.__declared_match( declared_inputs, value.name, prefix ) + if not declared_value: + print "%s not declared in tool test, will not change default value." % value.name + else: + value = self.__split_if_str(declared_value) + expanded_inputs[expanded_key] = value return expanded_inputs + def __declared_match( self, declared_inputs, name, prefix ): + prefix_suffixes = [ "%s|" % part for part in prefix.split( "|" ) ] if prefix else [] + prefix_suffixes.append( name ) + prefix_suffixes.reverse() + prefixed_name = "" + for prefix_suffix in prefix_suffixes: + prefixed_name = "%s%s" % ( prefix_suffix, prefixed_name ) + if prefixed_name in declared_inputs: + return declared_inputs[prefixed_name] + return None + def __split_if_str( self, value ): split = isinstance(value, str) if split: diff -r 81b6aca751826e85bbf58fadb21f96d370dd9f6e -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 test/functional/tools/disambiguate_cond.xml --- /dev/null +++ b/test/functional/tools/disambiguate_cond.xml @@ -0,0 +1,50 @@ +<tool id="handle_cond" name="handle_cond"> + <description>tail-to-head</description> + <command> + echo "$p1.p1v $p2.p2v $p3.p3v" > $out_file1 + </command> + <inputs> + <conditional name="p1"> + <param type="boolean" name="use" /> + <when value="true"> + <param name="p1v" value="4" type="integer" /> + </when> + <when value="false"> + <param name="p1v" value="7" type="integer" /> + </when> + </conditional> + <conditional name="p2"> + <param type="boolean" name="use" /> + <when value="true"> + <param name="p2v" value="4" type="integer" /> + </when> + <when value="false"> + <param name="p2v" value="7" type="integer" /> + </when> + </conditional> + <conditional name="p3"> + <param type="boolean" name="use" /> + <when value="true"> + <param name="p3v" value="4" type="integer" /> + </when> + <when value="false"> + <param name="p3v" value="7" type="integer" /> + </when> + </conditional> + </inputs> + <outputs> + <data name="out_file1" format="txt" /> + </outputs> + <tests> + <test> + <param name="p1|use" value="True"/> + <param name="p2|use" value="False"/> + <param name="p3|use" value="True"/> + <output name="out_file1"> + <assert_contents> + <has_line line="4 7 4" /> + </assert_contents> + </output> + </test> + </tests> +</tool> diff -r 81b6aca751826e85bbf58fadb21f96d370dd9f6e -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 test/functional/tools/samples_tool_conf.xml --- a/test/functional/tools/samples_tool_conf.xml +++ b/test/functional/tools/samples_tool_conf.xml @@ -1,5 +1,6 @@ <?xml version="1.0"?><toolbox><tool file="upload.xml"/> + <tool file="disambiguate_cond.xml" /><tool file="multi_repeats.xml"/></toolbox> \ No newline at end of file https://bitbucket.org/galaxy/galaxy-central/commits/61e4e10e03ce/ Changeset: 61e4e10e03ce User: jmchilton Date: 2013-11-22 02:17:12 Summary: Twill-less tool tests - handle composite inputs. Add example tool test for composite data (worked with Twill based test framework prior to this commit, works with API based framework as a result of this commit). 
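Concretely, a composite upload now posts one form field per component file. A sketch of the upload1 payload this produces, using the velvet example's files (field names follow the diff below; treat the exact payload as approximate):

    # hg17 is the test framework's default dbkey.
    tool_input = {
        "file_type": "velvet",
        "dbkey": "hg17",
        "files_0|type": "upload_dataset",
        "files_1|type": "upload_dataset",
        "files_2|type": "upload_dataset",
    }
    files = {
        "files_0|file_data": open("test-data/velveth_test1/Sequences", "rb"),
        "files_1|file_data": open("test-data/velveth_test1/Roadmaps", "rb"),
        "files_2|file_data": open("test-data/velveth_test1/Log", "rb"),
    }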
Affected #: 4 files diff -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -101,24 +101,42 @@ def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ): fname = test_data[ 'fname' ] - file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id ) - name = test_data.get( 'name', None ) - if not name: - name = os.path.basename( file_name ) tool_input = { "file_type": test_data[ 'ftype' ], - "dbkey": test_data[ 'dbkey' ], # TODO: Handle it! Doesn't work if undefined, does seem to in Twill. - "files_0|NAME": name, - "files_0|type": "upload_dataset", + "dbkey": test_data[ 'dbkey' ], } - files = { - "files_0|file_data": open( file_name, 'rb') - } - submit_response = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files ).json() + composite_data = test_data[ 'composite_data' ] + if composite_data: + files = {} + for i, composite_file in enumerate( composite_data ): + file_name = self.twill_test_case.get_filename( composite_file.get( 'value' ), shed_tool_id=shed_tool_id ) + files["files_%s|file_data" % i] = open( file_name, 'rb' ) + tool_input.update({ + #"files_%d|NAME" % i: name, + "files_%d|type" % i: "upload_dataset", + ## TODO: + #"files_%d|space_to_tab" % i: composite_file.get( 'space_to_tab', False ) + }) + name = test_data[ 'name' ] + else: + file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id ) + name = test_data.get( 'name', None ) + if not name: + name = os.path.basename( file_name ) + + tool_input.update({ + "files_0|NAME": name, + "files_0|type": "upload_dataset", + }) + files = { + "files_0|file_data": open( file_name, 'rb') + } + submit_response_object = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files ) + submit_response = submit_response_object.json() dataset = submit_response["outputs"][0] #raise Exception(str(dataset)) hid = dataset['id'] - self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = {"src": "hda", "id": hid} + self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = self.uploads[ name ] = {"src": "hda", "id": hid} return self.__wait_for_history( history_id ) def run_tool( self, testdef, history_id ): diff -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 test/functional/tools/composite.xml --- /dev/null +++ b/test/functional/tools/composite.xml @@ -0,0 +1,21 @@ +<tool id="velvet_inspired" name="velvet_inspired" version="1.0.0"> + <description>Velvet sequence assembler for very short reads</description> + <command>cat '$input.extra_files_path/Sequences' > $output</command> + <inputs> + <param name="input" type="data" format="velvet" label="Velvet Dataset" help="Prepared by velveth."/> + </inputs> + <outputs> + <data format="txt" name="output" label="${tool.name} on ${on_string}: LastGraph"> + </data> + </outputs> + <tests> + <test> + <param name="input" value="velveth_test1/output.html" ftype="velvet" > + <composite_data value='velveth_test1/Sequences' ftype="Sequences"/> + <composite_data value='velveth_test1/Roadmaps' ftype="Roadmaps"/> + <composite_data value='velveth_test1/Log'/> + </param> + <output name="unused_reads_fasta" file="velveth_test1/Sequences" compare="diff"/> + </test> + </tests> +</tool> diff -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 -r 
61e4e10e03ce8de315a1ae81d12529d80dedf594 test/functional/tools/samples_tool_conf.xml --- a/test/functional/tools/samples_tool_conf.xml +++ b/test/functional/tools/samples_tool_conf.xml @@ -1,6 +1,7 @@ <?xml version="1.0"?><toolbox><tool file="upload.xml"/> + <tool file="composite.xml" /><tool file="disambiguate_cond.xml" /><tool file="multi_repeats.xml"/></toolbox> \ No newline at end of file diff -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 test/functional/tools/test-data/velveth_test1 --- /dev/null +++ b/test/functional/tools/test-data/velveth_test1 @@ -0,0 +1,1 @@ +../../../../test-data/velveth_test1 \ No newline at end of file https://bitbucket.org/galaxy/galaxy-central/commits/df80192bfdcc/ Changeset: df80192bfdcc User: jmchilton Date: 2013-11-22 02:17:12 Summary: Add more sample tools for the tool testing framework. Adding multi_select.xml, which demonstrates that twill cannot deal with '-' at the beginning of select param values; this test works immediately with the API interactor. Also adding multi_output.xml, a tool producing a variable number of outputs (force_history_refresh=True); both interactors pass this, but it is a good test to have for verification. Also adding an example to test multi-page tools (multi_page.xml); both interactors pass this test, though the API needed to be adjusted (in a previous changeset) to allow its use. Also adding an example tool demonstrating extra_files output. Finally, simple_constructs.xml just tests the basics: various parameter types, a simple conditional, and a simple repeat. Affected #: 6 files diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/composite_output.xml --- /dev/null +++ b/test/functional/tools/composite_output.xml @@ -0,0 +1,24 @@ +<tool id="composite_output" name="composite_output" version="1.0.0"> + <command>mkdir $output.extra_files_path; cp $input.extra_files_path/* $output.extra_files_path</command> + <inputs> + <param name="input" type="data" format="velvet" label="Velvet Dataset" help="Prepared by velveth."/> + </inputs> + <outputs> + <data format="velvet" name="output" label=""> + </data> + </outputs> + <tests> + <test> + <param name="input" value="velveth_test1/output.html" ftype="velvet" > + <composite_data value='velveth_test1/Sequences' ftype="Sequences"/> + <composite_data value='velveth_test1/Roadmaps' ftype="Roadmaps"/> + <composite_data value='velveth_test1/Log'/> + </param> + <output name="output" file="velveth_test1/output.html"> + <extra_files type="file" name="Sequences" value="velveth_test1/Sequences" /> + <extra_files type="file" name="Roadmaps" value="velveth_test1/Roadmaps" /> + <extra_files type="file" name="Log" value="velveth_test1/Log" /> + </output> + </test> + </tests> +</tool> diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/multi_output.xml --- /dev/null +++ b/test/functional/tools/multi_output.xml @@ -0,0 +1,22 @@ +<tool id="multi_output" name="Multi_Output" description="multi_output" force_history_refresh="True" version="0.1.0"> + <command> + echo "Hello" > $report; + echo "World" > '${__new_file_path__}/primary_${report.id}_moo_visible_?'
+ </command> + <inputs> + <param name="input" type="integer" value="7" /> + </inputs> + <outputs> + <data format="txt" name="report" /> + </outputs> + <tests> + <test> + <param name="input" value="7" /> + <output name="report"> + <assert_contents> + <has_line line="Hello" /> + </assert_contents> + </output> + </test> + </tests> +</tool> diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/multi_page.xml --- /dev/null +++ b/test/functional/tools/multi_page.xml @@ -0,0 +1,29 @@ +<tool id="multi_page" name="Multi_Page" description="multi_page" version="0.1.0"> + <configfiles> + <configfile name="config">${p1} ${p2}</configfile> + </configfiles> + <command>cat $config > $output</command> + <inputs> + <page> + <param name="p1" type="integer" value="1" /> + </page> + <page> + <param name="p2" type="integer" value="2" /> + </page> + </inputs> + <outputs> + <data format="txt" name="output" /> + </outputs> + <tests> + <test> + <param name="p1" value="3" /> + <param name="p2" value="4" /> + + <output name="output"> + <assert_contents> + <has_line line="3 4" /> + </assert_contents> + </output> + </test> + </tests> +</tool> diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/multi_select.xml --- /dev/null +++ b/test/functional/tools/multi_select.xml @@ -0,0 +1,29 @@ +<tool id="multi_select" name="multi_select" version="1.0.0"> + <description>multi_select</description> + <configfiles> + <configfile name="config">${select_ex}</configfile> + </configfiles> + <command>cat $config > $output</command> + <inputs> + <param name="select_ex" type="select" display="checkboxes" multiple="true"> + <option value="--ex1">Ex1</option> + <option value="ex2">Ex2</option> + <option value="--ex3">Ex3</option> + <option value="--ex4">Ex4</option> + <option value="ex5">Ex5</option> + </param> + </inputs> + <outputs> + <data format="txt" name="output" /> + </outputs> + <tests> + <test> + <param name="select_ex" value="--ex1,ex2,--ex3" /> + <output name="output"> + <assert_contents> + <has_line line="--ex1,ex2,--ex3" /> + </assert_contents> + </output> + </test> + </tests> +</tool> diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/samples_tool_conf.xml --- a/test/functional/tools/samples_tool_conf.xml +++ b/test/functional/tools/samples_tool_conf.xml @@ -1,7 +1,12 @@ <?xml version="1.0"?><toolbox><tool file="upload.xml"/> + <tool file="simple_constructs.xml" /><tool file="composite.xml" /><tool file="disambiguate_cond.xml" /><tool file="multi_repeats.xml"/> + <tool file="multi_page.xml"/> + <tool file="multi_select.xml" /> + <tool file="multi_output.xml" /> + <tool file="composite_output.xml" /></toolbox> \ No newline at end of file diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/simple_constructs.xml --- /dev/null +++ b/test/functional/tools/simple_constructs.xml @@ -0,0 +1,47 @@ +<tool id="simple_constructs" name="simple_constructs"> + <command> + echo "$p1.p1val" >> $out_file1; + echo "$booltest" >> $out_file1; + echo "$inttest" >> $out_file1; + echo "$floattest" >> $out_file1; + cat "$files[0].file" >> $out_file1; + </command> + <inputs> + <conditional name="p1"> + <param type="boolean" name="p1use" /> + <when value="true"> + <param name="p1val" value="p1used" type="text" /> + </when> + <when value="false"> + <param name="p1val" value="p1notused" type="text" /> + 
</when> + </conditional> + <param name="booltest" truevalue="booltrue" falsevalue="boolfalse" checked="false" type="boolean" /> + <param name="inttest" value="1" type="integer" /> + <param name="floattest" value="1.0" type="float" /> + <repeat name="files" title="Files"> + <param name="file" type="data" format="txt" /> + </repeat> + </inputs> + <outputs> + <data name="out_file1" format="txt" /> + </outputs> + <tests> + <test> + <param name="p1use" value="true" /> + <param name="booltest" value="true" /> + <param name="inttest" value="12456" /> + <param name="floattest" value="6.789" /> + <param name="file" value="simple_line.txt" /><!-- This is a line of text. --> + <output name="out_file1"> + <assert_contents> + <has_line line="p1used" /> + <has_line line="booltrue" /> + <has_line line="12456" /> + <has_line line="6.789" /> + <has_line line="This is a line of text." /> + </assert_contents> + </output> + </test> + </tests> +</tool> https://bitbucket.org/galaxy/galaxy-central/commits/e3731610302a/ Changeset: e3731610302a User: jmchilton Date: 2013-11-22 02:17:12 Summary: Provide fallback implementation of requests functionality if lib unavailable. Just limited post/get functionality needed for tests. Affected #: 1 file diff -r df80192bfdcc3b8ed37b104c757e0dcc21535420 -r e3731610302a9de11bc15c3b039fac7a17a17558 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -7,8 +7,7 @@ import galaxy.model from galaxy.model.orm import and_, desc from galaxy.model.mapping import context as sa_session -from simplejson import dumps -import requests +from simplejson import dumps, loads toolbox = None @@ -133,7 +132,10 @@ } submit_response_object = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files ) submit_response = submit_response_object.json() - dataset = submit_response["outputs"][0] + try: + dataset = submit_response["outputs"][0] + except KeyError: + raise Exception(submit_response) #raise Exception(str(dataset)) hid = dataset['id'] self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = self.uploads[ name ] = {"src": "hda", "id": hid} @@ -242,7 +244,7 @@ key = self.api_key data = data.copy() data['key'] = key - return requests.post( "%s/%s" % (self.api_url, path), data=data, files=files ) + return post_request( "%s/%s" % (self.api_url, path), data=data, files=files ) def __get( self, path, data={}, key=None ): if not key: @@ -252,7 +254,7 @@ if path.startswith("/api"): path = path[ len("/api"): ] url = "%s/%s" % (self.api_url, path) - return requests.get( url, params=data ) + return get_request( url, params=data ) class GalaxyInteractorTwill( object ): @@ -398,3 +400,66 @@ 'api': GalaxyInteractorApi, 'twill': GalaxyInteractorTwill, } + + +# Lets just try to use requests if it is available, but if not provide fallback +# on custom implementations of limited requests get/post functionality. +try: + from requests import get as get_request + from requests import post as post_request +except ImportError: + import urllib2 + import httplib + + class RequestsLikeResponse( object ): + + def __init__( self, content ): + self.content = content + + def json( self ): + return loads( self.content ) + + def get_request( url, params={} ): + argsep = '&' + if '?' not in url: + argsep = '?' 
+ url = url + argsep + '&'.join( [ '%s=%s' % (k, v) for k, v in params.iteritems() ] ) + #req = urllib2.Request( url, headers = { 'Content-Type': 'application/json' } ) + return RequestsLikeResponse(urllib2.urlopen( url ).read() ) + + def post_request( url, data, files ): + parsed_url = urllib2.urlparse.urlparse( url ) + return __post_multipart( host=parsed_url.netloc, selector=parsed_url.path, fields=data.iteritems(), files=(files or {}).iteritems() ) + + # http://stackoverflow.com/a/681182 + def __post_multipart(host, selector, fields, files): + content_type, body = __encode_multipart_formdata(fields, files) + h = httplib.HTTP(host) + h.putrequest('POST', selector) + h.putheader('content-type', content_type) + h.putheader('content-length', str(len(body))) + h.endheaders() + h.send(body) + errcode, errmsg, headers = h.getreply() + return RequestsLikeResponse(h.file.read()) + + def __encode_multipart_formdata(fields, files): + LIMIT = '----------lImIt_of_THE_fIle_eW_$' + CRLF = '\r\n' + L = [] + for (key, value) in fields: + L.append('--' + LIMIT) + L.append('Content-Disposition: form-data; name="%s"' % key) + L.append('') + L.append(value) + for (key, value) in files: + L.append('--' + LIMIT) + L.append('Content-Disposition: form-data; name="%s"; filename="%s";' % (key, key)) + L.append('Content-Type: application/octet-stream') + L.append('') + L.append(value.read()) + L.append('--' + LIMIT + '--') + L.append('') + body = CRLF.join(L) + content_type = 'multipart/form-data; boundary=%s' % LIMIT + return content_type, body https://bitbucket.org/galaxy/galaxy-central/commits/cf493be30252/ Changeset: cf493be30252 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Twill-less tool tests - handle input metadata tags. Affected #: 1 file diff -r e3731610302a9de11bc15c3b039fac7a17a17558 -r cf493be302529fe2271b2a339f8a3fada63301c8 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -104,6 +104,9 @@ "file_type": test_data[ 'ftype' ], "dbkey": test_data[ 'dbkey' ], } + for elem in test_data.get('metadata', []): + tool_input["files_metadata|%s" % elem.get( 'name' )] = elem.get( 'value' ) + composite_data = test_data[ 'composite_data' ] if composite_data: files = {} https://bitbucket.org/galaxy/galaxy-central/commits/45c1d11cfff5/ Changeset: 45c1d11cfff5 User: jmchilton Date: 2013-11-22 02:17:12 Summary: Tool functional tests - allow checking extra files without checking a primary file. For output datatypes where primary file is meaningless. Affected #: 1 file diff -r cf493be302529fe2271b2a339f8a3fada63301c8 -r 45c1d11cfff552ce095c0561b5fba2cb5d3610e3 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -252,8 +252,6 @@ assert_list = self.__parse_assert_list( output_elem ) file = attrib.pop( 'file', None ) # File no longer required if an list of assertions was present. 
- if not assert_list and file is None: - raise Exception( "Test output does not have a 'file' to compare with or list of assertions to check") attributes = {} # Method of comparison attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower() @@ -262,12 +260,15 @@ # Allow a file size to vary if sim_size compare attributes['delta'] = int( attrib.pop( 'delta', '10000' ) ) attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) ) - attributes['extra_files'] = [] - attributes['assert_list'] = assert_list + extra_files = [] if 'ftype' in attrib: attributes['ftype'] = attrib['ftype'] for extra in output_elem.findall( 'extra_files' ): - attributes['extra_files'].append( self.__parse_extra_files_elem( extra ) ) + extra_files.append( self.__parse_extra_files_elem( extra ) ) + if not (assert_list or file or extra_files): + raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)") + attributes['assert_list'] = assert_list + attributes['extra_files'] = extra_files self.__add_output( name, file, attributes ) except Exception, e: self.error = True https://bitbucket.org/galaxy/galaxy-central/commits/01f05ab8df70/ Changeset: 01f05ab8df70 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Extend tool functional test framework to allow testing output dataset metadata. Adding file test/functional/tools/metadata.xml demonstrating how to check output metadata - this file also demonstrates setting metadata on uploaded datasets and verifies both of these functionalities. Checking output metadata is only available for new API driven tool testing. Affected #: 4 files diff -r 45c1d11cfff552ce095c0561b5fba2cb5d3610e3 -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -265,10 +265,14 @@ attributes['ftype'] = attrib['ftype'] for extra in output_elem.findall( 'extra_files' ): extra_files.append( self.__parse_extra_files_elem( extra ) ) - if not (assert_list or file or extra_files): + metadata = {} + for metadata_elem in output_elem.findall( 'metadata' ): + metadata[ metadata_elem.get('name') ] = metadata_elem.get( 'value' ) + if not (assert_list or file or extra_files or metadata): raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)") attributes['assert_list'] = assert_list attributes['extra_files'] = extra_files + attributes['metadata'] = metadata self.__add_output( name, file, attributes ) except Exception, e: self.error = True diff -r 45c1d11cfff552ce095c0561b5fba2cb5d3610e3 -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -88,6 +88,21 @@ fetcher = self.__dataset_fetcher( history_id ) ## TODO: Twill version verifys dataset is 'ok' in here. self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id ) + metadata = attributes.get( 'metadata', {} ) + if metadata: + dataset = self.__get( "histories/%s/contents/%s" % ( history_id, hid ) ).json() + for key, value in metadata.iteritems(): + dataset_key = "metadata_%s" % key + try: + dataset_value = dataset.get( dataset_key, None ) + if dataset_value != value: + msg = "Dataset metadata verification for [%s] failed, expected [%s] but found [%s]." 
+ msg_params = ( key, value, dataset_value ) + msg = msg % msg_params + raise Exception( msg ) + except KeyError: + msg = "Failed to verify dataset metadata, metadata key [%s] was not found." % key + raise Exception( msg ) def get_job_stream( self, history_id, output_data, stream ): hid = output_data.get( 'id' ) diff -r 45c1d11cfff552ce095c0561b5fba2cb5d3610e3 -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb test/functional/tools/metadata.xml --- /dev/null +++ b/test/functional/tools/metadata.xml @@ -0,0 +1,29 @@ +<tool id="metadata" name="metadata" version="1.0.0"> + <command>mkdir $output_copy_of_input.extra_files_path; cp $input.extra_files_path/* $output_copy_of_input.extra_files_path; echo "$input.metadata.base_name" > $output_of_input_metadata</command> + <inputs> + <param name="input" type="data" format="velvet" label="Velvet Dataset" help="Prepared by velveth."/> + </inputs> + <outputs> + <data format="txt" name="output_of_input_metadata" /> + <data format="velvet" name="output_copy_of_input" /> + </outputs> + <tests> + <test> + <param name="input" value="velveth_test1/output.html" ftype="velvet" > + <composite_data value='velveth_test1/Sequences' ftype="Sequences"/> + <composite_data value='velveth_test1/Roadmaps' ftype="Roadmaps"/> + <composite_data value='velveth_test1/Log'/> + <metadata name="base_name" value="Example Metadata" /> + </param> + <!-- This ouptut tests setting input metadata above --> + <output name="output_of_input_metadata"> + <assert_contents> + <has_line line="Example Metadata" /> + </assert_contents> + </output> + <!-- This output tests an assertion about output metadata --> + <output name="output_copy_of_input" file="velveth_test1/output.html"> + <metadata name="base_name" value="velvet" /> + </output> + </test> + </tests></tool> diff -r 45c1d11cfff552ce095c0561b5fba2cb5d3610e3 -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb test/functional/tools/samples_tool_conf.xml --- a/test/functional/tools/samples_tool_conf.xml +++ b/test/functional/tools/samples_tool_conf.xml @@ -9,4 +9,5 @@ <tool file="multi_select.xml" /><tool file="multi_output.xml" /><tool file="composite_output.xml" /> + <tool file="metadata.xml" /></toolbox> \ No newline at end of file https://bitbucket.org/galaxy/galaxy-central/commits/d05be33ad6b7/ Changeset: d05be33ad6b7 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Allow outputs in tool functional tests to be specified in any order when using API interactor. Outputs must specify name and must be specified in the same order with the Twill variant. This second restriction is entirely arbitrary using API so dropping it here. Adding tool demonstrating this functionality (test/functional/tools/output_order.xml) which fails with twill interactor but works fine with API interactor. 
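The mechanism described here lands as __dictify_outputs in the diff below: each API output is registered under both its positional index and its output_name, so name-based tests and legacy position-based tests read from the same dictionary. A minimal standalone sketch (sample data invented for illustration):

    def dictify_outputs(outputs):
        # outputs: the 'outputs' list from the tool-run API response
        by_key = {}
        for index, output in enumerate(outputs):
            by_key[index] = by_key[output.get("output_name")] = output
        return by_key

    outputs = [{"output_name": "out_file1", "id": "abc"},
               {"output_name": "out_file2", "id": "def"}]
    lookup = dictify_outputs(outputs)
    assert lookup["out_file2"] is lookup[1]  # same record either way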
Affected #: 3 files diff -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb -r d05be33ad6b772c2a688d875c4cf895d6afac4e3 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -57,12 +57,16 @@ def __verify_outputs( self, testdef, history, shed_tool_id, data_list, galaxy_interactor ): maxseconds = testdef.maxseconds - output_index = 0 - len( testdef.outputs ) - for output_tuple in testdef.outputs: + for output_index, output_tuple in enumerate(testdef.outputs): # Get the correct hid - output_data = data_list[ output_index ] + name, outfile, attributes = output_tuple + try: + output_data = data_list[ name ] + except (TypeError, KeyError): + # Legacy - fall back on ordered data list access if data_list is + # just a list (case with twill variant) + output_data = data_list[ len(data_list) - len(testdef.outputs) + output_index ] self.assertTrue( output_data is not None ) - name, outfile, attributes = output_tuple try: galaxy_interactor.verify_output( history, output_data, outfile, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds ) except Exception: @@ -71,8 +75,6 @@ print >>sys.stderr, self._format_stream( stream_output, stream=stream, format=True ) raise - output_index += 1 - class GalaxyInteractorApi( object ): @@ -199,10 +201,21 @@ datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=tool_input ) datasets_object = datasets.json() try: - return datasets_object[ 'outputs' ] + return self.__dictify_outputs( datasets_object ) except KeyError: raise Exception( datasets_object[ 'message' ] ) + def __dictify_outputs( self, datasets_object ): + ## Convert outputs list to a dictionary that can be accessed by + ## output_name so can be more flexiable about ordering of outputs + ## but also allows fallback to legacy access as list mode. + outputs_dict = {} + index = 0 + for output in datasets_object[ 'outputs' ]: + outputs_dict[ index ] = outputs_dict[ output.get("output_name") ] = output + index += 1 + return outputs_dict + def output_hid( self, output_data ): return output_data[ 'id' ] diff -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb -r d05be33ad6b772c2a688d875c4cf895d6afac4e3 test/functional/tools/output_order.xml --- /dev/null +++ b/test/functional/tools/output_order.xml @@ -0,0 +1,25 @@ +<tool id="output_order" name="output_order" version="0.1.0"> + <command>echo $pa > $output_a; echo $pb > $output_b</command> + <inputs> + <param name="pa" type="integer" value="1" /> + <param name="pb" type="integer" value="2" /> + </inputs> + <outputs> + <data format="txt" name="output_a" /> + <data format="txt" name="output_b" /> + </outputs> + <tests> + <test> + <output name="output_b"> + <assert_contents> + <has_line line="2" /> + </assert_contents> + </output> + <output name="output_a"> + <assert_contents> + <has_line line="1" /> + </assert_contents> + </output> + </test> + </tests> +</tool> diff -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb -r d05be33ad6b772c2a688d875c4cf895d6afac4e3 test/functional/tools/samples_tool_conf.xml --- a/test/functional/tools/samples_tool_conf.xml +++ b/test/functional/tools/samples_tool_conf.xml @@ -10,4 +10,5 @@ <tool file="multi_output.xml" /><tool file="composite_output.xml" /><tool file="metadata.xml" /> + <tool file="output_order.xml" /></toolbox> \ No newline at end of file https://bitbucket.org/galaxy/galaxy-central/commits/1cb070e3da5b/ Changeset: 1cb070e3da5b User: jmchilton Date: 2013-11-22 02:17:13 Summary: Refactor lib/galaxy/tools/test.py parsing into smaller methods. 
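Before the refactored parser appears below, it may help to see how little state each <output> element actually carries; a condensed, hypothetical rendering of the comparison attributes it is parsed into (defaults mirror the values used in the diffs):

    def parse_output_checks(attrib):
        # attrib: dict(output_elem.attrib) from a tool test's <output> element
        return {
            'compare': attrib.pop('compare', 'diff').lower(),  # comparison method
            'lines_diff': int(attrib.pop('lines_diff', '0')),  # lines allowed to vary
            'delta': int(attrib.pop('delta', '10000')),        # size tolerance for sim_size
            'sort': attrib.pop('sort', 'false').lower() in ('true', 'yes', '1'),
        }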
Affected #: 1 file diff -r d05be33ad6b772c2a688d875c4cf895d6afac4e3 -r 1cb070e3da5b77f16eb3b8591a1cacd74389c34c lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -192,10 +192,6 @@ return value def __parse_elem( self, test_elem, i, default_interactor ): - # Composite datasets need a unique name: each test occurs in a fresh - # history, but we'll keep it unique per set of tests - use i (test #) - # and composite_data_names_counter (instance per test #) - composite_data_names_counter = 0 try: # Mechanism test code uses for interacting with Galaxy instance, # until 'api' is the default switch this to API to use its new @@ -203,81 +199,92 @@ # features or workarounds. self.interactor = test_elem.get( 'interactor', default_interactor ) - for param_elem in test_elem.findall( "param" ): - attrib = dict( param_elem.attrib ) - if 'values' in attrib: - value = attrib[ 'values' ].split( ',' ) - elif 'value' in attrib: - value = attrib['value'] - else: - value = None - attrib['children'] = list( param_elem.getchildren() ) - if attrib['children']: - # At this time, we can assume having children only - # occurs on DataToolParameter test items but this could - # change and would cause the below parsing to change - # based upon differences in children items - attrib['metadata'] = [] - attrib['composite_data'] = [] - attrib['edit_attributes'] = [] - # Composite datasets need to be renamed uniquely - composite_data_name = None - for child in attrib['children']: - if child.tag == 'composite_data': - attrib['composite_data'].append( child ) - if composite_data_name is None: - # Generate a unique name; each test uses a - # fresh history - composite_data_name = '_COMPOSITE_RENAMED_t%i_d%i' \ - % ( i, composite_data_names_counter ) - composite_data_names_counter += 1 - elif child.tag == 'metadata': - attrib['metadata'].append( child ) - elif child.tag == 'metadata': - attrib['metadata'].append( child ) - elif child.tag == 'edit_attributes': - attrib['edit_attributes'].append( child ) - if composite_data_name: - # Composite datasets need implicit renaming; - # inserted at front of list so explicit declarations - # take precedence - attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } ) - self.__add_param( attrib.pop( 'name' ), value, attrib ) - for output_elem in test_elem.findall( "output" ): - attrib = dict( output_elem.attrib ) - name = attrib.pop( 'name', None ) - if name is None: - raise Exception( "Test output does not have a 'name'" ) + self.__parse_inputs_elems( test_elem, i ) - assert_list = self.__parse_assert_list( output_elem ) - file = attrib.pop( 'file', None ) - # File no longer required if an list of assertions was present. 
- attributes = {} - # Method of comparison - attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower() - # Number of lines to allow to vary in logs (for dates, etc) - attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) ) - # Allow a file size to vary if sim_size compare - attributes['delta'] = int( attrib.pop( 'delta', '10000' ) ) - attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) ) - extra_files = [] - if 'ftype' in attrib: - attributes['ftype'] = attrib['ftype'] - for extra in output_elem.findall( 'extra_files' ): - extra_files.append( self.__parse_extra_files_elem( extra ) ) - metadata = {} - for metadata_elem in output_elem.findall( 'metadata' ): - metadata[ metadata_elem.get('name') ] = metadata_elem.get( 'value' ) - if not (assert_list or file or extra_files or metadata): - raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)") - attributes['assert_list'] = assert_list - attributes['extra_files'] = extra_files - attributes['metadata'] = metadata - self.__add_output( name, file, attributes ) + self.__parse_output_elems( test_elem ) except Exception, e: self.error = True self.exception = e + def __parse_inputs_elems( self, test_elem, i ): + # Composite datasets need a unique name: each test occurs in a fresh + # history, but we'll keep it unique per set of tests - use i (test #) + # and composite_data_names_counter (instance per test #) + composite_data_names_counter = 0 + for param_elem in test_elem.findall( "param" ): + attrib = dict( param_elem.attrib ) + if 'values' in attrib: + value = attrib[ 'values' ].split( ',' ) + elif 'value' in attrib: + value = attrib['value'] + else: + value = None + attrib['children'] = list( param_elem.getchildren() ) + if attrib['children']: + # At this time, we can assume having children only + # occurs on DataToolParameter test items but this could + # change and would cause the below parsing to change + # based upon differences in children items + attrib['metadata'] = [] + attrib['composite_data'] = [] + attrib['edit_attributes'] = [] + # Composite datasets need to be renamed uniquely + composite_data_name = None + for child in attrib['children']: + if child.tag == 'composite_data': + attrib['composite_data'].append( child ) + if composite_data_name is None: + # Generate a unique name; each test uses a + # fresh history + composite_data_name = '_COMPOSITE_RENAMED_t%i_d%i' \ + % ( i, composite_data_names_counter ) + composite_data_names_counter += 1 + elif child.tag == 'metadata': + attrib['metadata'].append( child ) + elif child.tag == 'metadata': + attrib['metadata'].append( child ) + elif child.tag == 'edit_attributes': + attrib['edit_attributes'].append( child ) + if composite_data_name: + # Composite datasets need implicit renaming; + # inserted at front of list so explicit declarations + # take precedence + attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } ) + self.__add_param( attrib.pop( 'name' ), value, attrib ) + + def __parse_output_elems( self, test_elem ): + for output_elem in test_elem.findall( "output" ): + attrib = dict( output_elem.attrib ) + name = attrib.pop( 'name', None ) + if name is None: + raise Exception( "Test output does not have a 'name'" ) + + assert_list = self.__parse_assert_list( output_elem ) + file = attrib.pop( 'file', None ) + # File no longer required if an list of assertions was present. 
+ attributes = {} + # Method of comparison + attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower() + # Number of lines to allow to vary in logs (for dates, etc) + attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) ) + # Allow a file size to vary if sim_size compare + attributes['delta'] = int( attrib.pop( 'delta', '10000' ) ) + attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) ) + extra_files = [] + if 'ftype' in attrib: + attributes['ftype'] = attrib['ftype'] + for extra in output_elem.findall( 'extra_files' ): + extra_files.append( self.__parse_extra_files_elem( extra ) ) + metadata = {} + for metadata_elem in output_elem.findall( 'metadata' ): + metadata[ metadata_elem.get('name') ] = metadata_elem.get( 'value' ) + if not (assert_list or file or extra_files or metadata): + raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)") + attributes['assert_list'] = assert_list + attributes['extra_files'] = extra_files + attributes['metadata'] = metadata + self.__add_output( name, file, attributes ) + def __parse_assert_list( self, output_elem ): assert_elem = output_elem.find("assert_contents") assert_list = None https://bitbucket.org/galaxy/galaxy-central/commits/aa6ec1842ba6/ Changeset: aa6ec1842ba6 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Utilize tool test runtime tree analysis earlier to more correctly determine which inputs are data params. Turns out previous changesets attempting to separately disambiguate and supply implicit defaults for conditionals do not really work with data inputs - because they are not recognized as dataset input params during parsing. Additionally, this change should eliminate any ordering requirements of tool test params - i.e. you shouldn't need to provide the test parameters in an order that makes it clear what branch you are on before suppling a dataset in order for the parser to realize it is a dataset. Finally, this changeset allows deletion of a lot of code and creates some more robust abstractions. Affected #: 2 files diff -r 1cb070e3da5b77f16eb3b8591a1cacd74389c34c -r aa6ec1842ba60b5ba7af69f8f01624c4902bc2be lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -75,29 +75,7 @@ yield data_dict - def to_dict( self, tool_inputs, declared_inputs ): - expanded_inputs = {} - for key, value in tool_inputs.items(): - if isinstance( value, grouping.Conditional ): - for i, case in enumerate( value.cases ): - if declared_inputs[ value.test_param.name ] == case.value: - pass # TODO - elif isinstance( value, grouping.Repeat ): - values = [] - for r_name, r_value in value.inputs.iteritems(): - values.append( self.to_dict( {r_name: r_value} , declared_inputs ) ) - expanded_inputs[ value.name ] = values - elif value.name not in declared_inputs: - print "%s not declared in tool test, will not change default value." 
% value.name - else: - expanded_inputs[ value.name ] = declared_inputs[value.name] - return expanded_inputs - - def __matching_case( self, cond, declared_inputs, prefix, index=None ): - param = cond.test_param - declared_value = self.__declared_match( declared_inputs, param.name, prefix) - if index is not None: - declared_value = declared_value[index] + def __matching_case_for_value( self, cond, declared_value ): for i, case in enumerate( cond.cases ): if declared_value is not None and (case.value == declared_value): return case @@ -107,83 +85,7 @@ return case else: return Bunch(value=declared_value, inputs=Bunch(items=lambda: [])) - print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( param.name, declared_value ) - - def expand_multi_grouping( self, tool_inputs, declared_inputs, prefix='', index=0 ): - """ - Used by API, slight generalization of expand_grouping used by Twill based interactor. Still - not quite the context/tree based specification that should exist! - """ - expanded_inputs = {} - for key, value in tool_inputs.items(): - expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name) - if isinstance( value, grouping.Conditional ): - new_prefix = expanded_key - case = self.__matching_case( value, declared_inputs, new_prefix, index=index ) - if case: - expanded_value = self.__split_if_str( case.value ) - expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value - for input_name, input_value in case.inputs.items(): - expanded_inputs.update( self.expand_multi_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ), index=index ) - elif isinstance( value, grouping.Repeat ): - repeat_index = 0 - any_children_matched = True - while any_children_matched: - any_children_matched = False - for r_name, r_value in value.inputs.iteritems(): - new_prefix = "%s_%d" % ( value.name, repeat_index ) - if prefix: - new_prefix = "%s|%s" % ( prefix, new_prefix ) - expanded_input = self.expand_multi_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix, index=repeat_index ) - if expanded_input: - any_children_matched = True - expanded_inputs.update( expanded_input ) - repeat_index += 1 - else: - declared_value = self.__declared_match( declared_inputs, value.name, prefix ) - if declared_value and len(declared_value) > index: - value = self.__split_if_str( declared_value[ index ] ) - expanded_inputs[ expanded_key ] = value - return expanded_inputs - - def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): - expanded_inputs = {} - for key, value in tool_inputs.items(): - expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name) - if isinstance( value, grouping.Conditional ): - new_prefix = expanded_key - case = self.__matching_case( value, declared_inputs, new_prefix ) - if case: - expanded_value = self.__split_if_str(case.value) - expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value - for input_name, input_value in case.inputs.items(): - expanded_inputs.update( self.expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) ) - elif isinstance( value, grouping.Repeat ): - for repeat_index in xrange( 0, 1 ): # need to allow for and figure out how many repeats we have - for r_name, r_value in value.inputs.iteritems(): - new_prefix = "%s_%d" % ( value.name, repeat_index ) - if prefix: - new_prefix = "%s|%s" % ( prefix, new_prefix ) - expanded_inputs.update( self.expand_grouping( { new_prefix : r_value }, 
declared_inputs, prefix=new_prefix ) ) - else: - declared_value = self.__declared_match( declared_inputs, value.name, prefix ) - if not declared_value: - print "%s not declared in tool test, will not change default value." % value.name - else: - value = self.__split_if_str(declared_value) - expanded_inputs[expanded_key] = value - return expanded_inputs - - def __declared_match( self, declared_inputs, name, prefix ): - prefix_suffixes = [ "%s|" % part for part in prefix.split( "|" ) ] if prefix else [] - prefix_suffixes.append( name ) - prefix_suffixes.reverse() - prefixed_name = "" - for prefix_suffix in prefix_suffixes: - prefixed_name = "%s%s" % ( prefix_suffix, prefixed_name ) - if prefixed_name in declared_inputs: - return declared_inputs[prefixed_name] - return None + print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( cond.test_param.name, declared_value ) def __split_if_str( self, value ): split = isinstance(value, str) @@ -211,6 +113,7 @@ # history, but we'll keep it unique per set of tests - use i (test #) # and composite_data_names_counter (instance per test #) composite_data_names_counter = 0 + raw_inputs = {} for param_elem in test_elem.findall( "param" ): attrib = dict( param_elem.attrib ) if 'values' in attrib: @@ -250,7 +153,56 @@ # inserted at front of list so explicit declarations # take precedence attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } ) - self.__add_param( attrib.pop( 'name' ), value, attrib ) + name = attrib.pop( 'name' ) + if name not in raw_inputs: + raw_inputs[ name ] = [] + raw_inputs[ name ].append( ( value, attrib ) ) + self.inputs = self.__process_raw_inputs( self.tool.inputs, raw_inputs ) + + def __process_raw_inputs( self, tool_inputs, raw_inputs, parent_context=None ): + """ + Recursively expand flat list of inputs into "tree" form of flat list + (| using to nest to new levels) structure and expand dataset + information as proceeding to populate self.required_files. 
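To make the docstring's flat-key convention concrete: '|' separates nesting levels and a numeric suffix picks a repeat instance, so a fully expanded inputs dictionary looks roughly like the following (names and values invented for illustration):

    expanded_inputs = {
        "cond|use": "true",            # test_param of <conditional name="cond">
        "cond|input": ["a.txt"],       # data param inside the selected case
        "queries_0|input": ["b.txt"],  # first instance of <repeat name="queries">
        "queries_1|input": ["c.txt"],  # second instance of the same repeat
    }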
+ """ + parent_context = parent_context or RootParamContext() + expanded_inputs = {} + for key, value in tool_inputs.items(): + if isinstance( value, grouping.Conditional ): + cond_context = ParamContext( name=value.name, parent_context=parent_context ) + case_context = ParamContext( name=value.test_param.name, parent_context=cond_context ) + raw_input = case_context.value( raw_inputs ) + case = self.__matching_case_for_value( value, raw_input ) + if case: + expanded_value = self.__split_if_str( case.value ) + expanded_inputs[ case_context.for_state() ] = expanded_value + for input_name, input_value in case.inputs.items(): + expanded_inputs.update( self.__process_raw_inputs( { input_name: input_value }, raw_inputs, parent_context=cond_context ) ) + elif isinstance( value, grouping.Repeat ): + repeat_index = 0 + while True: + context = ParamContext( name=value.name, index=repeat_index, parent_context=parent_context ) + updated = False + for r_name, r_value in value.inputs.iteritems(): + expanded_input = self.__process_raw_inputs( { context.for_state() : r_value }, raw_inputs, parent_context=context ) + if expanded_input: + expanded_inputs.update( expanded_input ) + updated = True + if not updated: + break + repeat_index += 1 + else: + context = ParamContext( name=value.name, parent_context=parent_context ) + raw_input = context.value( raw_inputs ) + if raw_input: + (param_value, param_extra) = raw_input + if isinstance( value, basic.DataToolParameter ): + processed_value = [ self.__add_uploaded_dataset( context.for_state(), param_value, param_extra, value ) ] + else: + param_value = self.__split_if_str( param_value ) + processed_value = param_value + expanded_inputs[ context.for_state() ] = processed_value + return expanded_inputs def __parse_output_elems( self, test_elem ): for output_elem in test_elem.findall( "output" ): @@ -324,78 +276,9 @@ extra_attributes['sort'] = string_as_bool( extra.get( 'sort', False ) ) return extra_type, extra_value, extra_name, extra_attributes - def __add_param( self, name, value, extra ): - try: - if name not in self.tool.inputs: - found_parameter = False - for input_name, input_value in self.tool.inputs.items(): - if isinstance( input_value, grouping.Group ): - found_parameter, new_value = self.__expand_grouping_for_data_input(name, value, extra, input_name, input_value) - if found_parameter: - value = new_value - break - if not found_parameter: - raise ValueError( "Unable to determine parameter type of test input '%s'. " - "Ensure that the parameter exists and that any container groups are defined first." - % name ) - elif isinstance( self.tool.inputs[name], basic.DataToolParameter ): - value = self.__add_uploaded_dataset( name, value, extra, self.tool.inputs[name] ) - except Exception, e: - log.debug( "Error for tool %s: could not add test parameter %s. 
%s" % ( self.tool.id, name, e ) ) - self.inputs.append( ( name, value, extra ) ) - def __add_output( self, name, file, extra ): self.outputs.append( ( name, file, extra ) ) - def __expand_grouping_for_data_input( self, name, value, extra, grouping_name, grouping_value ): - # Currently handles grouping.Conditional and grouping.Repeat - if isinstance( grouping_value, grouping.Conditional ): - if name == grouping_value.test_param.name: - return True, value - case_test_param_value = None - for input in self.inputs: - if input[0] == grouping_value.test_param.name: - case_test_param_value = input[1] - break - if case_test_param_value is None: - #case for this group has not been set yet - return False, value - for case in grouping_value.cases: - if case.value == case_test_param_value: - break - if case.value != case_test_param_value: - return False, value - #assert case.value == case_test_param_value, "Current case could not be determined for parameter '%s'. Provided value '%s' could not be found in '%s'." % ( grouping_value.name, value, grouping_value.test_param.name ) - if name in case.inputs: - if isinstance( case.inputs[name], basic.DataToolParameter ): - return True, self.__add_uploaded_dataset( name, value, extra, case.inputs[name] ) - else: - return True, value - else: - for input_name, input_parameter in case.inputs.iteritems(): - if isinstance( input_parameter, grouping.Group ): - found_parameter, new_value = self.__expand_grouping_for_data_input( name, value, extra, input_name, input_parameter ) - if found_parameter: - return True, new_value - elif isinstance( grouping_value, grouping.Repeat ): - # FIXME: grouping.Repeat can only handle 1 repeat param element since the param name - # is something like "input2" and the expanded page display is something like "queries_0|input2". - # The problem is that the only param name on the page is "input2", and adding more test input params - # with the same name ( "input2" ) is not yet supported in our test code ( the last one added is the only - # one used ). - if name in grouping_value.inputs: - if isinstance( grouping_value.inputs[name], basic.DataToolParameter ): - return True, self.__add_uploaded_dataset( name, value, extra, grouping_value.inputs[name] ) - else: - return True, value - else: - for input_name, input_parameter in grouping_value.inputs.iteritems(): - if isinstance( input_parameter, grouping.Group ): - found_parameter, new_value = self.__expand_grouping_for_data_input( name, value, extra, input_name, input_parameter ) - if found_parameter: - return True, new_value - return False, value - def __add_uploaded_dataset( self, name, value, extra, input_parameter ): if value is None: assert input_parameter.optional, '%s is not optional. You must provide a valid filename.' 
% name @@ -413,3 +296,58 @@ break value = os.path.basename( value ) # if uploading a file in a path other than root of test-data return value + + +class ParamContext(object): + + def __init__( self, name, index=None, parent_context=None ): + self.parent_context = parent_context + self.name = name + self.index = None if index is None else int( index ) + + def for_state( self ): + name = self.name if self.index is None else "%s_%d" % ( self.name, self.index ) + parent_for_state = self.parent_context.for_state() + if parent_for_state: + return "%s|%s" % ( parent_for_state, name ) + else: + return name + + def __str__( self ): + return "Context[for_state=%s]" % self.for_state() + + def param_names( self ): + for parent_context_param in self.parent_context.param_names(): + yield "%s|%s" % ( parent_context_param, self.name ) + yield self.name + + def value( self, declared_inputs ): + for param_name in self.param_names(): + if param_name in declared_inputs: + index = self.get_index() + try: + return declared_inputs[ param_name ][ index ] + except IndexError: + return None + return None + + def get_index( self ): + if self.index is not None: + return self.index + else: + return self.parent_context.get_index() + + +class RootParamContext(object): + + def __init__( self ): + pass + + def for_state( self ): + return "" + + def param_names( self ): + return [] + + def get_index( self ): + return 0 diff -r 1cb070e3da5b77f16eb3b8591a1cacd74389c34c -r aa6ec1842ba60b5ba7af69f8f01624c4902bc2be test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -164,41 +164,21 @@ def run_tool( self, testdef, history_id ): # We need to handle the case where we've uploaded a valid compressed file since the upload # tool will have uncompressed it on the fly. - all_inputs = {} - for name, value, _ in testdef.inputs: - # TODO: Restrict this to param inputs. - if value in self.uploads: - value = self.uploads[ value ] - if name in all_inputs: - all_inputs[name].append( value ) - else: - all_inputs[name] = [ value ] + inputs_tree = testdef.inputs.copy() + for key, value in inputs_tree.iteritems(): + values = [value] if not isinstance(value, list) else value + for value in values: + if value in self.uploads: + inputs_tree[ key ] = self.uploads[ value ] - # TODO: Handle pages? - # TODO: Handle force_history_refresh? - flat_inputs = True - if flat_inputs: - # Build up tool_input flately (e.g {"a_repeat_0|a_repeat_param" : "value1"}) - expanded_inputs = {} - expanded_inputs.update(testdef.expand_multi_grouping(testdef.tool.inputs_by_page[0], all_inputs)) - for i in range( 1, testdef.tool.npages ): - expanded_inputs.update(testdef.expand_multi_grouping(testdef.tool.inputs_by_page[i], all_inputs)) + # # HACK: Flatten single-value lists. Required when using expand_grouping + for key, value in inputs_tree.iteritems(): + if isinstance(value, list) and len(value) == 1: + inputs_tree[key] = value[0] - # # HACK: Flatten single-value lists. Required when using expand_grouping - for key, value in expanded_inputs.iteritems(): - if isinstance(value, list) and len(value) == 1: - expanded_inputs[key] = value[0] - tool_input = expanded_inputs - else: - # Build up tool_input as nested dictionary (e.g. {"a_repeat": [{"a_repeat_param" : "value1"}]}) - # Doesn't work with the tool API at this time. 
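The ParamContext/RootParamContext pair above is what generates those flat names. A short usage sketch against the classes exactly as defined, with return values worked out by hand from the code:

    root = RootParamContext()
    cond = ParamContext(name="files", parent_context=root)
    leaf = ParamContext(name="file", parent_context=cond)
    print leaf.for_state()          # files|file
    print list(leaf.param_names())  # ['files|file', 'file'] - most qualified first

    rep = ParamContext(name="queries", index=0, parent_context=root)
    print rep.for_state()           # queries_0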
- tool_input = {} - tool_input.update(testdef.to_dict(testdef.tool.inputs_by_page[0], all_inputs)) - for i in range( 1, testdef.tool.npages ): - tool_input.update(testdef.to_dict(testdef.tool.inputs_by_page[i], all_inputs)) - - datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=tool_input ) + log.info( "Submiting tool with params %s" % inputs_tree ) + datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=inputs_tree ) datasets_object = datasets.json() try: return self.__dictify_outputs( datasets_object ) @@ -322,9 +302,11 @@ def run_tool( self, testdef, test_history ): # We need to handle the case where we've uploaded a valid compressed file since the upload # tool will have uncompressed it on the fly. + + # Lose tons of information to accomodate legacy repeat handling. all_inputs = {} - for name, value, _ in testdef.inputs: - all_inputs[ name ] = value + for key, value in testdef.inputs.iteritems(): + all_inputs[ key.split("|")[-1] ] = value # See if we have a grouping.Repeat element repeat_name = None @@ -340,15 +322,20 @@ else: job_finish_by_output_count = False + # Strip out just a given page of inputs from inputs "tree". + def filter_page_inputs( n ): + page_input_keys = testdef.tool.inputs_by_page[ n ].keys() + return dict( [ (k, v) for k, v in testdef.inputs.iteritems() if k.split("|")[0] in page_input_keys ] ) + # Do the first page - page_inputs = testdef.expand_grouping(testdef.tool.inputs_by_page[0], all_inputs) + page_inputs = filter_page_inputs( 0 ) # Run the tool self.twill_test_case.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs ) print "page_inputs (0)", page_inputs # Do other pages if they exist for i in range( 1, testdef.tool.npages ): - page_inputs = testdef.expand_grouping(testdef.tool.inputs_by_page[i], all_inputs) + page_inputs = filter_page_inputs( i ) self.twill_test_case.submit_form( **page_inputs ) print "page_inputs (%i)" % i, page_inputs https://bitbucket.org/galaxy/galaxy-central/commits/76bd49e65887/ Changeset: 76bd49e65887 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Previous attempt to allow implicit defaults for conditional test params had many problems. This rewrite has fewer. Affected #: 1 file diff -r aa6ec1842ba60b5ba7af69f8f01624c4902bc2be -r 76bd49e658876f5cc90ec5b80033f13b058593b8 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -3,7 +3,6 @@ from parameters import basic from parameters import grouping from galaxy.util import string_as_bool -from galaxy.util.bunch import Bunch import logging log = logging.getLogger( __name__ ) @@ -76,16 +75,43 @@ yield data_dict def __matching_case_for_value( self, cond, declared_value ): + test_param = cond.test_param + if isinstance(test_param, basic.BooleanToolParameter): + if declared_value is None: + # No explicit value for param in test case, determine from default + query_value = test_param.checked + else: + # Test case supplied value, check cases against this. + query_value = string_as_bool( declared_value ) + matches_declared_value = lambda case_value: string_as_bool(case_value) == query_value + elif isinstance(test_param, basic.SelectToolParameter): + if declared_value is not None: + # Test case supplied explicit value to check against. 
+ matches_declared_value = lambda case_value: case_value == declared_value + elif test_param.static_options: + # No explicit value in test case, not much to do if options are dynamic but + # if static options are available can find the one specified as default or + # fallback on top most option (like GUI). + for (name, value, selected) in test_param.static_options: + if selected: + default_option = name + else: + default_option = test_param.static_options[0] + matches_declared_value = lambda case_value: case_value == default_option + else: + # No explicit value for this param and cannot determine a + # default - give up. Previously this would just result in a key + # error exception. + msg = "Failed to find test parameter specification required for conditional %s" % cond + raise Exception( msg ) + + # Check the tool's defined cases against predicate to determine + # selected or default. for i, case in enumerate( cond.cases ): - if declared_value is not None and (case.value == declared_value): - return case - if declared_value is None: - # TODO: Default might not be top value, fix this. - # TODO: Also may be boolean, got to look at checked. + if matches_declared_value( case.value ): return case else: - return Bunch(value=declared_value, inputs=Bunch(items=lambda: [])) - print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( cond.test_param.name, declared_value ) + log.info("Failed to find case matching test parameter specification for cond %s. Remainder of test behavior is unspecified." % cond) def __split_if_str( self, value ): split = isinstance(value, str) @@ -172,7 +198,8 @@ cond_context = ParamContext( name=value.name, parent_context=parent_context ) case_context = ParamContext( name=value.test_param.name, parent_context=cond_context ) raw_input = case_context.value( raw_inputs ) - case = self.__matching_case_for_value( value, raw_input ) + case_value = raw_input[ 0 ] if raw_input else None + case = self.__matching_case_for_value( value, case_value ) if case: expanded_value = self.__split_if_str( case.value ) expanded_inputs[ case_context.for_state() ] = expanded_value https://bitbucket.org/galaxy/galaxy-central/commits/07d48a9f9a09/ Changeset: 07d48a9f9a09 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Make disambiguate_cond more difficult. 
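Distilled, the rule the rewrite in changeset 76bd49e65887 above applies when a test omits a conditional's test parameter: booleans fall back on their checked= default, selects on the option flagged selected, or failing that the topmost option, as the GUI would. A standalone illustrative sketch, not the actual Galaxy parameter classes:

    def inferred_case_value(declared, param_type, checked=False, static_options=()):
        if declared is not None:
            return declared                  # explicit value from the test XML wins
        if param_type == 'boolean':
            return checked                   # boolean default comes from checked=
        for name, value, selected in static_options:
            if selected:
                return name                  # select default: option marked selected
        if static_options:
            return static_options[0][0]      # else topmost option, like the GUI
        raise ValueError('cannot infer a default for this conditional')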
Affected #: 2 files diff -r 76bd49e658876f5cc90ec5b80033f13b058593b8 -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -9,6 +9,9 @@ from galaxy.model.mapping import context as sa_session from simplejson import dumps, loads +import logging +log = logging.getLogger( __name__ ) + toolbox = None diff -r 76bd49e658876f5cc90ec5b80033f13b058593b8 -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 test/functional/tools/disambiguate_cond.xml --- a/test/functional/tools/disambiguate_cond.xml +++ b/test/functional/tools/disambiguate_cond.xml @@ -1,7 +1,8 @@ <tool id="handle_cond" name="handle_cond"><description>tail-to-head</description> + <!-- --><command> - echo "$p1.p1v $p2.p2v $p3.p3v" > $out_file1 + echo "$p1.p1v $p2.p2v $p3.p3v" > $out_file1; cat "$files.p4.file" >> $out_file1; </command><inputs><conditional name="p1"> @@ -30,7 +31,18 @@ <when value="false"><param name="p3v" value="7" type="integer" /></when> - </conditional> + </conditional> + <conditional name="files"> + <param name="attach_files" type="boolean" checked="true" /> + <when value="true"> + <conditional name="p4"> + <param type="boolean" name="use" /> + <when value="true"> + <param type="data" name="file" /> + </when> + </conditional> + </when> + </conditional></inputs><outputs><data name="out_file1" format="txt" /> @@ -40,9 +52,12 @@ <param name="p1|use" value="True"/><param name="p2|use" value="False"/><param name="p3|use" value="True"/> + <param name="p4|use" value="True" /> + <param name="p4|file" value="simple_line.txt" /><output name="out_file1"><assert_contents><has_line line="4 7 4" /> + <has_line line="This is a line of text." /></assert_contents></output></test> https://bitbucket.org/galaxy/galaxy-central/commits/265e67c9dca8/ Changeset: 265e67c9dca8 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Allow robust disambiguation of repeat statements in tool tests. Imagine a tool with a repeat statement like: <repeat name="rinst" title="Repeat Parameter"><param name="int_param" type="integer" value="1" /><param name="float_param" type="float" value="2.0" /></repeat> Three test instances overriding the int_param default in all three, but leaving the float_param default in place for the first and last, can be specified as follows: <param name="rinst_0|int_param" value="4" /><param name="rinst_1|int_param" value="5" /><param name="rinst_2|int_param" value="6" /><param name="rinst_1|float_param" value="4.0" /> This syntax can be mixed and matched with specifying conditionals and nested repeats, etc. One could imagine an alternative syntax like: <param name="rinst|int_param" value="4" /><param name="rinst|int_param" value="5" /><param name="rinst|int_param" value="6" /><param name="rinst|float_param" value="4.0" /> Upon consideration, I have determined that this ambiguous syntax is more difficult to reason about, leaves too much ambiguity in place, especially in the context of defaults and nested elements, and is harder to support. So the plan is to not support it at this time.

The following changeset will add "the right" way to do this anyway: <repeat name="rinst"><param name="rinst|int_param" value="4" /></repeat><repeat name="rinst"><param name="rinst|int_param" value="5" /><param name="rinst|float_param" value="4.0" /></repeat><repeat name="rinst"><param name="rinst|int_param" value="6" /></repeat> Affected #: 7 files diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -139,7 +139,7 @@ # history, but we'll keep it unique per set of tests - use i (test #) # and composite_data_names_counter (instance per test #) composite_data_names_counter = 0 - raw_inputs = {} + raw_inputs = [] for param_elem in test_elem.findall( "param" ): attrib = dict( param_elem.attrib ) if 'values' in attrib: @@ -180,9 +180,7 @@ # take precedence attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } ) name = attrib.pop( 'name' ) - if name not in raw_inputs: - raw_inputs[ name ] = [] - raw_inputs[ name ].append( ( value, attrib ) ) + raw_inputs.append( ( name, value, attrib ) ) self.inputs = self.__process_raw_inputs( self.tool.inputs, raw_inputs ) def __process_raw_inputs( self, tool_inputs, raw_inputs, parent_context=None ): @@ -197,8 +195,8 @@ if isinstance( value, grouping.Conditional ): cond_context = ParamContext( name=value.name, parent_context=parent_context ) case_context = ParamContext( name=value.test_param.name, parent_context=cond_context ) - raw_input = case_context.value( raw_inputs ) - case_value = raw_input[ 0 ] if raw_input else None + raw_input = case_context.extract_value( raw_inputs ) + case_value = raw_input[ 1 ] if raw_input else None case = self.__matching_case_for_value( value, case_value ) if case: expanded_value = self.__split_if_str( case.value ) @@ -220,9 +218,9 @@ repeat_index += 1 else: context = ParamContext( name=value.name, parent_context=parent_context ) - raw_input = context.value( raw_inputs ) + raw_input = context.extract_value( raw_inputs ) if raw_input: - (param_value, param_extra) = raw_input + (name, param_value, param_extra) = raw_input if isinstance( value, basic.DataToolParameter ): processed_value = [ self.__add_uploaded_dataset( context.for_state(), param_value, param_extra, value ) ] else: @@ -345,24 +343,33 @@ def param_names( self ): for parent_context_param in self.parent_context.param_names(): - yield "%s|%s" % ( parent_context_param, self.name ) - yield self.name + if self.index is not None: + yield "%s|%s_%d" % ( parent_context_param, self.name, self.index ) + else: + yield "%s|%s" % ( parent_context_param, self.name ) + if self.index is not None: + yield "%s_%d" % ( self.name, self.index ) + else: + yield self.name - def value( self, declared_inputs ): + def extract_value( self, raw_inputs ): for param_name in self.param_names(): - if param_name in declared_inputs: - index = self.get_index() - try: - return declared_inputs[ param_name ][ index ] - except IndexError: - return None + value = self.__raw_param_found( param_name, raw_inputs) + if value: + return value return None - def get_index( self ): - if self.index is not None: - return self.index + def __raw_param_found( self, param_name, raw_inputs ): + index = None + for i, raw_input in enumerate( raw_inputs ): + if raw_input[ 0 ] == param_name: + index = i + if index is not None: + raw_input = raw_inputs[ index ] + del raw_inputs[ index ] + return raw_input else: - return self.parent_context.get_index() + return None 
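The new extract_value/__raw_param_found pair works because raw_inputs is now a plain list of (name, value, attrib) tuples and every successful match is deleted, so a name that appears several times binds each occurrence to a successive repeat index. In miniature (sample data invented):

    def claim(param_name, raw_inputs):
        index = None
        for i, raw_input in enumerate(raw_inputs):
            if raw_input[0] == param_name:
                index = i                    # last occurrence wins, as above
        if index is None:
            return None
        return raw_inputs.pop(index)         # consume it so it cannot match twice

    raw_inputs = [("queries_0|input", "a.txt", {}), ("queries_1|input", "b.txt", {})]
    claim("queries_0|input", raw_inputs)     # -> ('queries_0|input', 'a.txt', {})
    len(raw_inputs)                          # -> 1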
class RootParamContext(object): diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -325,10 +325,16 @@ else: job_finish_by_output_count = False + inputs_tree = testdef.inputs + # # # HACK: Flatten single-value lists. Required when using expand_grouping + # #for key, value in inputs_tree.iteritems(): + # if isinstance(value, list) and len(value) == 1: + # inputs_tree[key] = value[0] + # Strip out just a given page of inputs from inputs "tree". def filter_page_inputs( n ): page_input_keys = testdef.tool.inputs_by_page[ n ].keys() - return dict( [ (k, v) for k, v in testdef.inputs.iteritems() if k.split("|")[0] in page_input_keys ] ) + return dict( [ (k, v) for k, v in inputs_tree.iteritems() if k.split("|")[0] or k.split("|")[0].resplit("_", 1)[0] in page_input_keys ] ) # Do the first page page_inputs = filter_page_inputs( 0 ) diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/tools/disambiguate_repeats.xml --- /dev/null +++ b/test/functional/tools/disambiguate_repeats.xml @@ -0,0 +1,28 @@ +<tool id="disambiguate_repeats" name="disambiguate_repeats"> + <command> + cat #for $q in $queries# ${q.input} #end for# #for $q in $more_queries# ${q.input} #end for# > $out_file1 + </command> + <inputs> + <repeat name="queries" title="Dataset"> + <param name="input" type="data" label="Select" /> + </repeat> + <repeat name="more_queries" title="Dataset"> + <param name="input" type="data" label="Select" /> + </repeat> + </inputs> + <outputs> + <data name="out_file1" format="txt" /> + </outputs> + <tests> + <!-- Can use prefixes to disambiguate inputs or force order. --> + <test> + <param name="queries_1|input" value="simple_line_alternative.txt"/> + <param name="queries_0|input" value="simple_line.txt"/> + + <param name="more_queries_1|input" value="simple_line_alternative.txt" /> + <param name="more_queries_0|input" value="simple_line.txt"/> + + <output name="out_file1" file="simple_lines_interleaved.txt"/> + </test> + </tests> +</tool> diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/tools/multi_repeats.xml --- a/test/functional/tools/multi_repeats.xml +++ b/test/functional/tools/multi_repeats.xml @@ -21,6 +21,12 @@ <param name="input2" value="simple_line.txt"/><output name="out_file1" file="simple_line_x2.txt"/></test> + <!-- + Following tests continue to work, but for anything more + advanced than this simple case these should be considered + something of an anti-pattern - see disambiguate_repeats.xml + for superior syntax. + --><test><param name="input1" value="simple_line.txt"/><param name="input2" value="simple_line.txt"/> diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/tools/samples_tool_conf.xml --- a/test/functional/tools/samples_tool_conf.xml +++ b/test/functional/tools/samples_tool_conf.xml @@ -11,4 +11,5 @@ <tool file="composite_output.xml" /><tool file="metadata.xml" /><tool file="output_order.xml" /> + <tool file="disambiguate_repeats.xml" /></toolbox> \ No newline at end of file diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/tools/test-data/simple_line_alternative.txt --- /dev/null +++ b/test/functional/tools/test-data/simple_line_alternative.txt @@ -0,0 +1,1 @@ +This is a different line of text. 
\ No newline at end of file diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/tools/test-data/simple_lines_interleaved.txt --- /dev/null +++ b/test/functional/tools/test-data/simple_lines_interleaved.txt @@ -0,0 +1,4 @@ +This is a line of text. +This is a different line of text. +This is a line of text. +This is a different line of text. \ No newline at end of file https://bitbucket.org/galaxy/galaxy-central/commits/e86455cbd0a5/ Changeset: e86455cbd0a5 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Tree-like input specification in tool XML file. Allow nestable <repeat> and <conditional> statements in test tool definitions. For instance, imagine a tool with a repeat statement like: <repeat name="rinst" title="Repeat Parameter"><param name="int_param" type="integer" value="1" /><param name="float_param" type="float" value="2.0" /></repeat> Three test instances overriding the int_param default in all three, but leaving the float_param default in place for the first and last, can be specified as follows: <repeat name="rinst"><param name="rinst|int_param" value="4" /></repeat><repeat name="rinst"><param name="rinst|int_param" value="5" /><param name="rinst|float_param" value="4.0" /></repeat><repeat name="rinst"><param name="rinst|int_param" value="6" /></repeat> Likewise, <conditional name="cinst"> can be used analogously, though this is for grouping and disambiguation - it obviously does not allow multiple instances to be specified. Affected #: 2 files diff -r 265e67c9dca81c496ce42ba6517b10444ad56d59 -r e86455cbd0a554613eee96852a1240205f65696b lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -127,6 +127,7 @@ # features or workarounds. self.interactor = test_elem.get( 'interactor', default_interactor ) + self.__preprocess_input_elems( test_elem ) self.__parse_inputs_elems( test_elem, i ) self.__parse_output_elems( test_elem ) @@ -134,6 +135,9 @@ self.error = True self.exception = e + def __preprocess_input_elems( self, test_elem ): + expand_input_elems( test_elem ) + def __parse_inputs_elems( self, test_elem, i ): # Composite datasets need a unique name: each test occurs in a fresh # history, but we'll keep it unique per set of tests - use i (test #) @@ -385,3 +389,46 @@ def get_index( self ): return 0 + + +def expand_input_elems( root_elem, prefix="" ): + __append_prefix_to_params( root_elem, prefix ) + + repeat_elems = root_elem.findall( 'repeat' ) + indices = {} + for repeat_elem in repeat_elems: + name = repeat_elem.get( "name" ) + if name not in indices: + indices[ name ] = 0 + index = 0 + else: + index = indices[ name ] + 1 + indices[ name ] = index + + new_prefix = __prefix_join( prefix, name, index=index ) + expand_input_elems( repeat_elem, new_prefix ) + __pull_up_params( root_elem, repeat_elem ) + root_elem.remove( repeat_elem ) + + cond_elems = root_elem.findall( 'conditional' ) + for cond_elem in cond_elems: + new_prefix = __prefix_join( prefix, cond_elem.get( "name" ) ) + expand_input_elems( cond_elem, new_prefix ) + __pull_up_params( root_elem, cond_elem ) + root_elem.remove( cond_elem ) + + +def __append_prefix_to_params( elem, prefix ): + for param_elem in elem.findall( 'param' ): + param_elem.set( "name", __prefix_join( prefix, param_elem.get( "name" ) ) ) + + +def __pull_up_params( parent_elem, child_elem ): + for param_elem in child_elem.findall( 'param' ): + parent_elem.append( param_elem ) + child_elem.remove( param_elem ) + + +def __prefix_join( prefix, name, index=None ): + name = name
if index is None else "%s_%d" % ( name, index ) + return name if not prefix else "%s|%s" % ( prefix, name ) diff -r 265e67c9dca81c496ce42ba6517b10444ad56d59 -r e86455cbd0a554613eee96852a1240205f65696b test/functional/tools/disambiguate_repeats.xml --- a/test/functional/tools/disambiguate_repeats.xml +++ b/test/functional/tools/disambiguate_repeats.xml @@ -24,5 +24,22 @@ <output name="out_file1" file="simple_lines_interleaved.txt"/></test> + + <test> + <repeat name="queries"> + <param name="input" value="simple_line.txt"/> + </repeat> + <repeat name="queries"> + <param name="input" value="simple_line_alternative.txt"/> + </repeat> + <repeat name="more_queries"> + <param name="input" value="simple_line.txt"/> + </repeat> + <repeat name="more_queries"> + <param name="input" value="simple_line_alternative.txt"/> + </repeat> + <output name="out_file1" file="simple_lines_interleaved.txt"/> + </test> + </tests></tool> https://bitbucket.org/galaxy/galaxy-central/commits/d4f7fbbc3017/ Changeset: d4f7fbbc3017 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Lazy load tool test cases. Running any tool test, will force everything in toolbox to parse out, but at least just starting Galaxy doesn't require parsing all test cases. Affected #: 1 file diff -r e86455cbd0a554613eee96852a1240205f65696b -r d4f7fbbc3017529eef45719fa8a6c54a12e6de78 lib/galaxy/tools/__init__.py --- a/lib/galaxy/tools/__init__.py +++ b/lib/galaxy/tools/__init__.py @@ -1219,14 +1219,9 @@ for key, value in uihints_elem.attrib.iteritems(): self.uihints[ key ] = value # Tests - tests_elem = root.find( "tests" ) - if tests_elem: - try: - self.tests = parse_tests_elem( self, tests_elem ) - except: - log.exception( "Failed to parse tool tests" ) - else: - self.tests = None + self.__tests_elem = root.find( "tests" ) + self.__tests_populated = False + # Requirements (dependencies) self.requirements = parse_requirements_from_xml( root ) # Determine if this tool can be used in workflows @@ -1237,6 +1232,21 @@ self.trackster_conf = TracksterConfig.parse( trackster_conf ) else: self.trackster_conf = None + + @property + def tests( self ): + if not self.__tests_populated: + tests_elem = self.__tests_elem + if tests_elem: + try: + self.__tests = parse_tests_elem( self, tests_elem ) + except: + log.exception( "Failed to parse tool tests" ) + else: + self.__tests = None + self.__tests_populated = True + return self.__tests + def parse_inputs( self, root ): """ Parse the "<inputs>" element and create appropriate `ToolParameter`s. https://bitbucket.org/galaxy/galaxy-central/commits/0923ef9b1edb/ Changeset: 0923ef9b1edb User: jmchilton Date: 2013-11-22 02:17:13 Summary: Small clean up of argument processing in functional_tests.py Affected #: 1 file diff -r d4f7fbbc3017529eef45719fa8a6c54a12e6de78 -r 0923ef9b1edba9d90de4b9d032f4818942489df1 scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -176,12 +176,11 @@ tool_path = os.environ.get( 'GALAXY_TEST_TOOL_PATH', 'tools' ) if 'HTTP_ACCEPT_LANGUAGE' not in os.environ: os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales - testing_migrated_tools = '-migrated' in sys.argv - testing_installed_tools = '-installed' in sys.argv + testing_migrated_tools = __check_arg( '-migrated' ) + testing_installed_tools = __check_arg( '-installed' ) datatypes_conf_override = None if testing_migrated_tools or testing_installed_tools: - sys.argv.pop() # Store a jsonified dictionary of tool_id : GALAXY_TEST_FILE_DIR pairs. 
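The lazy tests property introduced in changeset d4f7fbbc3017 above is the usual memoized-property idiom; a self-contained sketch with a stand-in parser (names hypothetical):

    def parse_tests_elem(tool, elem):
        return "parsed: %s" % elem  # stand-in for the real test parser

    class LazyTool(object):
        # Hypothetical minimal version of the property added above.
        def __init__(self, tests_elem):
            self._tests_elem = tests_elem
            self._tests_populated = False
            self._tests = None

        @property
        def tests(self):
            if not self._tests_populated:    # parse on first access only
                if self._tests_elem is not None:
                    self._tests = parse_tests_elem(self, self._tests_elem)
                self._tests_populated = True
            return self._tests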
galaxy_tool_shed_test_file = 'shed_tools_dict' # We need the upload tool for functional tests, so we'll create a temporary tool panel config that defines it. @@ -198,9 +197,8 @@ # Exclude all files except test_toolbox.py. ignore_files = ( re.compile( r'^test_[adghlmsu]*' ), re.compile( r'^test_ta*' ) ) else: - framework_test = '-framework' in sys.argv # Run through suite of tests testing framework. + framework_test = __check_arg( '-framework' ) # Run through suite of tests testing framework. if framework_test: - sys.argv.pop() framework_tool_dir = os.path.join('test', 'functional', 'tools') tool_conf = os.path.join( framework_tool_dir, 'samples_tool_conf.xml' ) datatypes_conf_override = os.path.join( framework_tool_dir, 'sample_datatypes_conf.xml' ) @@ -522,5 +520,19 @@ else: return 1 + +def __check_arg( name, param=False ): + try: + index = sys.argv.index( name ) + del sys.argv[ index ] + if param: + ret_val = sys.argv[ index ] + del sys.argv[ index ] + else: + ret_val = True + except ValueError: + ret_val = False + return ret_val + if __name__ == "__main__": sys.exit( main() ) https://bitbucket.org/galaxy/galaxy-central/commits/d5c0f73b3db4/ Changeset: d5c0f73b3db4 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Refactor interactors out of test_toolbox into base. For potential reuse in other tests. Affected #: 2 files diff -r 0923ef9b1edba9d90de4b9d032f4818942489df1 -r d5c0f73b3db4cdb0b984b8a64da72dfe64ab6f0d test/base/interactor.py --- /dev/null +++ b/test/base/interactor.py @@ -0,0 +1,380 @@ +import os +from galaxy.tools.parameters import grouping +import galaxy.model +from galaxy.model.orm import and_, desc +from galaxy.model.mapping import context as sa_session +from simplejson import dumps, loads + + +def build_interactor( test_case, type="api" ): + interactor_class = GALAXY_INTERACTORS[ type ] + return interactor_class( test_case ) + + +class GalaxyInteractorApi( object ): + + def __init__( self, twill_test_case ): + self.twill_test_case = twill_test_case + self.api_url = "%s/api" % twill_test_case.url.rstrip("/") + self.api_key = self.__get_user_key( twill_test_case.user_api_key, twill_test_case.master_api_key ) + self.uploads = {} + + def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ): + self.twill_test_case.wait_for( lambda: not self.__history_ready( history_id ), maxseconds=maxseconds) + hid = output_data.get( 'id' ) + fetcher = self.__dataset_fetcher( history_id ) + ## TODO: Twill version verifys dataset is 'ok' in here. + self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id ) + metadata = attributes.get( 'metadata', {} ) + if metadata: + dataset = self.__get( "histories/%s/contents/%s" % ( history_id, hid ) ).json() + for key, value in metadata.iteritems(): + dataset_key = "metadata_%s" % key + try: + dataset_value = dataset.get( dataset_key, None ) + if dataset_value != value: + msg = "Dataset metadata verification for [%s] failed, expected [%s] but found [%s]." + msg_params = ( key, value, dataset_value ) + msg = msg % msg_params + raise Exception( msg ) + except KeyError: + msg = "Failed to verify dataset metadata, metadata key [%s] was not found." 
% key + raise Exception( msg ) + + def get_job_stream( self, history_id, output_data, stream ): + hid = output_data.get( 'id' ) + data = self.__get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json() + return data.get( stream, '' ) + + def new_history( self ): + history_json = self.__post( "histories", {"name": "test_history"} ).json() + return history_json[ 'id' ] + + def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ): + fname = test_data[ 'fname' ] + tool_input = { + "file_type": test_data[ 'ftype' ], + "dbkey": test_data[ 'dbkey' ], + } + for elem in test_data.get('metadata', []): + tool_input["files_metadata|%s" % elem.get( 'name' )] = elem.get( 'value' ) + + composite_data = test_data[ 'composite_data' ] + if composite_data: + files = {} + for i, composite_file in enumerate( composite_data ): + file_name = self.twill_test_case.get_filename( composite_file.get( 'value' ), shed_tool_id=shed_tool_id ) + files["files_%s|file_data" % i] = open( file_name, 'rb' ) + tool_input.update({ + #"files_%d|NAME" % i: name, + "files_%d|type" % i: "upload_dataset", + ## TODO: + #"files_%d|space_to_tab" % i: composite_file.get( 'space_to_tab', False ) + }) + name = test_data[ 'name' ] + else: + file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id ) + name = test_data.get( 'name', None ) + if not name: + name = os.path.basename( file_name ) + + tool_input.update({ + "files_0|NAME": name, + "files_0|type": "upload_dataset", + }) + files = { + "files_0|file_data": open( file_name, 'rb') + } + submit_response_object = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files ) + submit_response = submit_response_object.json() + try: + dataset = submit_response["outputs"][0] + except KeyError: + raise Exception(submit_response) + #raise Exception(str(dataset)) + hid = dataset['id'] + self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = self.uploads[ name ] = {"src": "hda", "id": hid} + return self.__wait_for_history( history_id ) + + def run_tool( self, testdef, history_id ): + # We need to handle the case where we've uploaded a valid compressed file since the upload + # tool will have uncompressed it on the fly. + + inputs_tree = testdef.inputs.copy() + for key, value in inputs_tree.iteritems(): + values = [value] if not isinstance(value, list) else value + for value in values: + if value in self.uploads: + inputs_tree[ key ] = self.uploads[ value ] + + # # HACK: Flatten single-value lists. Required when using expand_grouping + for key, value in inputs_tree.iteritems(): + if isinstance(value, list) and len(value) == 1: + inputs_tree[key] = value[0] + + datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=inputs_tree ) + datasets_object = datasets.json() + try: + return self.__dictify_outputs( datasets_object ) + except KeyError: + raise Exception( datasets_object[ 'message' ] ) + + def __dictify_outputs( self, datasets_object ): + ## Convert outputs list to a dictionary that can be accessed by + ## output_name so can be more flexiable about ordering of outputs + ## but also allows fallback to legacy access as list mode. 
+ outputs_dict = {} + index = 0 + for output in datasets_object[ 'outputs' ]: + outputs_dict[ index ] = outputs_dict[ output.get("output_name") ] = output + index += 1 + return outputs_dict + + def output_hid( self, output_data ): + return output_data[ 'id' ] + + def delete_history( self, history ): + return None + + def __wait_for_history( self, history_id ): + def wait(): + while not self.__history_ready( history_id ): + pass + return wait + + def __history_ready( self, history_id ): + history_json = self.__get( "histories/%s" % history_id ).json() + state = history_json[ 'state' ] + if state == 'ok': + return True + elif state == 'error': + raise Exception("History in error state.") + return False + + def __submit_tool( self, history_id, tool_id, tool_input, extra_data={}, files=None ): + data = dict( + history_id=history_id, + tool_id=tool_id, + inputs=dumps( tool_input ), + **extra_data + ) + return self.__post( "tools", files=files, data=data ) + + def __get_user_key( self, user_key, admin_key ): + if user_key: + return user_key + all_users = self.__get( 'users', key=admin_key ).json() + try: + test_user = [ user for user in all_users if user["email"] == 'test@bx.psu.edu' ][0] + except IndexError: + data = dict( + email='test@bx.psu.edu', + password='testuser', + username='admin-user', + ) + test_user = self.__post( 'users', data, key=admin_key ).json() + return self.__post( "users/%s/api_key" % test_user['id'], key=admin_key ).json() + + def __dataset_fetcher( self, history_id ): + def fetcher( hda_id, base_name=None ): + url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id) + if base_name: + url += "&filename=%s" % base_name + return self.__get( url ).content + + return fetcher + + def __post( self, path, data={}, files=None, key=None): + if not key: + key = self.api_key + data = data.copy() + data['key'] = key + return post_request( "%s/%s" % (self.api_url, path), data=data, files=files ) + + def __get( self, path, data={}, key=None ): + if not key: + key = self.api_key + data = data.copy() + data['key'] = key + if path.startswith("/api"): + path = path[ len("/api"): ] + url = "%s/%s" % (self.api_url, path) + return get_request( url, params=data ) + + +class GalaxyInteractorTwill( object ): + + def __init__( self, twill_test_case ): + self.twill_test_case = twill_test_case + + def verify_output( self, history, output_data, outfile, attributes, shed_tool_id, maxseconds ): + hid = output_data.get( 'hid' ) + self.twill_test_case.verify_dataset_correctness( outfile, hid=hid, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds ) + + def get_job_stream( self, history_id, output_data, stream ): + return self.twill_test_case._get_job_stream_output( output_data.get( 'id' ), stream=stream, format=False ) + + def stage_data_async( self, test_data, history, shed_tool_id, async=True ): + name = test_data.get( 'name', None ) + if name: + async = False + self.twill_test_case.upload_file( test_data['fname'], + ftype=test_data['ftype'], + dbkey=test_data['dbkey'], + metadata=test_data['metadata'], + composite_data=test_data['composite_data'], + shed_tool_id=shed_tool_id, + wait=(not async) ) + if name: + hda_id = self.twill_test_case.get_history_as_data_list()[-1].get( 'id' ) + try: + self.twill_test_case.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name ) + except: + print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name) + return lambda: self.twill_test_case.wait() + + def run_tool( self, testdef, test_history ): + # We need 
to handle the case where we've uploaded a valid compressed file since the upload + # tool will have uncompressed it on the fly. + + # Lose tons of information to accomodate legacy repeat handling. + all_inputs = {} + for key, value in testdef.inputs.iteritems(): + all_inputs[ key.split("|")[-1] ] = value + + # See if we have a grouping.Repeat element + repeat_name = None + for input_name, input_value in testdef.tool.inputs_by_page[0].items(): + if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: # default behavior is to test 1 repeat, for backwards compatibility + if not input_value.min: # If input_value.min == 1, the element is already on the page don't add new element. + repeat_name = input_name + break + + #check if we need to verify number of outputs created dynamically by tool + if testdef.tool.force_history_refresh: + job_finish_by_output_count = len( self.twill_test_case.get_history_as_data_list() ) + else: + job_finish_by_output_count = False + + inputs_tree = testdef.inputs + # # # HACK: Flatten single-value lists. Required when using expand_grouping + # #for key, value in inputs_tree.iteritems(): + # if isinstance(value, list) and len(value) == 1: + # inputs_tree[key] = value[0] + + # Strip out just a given page of inputs from inputs "tree". + def filter_page_inputs( n ): + page_input_keys = testdef.tool.inputs_by_page[ n ].keys() + return dict( [ (k, v) for k, v in inputs_tree.iteritems() if k.split("|")[0] or k.split("|")[0].resplit("_", 1)[0] in page_input_keys ] ) + + # Do the first page + page_inputs = filter_page_inputs( 0 ) + + # Run the tool + self.twill_test_case.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs ) + print "page_inputs (0)", page_inputs + # Do other pages if they exist + for i in range( 1, testdef.tool.npages ): + page_inputs = filter_page_inputs( i ) + self.twill_test_case.submit_form( **page_inputs ) + print "page_inputs (%i)" % i, page_inputs + + # Check the results ( handles single or multiple tool outputs ). Make sure to pass the correct hid. + # The output datasets from the tool should be in the same order as the testdef.outputs. 
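The loop just below re-reads the history until the dataset count accounts for every declared output. The bookkeeping is easier to verify with concrete numbers; this is a standalone sketch of the condition, not Galaxy code:

    def all_outputs_present( count_before_run, expected_outputs, data_list ):
        # job_finish_by_output_count is the history length before the tool
        # ran; the new datasets must cover every declared test output.
        return len( data_list ) - count_before_run >= len( expected_outputs )

    # Three datasets pre-existed and the test declares two outputs:
    assert not all_outputs_present( 3, [ "out1", "out2" ], [ "d1", "d2", "d3", "d4" ] )
    assert all_outputs_present( 3, [ "out1", "out2" ], [ "d1", "d2", "d3", "d4", "d5" ] )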
+ data_list = None + while data_list is None: + data_list = self.twill_test_case.get_history_as_data_list() + if job_finish_by_output_count and len( testdef.outputs ) > ( len( data_list ) - job_finish_by_output_count ): + data_list = None + return data_list + + def new_history( self ): + # Start with a new history + self.twill_test_case.logout() + self.twill_test_case.login( email='test@bx.psu.edu' ) + admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email == 'test@bx.psu.edu' ).one() + self.twill_test_case.new_history() + latest_history = sa_session.query( galaxy.model.History ) \ + .filter( and_( galaxy.model.History.table.c.deleted == False, + galaxy.model.History.table.c.user_id == admin_user.id ) ) \ + .order_by( desc( galaxy.model.History.table.c.create_time ) ) \ + .first() + assert latest_history is not None, "Problem retrieving latest_history from database" + if len( self.twill_test_case.get_history_as_data_list() ) > 0: + raise AssertionError("ToolTestCase.do_it failed") + return latest_history + + def delete_history( self, latest_history ): + self.twill_test_case.delete_history( id=self.twill_test_case.security.encode_id( latest_history.id ) ) + + def output_hid( self, output_data ): + return output_data.get( 'hid' ) + + +GALAXY_INTERACTORS = { + 'api': GalaxyInteractorApi, + 'twill': GalaxyInteractorTwill, +} + + +# Lets just try to use requests if it is available, but if not provide fallback +# on custom implementations of limited requests get/post functionality. +try: + from requests import get as get_request + from requests import post as post_request +except ImportError: + import urllib2 + import httplib + + class RequestsLikeResponse( object ): + + def __init__( self, content ): + self.content = content + + def json( self ): + return loads( self.content ) + + def get_request( url, params={} ): + argsep = '&' + if '?' not in url: + argsep = '?' 
+ url = url + argsep + '&'.join( [ '%s=%s' % (k, v) for k, v in params.iteritems() ] ) + #req = urllib2.Request( url, headers = { 'Content-Type': 'application/json' } ) + return RequestsLikeResponse(urllib2.urlopen( url ).read() ) + + def post_request( url, data, files ): + parsed_url = urllib2.urlparse.urlparse( url ) + return __post_multipart( host=parsed_url.netloc, selector=parsed_url.path, fields=data.iteritems(), files=(files or {}).iteritems() ) + + # http://stackoverflow.com/a/681182 + def __post_multipart(host, selector, fields, files): + content_type, body = __encode_multipart_formdata(fields, files) + h = httplib.HTTP(host) + h.putrequest('POST', selector) + h.putheader('content-type', content_type) + h.putheader('content-length', str(len(body))) + h.endheaders() + h.send(body) + errcode, errmsg, headers = h.getreply() + return RequestsLikeResponse(h.file.read()) + + def __encode_multipart_formdata(fields, files): + LIMIT = '----------lImIt_of_THE_fIle_eW_$' + CRLF = '\r\n' + L = [] + for (key, value) in fields: + L.append('--' + LIMIT) + L.append('Content-Disposition: form-data; name="%s"' % key) + L.append('') + L.append(value) + for (key, value) in files: + L.append('--' + LIMIT) + L.append('Content-Disposition: form-data; name="%s"; filename="%s";' % (key, key)) + L.append('Content-Type: application/octet-stream') + L.append('') + L.append(value.read()) + L.append('--' + LIMIT + '--') + L.append('') + body = CRLF.join(L) + content_type = 'multipart/form-data; boundary=%s' % LIMIT + return content_type, body diff -r 0923ef9b1edba9d90de4b9d032f4818942489df1 -r d5c0f73b3db4cdb0b984b8a64da72dfe64ab6f0d test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -1,14 +1,7 @@ +import new import sys -import new -import os -from galaxy.tools.parameters import grouping -from galaxy.util import string_as_bool from base.twilltestcase import TwillTestCase -import galaxy.model -from galaxy.model.orm import and_, desc -from galaxy.model.mapping import context as sa_session -from simplejson import dumps, loads - +from base.interactor import build_interactor import logging log = logging.getLogger( __name__ ) @@ -45,9 +38,7 @@ galaxy_interactor.delete_history( test_history ) def __galaxy_interactor( self, testdef ): - interactor_key = testdef.interactor - interactor_class = GALAXY_INTERACTORS[ interactor_key ] - return interactor_class( self ) + return build_interactor( self, testdef.interactor ) def __handle_test_def_errors(self, testdef): # If the test generation had an error, raise @@ -79,307 +70,6 @@ raise -class GalaxyInteractorApi( object ): - - def __init__( self, twill_test_case ): - self.twill_test_case = twill_test_case - self.api_url = "%s/api" % twill_test_case.url.rstrip("/") - self.api_key = self.__get_user_key( twill_test_case.user_api_key, twill_test_case.master_api_key ) - self.uploads = {} - - def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ): - self.twill_test_case.wait_for( lambda: not self.__history_ready( history_id ), maxseconds=maxseconds) - hid = output_data.get( 'id' ) - fetcher = self.__dataset_fetcher( history_id ) - ## TODO: Twill version verifys dataset is 'ok' in here. 
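The metadata check in this method leans on a naming convention: the history contents API flattens each metadata value onto the dataset dictionary under a metadata_-prefixed key. Reduced to its core, with a hand-built dataset dict standing in for the API response:

    expected_metadata = { "data_lines": 2, "dbkey": "hg17" }
    dataset = { "state": "ok", "metadata_data_lines": 2, "metadata_dbkey": "hg17" }
    for key, value in expected_metadata.items():
        dataset_value = dataset.get( "metadata_%s" % key, None )
        if dataset_value != value:
            msg = "Dataset metadata verification for [%s] failed, expected [%s] but found [%s]."
            raise Exception( msg % ( key, value, dataset_value ) )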
- self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id ) - metadata = attributes.get( 'metadata', {} ) - if metadata: - dataset = self.__get( "histories/%s/contents/%s" % ( history_id, hid ) ).json() - for key, value in metadata.iteritems(): - dataset_key = "metadata_%s" % key - try: - dataset_value = dataset.get( dataset_key, None ) - if dataset_value != value: - msg = "Dataset metadata verification for [%s] failed, expected [%s] but found [%s]." - msg_params = ( key, value, dataset_value ) - msg = msg % msg_params - raise Exception( msg ) - except KeyError: - msg = "Failed to verify dataset metadata, metadata key [%s] was not found." % key - raise Exception( msg ) - - def get_job_stream( self, history_id, output_data, stream ): - hid = output_data.get( 'id' ) - data = self.__get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json() - return data.get( stream, '' ) - - def new_history( self ): - history_json = self.__post( "histories", {"name": "test_history"} ).json() - return history_json[ 'id' ] - - def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ): - fname = test_data[ 'fname' ] - tool_input = { - "file_type": test_data[ 'ftype' ], - "dbkey": test_data[ 'dbkey' ], - } - for elem in test_data.get('metadata', []): - tool_input["files_metadata|%s" % elem.get( 'name' )] = elem.get( 'value' ) - - composite_data = test_data[ 'composite_data' ] - if composite_data: - files = {} - for i, composite_file in enumerate( composite_data ): - file_name = self.twill_test_case.get_filename( composite_file.get( 'value' ), shed_tool_id=shed_tool_id ) - files["files_%s|file_data" % i] = open( file_name, 'rb' ) - tool_input.update({ - #"files_%d|NAME" % i: name, - "files_%d|type" % i: "upload_dataset", - ## TODO: - #"files_%d|space_to_tab" % i: composite_file.get( 'space_to_tab', False ) - }) - name = test_data[ 'name' ] - else: - file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id ) - name = test_data.get( 'name', None ) - if not name: - name = os.path.basename( file_name ) - - tool_input.update({ - "files_0|NAME": name, - "files_0|type": "upload_dataset", - }) - files = { - "files_0|file_data": open( file_name, 'rb') - } - submit_response_object = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files ) - submit_response = submit_response_object.json() - try: - dataset = submit_response["outputs"][0] - except KeyError: - raise Exception(submit_response) - #raise Exception(str(dataset)) - hid = dataset['id'] - self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = self.uploads[ name ] = {"src": "hda", "id": hid} - return self.__wait_for_history( history_id ) - - def run_tool( self, testdef, history_id ): - # We need to handle the case where we've uploaded a valid compressed file since the upload - # tool will have uncompressed it on the fly. - - inputs_tree = testdef.inputs.copy() - for key, value in inputs_tree.iteritems(): - values = [value] if not isinstance(value, list) else value - for value in values: - if value in self.uploads: - inputs_tree[ key ] = self.uploads[ value ] - - # # HACK: Flatten single-value lists. 
Required when using expand_grouping - for key, value in inputs_tree.iteritems(): - if isinstance(value, list) and len(value) == 1: - inputs_tree[key] = value[0] - - log.info( "Submiting tool with params %s" % inputs_tree ) - datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=inputs_tree ) - datasets_object = datasets.json() - try: - return self.__dictify_outputs( datasets_object ) - except KeyError: - raise Exception( datasets_object[ 'message' ] ) - - def __dictify_outputs( self, datasets_object ): - ## Convert outputs list to a dictionary that can be accessed by - ## output_name so can be more flexiable about ordering of outputs - ## but also allows fallback to legacy access as list mode. - outputs_dict = {} - index = 0 - for output in datasets_object[ 'outputs' ]: - outputs_dict[ index ] = outputs_dict[ output.get("output_name") ] = output - index += 1 - return outputs_dict - - def output_hid( self, output_data ): - return output_data[ 'id' ] - - def delete_history( self, history ): - return None - - def __wait_for_history( self, history_id ): - def wait(): - while not self.__history_ready( history_id ): - pass - return wait - - def __history_ready( self, history_id ): - history_json = self.__get( "histories/%s" % history_id ).json() - state = history_json[ 'state' ] - if state == 'ok': - return True - elif state == 'error': - raise Exception("History in error state.") - return False - - def __submit_tool( self, history_id, tool_id, tool_input, extra_data={}, files=None ): - data = dict( - history_id=history_id, - tool_id=tool_id, - inputs=dumps( tool_input ), - **extra_data - ) - return self.__post( "tools", files=files, data=data ) - - def __get_user_key( self, user_key, admin_key ): - if user_key: - return user_key - all_users = self.__get( 'users', key=admin_key ).json() - try: - test_user = [ user for user in all_users if user["email"] == 'test@bx.psu.edu' ][0] - except IndexError: - data = dict( - email='test@bx.psu.edu', - password='testuser', - username='admin-user', - ) - test_user = self.__post( 'users', data, key=admin_key ).json() - return self.__post( "users/%s/api_key" % test_user['id'], key=admin_key ).json() - - def __dataset_fetcher( self, history_id ): - def fetcher( hda_id, base_name=None ): - url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id) - if base_name: - url += "&filename=%s" % base_name - return self.__get( url ).content - - return fetcher - - def __post( self, path, data={}, files=None, key=None): - if not key: - key = self.api_key - data = data.copy() - data['key'] = key - return post_request( "%s/%s" % (self.api_url, path), data=data, files=files ) - - def __get( self, path, data={}, key=None ): - if not key: - key = self.api_key - data = data.copy() - data['key'] = key - if path.startswith("/api"): - path = path[ len("/api"): ] - url = "%s/%s" % (self.api_url, path) - return get_request( url, params=data ) - - -class GalaxyInteractorTwill( object ): - - def __init__( self, twill_test_case ): - self.twill_test_case = twill_test_case - - def verify_output( self, history, output_data, outfile, attributes, shed_tool_id, maxseconds ): - hid = output_data.get( 'hid' ) - self.twill_test_case.verify_dataset_correctness( outfile, hid=hid, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds ) - - def get_job_stream( self, history_id, output_data, stream ): - return self.twill_test_case._get_job_stream_output( output_data.get( 'id' ), stream=stream, format=False ) - - def stage_data_async( self, 
test_data, history, shed_tool_id, async=True ): - name = test_data.get( 'name', None ) - if name: - async = False - self.twill_test_case.upload_file( test_data['fname'], - ftype=test_data['ftype'], - dbkey=test_data['dbkey'], - metadata=test_data['metadata'], - composite_data=test_data['composite_data'], - shed_tool_id=shed_tool_id, - wait=(not async) ) - if name: - hda_id = self.twill_test_case.get_history_as_data_list()[-1].get( 'id' ) - try: - self.twill_test_case.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name ) - except: - print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name) - return lambda: self.twill_test_case.wait() - - def run_tool( self, testdef, test_history ): - # We need to handle the case where we've uploaded a valid compressed file since the upload - # tool will have uncompressed it on the fly. - - # Lose tons of information to accomodate legacy repeat handling. - all_inputs = {} - for key, value in testdef.inputs.iteritems(): - all_inputs[ key.split("|")[-1] ] = value - - # See if we have a grouping.Repeat element - repeat_name = None - for input_name, input_value in testdef.tool.inputs_by_page[0].items(): - if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: # default behavior is to test 1 repeat, for backwards compatibility - if not input_value.min: # If input_value.min == 1, the element is already on the page don't add new element. - repeat_name = input_name - break - - #check if we need to verify number of outputs created dynamically by tool - if testdef.tool.force_history_refresh: - job_finish_by_output_count = len( self.twill_test_case.get_history_as_data_list() ) - else: - job_finish_by_output_count = False - - inputs_tree = testdef.inputs - # # # HACK: Flatten single-value lists. Required when using expand_grouping - # #for key, value in inputs_tree.iteritems(): - # if isinstance(value, list) and len(value) == 1: - # inputs_tree[key] = value[0] - - # Strip out just a given page of inputs from inputs "tree". - def filter_page_inputs( n ): - page_input_keys = testdef.tool.inputs_by_page[ n ].keys() - return dict( [ (k, v) for k, v in inputs_tree.iteritems() if k.split("|")[0] or k.split("|")[0].resplit("_", 1)[0] in page_input_keys ] ) - - # Do the first page - page_inputs = filter_page_inputs( 0 ) - - # Run the tool - self.twill_test_case.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs ) - print "page_inputs (0)", page_inputs - # Do other pages if they exist - for i in range( 1, testdef.tool.npages ): - page_inputs = filter_page_inputs( i ) - self.twill_test_case.submit_form( **page_inputs ) - print "page_inputs (%i)" % i, page_inputs - - # Check the results ( handles single or multiple tool outputs ). Make sure to pass the correct hid. - # The output datasets from the tool should be in the same order as the testdef.outputs. 
- data_list = None - while data_list is None: - data_list = self.twill_test_case.get_history_as_data_list() - if job_finish_by_output_count and len( testdef.outputs ) > ( len( data_list ) - job_finish_by_output_count ): - data_list = None - return data_list - - def new_history( self ): - # Start with a new history - self.twill_test_case.logout() - self.twill_test_case.login( email='test@bx.psu.edu' ) - admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email == 'test@bx.psu.edu' ).one() - self.twill_test_case.new_history() - latest_history = sa_session.query( galaxy.model.History ) \ - .filter( and_( galaxy.model.History.table.c.deleted == False, - galaxy.model.History.table.c.user_id == admin_user.id ) ) \ - .order_by( desc( galaxy.model.History.table.c.create_time ) ) \ - .first() - assert latest_history is not None, "Problem retrieving latest_history from database" - if len( self.twill_test_case.get_history_as_data_list() ) > 0: - raise AssertionError("ToolTestCase.do_it failed") - return latest_history - - def delete_history( self, latest_history ): - self.twill_test_case.delete_history( id=self.twill_test_case.security.encode_id( latest_history.id ) ) - - def output_hid( self, output_data ): - return output_data.get( 'hid' ) - - def build_tests( testing_shed_tools=False, master_api_key=None, user_api_key=None ): """ If the module level variable `toolbox` is set, generate `ToolTestCase` @@ -421,72 +111,3 @@ # from baseclasses (which should be a tuple of classes) and with namespace dict. new_class_obj = new.classobj( name, baseclasses, namespace ) G[ name ] = new_class_obj - - -GALAXY_INTERACTORS = { - 'api': GalaxyInteractorApi, - 'twill': GalaxyInteractorTwill, -} - - -# Lets just try to use requests if it is available, but if not provide fallback -# on custom implementations of limited requests get/post functionality. -try: - from requests import get as get_request - from requests import post as post_request -except ImportError: - import urllib2 - import httplib - - class RequestsLikeResponse( object ): - - def __init__( self, content ): - self.content = content - - def json( self ): - return loads( self.content ) - - def get_request( url, params={} ): - argsep = '&' - if '?' not in url: - argsep = '?' 
- url = url + argsep + '&'.join( [ '%s=%s' % (k, v) for k, v in params.iteritems() ] ) - #req = urllib2.Request( url, headers = { 'Content-Type': 'application/json' } ) - return RequestsLikeResponse(urllib2.urlopen( url ).read() ) - - def post_request( url, data, files ): - parsed_url = urllib2.urlparse.urlparse( url ) - return __post_multipart( host=parsed_url.netloc, selector=parsed_url.path, fields=data.iteritems(), files=(files or {}).iteritems() ) - - # http://stackoverflow.com/a/681182 - def __post_multipart(host, selector, fields, files): - content_type, body = __encode_multipart_formdata(fields, files) - h = httplib.HTTP(host) - h.putrequest('POST', selector) - h.putheader('content-type', content_type) - h.putheader('content-length', str(len(body))) - h.endheaders() - h.send(body) - errcode, errmsg, headers = h.getreply() - return RequestsLikeResponse(h.file.read()) - - def __encode_multipart_formdata(fields, files): - LIMIT = '----------lImIt_of_THE_fIle_eW_$' - CRLF = '\r\n' - L = [] - for (key, value) in fields: - L.append('--' + LIMIT) - L.append('Content-Disposition: form-data; name="%s"' % key) - L.append('') - L.append(value) - for (key, value) in files: - L.append('--' + LIMIT) - L.append('Content-Disposition: form-data; name="%s"; filename="%s";' % (key, key)) - L.append('Content-Type: application/octet-stream') - L.append('') - L.append(value.read()) - L.append('--' + LIMIT + '--') - L.append('') - body = CRLF.join(L) - content_type = 'multipart/form-data; boundary=%s' % LIMIT - return content_type, body https://bitbucket.org/galaxy/galaxy-central/commits/f4fd082fdcd2/ Changeset: f4fd082fdcd2 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Simplify test composite history item naming using UUIDs. It needs to be unique, so let's use UUIDs instead of a composite name counter to make the code more portable. Affected #: 1 file diff -r d5c0f73b3db4cdb0b984b8a64da72dfe64ab6f0d -r f4fd082fdcd25f06b2f0dfcfa35c1c3ed3046732 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -1,5 +1,6 @@ import os import os.path +import uuid from parameters import basic from parameters import grouping from galaxy.util import string_as_bool @@ -139,10 +140,6 @@ expand_input_elems( test_elem ) def __parse_inputs_elems( self, test_elem, i ): - # Composite datasets need a unique name: each test occurs in a fresh - # history, but we'll keep it unique per set of tests - use i (test #) - # and composite_data_names_counter (instance per test #) - composite_data_names_counter = 0 raw_inputs = [] for param_elem in test_elem.findall( "param" ): attrib = dict( param_elem.attrib ) @@ -168,10 +165,9 @@ attrib['composite_data'].append( child ) if composite_data_name is None: # Generate a unique name; each test uses a - # fresh history - composite_data_name = '_COMPOSITE_RENAMED_t%i_d%i' \ - % ( i, composite_data_names_counter ) - composite_data_names_counter += 1 + # fresh history. + composite_data_name = '_COMPOSITE_RENAMED_t%d_%s' \ + % ( i, uuid.uuid1().hex ) elif child.tag == 'metadata': attrib['metadata'].append( child ) elif child.tag == 'metadata': https://bitbucket.org/galaxy/galaxy-central/commits/b51a17a1d3ce/ Changeset: b51a17a1d3ce User: jmchilton Date: 2013-11-22 02:17:13 Summary: Refactor tools test param parsing for reuse (in particular w/workflow tests). They will be used in a slightly different context, but will need to describe inputs and outputs so let's reuse this code if possible.
None of these methods used tool or self anyway, so they are probably more appropriate outside of ToolTestBuilder and ToolTestBuilder is now much more manageable. Affected #: 1 file diff -r f4fd082fdcd25f06b2f0dfcfa35c1c3ed3046732 -r b51a17a1d3cec70820ec921b66e477d6d8f6bc60 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -54,26 +54,7 @@ """ Iterator over metadata representing the required files for upload. """ - for fname, extra in self.required_files: - data_dict = dict( - fname=fname, - metadata=extra.get( 'metadata', [] ), - composite_data=extra.get( 'composite_data', [] ), - ftype=extra.get( 'ftype', DEFAULT_FTYPE ), - dbkey=extra.get( 'dbkey', DEFAULT_DBKEY ), - ) - edit_attributes = extra.get( 'edit_attributes', [] ) - - #currently only renaming is supported - for edit_att in edit_attributes: - if edit_att.get( 'type', None ) == 'name': - new_name = edit_att.get( 'value', None ) - assert new_name, 'You must supply the new dataset name as the value tag of the edit_attributes tag' - data_dict['name'] = new_name - else: - raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) ) - - yield data_dict + return test_data_iter( self.required_files ) def __matching_case_for_value( self, cond, declared_value ): test_param = cond.test_param @@ -131,7 +112,7 @@ self.__preprocess_input_elems( test_elem ) self.__parse_inputs_elems( test_elem, i ) - self.__parse_output_elems( test_elem ) + self.outputs = parse_output_elems( test_elem ) except Exception, e: self.error = True self.exception = e @@ -142,44 +123,7 @@ def __parse_inputs_elems( self, test_elem, i ): raw_inputs = [] for param_elem in test_elem.findall( "param" ): - attrib = dict( param_elem.attrib ) - if 'values' in attrib: - value = attrib[ 'values' ].split( ',' ) - elif 'value' in attrib: - value = attrib['value'] - else: - value = None - attrib['children'] = list( param_elem.getchildren() ) - if attrib['children']: - # At this time, we can assume having children only - # occurs on DataToolParameter test items but this could - # change and would cause the below parsing to change - # based upon differences in children items - attrib['metadata'] = [] - attrib['composite_data'] = [] - attrib['edit_attributes'] = [] - # Composite datasets need to be renamed uniquely - composite_data_name = None - for child in attrib['children']: - if child.tag == 'composite_data': - attrib['composite_data'].append( child ) - if composite_data_name is None: - # Generate a unique name; each test uses a - # fresh history. 
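The inline parsing being removed here reappears verbatim at module level, where it can be driven without a ToolTestBuilder. A minimal sketch of the extracted helpers, assuming Galaxy's lib directory is on sys.path (and the Python 2 stack of the era, since parse_param_elem relies on getchildren()):

    from xml.etree.ElementTree import fromstring
    from galaxy.tools.test import parse_param_elem, require_file

    elem = fromstring( '<param name="input1" value="subdir/reads.fastq.gz" />' )
    name, value, attrib = parse_param_elem( elem )

    # Register the file for upload; the value handed back is what the test
    # references afterwards: decompressed basename, directory stripped.
    required_files = []
    value = require_file( name, value, attrib, required_files )
    assert value == 'reads.fastq'
    assert required_files[ 0 ][ 0 ] == 'subdir/reads.fastq.gz'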
- composite_data_name = '_COMPOSITE_RENAMED_t%d_%s' \ - % ( i, uuid.uuid1().hex ) - elif child.tag == 'metadata': - attrib['metadata'].append( child ) - elif child.tag == 'metadata': - attrib['metadata'].append( child ) - elif child.tag == 'edit_attributes': - attrib['edit_attributes'].append( child ) - if composite_data_name: - # Composite datasets need implicit renaming; - # inserted at front of list so explicit declarations - # take precedence - attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } ) - name = attrib.pop( 'name' ) + name, value, attrib = parse_param_elem( param_elem, i ) raw_inputs.append( ( name, value, attrib ) ) self.inputs = self.__process_raw_inputs( self.tool.inputs, raw_inputs ) @@ -229,98 +173,174 @@ expanded_inputs[ context.for_state() ] = processed_value return expanded_inputs - def __parse_output_elems( self, test_elem ): - for output_elem in test_elem.findall( "output" ): - attrib = dict( output_elem.attrib ) - name = attrib.pop( 'name', None ) - if name is None: - raise Exception( "Test output does not have a 'name'" ) - - assert_list = self.__parse_assert_list( output_elem ) - file = attrib.pop( 'file', None ) - # File no longer required if an list of assertions was present. - attributes = {} - # Method of comparison - attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower() - # Number of lines to allow to vary in logs (for dates, etc) - attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) ) - # Allow a file size to vary if sim_size compare - attributes['delta'] = int( attrib.pop( 'delta', '10000' ) ) - attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) ) - extra_files = [] - if 'ftype' in attrib: - attributes['ftype'] = attrib['ftype'] - for extra in output_elem.findall( 'extra_files' ): - extra_files.append( self.__parse_extra_files_elem( extra ) ) - metadata = {} - for metadata_elem in output_elem.findall( 'metadata' ): - metadata[ metadata_elem.get('name') ] = metadata_elem.get( 'value' ) - if not (assert_list or file or extra_files or metadata): - raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)") - attributes['assert_list'] = assert_list - attributes['extra_files'] = extra_files - attributes['metadata'] = metadata - self.__add_output( name, file, attributes ) - - def __parse_assert_list( self, output_elem ): - assert_elem = output_elem.find("assert_contents") - assert_list = None - - # Trying to keep testing patch as localized as - # possible, this function should be relocated - # somewhere more conventional. - def convert_elem(elem): - """ Converts and XML element to a dictionary format, used by assertion checking code. 
""" - tag = elem.tag - attributes = dict( elem.attrib ) - child_elems = list( elem.getchildren() ) - converted_children = [] - for child_elem in child_elems: - converted_children.append( convert_elem(child_elem) ) - return {"tag": tag, "attributes": attributes, "children": converted_children} - if assert_elem is not None: - assert_list = [] - for assert_child in list(assert_elem): - assert_list.append(convert_elem(assert_child)) - - return assert_list - - def __parse_extra_files_elem( self, extra ): - # File or directory, when directory, compare basename - # by basename - extra_type = extra.get( 'type', 'file' ) - extra_name = extra.get( 'name', None ) - assert extra_type == 'directory' or extra_name is not None, \ - 'extra_files type (%s) requires a name attribute' % extra_type - extra_value = extra.get( 'value', None ) - assert extra_value is not None, 'extra_files requires a value attribute' - extra_attributes = {} - extra_attributes['compare'] = extra.get( 'compare', 'diff' ).lower() - extra_attributes['delta'] = extra.get( 'delta', '0' ) - extra_attributes['lines_diff'] = int( extra.get( 'lines_diff', '0' ) ) - extra_attributes['sort'] = string_as_bool( extra.get( 'sort', False ) ) - return extra_type, extra_value, extra_name, extra_attributes - - def __add_output( self, name, file, extra ): - self.outputs.append( ( name, file, extra ) ) - def __add_uploaded_dataset( self, name, value, extra, input_parameter ): if value is None: assert input_parameter.optional, '%s is not optional. You must provide a valid filename.' % name return value - if ( value, extra ) not in self.required_files: - self.required_files.append( ( value, extra ) ) # these files will be uploaded - name_change = [ att for att in extra.get( 'edit_attributes', [] ) if att.get( 'type' ) == 'name' ] - if name_change: - name_change = name_change[-1].get( 'value' ) # only the last name change really matters - value = name_change # change value for select to renamed uploaded file for e.g. composite dataset - else: - for end in [ '.zip', '.gz' ]: - if value.endswith( end ): - value = value[ :-len( end ) ] - break - value = os.path.basename( value ) # if uploading a file in a path other than root of test-data - return value + return require_file( name, value, extra, self.required_files ) + + +def test_data_iter( required_files ): + for fname, extra in required_files: + data_dict = dict( + fname=fname, + metadata=extra.get( 'metadata', [] ), + composite_data=extra.get( 'composite_data', [] ), + ftype=extra.get( 'ftype', DEFAULT_FTYPE ), + dbkey=extra.get( 'dbkey', DEFAULT_DBKEY ), + ) + edit_attributes = extra.get( 'edit_attributes', [] ) + + #currently only renaming is supported + for edit_att in edit_attributes: + if edit_att.get( 'type', None ) == 'name': + new_name = edit_att.get( 'value', None ) + assert new_name, 'You must supply the new dataset name as the value tag of the edit_attributes tag' + data_dict['name'] = new_name + else: + raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) ) + + yield data_dict + + +def require_file( name, value, extra, required_files ): + if ( value, extra ) not in required_files: + required_files.append( ( value, extra ) ) # these files will be uploaded + name_change = [ att for att in extra.get( 'edit_attributes', [] ) if att.get( 'type' ) == 'name' ] + if name_change: + name_change = name_change[-1].get( 'value' ) # only the last name change really matters + value = name_change # change value for select to renamed uploaded file for e.g. 
composite dataset + else: + for end in [ '.zip', '.gz' ]: + if value.endswith( end ): + value = value[ :-len( end ) ] + break + value = os.path.basename( value ) # if uploading a file in a path other than root of test-data + return value + + +def parse_param_elem( param_elem, i=0 ): + attrib = dict( param_elem.attrib ) + if 'values' in attrib: + value = attrib[ 'values' ].split( ',' ) + elif 'value' in attrib: + value = attrib['value'] + else: + value = None + attrib['children'] = list( param_elem.getchildren() ) + if attrib['children']: + # At this time, we can assume having children only + # occurs on DataToolParameter test items but this could + # change and would cause the below parsing to change + # based upon differences in children items + attrib['metadata'] = [] + attrib['composite_data'] = [] + attrib['edit_attributes'] = [] + # Composite datasets need to be renamed uniquely + composite_data_name = None + for child in attrib['children']: + if child.tag == 'composite_data': + attrib['composite_data'].append( child ) + if composite_data_name is None: + # Generate a unique name; each test uses a + # fresh history. + composite_data_name = '_COMPOSITE_RENAMED_t%d_%s' \ + % ( i, uuid.uuid1().hex ) + elif child.tag == 'metadata': + attrib['metadata'].append( child ) + elif child.tag == 'metadata': + attrib['metadata'].append( child ) + elif child.tag == 'edit_attributes': + attrib['edit_attributes'].append( child ) + if composite_data_name: + # Composite datasets need implicit renaming; + # inserted at front of list so explicit declarations + # take precedence + attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } ) + name = attrib.pop( 'name' ) + return ( name, value, attrib ) + + +def parse_output_elems( test_elem ): + outputs = [] + for output_elem in test_elem.findall( "output" ): + name, file, attributes = __parse_output_elem( output_elem ) + outputs.append( ( name, file, attributes ) ) + return outputs + + +def __parse_output_elem( output_elem ): + attrib = dict( output_elem.attrib ) + name = attrib.pop( 'name', None ) + if name is None: + raise Exception( "Test output does not have a 'name'" ) + + assert_list = __parse_assert_list( output_elem ) + file = attrib.pop( 'file', None ) + # File no longer required if an list of assertions was present. + attributes = {} + # Method of comparison + attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower() + # Number of lines to allow to vary in logs (for dates, etc) + attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) ) + # Allow a file size to vary if sim_size compare + attributes['delta'] = int( attrib.pop( 'delta', '10000' ) ) + attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) ) + extra_files = [] + if 'ftype' in attrib: + attributes['ftype'] = attrib['ftype'] + for extra in output_elem.findall( 'extra_files' ): + extra_files.append( __parse_extra_files_elem( extra ) ) + metadata = {} + for metadata_elem in output_elem.findall( 'metadata' ): + metadata[ metadata_elem.get('name') ] = metadata_elem.get( 'value' ) + if not (assert_list or file or extra_files or metadata): + raise Exception( "Test output defines not checks (e.g. 
must have a 'file' check against, assertions to check, etc...)") + attributes['assert_list'] = assert_list + attributes['extra_files'] = extra_files + attributes['metadata'] = metadata + return name, file, attributes + + +def __parse_assert_list( output_elem ): + assert_elem = output_elem.find("assert_contents") + assert_list = None + + # Trying to keep testing patch as localized as + # possible, this function should be relocated + # somewhere more conventional. + def convert_elem(elem): + """ Converts and XML element to a dictionary format, used by assertion checking code. """ + tag = elem.tag + attributes = dict( elem.attrib ) + child_elems = list( elem.getchildren() ) + converted_children = [] + for child_elem in child_elems: + converted_children.append( convert_elem(child_elem) ) + return {"tag": tag, "attributes": attributes, "children": converted_children} + if assert_elem is not None: + assert_list = [] + for assert_child in list(assert_elem): + assert_list.append(convert_elem(assert_child)) + + return assert_list + + +def __parse_extra_files_elem( extra ): + # File or directory, when directory, compare basename + # by basename + extra_type = extra.get( 'type', 'file' ) + extra_name = extra.get( 'name', None ) + assert extra_type == 'directory' or extra_name is not None, \ + 'extra_files type (%s) requires a name attribute' % extra_type + extra_value = extra.get( 'value', None ) + assert extra_value is not None, 'extra_files requires a value attribute' + extra_attributes = {} + extra_attributes['compare'] = extra.get( 'compare', 'diff' ).lower() + extra_attributes['delta'] = extra.get( 'delta', '0' ) + extra_attributes['lines_diff'] = int( extra.get( 'lines_diff', '0' ) ) + extra_attributes['sort'] = string_as_bool( extra.get( 'sort', False ) ) + return extra_type, extra_value, extra_name, extra_attributes class ParamContext(object): https://bitbucket.org/galaxy/galaxy-central/commits/6fadd2ce40e1/ Changeset: 6fadd2ce40e1 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Further refactoring of interactors for reuse. Affected #: 2 files diff -r b51a17a1d3cec70820ec921b66e477d6d8f6bc60 -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 test/base/interactor.py --- a/test/base/interactor.py +++ b/test/base/interactor.py @@ -5,12 +5,25 @@ from galaxy.model.mapping import context as sa_session from simplejson import dumps, loads +from logging import getLogger +log = getLogger( __name__ ) + def build_interactor( test_case, type="api" ): interactor_class = GALAXY_INTERACTORS[ type ] return interactor_class( test_case ) +def stage_data_in_history( galaxy_interactor, all_test_data, history, shed_tool_id=None ): + # Upload any needed files + upload_waits = [] + + for test_data in all_test_data: + upload_waits.append( galaxy_interactor.stage_data_async( test_data, history, shed_tool_id ) ) + for upload_wait in upload_waits: + upload_wait() + + class GalaxyInteractorApi( object ): def __init__( self, twill_test_case ): @@ -20,14 +33,14 @@ self.uploads = {} def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ): - self.twill_test_case.wait_for( lambda: not self.__history_ready( history_id ), maxseconds=maxseconds) - hid = output_data.get( 'id' ) + self.wait_for_history( history_id, maxseconds ) + hid = self.__output_id( output_data ) fetcher = self.__dataset_fetcher( history_id ) ## TODO: Twill version verifys dataset is 'ok' in here.
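stage_data_in_history, added above, encodes a fan-out/join contract: every stage_data_async call may return immediately with a wait callable, and the joins happen only after all uploads have been started. With a stub interactor (shed_tool_id plumbing trimmed) the contract can be exercised without a server:

    class StubInteractor( object ):
        def __init__( self ):
            self.staged = []
        def stage_data_async( self, test_data, history, shed_tool_id=None ):
            # Begin the upload, hand back a callable that blocks until done.
            return lambda: self.staged.append( test_data[ 'fname' ] )

    def stage_data_in_history( galaxy_interactor, all_test_data, history ):
        upload_waits = [ galaxy_interactor.stage_data_async( test_data, history ) for test_data in all_test_data ]
        for upload_wait in upload_waits:
            upload_wait()

    interactor = StubInteractor()
    stage_data_in_history( interactor, [ { 'fname': 'i1.txt' }, { 'fname': 'i2.txt' } ], 'some_history_id' )
    assert interactor.staged == [ 'i1.txt', 'i2.txt' ]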
self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id ) metadata = attributes.get( 'metadata', {} ) if metadata: - dataset = self.__get( "histories/%s/contents/%s" % ( history_id, hid ) ).json() + dataset = self._get( "histories/%s/contents/%s" % ( history_id, hid ) ).json() for key, value in metadata.iteritems(): dataset_key = "metadata_%s" % key try: @@ -41,15 +54,27 @@ msg = "Failed to verify dataset metadata, metadata key [%s] was not found." % key raise Exception( msg ) + def wait_for_history( self, history_id, maxseconds ): + self.twill_test_case.wait_for( lambda: not self.__history_ready( history_id ), maxseconds=maxseconds) + def get_job_stream( self, history_id, output_data, stream ): - hid = output_data.get( 'id' ) - data = self.__get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json() + hid = self.__output_id( output_data ) + data = self._get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json() return data.get( stream, '' ) def new_history( self ): - history_json = self.__post( "histories", {"name": "test_history"} ).json() + history_json = self._post( "histories", {"name": "test_history"} ).json() return history_json[ 'id' ] + def __output_id( self, output_data ): + # Allow data structure coming out of tools API - {id: <id>, output_name: <name>, etc...} + # or simple id as comes out of workflow API. + try: + output_id = output_data.get( 'id' ) + except AttributeError: + output_id = output_data + return output_id + def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ): fname = test_data[ 'fname' ] tool_input = { @@ -143,12 +168,15 @@ return wait def __history_ready( self, history_id ): - history_json = self.__get( "histories/%s" % history_id ).json() + history_json = self._get( "histories/%s" % history_id ).json() state = history_json[ 'state' ] - if state == 'ok': + return self._state_ready( state, error_msg="History in error state." 
) + + def _state_ready( self, state_str, error_msg ): + if state_str == 'ok': + return True + elif state_str == 'error': + raise Exception( error_msg ) + return False def __submit_tool( self, history_id, tool_id, tool_input, extra_data={}, files=None ): @@ -158,12 +186,12 @@ inputs=dumps( tool_input ), **extra_data ) - return self.__post( "tools", files=files, data=data ) + return self._post( "tools", files=files, data=data ) def __get_user_key( self, user_key, admin_key ): if user_key: return user_key - all_users = self.__get( 'users', key=admin_key ).json() + all_users = self._get( 'users', key=admin_key ).json() try: test_user = [ user for user in all_users if user["email"] == 'test@bx.psu.edu' ][0] except IndexError: @@ -172,26 +200,26 @@ password='testuser', username='admin-user', ) - test_user = self.__post( 'users', data, key=admin_key ).json() - return self.__post( "users/%s/api_key" % test_user['id'], key=admin_key ).json() + test_user = self._post( 'users', data, key=admin_key ).json() + return self._post( "users/%s/api_key" % test_user['id'], key=admin_key ).json() def __dataset_fetcher( self, history_id ): def fetcher( hda_id, base_name=None ): url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id) if base_name: url += "&filename=%s" % base_name - return self.__get( url ).content + return self._get( url ).content return fetcher - def __post( self, path, data={}, files=None, key=None): + def _post( self, path, data={}, files=None, key=None): if not key: key = self.api_key data = data.copy() data['key'] = key return post_request( "%s/%s" % (self.api_url, path), data=data, files=files ) - def __get( self, path, data={}, key=None ): + def _get( self, path, data={}, key=None ): if not key: key = self.api_key data = data.copy() diff -r b51a17a1d3cec70820ec921b66e477d6d8f6bc60 -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -1,7 +1,7 @@ import new import sys from base.twilltestcase import TwillTestCase -from base.interactor import build_interactor +from base.interactor import build_interactor, stage_data_in_history import logging log = logging.getLogger( __name__ ) @@ -23,12 +23,7 @@ test_history = galaxy_interactor.new_history() - # Upload any needed files - upload_waits = [] - for test_data in testdef.test_data(): - upload_waits.append( galaxy_interactor.stage_data_async( test_data, test_history, shed_tool_id ) ) - for upload_wait in upload_waits: - upload_wait() + stage_data_in_history( galaxy_interactor, testdef.test_data(), test_history, shed_tool_id ) data_list = galaxy_interactor.run_tool( testdef, test_history ) self.assertTrue( data_list ) https://bitbucket.org/galaxy/galaxy-central/commits/7c5305e54c03/ Changeset: 7c5305e54c03 User: jmchilton Date: 2013-11-22 02:17:13 Summary: Functional testing of workflows (for Galaxy). ... tool shed integration will be more challenging, but this can be used for directly configured Galaxy tools and outlines a syntax that could be shared with a tool shed driven approach. The one tricky part is how to match workflow outputs to things to check. Right now it is based on the index of the output across the workflow. This is both difficult to determine and very brittle to workflow modifications - how to proceed - require an annotation string to be set up? Modify the workflow data model to allow assigning names to outputs the way inputs have names?
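Inputs avoid that brittleness because the run request names them explicitly: each input label is resolved to a staged upload and posted in a ds_map keyed by the imported workflow's step ids. Roughly (the ids below are illustrative, and json stands in for the simplejson the tests import):

    from json import dumps

    ds_map = {
        "5": { "src": "hda", "id": "2f94e8ae9edff68b" },   # imported step id -> staged upload
        "6": { "src": "hda", "id": "0a248a1f62a0cc04" },
    }
    payload = {
        "history": "hist_id=%s" % "1cd8e2f6b131e891",
        "ds_map": dumps( ds_map ),
        "workflow_id": "ebfb8f50c6abde6d",
    }

The interactor then posts this payload to the workflows API endpoint and polls the target history for the resulting datasets.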
At any rate this current syntax should be considered beta and may change - if it does Galaxy will not continue to support this syntax. To run the sample test execute the following test command: sh run_functional_tests.sh -workflow test-data/workflows/1.xml Affected #: 8 files diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f run_functional_tests.sh --- a/run_functional_tests.sh +++ b/run_functional_tests.sh @@ -13,6 +13,7 @@ echo "'run_functional_tests.sh -list' for listing all the tool ids" echo "'run_functional_tests.sh -toolshed' for running all the test scripts in the ./test/tool_shed/functional directory" echo "'run_functional_tests.sh -toolshed testscriptname' for running one test script named testscriptname in the .test/tool_shed/functional directory" + echo "'run_functional_tests.sh -workflow test.xml' for running a workflow test case as defined by supplied workflow xml test file" echo "'run_functional_tests.sh -framework' for running through example tool tests testing framework features in test/functional/tools" echo "'run_functional_tests.sh -framework -id toolid' for testing one framework tool (in test/functional/tools/) with id 'toolid'" elif [ $1 = '-id' ]; then @@ -48,6 +49,8 @@ else python ./test/tool_shed/functional_tests.py -v --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html $2 fi +elif [ $1 = '-workflow' ]; then + python ./scripts/functional_tests.py -v functional.test_workflow:WorkflowTestCase --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html -workflow $2 elif [ $1 = '-framework' ]; then if [ ! $2 ]; then python ./scripts/functional_tests.py -v functional.test_toolbox --with-nosehtml --html-report-file run_functional_tests.html -framework diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -420,7 +420,7 @@ new_path = [ os.path.join( cwd, "test" ) ] new_path.extend( sys.path[1:] ) sys.path = new_path - import functional.test_toolbox + # ---- Find tests --------------------------------------------------------- if galaxy_test_proxy_port: log.info( "Functional tests will be run against %s:%s" % ( galaxy_test_host, galaxy_test_proxy_port ) ) @@ -436,6 +436,13 @@ os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host def _run_functional_test( testing_shed_tools=None ): + workflow_test = __check_arg( '-workflow', param=True ) + if workflow_test: + import functional.test_workflow + functional.test_workflow.WorkflowTestCase.workflow_test_file = workflow_test + functional.test_workflow.WorkflowTestCase.master_api_key = master_api_key + functional.test_workflow.WorkflowTestCase.user_api_key = os.environ.get( "GALAXY_TEST_USER_API_KEY", default_galaxy_user_key ) + import functional.test_toolbox functional.test_toolbox.toolbox = app.toolbox functional.test_toolbox.build_tests( testing_shed_tools=testing_shed_tools, diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test-data/workflows/1.ga --- /dev/null +++ b/test-data/workflows/1.ga @@ -0,0 +1,145 @@ +{ + "a_galaxy_workflow": "true", + "annotation": "", + "format-version": "0.1", + "name": "TestWorkflowAlpha", + "steps": { + "0": { + "annotation": "", + "id": 0, + "input_connections": {}, + "inputs": [ + { + "description": "", + "name": "Input Dataset1" + } + ], + "name": "Input dataset", + "outputs": [], + "position": { + "left": 206, + "top": 207 + }, + 
"tool_errors": null, + "tool_id": null, + "tool_state": "{\"name\": \"Input Dataset1\"}", + "tool_version": null, + "type": "data_input", + "user_outputs": [] + }, + "1": { + "annotation": "", + "id": 1, + "input_connections": {}, + "inputs": [ + { + "description": "", + "name": "Input Dataset2" + } + ], + "name": "Input dataset", + "outputs": [], + "position": { + "left": 200, + "top": 320 + }, + "tool_errors": null, + "tool_id": null, + "tool_state": "{\"name\": \"Input Dataset2\"}", + "tool_version": null, + "type": "data_input", + "user_outputs": [] + }, + "2": { + "annotation": "", + "id": 2, + "input_connections": { + "input1": { + "id": 0, + "output_name": "output" + }, + "queries_0|input2": { + "id": 1, + "output_name": "output" + } + }, + "inputs": [], + "name": "Concatenate datasets", + "outputs": [ + { + "name": "out_file1", + "type": "input" + } + ], + "position": { + "left": 420, + "top": 200 + }, + "post_job_actions": {}, + "tool_errors": null, + "tool_id": "cat1", + "tool_state": "{\"__page__\": 0, \"__rerun_remap_job_id__\": null, \"input1\": \"null\", \"chromInfo\": \"\\\"/home/john/workspace/galaxy-central/tool-data/shared/ucsc/chrom/?.len\\\"\", \"queries\": \"[{\\\"input2\\\": null, \\\"__index__\\\": 0}]\"}", + "tool_version": "1.0.0", + "type": "tool", + "user_outputs": [] + }, + "3": { + "annotation": "", + "id": 3, + "input_connections": { + "input": { + "id": 2, + "output_name": "out_file1" + } + }, + "inputs": [], + "name": "Convert", + "outputs": [ + { + "name": "out_file1", + "type": "tabular" + } + ], + "position": { + "left": 640, + "top": 200 + }, + "post_job_actions": {}, + "tool_errors": null, + "tool_id": "Convert characters1", + "tool_state": "{\"__page__\": 0, \"convert_from\": \"\\\"s\\\"\", \"__rerun_remap_job_id__\": null, \"chromInfo\": \"\\\"/home/john/workspace/galaxy-central/tool-data/shared/ucsc/chrom/?.len\\\"\", \"input\": \"null\"}", + "tool_version": "1.0.0", + "type": "tool", + "user_outputs": [] + }, + "4": { + "annotation": "", + "id": 4, + "input_connections": { + "input": { + "id": 3, + "output_name": "out_file1" + } + }, + "inputs": [], + "name": "Add column", + "outputs": [ + { + "name": "out_file1", + "type": "input" + } + ], + "position": { + "left": 860, + "top": 200 + }, + "post_job_actions": {}, + "tool_errors": null, + "tool_id": "addValue", + "tool_state": "{\"__page__\": 0, \"__rerun_remap_job_id__\": null, \"exp\": \"\\\"1\\\"\", \"iterate\": \"\\\"yes\\\"\", \"input\": \"null\", \"chromInfo\": \"\\\"/home/john/workspace/galaxy-central/tool-data/shared/ucsc/chrom/?.len\\\"\"}", + "tool_version": "1.0.0", + "type": "tool", + "user_outputs": [] + } + } +} \ No newline at end of file diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test-data/workflows/1.xml --- /dev/null +++ b/test-data/workflows/1.xml @@ -0,0 +1,5 @@ +<test file="1.ga"> + <input name="Input Dataset1" value="workflows/i1.txt" /> + <input name="Input Dataset2" value="workflows/i2.txt" /> + <output name="2" file="workflows/o1.txt" /><!-- index of n'th output, less than ideal syntax --> +</test> diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test-data/workflows/i1.txt --- /dev/null +++ b/test-data/workflows/i1.txt @@ -0,0 +1,1 @@ +1 2 3 diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test-data/workflows/i2.txt --- /dev/null +++ b/test-data/workflows/i2.txt @@ -0,0 +1,1 @@ +4 5 6 diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 
7c5305e54c03373b9922d60e2c75e90607c61c6f test-data/workflows/o1.txt --- /dev/null +++ b/test-data/workflows/o1.txt @@ -0,0 +1,2 @@ +1 2 3 1 +4 5 6 2 diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test/functional/test_workflow.py --- /dev/null +++ b/test/functional/test_workflow.py @@ -0,0 +1,185 @@ +import os +import sys +from base.twilltestcase import TwillTestCase +from base.interactor import GalaxyInteractorApi, stage_data_in_history + +from galaxy.util import parse_xml +from galaxy.tools.test import parse_param_elem, require_file, test_data_iter, parse_output_elems +from simplejson import load, dumps + +from logging import getLogger +log = getLogger( __name__ ) + + +class WorkflowTestCase( TwillTestCase ): + """ + Kind of a shell of a test case for running workflow tests. Probably + needs to look more like test_toolbox. + """ + workflow_test_file = None + user_api_key = None + master_api_key = None + + def test_workflow( self, workflow_test_file=None ): + maxseconds = 120 + workflow_test_file = workflow_test_file or WorkflowTestCase.workflow_test_file + assert workflow_test_file + workflow_test = parse_test_file( workflow_test_file ) + galaxy_interactor = GalaxyWorkflowInteractor( self ) + + # Calling workflow https://github.com/jmchilton/blend4j/blob/master/src/test/java/com/github/jm... + + # Import workflow + workflow_id, step_id_map, output_defs = self.__import_workflow( galaxy_interactor, workflow_test.workflow ) + + # Stage data and history for workflow + test_history = galaxy_interactor.new_history() + stage_data_in_history( galaxy_interactor, workflow_test.test_data(), test_history ) + + # Build workflow parameters + uploads = galaxy_interactor.uploads + ds_map = {} + for step_index, input_dataset_label in workflow_test.input_datasets(): + # Upload is {"src": "hda", "id": hid} + try: + upload = uploads[ workflow_test.upload_name( input_dataset_label ) ] + except KeyError: + raise AssertionError( "Failed to find upload with label %s in uploaded datasets %s" % ( input_dataset_label, uploads ) ) + + ds_map[ step_id_map[ step_index ] ] = upload + + payload = { + "history": "hist_id=%s" % test_history, + "ds_map": dumps( ds_map ), + "workflow_id": workflow_id, + } + run_response = galaxy_interactor.run_workflow( payload ).json() + + outputs = run_response[ 'outputs' ] + if not len( outputs ) == len( output_defs ): + msg_template = "Number of outputs [%d] created by workflow execution does not equal expected number from input file [%d]." + msg = msg_template % ( len( outputs ), len( output_defs ) ) + raise AssertionError( msg ) + + galaxy_interactor.wait_for_ids( test_history, outputs ) + + for expected_output_def in workflow_test.outputs: + # Get the correct hid + name, outfile, attributes = expected_output_def + + output_data = outputs[ int( name ) ] + try: + galaxy_interactor.verify_output( test_history, output_data, outfile, attributes=attributes, shed_tool_id=None, maxseconds=maxseconds ) + except Exception: + for stream in ['stdout', 'stderr']: + stream_output = galaxy_interactor.get_job_stream( test_history, output_data, stream=stream ) + print >>sys.stderr, self._format_stream( stream_output, stream=stream, format=True ) + raise + + def __import_workflow( self, galaxy_interactor, workflow ): + """ + Import workflow into Galaxy and return id and mapping of step ids. 
+ """ + workflow_info = galaxy_interactor.import_workflow( workflow ).json() + try: + workflow_id = workflow_info[ 'id' ] + except KeyError: + raise AssertionError( "Failed to find id for workflow import response %s" % workflow_info ) + + # Well ideally the local copy of the workflow would have the same step ids + # as the one imported through the API, but API workflow imports are 1-indexed + # and GUI exports 0-indexed as of mid-november 2013. + + imported_workflow = galaxy_interactor.read_workflow( workflow_id ) + #log.info("local %s\nimported%s" % (workflow, imported_workflow)) + step_id_map = {} + local_steps_ids = sorted( [ int( step_id ) for step_id in workflow[ 'steps' ].keys() ] ) + imported_steps_ids = sorted( [ int( step_id ) for step_id in imported_workflow[ 'steps' ].keys() ] ) + for local_step_id, imported_step_id in zip( local_steps_ids, imported_steps_ids ): + step_id_map[ local_step_id ] = imported_step_id + + output_defs = [] + for local_step_id in local_steps_ids: + step_def = workflow['steps'][ str( local_step_id ) ] + output_defs.extend( step_def.get( "outputs", [] ) ) + + return workflow_id, step_id_map, output_defs + + +def parse_test_file( workflow_test_file ): + tree = parse_xml( workflow_test_file ) + root = tree.getroot() + input_elems = root.findall( "input" ) + required_files = [] + dataset_dict = {} + for input_elem in input_elems: + name, value, attrib = parse_param_elem( input_elem ) + require_file( name, value, attrib, required_files ) + dataset_dict[ name ] = value + + outputs = parse_output_elems( root ) + + workflow_file_rel_path = root.get( 'file' ) + if not workflow_file_rel_path: + raise Exception( "Workflow test XML must declare file attribute pointing to workflow under test." ) + + # TODO: Normalize this path, prevent it from accessing arbitrary files on system. 
+def parse_test_file( workflow_test_file ):
+    tree = parse_xml( workflow_test_file )
+    root = tree.getroot()
+    input_elems = root.findall( "input" )
+    required_files = []
+    dataset_dict = {}
+    for input_elem in input_elems:
+        name, value, attrib = parse_param_elem( input_elem )
+        require_file( name, value, attrib, required_files )
+        dataset_dict[ name ] = value
+
+    outputs = parse_output_elems( root )
+
+    workflow_file_rel_path = root.get( 'file' )
+    if not workflow_file_rel_path:
+        raise Exception( "Workflow test XML must declare file attribute pointing to workflow under test." )
+
+    # TODO: Normalize this path, prevent it from accessing arbitrary files on system.
+    workflow_file_abs_path = os.path.join( os.path.dirname( workflow_test_file ), workflow_file_rel_path )
+
+    return WorkflowTest(
+        dataset_dict,
+        required_files,
+        workflow_file_abs_path,
+        outputs=outputs,
+    )
+
+
+class WorkflowTest( object ):
+
+    def __init__( self, dataset_dict, required_files, workflow_file, outputs ):
+        self.dataset_dict = dataset_dict
+        self.required_files = required_files
+        self.workflow = load( open( workflow_file, "r" ) )
+        self.outputs = outputs
+
+    def test_data( self ):
+        return test_data_iter( self.required_files )
+
+    def upload_name( self, input_dataset_label ):
+        return self.dataset_dict[ input_dataset_label ]
+
+    def input_datasets( self ):
+        steps = self.workflow[ "steps" ]
+        log.info("in input_datasets with steps %s" % steps)
+        for step_index, step_dict in steps.iteritems():
+            if step_dict.get( "name", None ) == "Input dataset":
+                yield int( step_index ), step_dict[ "inputs" ][0][ "name" ]
+
+
+class GalaxyWorkflowInteractor(GalaxyInteractorApi):
+
+    def __init__( self, twill_test_case ):
+        super(GalaxyWorkflowInteractor, self).__init__( twill_test_case )
+
+    def import_workflow( self, workflow_rep ):
+        payload = { "workflow": dumps( workflow_rep ) }
+        return self._post( "workflows/upload", data=payload )
+
+    def run_workflow( self, data ):
+        return self._post( "workflows", data=data )
+
+    def read_workflow( self, id ):
+        return self._get( "workflows/%s" % id ).json()
+
+    def wait_for_ids( self, history_id, ids ):
+        self.twill_test_case.wait_for( lambda: not all( [ self.__dataset_ready( history_id, id ) for id in ids ] ), maxseconds=120 )
+
+    def __dataset_ready( self, history_id, id ):
+        contents = self._get( 'histories/%s/contents' % history_id ).json()
+        for content in contents:
+            if content["id"] == id:
+                state = content[ 'state' ]
+                state_ready = self._state_ready( state, error_msg="Dataset creation failed for dataset with name %s." % content[ 'name' ] )
+                return state_ready
+        return False
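[Editor's note] wait_for_ids above delegates to TwillTestCase.wait_for, polling the history contents until every watched dataset reaches a ready state or the timeout expires. Stripped of the Galaxy plumbing, the pattern is plain poll-with-deadline; wait_until and fetch_states below are hypothetical names for illustration, not part of the patch:

    import time

    def wait_until( predicate, maxseconds=120, interval=2 ):
        # Poll until predicate() is true or the deadline passes.
        deadline = time.time() + maxseconds
        while time.time() < deadline:
            if predicate():
                return
            time.sleep( interval )
        raise AssertionError( "Timed out after %s seconds." % maxseconds )

    # Usage sketch: fetch_states would be a callable returning the current
    # state of each watched dataset, e.g. via the histories contents API.
    # wait_until( lambda: all( s == "ok" for s in fetch_states() ) )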
https://bitbucket.org/galaxy/galaxy-central/commits/87f3e11ced5d/
Changeset: 87f3e11ced5d
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Fix for using API driver with tool outputs that change their name. E.g. composite data types. API should probably return the original output name since that is the purpose of the parameter, but I am not sure how much work that would be.
Affected #: 2 files

diff -r 7c5305e54c03373b9922d60e2c75e90607c61c6f -r 87f3e11ced5dbab70f50a2299fcda2859643ff62 test/base/interactor.py
--- a/test/base/interactor.py
+++ b/test/base/interactor.py
@@ -1,5 +1,6 @@
 import os
 from galaxy.tools.parameters import grouping
+from galaxy.util.odict import odict
 import galaxy.model
 from galaxy.model.orm import and_, desc
 from galaxy.model.mapping import context as sa_session
@@ -148,7 +149,7 @@
         ## Convert outputs list to a dictionary that can be accessed by
         ## output_name so can be more flexible about ordering of outputs
         ## but also allows fallback to legacy access as list mode.
-        outputs_dict = {}
+        outputs_dict = odict()
         index = 0
         for output in datasets_object[ 'outputs' ]:
             outputs_dict[ index ] = outputs_dict[ output.get("output_name") ] = output

diff -r 7c5305e54c03373b9922d60e2c75e90607c61c6f -r 87f3e11ced5dbab70f50a2299fcda2859643ff62 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -53,8 +53,12 @@
             output_data = data_list[ name ]
         except (TypeError, KeyError):
             # Legacy - fall back on ordered data list access if data_list is
-            # just a list (case with twill variant)
-            output_data = data_list[ len(data_list) - len(testdef.outputs) + output_index ]
+            # just a list (case with twill variant or if output changes its
+            # name).
+            if hasattr(data_list, "values"):
+                output_data = data_list.values()[ output_index ]
+            else:
+                output_data = data_list[ len(data_list) - len(testdef.outputs) + output_index ]
         self.assertTrue( output_data is not None )
         try:
             galaxy_interactor.verify_output( history, output_data, outfile, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds )
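[Editor's note] The switch to odict registers every output twice, once under its positional index and once under its output_name, so test definitions can address outputs either way, and the new hasattr(data_list, "values") branch gives an ordered fallback when a renamed output cannot be found by name. A minimal sketch of the dual-key idea, using collections.OrderedDict as a stand-in for galaxy.util.odict (an assumption; both preserve insertion order, which is all this relies on):

    from collections import OrderedDict  # stand-in for galaxy.util.odict

    outputs = [
        { "output_name": "out_file1", "id": "abc" },  # hypothetical API outputs
        { "output_name": "report", "id": "def" },
    ]
    outputs_dict = OrderedDict()
    for index, output in enumerate( outputs ):
        # The same object is reachable positionally and by output name.
        outputs_dict[ index ] = outputs_dict[ output[ "output_name" ] ] = output

    assert outputs_dict[ 0 ] is outputs_dict[ "out_file1" ]
    assert outputs_dict[ 1 ] is outputs_dict[ "report" ]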
https://bitbucket.org/galaxy/galaxy-central/commits/f302dae15be9/
Changeset: f302dae15be9
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: API Driven Tests: Option for very detailed per dataset error messages for when histories error out.
Affected #: 1 file

diff -r 87f3e11ced5dbab70f50a2299fcda2859643ff62 -r f302dae15be9185139d8cc17cd0208a448e7e9b0 test/base/interactor.py
--- a/test/base/interactor.py
+++ b/test/base/interactor.py
@@ -1,4 +1,5 @@
 import os
+from StringIO import StringIO
 from galaxy.tools.parameters import grouping
 from galaxy.util.odict import odict
 import galaxy.model
@@ -9,6 +10,9 @@
 from logging import getLogger
 log = getLogger( __name__ )

+VERBOSE_ERRORS = True
+ERROR_MESSAGE_DATASET_SEP = "--------------------------------------"
+

 def build_interactor( test_case, type="api" ):
     interactor_class = GALAXY_INTERACTORS[ type ]
@@ -60,7 +64,7 @@

     def get_job_stream( self, history_id, output_data, stream ):
         hid = self.__output_id( output_data )
-        data = self._get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json()
+        data = self._dataset_provenance( history_id, hid )
         return data.get( stream, '' )

     def new_history( self ):
@@ -171,7 +175,61 @@
     def __history_ready( self, history_id ):
         history_json = self._get( "histories/%s" % history_id ).json()
         state = history_json[ 'state' ]
-        return self._state_ready( state, error_msg="History in error state." )
+        try:
+            return self._state_ready( state, error_msg="History in error state." )
+        except Exception:
+            if VERBOSE_ERRORS:
+                self._summarize_history_errors( history_id )
+            raise
+
+    def _summarize_history_errors( self, history_id ):
+        print "History with id %s in error - summary of datasets in error below." % history_id
+        try:
+            history_contents = self.__contents( history_id )
+        except Exception:
+            print "*TEST FRAMEWORK FAILED TO FETCH HISTORY DETAILS*"
+
+        for dataset in history_contents:
+            if dataset[ 'state' ] != 'error':
+                continue
+
+            print ERROR_MESSAGE_DATASET_SEP
+            dataset_id = dataset.get( 'id', None )
+            print "| %d - %s (HID - NAME) " % ( int( dataset['hid'] ), dataset['name'] )
+            try:
+                dataset_info = self._dataset_info( history_id, dataset_id )
+                print "| Dataset Blurb:"
+                print self.format_for_error( dataset_info.get( "misc_blurb", "" ), "Dataset blurb was empty." )
+                print "| Dataset Info:"
+                print self.format_for_error( dataset_info.get( "misc_info", "" ), "Dataset info is empty." )
+            except Exception:
+                print "| *TEST FRAMEWORK ERROR FETCHING DATASET DETAILS*"
+            try:
+                provenance_info = self._dataset_provenance( history_id, dataset_id )
+                print "| Dataset Job Standard Output:"
+                print self.format_for_error( provenance_info.get( "stdout", "" ), "Standard output was empty." )
+                print "| Dataset Job Standard Error:"
+                print self.format_for_error( provenance_info.get( "stderr", "" ), "Standard error was empty." )
+            except Exception:
+                print "| *TEST FRAMEWORK ERROR FETCHING JOB DETAILS*"
+            print "|"
+            print ERROR_MESSAGE_DATASET_SEP
+
+    def format_for_error( self, blob, empty_message, prefix="| " ):
+        contents = "\n".join([ "%s%s" % (prefix, line.strip()) for line in StringIO(blob).readlines() if line.rstrip("\n\r") ] )
+        return contents or "%s*%s*" % ( prefix, empty_message )
+
+    def _dataset_provenance( self, history_id, id ):
+        provenance = self._get( "histories/%s/contents/%s/provenance" % ( history_id, id ) ).json()
+        return provenance
+
+    def _dataset_info( self, history_id, id ):
+        dataset_json = self._get( "histories/%s/contents/%s" % ( history_id, id ) ).json()
+        return dataset_json
+
+    def __contents( self, history_id ):
+        history_contents_json = self._get( "histories/%s/contents" % history_id ).json()
+        return history_contents_json

     def _state_ready( self, state_str, error_msg ):
         if state_str == 'ok':

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.