1 new commit in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/changeset/59f0cdeb9afc/
changeset:   59f0cdeb9afc
user:        greg
date:        2012-03-29 21:02:36
summary:     Enhance the functional test framework to support testing tools contained in installed
             repositories that were not migrated from the Galaxy distribution. To test these tools, use:
             sh run_functional_tests.sh --installed
affected #:  4 files

diff -r c510097f7018dbc177513a62c0ca46b4cace0c86 -r 59f0cdeb9afcd899727ba2d2b5b4f3d0e6383edd run_functional_tests.sh
--- a/run_functional_tests.sh
+++ b/run_functional_tests.sh
@@ -29,6 +29,15 @@
     else
         python ./scripts/functional_tests.py -v functional.test_toolbox --with-nosehtml --html-report-file run_functional_tests.html --migrated
     fi
+elif [ $1 = '--installed' ]; then
+    if [ ! $2 ]; then
+        python ./scripts/functional_tests.py -v functional.test_toolbox --with-nosehtml --html-report-file run_functional_tests.html --installed
+    elif [ $2 = '-id' ]; then
+        # TODO: This option is not tested...
+        python ./scripts/functional_tests.py -v functional.test_toolbox:TestForTool_$3 --with-nosehtml --html-report-file run_functional_tests.html --installed
+    else
+        python ./scripts/functional_tests.py -v functional.test_toolbox --with-nosehtml --html-report-file run_functional_tests.html --installed
+    fi
 else
     python ./scripts/functional_tests.py -v --with-nosehtml --html-report-file run_functional_tests.html $1
 fi

diff -r c510097f7018dbc177513a62c0ca46b4cace0c86 -r 59f0cdeb9afcd899727ba2d2b5b4f3d0e6383edd scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -48,21 +48,65 @@
 default_galaxy_locales = 'en'
 default_galaxy_test_file_dir = "test-data"
 migrated_tool_panel_config = 'migrated_tools_conf.xml'
+installed_tool_panel_configs = [ 'shed_tool_conf.xml' ]
+
+def parse_tool_panel_config( config, shed_tools_dict ):
+    """
+    Parse a shed-related tool panel config to generate the shed_tools_dict. This only happens when testing tools installed from the tool shed.
+    """
+    last_galaxy_test_file_dir = None
+    last_tested_repository_name = None
+    last_tested_changeset_revision = None
+    tree = util.parse_xml( config )
+    root = tree.getroot()
+    for elem in root:
+        if elem.tag == 'tool':
+            galaxy_test_file_dir, \
+            last_tested_repository_name, \
+            last_tested_changeset_revision = get_installed_repository_info( elem,
+                                                                            last_galaxy_test_file_dir,
+                                                                            last_tested_repository_name,
+                                                                            last_tested_changeset_revision )
+            if galaxy_test_file_dir:
+                if galaxy_test_file_dir != last_galaxy_test_file_dir:
+                    if not os.path.isabs( galaxy_test_file_dir ):
+                        galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
+                guid = elem.get( 'guid' )
+                shed_tools_dict[ guid ] = galaxy_test_file_dir
+                last_galaxy_test_file_dir = galaxy_test_file_dir
+        elif elem.tag == 'section':
+            for section_elem in elem:
+                if section_elem.tag == 'tool':
+                    galaxy_test_file_dir, \
+                    last_tested_repository_name, \
+                    last_tested_changeset_revision = get_installed_repository_info( section_elem,
+                                                                                    last_galaxy_test_file_dir,
+                                                                                    last_tested_repository_name,
+                                                                                    last_tested_changeset_revision )
+                    if galaxy_test_file_dir:
+                        if galaxy_test_file_dir != last_galaxy_test_file_dir:
+                            if not os.path.isabs( galaxy_test_file_dir ):
+                                galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
+                        guid = section_elem.get( 'guid' )
+                        shed_tools_dict[ guid ] = galaxy_test_file_dir
+                        last_galaxy_test_file_dir = galaxy_test_file_dir
+    return shed_tools_dict
 
 def get_installed_repository_info( elem, last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision ):
     """
     Return the GALAXY_TEST_FILE_DIR, the containing repository name and the change set revision for the tool elem.
-    This only happens when testing tools eliminated from the distribution and now installed from the tool shed.
+    This only happens when testing tools installed from the tool shed.
     """
     tool_config_path = elem.get( 'file' )
     installed_tool_path_items = tool_config_path.split( '/repos/' )
     sans_shed = installed_tool_path_items[ 1 ]
     path_items = sans_shed.split( '/' )
+    repository_owner = path_items[ 0 ]
     repository_name = path_items[ 1 ]
     changeset_revision = path_items[ 2 ]
     if repository_name != last_tested_repository_name or changeset_revision != last_tested_changeset_revision:
         # Locate the test-data directory.
-        installed_tool_path = os.path.join( installed_tool_path_items[ 0 ], 'repos', 'devteam', repository_name, changeset_revision )
+        installed_tool_path = os.path.join( installed_tool_path_items[ 0 ], 'repos', repository_owner, repository_name, changeset_revision )
         for root, dirs, files in os.walk( installed_tool_path ):
             if 'test-data' in dirs:
                 return os.path.join( root, 'test-data' ), repository_name, changeset_revision
@@ -92,12 +136,12 @@
     if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
         os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales
     testing_migrated_tools = '--migrated' in sys.argv
+    testing_installed_tools = '--installed' in sys.argv
 
-    if testing_migrated_tools:
+    if testing_migrated_tools or testing_installed_tools:
         sys.argv.pop()
         # Store a jsonified dictionary of tool_id : GALAXY_TEST_FILE_DIR pairs.
-        galaxy_migrated_tools_file = 'migrated_tools_dict'
-        migrated_tools_dict = {}
+        galaxy_tool_shed_test_file = 'shed_tools_dict'
         # We need the upload tool for functional tests, so we'll create a temporary tool panel config that defines it.
         fd, tmp_tool_panel_conf = tempfile.mkstemp()
         os.write( fd, '<?xml version="1.0"?>\n' )
@@ -127,7 +171,7 @@
     tool_data_table_config_path = 'tool_data_table_conf.xml'
     tool_dependency_dir = os.environ.get( 'GALAXY_TOOL_DEPENDENCY_DIR', None )
     use_distributed_object_store = os.environ.get( 'GALAXY_USE_DISTRIBUTED_OBJECT_STORE', False )
-    
+
     if start_server:
         psu_production = False
         galaxy_test_proxy_port = None
@@ -295,54 +339,32 @@
         os.environ[ 'GALAXY_TEST_SAVE' ] = galaxy_test_save
     # Pass in through script setenv, will leave a copy of ALL test validate files
     os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host
-    if testing_migrated_tools:
-        last_galaxy_test_file_dir = None
-        last_tested_repository_name = None
-        last_tested_changeset_revision = None
-        tree = util.parse_xml( migrated_tool_panel_config )
-        root = tree.getroot()
-        migrated_tool_path = root.get( 'tool_path' )
-        counter = 0
-        for elem in root:
-            if elem.tag == 'tool':
-                galaxy_test_file_dir, \
-                last_tested_repository_name, \
-                last_tested_changeset_revision = get_installed_repository_info( elem,
-                                                                                last_galaxy_test_file_dir,
-                                                                                last_tested_repository_name,
-                                                                                last_tested_changeset_revision )
-                if galaxy_test_file_dir:
-                    if galaxy_test_file_dir != last_galaxy_test_file_dir:
-                        if not os.path.isabs( galaxy_test_file_dir ):
-                            galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
-                    guid = elem.get( 'guid' )
-                    migrated_tools_dict[ guid ] = galaxy_test_file_dir
-                    last_galaxy_test_file_dir = galaxy_test_file_dir
-            elif elem.tag == 'section':
-                for section_elem in elem:
-                    if section_elem.tag == 'tool':
-                        galaxy_test_file_dir, \
-                        last_tested_repository_name, \
-                        last_tested_changeset_revision = get_installed_repository_info( section_elem,
-                                                                                        last_galaxy_test_file_dir,
-                                                                                        last_tested_repository_name,
-                                                                                        last_tested_changeset_revision )
-                        if galaxy_test_file_dir:
-                            if galaxy_test_file_dir != last_galaxy_test_file_dir:
-                                if not os.path.isabs( galaxy_test_file_dir ):
-                                    galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
-                            guid = section_elem.get( 'guid' )
-                            migrated_tools_dict[ guid ] = galaxy_test_file_dir
-                            last_galaxy_test_file_dir = galaxy_test_file_dir
-        # Persist the migrated_tools_dict to the galaxy_migrated_tools_file.
-        migrated_tools_file = open( galaxy_migrated_tools_file, 'w' )
-        migrated_tools_file.write( to_json_string( migrated_tools_dict ) )
-        migrated_tools_file.close()
-        if not os.path.isabs( galaxy_migrated_tools_file ):
-            galaxy_migrated_tools_file = os.path.join( os.getcwd(), galaxy_migrated_tools_file )
-        os.environ[ 'GALAXY_MIGRATED_TOOLS_FILE' ] = galaxy_migrated_tools_file
+    if testing_migrated_tools or testing_installed_tools:
+        shed_tools_dict = {}
+        if testing_migrated_tools:
+            shed_tools_dict = parse_tool_panel_config( migrated_tool_panel_config, shed_tools_dict )
+        elif testing_installed_tools:
+            for shed_tool_config in installed_tool_panel_configs:
+                shed_tools_dict = parse_tool_panel_config( shed_tool_config, shed_tools_dict )
+        # Persist the shed_tools_dict to the galaxy_tool_shed_test_file.
+        shed_tools_file = open( galaxy_tool_shed_test_file, 'w' )
+        shed_tools_file.write( to_json_string( shed_tools_dict ) )
+        shed_tools_file.close()
+        if not os.path.isabs( galaxy_tool_shed_test_file ):
+            galaxy_tool_shed_test_file = os.path.join( os.getcwd(), galaxy_tool_shed_test_file )
+        os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_tool_shed_test_file
+        if testing_installed_tools:
+            # Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
+            # and reload the app's toolbox.
+            relative_migrated_tool_panel_config = os.path.join( app.config.root, migrated_tool_panel_config )
+            tool_configs = app.config.tool_configs
+            if relative_migrated_tool_panel_config in tool_configs:
+                tool_configs.remove( relative_migrated_tool_panel_config )
+            for installed_tool_panel_config in installed_tool_panel_configs:
+                tool_configs.append( installed_tool_panel_config )
+            app.toolbox = tools.ToolBox( tool_configs, app.config.tool_path, app )
         functional.test_toolbox.toolbox = app.toolbox
-        functional.test_toolbox.build_tests( testing_migrated_tools=True )
+        functional.test_toolbox.build_tests( testing_shed_tools=True )
         test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() )
         test_config.configure( sys.argv )
         result = run_tests( test_config )
@@ -352,9 +374,9 @@
         except:
             log.info( "Unable to remove temporary file: %s" % tmp_tool_panel_conf )
         try:
-            os.unlink( galaxy_migrated_tools_file )
+            os.unlink( galaxy_tool_shed_test_file )
         except:
-            log.info( "Unable to remove file: %s" % galaxy_migrated_tools_file )
+            log.info( "Unable to remove file: %s" % galaxy_tool_shed_test_file )
     else:
         functional.test_toolbox.toolbox = app.toolbox
         functional.test_toolbox.build_tests()
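As a quick orientation note on the functional_tests.py changes above: the new parse_tool_panel_config()/get_installed_repository_info() pair maps each installed tool's guid to the test-data directory of its repository clone and persists that mapping as JSON. The following standalone sketch (not part of this changeset) approximates that behavior; it assumes tool 'file' paths of the usual .../repos/<owner>/<name>/<changeset>/... form and substitutes xml.etree.ElementTree and json for Galaxy's util.parse_xml and to_json_string helpers.

    import json
    import os
    import xml.etree.ElementTree as ET

    def build_shed_tools_dict( config ):
        # Map each installed tool's guid to its repository's test-data directory.
        # Assumes <tool file="..."/> paths of the form .../repos/<owner>/<name>/<changeset>/...
        shed_tools_dict = {}
        root = ET.parse( config ).getroot()
        # Tools can sit at the top level of the panel config or inside <section> elements.
        for elem in root.findall( 'tool' ) + root.findall( 'section/tool' ):
            shed_path, sep, repo_path = elem.get( 'file' ).partition( '/repos/' )
            owner, name, changeset = repo_path.split( '/' )[ :3 ]
            installed_tool_path = os.path.join( shed_path, 'repos', owner, name, changeset )
            # The repository's test-data directory may be nested, so walk the clone for it.
            for walk_root, dirs, files in os.walk( installed_tool_path ):
                if 'test-data' in dirs:
                    shed_tools_dict[ elem.get( 'guid' ) ] = os.path.abspath( os.path.join( walk_root, 'test-data' ) )
                    break
        return shed_tools_dict

    if __name__ == '__main__':
        # Persist the mapping the same way the test runner does, so test cases can
        # read it back through the GALAXY_TOOL_SHED_TEST_FILE environment variable.
        open( 'shed_tools_dict', 'w' ).write( json.dumps( build_shed_tools_dict( 'shed_tool_conf.xml' ) ) )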
diff -r c510097f7018dbc177513a62c0ca46b4cace0c86 -r 59f0cdeb9afcd899727ba2d2b5b4f3d0e6383edd test/base/twilltestcase.py
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -36,14 +36,14 @@
         self.port = os.environ.get( 'GALAXY_TEST_PORT' )
         self.url = "http://%s:%s" % ( self.host, self.port )
         self.file_dir = os.environ.get( 'GALAXY_TEST_FILE_DIR', None )
-        self.migrated_tools_file = os.environ.get( 'GALAXY_MIGRATED_TOOLS_FILE', None )
-        if self.migrated_tools_file:
-            f = open( self.migrated_tools_file, 'r' )
+        self.tool_shed_test_file = os.environ.get( 'GALAXY_TOOL_SHED_TEST_FILE', None )
+        if self.tool_shed_test_file:
+            f = open( self.tool_shed_test_file, 'r' )
             text = f.read()
             f.close()
-            self.migrated_tools_dict = from_json_string( text )
+            self.shed_tools_dict = from_json_string( text )
         else:
-            self.migrated_tools_dict = {}
+            self.shed_tools_dict = {}
         self.keepOutdir = os.environ.get( 'GALAXY_TEST_SAVE', '' )
         if self.keepOutdir > '':
             try:
@@ -169,9 +169,9 @@
             if line_diff_count > lines_diff:
                 raise AssertionError, "Failed to find '%s' in history data. (lines_diff=%i):\n" % ( contains, lines_diff )
 
-    def get_filename( self, filename, migrated_tool_id=None ):
-        if migrated_tool_id and self.migrated_tools_dict:
-            file_dir = self.migrated_tools_dict[ migrated_tool_id ]
+    def get_filename( self, filename, shed_tool_id=None ):
+        if shed_tool_id and self.shed_tools_dict:
+            file_dir = self.shed_tools_dict[ shed_tool_id ]
             if not file_dir:
                 file_dir = self.file_dir
         else:
@@ -183,9 +183,9 @@
             filename = os.path.join( *path )
         file(filename, 'wt').write(buffer.getvalue())
 
-    def upload_file( self, filename, ftype='auto', dbkey='unspecified (?)', space_to_tab=False, metadata=None, composite_data=None, migrated_tool_id=None ):
+    def upload_file( self, filename, ftype='auto', dbkey='unspecified (?)', space_to_tab=False, metadata=None, composite_data=None, shed_tool_id=None ):
         """
-        Uploads a file. If migrated_tool_id has a value, we're testing tools migrated from the distribution to the tool shed,
+        Uploads a file. If shed_tool_id has a value, we're testing tools migrated from the distribution to the tool shed,
         so the tool-data directory of test data files is contained in the installed tool shed repository.
         """
         self.visit_url( "%s/tool_runner?tool_id=upload1" % self.url )
@@ -197,11 +197,11 @@
                 tc.fv( "1", "files_metadata|%s" % elem.get( 'name' ), elem.get( 'value' ) )
         if composite_data:
             for i, composite_file in enumerate( composite_data ):
-                filename = self.get_filename( composite_file.get( 'value' ), migrated_tool_id=migrated_tool_id )
+                filename = self.get_filename( composite_file.get( 'value' ), shed_tool_id=shed_tool_id )
                 tc.formfile( "1", "files_%i|file_data" % i, filename )
                 tc.fv( "1", "files_%i|space_to_tab" % i, composite_file.get( 'space_to_tab', False ) )
         else:
-            filename = self.get_filename( filename, migrated_tool_id=migrated_tool_id )
+            filename = self.get_filename( filename, shed_tool_id=shed_tool_id )
             tc.formfile( "1", "file_data", filename )
             tc.fv( "1", "space_to_tab", space_to_tab )
         tc.submit("runtool_btn")
@@ -653,7 +653,7 @@
         fd,temp_prefix = tempfile.mkstemp(prefix='tmp',suffix=suffix)
         return temp_prefix
 
-    def verify_dataset_correctness( self, filename, hid=None, wait=True, maxseconds=120, attributes=None, migrated_tool_id=None ):
+    def verify_dataset_correctness( self, filename, hid=None, wait=True, maxseconds=120, attributes=None, shed_tool_id=None ):
         """Verifies that the attributes and contents of a history item meet expectations"""
         if wait:
             self.wait( maxseconds=maxseconds ) #wait for job to finish
@@ -692,7 +692,7 @@
             errmsg += str( err )
             raise AssertionError( errmsg )
         if filename is not None:
-            local_name = self.get_filename( filename, migrated_tool_id=migrated_tool_id )
+            local_name = self.get_filename( filename, shed_tool_id=shed_tool_id )
             temp_name = self.makeTfname(fname = filename)
             file( temp_name, 'wb' ).write(data)
             if self.keepOutdir > '':
@@ -726,7 +726,7 @@
                     else:
                         raise Exception, 'Unimplemented Compare type: %s' % compare
                     if extra_files:
-                        self.verify_extra_files_content( extra_files, elem.get( 'id' ), migrated_tool_id=migrated_tool_id )
+                        self.verify_extra_files_content( extra_files, elem.get( 'id' ), shed_tool_id=shed_tool_id )
                 except AssertionError, err:
                     errmsg = 'History item %s different than expected, difference (using %s):\n' % ( hid, compare )
                     errmsg += str( err )
@@ -745,21 +745,21 @@
             os.remove( temp_name )
         return temp_local, temp_temp
 
-    def verify_extra_files_content( self, extra_files, hda_id, migrated_tool_id=None ):
+    def verify_extra_files_content( self, extra_files, hda_id, shed_tool_id=None ):
         files_list = []
         for extra_type, extra_value, extra_name, extra_attributes in extra_files:
             if extra_type == 'file':
                 files_list.append( ( extra_name, extra_value, extra_attributes ) )
             elif extra_type == 'directory':
-                for filename in os.listdir( self.get_filename( extra_value, migrated_tool_id=migrated_tool_id ) ):
+                for filename in os.listdir( self.get_filename( extra_value, shed_tool_id=shed_tool_id ) ):
                     files_list.append( ( filename, os.path.join( extra_value, filename ), extra_attributes ) )
             else:
                 raise ValueError, 'unknown extra_files type: %s' % extra_type
         for filename, filepath, attributes in files_list:
-            self.verify_composite_datatype_file_content( filepath, hda_id, base_name=filename, attributes=attributes, migrated_tool_id=migrated_tool_id )
+            self.verify_composite_datatype_file_content( filepath, hda_id, base_name=filename, attributes=attributes, shed_tool_id=shed_tool_id )
 
-    def verify_composite_datatype_file_content( self, file_name, hda_id, base_name=None, attributes=None, migrated_tool_id=None ):
-        local_name = self.get_filename( file_name, migrated_tool_id=migrated_tool_id )
+    def verify_composite_datatype_file_content( self, file_name, hda_id, base_name=None, attributes=None, shed_tool_id=None ):
+        local_name = self.get_filename( file_name, shed_tool_id=shed_tool_id )
         if base_name is None:
             base_name = os.path.split(file_name)[-1]
         temp_name = self.makeTfname(fname = base_name)
@@ -1015,8 +1015,8 @@
     def last_page( self ):
         return tc.browser.get_html()
 
-    def load_cookies( self, file, migrated_tool_id=None ):
-        filename = self.get_filename( file, migrated_tool_id=migrated_tool_id )
+    def load_cookies( self, file, shed_tool_id=None ):
+        filename = self.get_filename( file, shed_tool_id=shed_tool_id )
         tc.load_cookies(filename)
 
     def reload_page( self ):
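The twilltestcase.py changes above are renames: migrated_tool_id becomes shed_tool_id and migrated_tools_dict becomes shed_tools_dict, now loaded from GALAXY_TOOL_SHED_TEST_FILE instead of GALAXY_MIGRATED_TOOLS_FILE, while the lookup logic itself is unchanged. A rough standalone approximation of that lookup is sketched below (resolve_test_file is an illustrative name, not a Galaxy method), assuming the same environment variables.

    import json
    import os

    def resolve_test_file( filename, shed_tool_id=None ):
        # When shed_tool_id (the installed tool's guid) is present in the persisted
        # shed_tools_dict, test data comes from that repository's test-data directory;
        # otherwise it comes from the stock GALAXY_TEST_FILE_DIR.
        shed_tools_dict = {}
        shed_tools_file = os.environ.get( 'GALAXY_TOOL_SHED_TEST_FILE', None )
        if shed_tools_file:
            f = open( shed_tools_file, 'r' )
            shed_tools_dict = json.loads( f.read() )
            f.close()
        file_dir = shed_tools_dict.get( shed_tool_id ) or os.environ.get( 'GALAXY_TEST_FILE_DIR', 'test-data' )
        return os.path.abspath( os.path.join( file_dir, filename ) )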
diff -r c510097f7018dbc177513a62c0ca46b4cace0c86 -r 59f0cdeb9afcd899727ba2d2b5b4f3d0e6383edd test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -10,7 +10,7 @@
 class ToolTestCase( TwillTestCase ):
     """Abstract test case that runs tests based on a `galaxy.tools.test.ToolTest`"""
 
-    def do_it( self, testdef, migrated_tool_id=None ):
+    def do_it( self, testdef, shed_tool_id=None ):
         # If the test generation had an error, raise
         if testdef.error:
             if testdef.exception:
@@ -40,7 +40,7 @@
                                   dbkey=extra.get( 'dbkey', 'hg17' ),
                                   metadata=metadata,
                                   composite_data=composite_data,
-                                  migrated_tool_id=migrated_tool_id )
+                                  shed_tool_id=shed_tool_id )
                 print "Uploaded file: ", fname, ", ftype: ", extra.get( 'ftype', 'auto' ), ", extra: ", extra
                 #Post upload attribute editing
                 edit_attributes = extra.get( 'edit_attributes', [] )
@@ -99,7 +99,7 @@
                 elem_hid = elem.get( 'hid' )
                 elem_index += 1
                 try:
-                    self.verify_dataset_correctness( outfile, hid=elem_hid, maxseconds=testdef.maxseconds, attributes=attributes, migrated_tool_id=migrated_tool_id )
+                    self.verify_dataset_correctness( outfile, hid=elem_hid, maxseconds=testdef.maxseconds, attributes=attributes, shed_tool_id=shed_tool_id )
                 except Exception, e:
                     print >>sys.stderr, self.get_job_stdout( elem.get( 'id' ), format=True )
                     print >>sys.stderr, self.get_job_stderr( elem.get( 'id' ), format=True )
@@ -143,7 +143,7 @@
             expanded_inputs[value.name] = declared_inputs[value.name]
     return expanded_inputs
 
-def build_tests( testing_migrated_tools=False ):
+def build_tests( testing_shed_tools=False ):
     """
     If the module level variable `toolbox` is set, generate `ToolTestCase`
     classes for all of its tests and put them into this modules globals() so
@@ -166,12 +166,12 @@
         baseclasses = ( ToolTestCase, )
         namespace = dict()
         for j, testdef in enumerate( tool.tests ):
-            def make_test_method( td, migrated_tool_id=None ):
+            def make_test_method( td, shed_tool_id=None ):
                 def test_tool( self ):
-                    self.do_it( td, migrated_tool_id=migrated_tool_id )
+                    self.do_it( td, shed_tool_id=shed_tool_id )
                 return test_tool
-            if testing_migrated_tools:
-                test_method = make_test_method( testdef, migrated_tool_id=tool.id )
+            if testing_shed_tools:
+                test_method = make_test_method( testdef, shed_tool_id=tool.id )
             else:
                 test_method = make_test_method( testdef )
             test_method.__doc__ = "%s ( %s ) > %s" % ( tool.name, tool.id, testdef.name )

Repository URL: https://bitbucket.org/galaxy/galaxy-central/
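For completeness, here is a simplified sketch of the test-generation pattern that build_tests( testing_shed_tools=True ) follows in test_toolbox.py: one TestForTool_* class is generated per tool, and when shed tools are being tested the tool id (its guid) is bound into every generated test method as shed_tool_id so that test data resolves inside the installed repository. The *Sketch names below are illustrative stand-ins, not Galaxy code, and tools is assumed to be a dict of tool id to test definitions.

    import unittest

    class ToolTestCaseSketch( unittest.TestCase ):
        # Stand-in for functional.test_toolbox.ToolTestCase; the real do_it() uploads
        # the test inputs and verifies the outputs through the twill test case methods.
        def do_it( self, testdef, shed_tool_id=None ):
            self.last_run = ( testdef, shed_tool_id )

    def build_tests_sketch( tools, testing_shed_tools=False ):
        # Generate one TestForTool_* class per tool, mirroring build_tests().
        generated = {}
        for tool_id, testdefs in tools.items():
            namespace = dict()
            for j, testdef in enumerate( testdefs ):
                def make_test_method( td, shed_tool_id=None ):
                    # Passing td and shed_tool_id in here keeps each generated method
                    # bound to its own test definition.
                    def test_tool( self ):
                        self.do_it( td, shed_tool_id=shed_tool_id )
                    return test_tool
                if testing_shed_tools:
                    test_method = make_test_method( testdef, shed_tool_id=tool_id )
                else:
                    test_method = make_test_method( testdef )
                namespace[ 'test_tool_%06d' % j ] = test_method
            name = "TestForTool_" + tool_id
            generated[ name ] = type( name, ( ToolTestCaseSketch, ), namespace )
        return generated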