commit/galaxy-central: 4 new changesets
4 new commits in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/commits/8e00d2fd674a/
changeset:   8e00d2fd674a
user:        jmchilton
date:        2013-02-10 17:16:26
summary:     Break up the big get_path_paste_uploaded_datasets function in library_common.py into smaller, more extensible pieces. This is useful for implementing multiple-file dataset uploads downstream, but it is also a good refactoring on its own.
affected #:  1 file

diff -r 506484344db3a370f8ae24096041d38557d1967e -r 8e00d2fd674acafb36e7118afaaf7f739235be2e lib/galaxy/webapps/galaxy/controllers/library_common.py
--- a/lib/galaxy/webapps/galaxy/controllers/library_common.py
+++ b/lib/galaxy/webapps/galaxy/controllers/library_common.py
@@ -1075,6 +1075,7 @@
         return output
     def make_library_uploaded_dataset( self, trans, cntrller, params, name, path, type, library_bunch, in_folder=None ):
         link_data_only = params.get( 'link_data_only', 'copy_files' )
+        file_type = params.file_type
         library_bunch.replace_dataset = None # not valid for these types of upload
         uploaded_dataset = util.bunch.Bunch()
         new_name = name
@@ -1089,7 +1090,7 @@
         uploaded_dataset.path = path
         uploaded_dataset.type = type
         uploaded_dataset.ext = None
-        uploaded_dataset.file_type = params.file_type
+        uploaded_dataset.file_type = file_type
         uploaded_dataset.dbkey = params.dbkey
         uploaded_dataset.space_to_tab = params.space_to_tab
         if in_folder:
@@ -1145,44 +1146,57 @@
             uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, file, 'server_dir', library_bunch ) )
         return uploaded_datasets, 200, None
     def get_path_paste_uploaded_datasets( self, trans, cntrller, params, library_bunch, response_code, message ):
+        preserve_dirs = util.string_as_bool( params.get( 'preserve_dirs', False ) )
+        uploaded_datasets = []
+        (files_and_folders, _response_code, _message) = self._get_path_files_and_folders(params, preserve_dirs)
+        if _response_code:
+            return (uploaded_datasets, _response_code, _message)
+        for (path, name, folder) in files_and_folders:
+            uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, path, 'path_paste', library_bunch, folder ) )
+        return uploaded_datasets, 200, None
+
+    def _get_path_files_and_folders( self, params, preserve_dirs ):
+        problem_response = self._check_path_paste_params( params )
+        if problem_response:
+            return problem_response
+        files_and_folders = []
+        for (line, path) in self._paths_list( params ):
+            line_files_and_folders = self._get_single_path_files_and_folders( line, path, preserve_dirs )
+            files_and_folders.extend( line_files_and_folders )
+        return files_and_folders, None, None
+
+    def _get_single_path_files_and_folders(self, line, path, preserve_dirs):
+        files_and_folders = []
+        if os.path.isfile( path ):
+            name = os.path.basename( path )
+            files_and_folders.append((path, name, None))
+        for basedir, dirs, files in os.walk( line ):
+            for file in files:
+                file_path = os.path.abspath( os.path.join( basedir, file ) )
+                if preserve_dirs:
+                    in_folder = os.path.dirname( file_path.replace( path, '', 1 ).lstrip( '/' ) )
+                else:
+                    in_folder = None
+                files_and_folders.append((file_path, file, in_folder))
+        return files_and_folders
+    def _paths_list(self, params):
+        return [ (l.strip(), os.path.abspath(l.strip())) for l in params.filesystem_paths.splitlines() if l.strip() ]
+
+    def _check_path_paste_params(self, params):
         if params.get( 'filesystem_paths', '' ) == '':
             message = "No paths entered in the upload form"
             response_code = 400
             return None, response_code, message
-        preserve_dirs = util.string_as_bool( params.get( 'preserve_dirs', False ) )
-        # locate files
         bad_paths = []
-        uploaded_datasets = []
-        for line in [ l.strip() for l in params.filesystem_paths.splitlines() if l.strip() ]:
-            path = os.path.abspath( line )
+        for (_, path) in self._paths_list( params ):
             if not os.path.exists( path ):
                 bad_paths.append( path )
-                continue
-            # don't bother processing if we're just going to return an error
-            if not bad_paths:
-                if os.path.isfile( path ):
-                    name = os.path.basename( path )
-                    uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, path, 'path_paste', library_bunch ) )
-                for basedir, dirs, files in os.walk( line ):
-                    for file in files:
-                        file_path = os.path.abspath( os.path.join( basedir, file ) )
-                        if preserve_dirs:
-                            in_folder = os.path.dirname( file_path.replace( path, '', 1 ).lstrip( '/' ) )
-                        else:
-                            in_folder = None
-                        uploaded_datasets.append( self.make_library_uploaded_dataset( trans,
-                                                                                      cntrller,
-                                                                                      params,
-                                                                                      file,
-                                                                                      file_path,
-                                                                                      'path_paste',
-                                                                                      library_bunch,
-                                                                                      in_folder ) )
         if bad_paths:
             message = "Invalid paths:<br><ul><li>%s</li></ul>" % "</li><li>".join( bad_paths )
             response_code = 400
             return None, response_code, message
-        return uploaded_datasets, 200, None
+        return None
+
     @web.expose
     def add_history_datasets_to_library( self, trans, cntrller, library_id, folder_id, hda_ids='', **kwd ):
         params = util.Params( kwd )
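To see how the extracted pieces compose, here is a minimal standalone sketch of the same control flow (function names are shortened versions of the commit's private methods; the stub inputs and print calls are illustrative only, not part of the commit):

    import os

    def paths_list(filesystem_paths):
        # One (line, absolute_path) pair per non-blank pasted line,
        # mirroring _paths_list above.
        return [(l.strip(), os.path.abspath(l.strip()))
                for l in filesystem_paths.splitlines() if l.strip()]

    def single_path_files_and_folders(line, path, preserve_dirs):
        # Expand one pasted path into (file_path, name, in_folder) triples,
        # mirroring _get_single_path_files_and_folders above.
        results = []
        if os.path.isfile(path):
            results.append((path, os.path.basename(path), None))
        for basedir, dirs, files in os.walk(line):
            for name in files:
                file_path = os.path.abspath(os.path.join(basedir, name))
                if preserve_dirs:
                    in_folder = os.path.dirname(file_path.replace(path, '', 1).lstrip('/'))
                else:
                    in_folder = None
                results.append((file_path, name, in_folder))
        return results

    # Each pasted line yields zero or more upload triples; the controller
    # then turns each triple into an uploaded dataset.
    for line, path in paths_list('/tmp/data\n/tmp/more_data'):
        for triple in single_path_files_and_folders(line, path, preserve_dirs=True):
            print(triple)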
https://bitbucket.org/galaxy/galaxy-central/commits/c85bf30a5c35/
changeset:   c85bf30a5c35
user:        jmchilton
date:        2013-02-10 17:16:27
summary:     Break up the big get_server_dir_uploaded_datasets function in library_common.py into smaller, more extensible pieces. This is useful for implementing multiple-file dataset uploads downstream, but it is also a good refactoring on its own.
affected #:  1 file

diff -r 8e00d2fd674acafb36e7118afaaf7f739235be2e -r c85bf30a5c35a7fab80d9499b0852eeb2e433039 lib/galaxy/webapps/galaxy/controllers/library_common.py
--- a/lib/galaxy/webapps/galaxy/controllers/library_common.py
+++ b/lib/galaxy/webapps/galaxy/controllers/library_common.py
@@ -1106,6 +1106,16 @@
         trans.sa_session.flush()
         return uploaded_dataset
     def get_server_dir_uploaded_datasets( self, trans, cntrller, params, full_dir, import_dir_desc, library_bunch, response_code, message ):
+        dir_response = self._get_server_dir_files(params, full_dir, import_dir_desc)
+        files = dir_response[0]
+        if not files:
+            return dir_response
+        uploaded_datasets = []
+        for file in files:
+            name = os.path.basename( file )
+            uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, file, 'server_dir', library_bunch ) )
+        return uploaded_datasets, 200, None
+    def _get_server_dir_files( self, params, full_dir, import_dir_desc ):
         files = []
         try:
             for entry in os.listdir( full_dir ):
@@ -1140,11 +1150,7 @@
             message = "The directory '%s' contains no valid files" % full_dir
             response_code = 400
             return None, response_code, message
-        uploaded_datasets = []
-        for file in files:
-            name = os.path.basename( file )
-            uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, file, 'server_dir', library_bunch ) )
-        return uploaded_datasets, 200, None
+        return files, None, None
     def get_path_paste_uploaded_datasets( self, trans, cntrller, params, library_bunch, response_code, message ):
         preserve_dirs = util.string_as_bool( params.get( 'preserve_dirs', False ) )
         uploaded_datasets = []
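Both refactorings preserve the controllers' three-part return convention, (result, response_code, message), in which a set response code signals an error that the caller should hand back unchanged. A minimal sketch of that convention (standalone; the function and inputs are hypothetical, not from the commit):

    def check_params(filesystem_paths):
        # On error, return the (None, code, message) triple; on success,
        # return None, as _check_path_paste_params does above.
        if not filesystem_paths.strip():
            return None, 400, 'No paths entered in the upload form'
        return None

    problem = check_params('')
    if problem:
        _, response_code, message = problem
        print('rejected (%d): %s' % (response_code, message))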
https://bitbucket.org/galaxy/galaxy-central/commits/e1e8ddf3401a/
changeset:   e1e8ddf3401a
user:        dannon
date:        2013-03-08 20:32:34
summary:     Merge pull request 108, https://bitbucket.org/galaxy/galaxy-central/pull-request/108/collaborate-on-...
affected #:  1 file

diff -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b -r e1e8ddf3401ab1e028fbebd9fa8f4307b8d3ae03 lib/galaxy/webapps/galaxy/controllers/library_common.py
--- a/lib/galaxy/webapps/galaxy/controllers/library_common.py
+++ b/lib/galaxy/webapps/galaxy/controllers/library_common.py
@@ -1077,6 +1077,7 @@
     def make_library_uploaded_dataset( self, trans, cntrller, params, name, path, type, library_bunch, in_folder=None ):
         link_data_only = params.get( 'link_data_only', 'copy_files' )
         uuid_str = params.get( 'uuid', None )
+        file_type = params.file_type
         library_bunch.replace_dataset = None # not valid for these types of upload
         uploaded_dataset = util.bunch.Bunch()
         new_name = name
@@ -1091,7 +1092,7 @@
         uploaded_dataset.path = path
         uploaded_dataset.type = type
         uploaded_dataset.ext = None
-        uploaded_dataset.file_type = params.file_type
+        uploaded_dataset.file_type = file_type
         uploaded_dataset.dbkey = params.dbkey
         uploaded_dataset.space_to_tab = params.space_to_tab
         if in_folder:
@@ -1108,6 +1109,16 @@
         trans.sa_session.flush()
         return uploaded_dataset
     def get_server_dir_uploaded_datasets( self, trans, cntrller, params, full_dir, import_dir_desc, library_bunch, response_code, message ):
+        dir_response = self._get_server_dir_files(params, full_dir, import_dir_desc)
+        files = dir_response[0]
+        if not files:
+            return dir_response
+        uploaded_datasets = []
+        for file in files:
+            name = os.path.basename( file )
+            uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, file, 'server_dir', library_bunch ) )
+        return uploaded_datasets, 200, None
+    def _get_server_dir_files( self, params, full_dir, import_dir_desc ):
         files = []
         try:
             for entry in os.listdir( full_dir ):
@@ -1142,50 +1153,59 @@
             message = "The directory '%s' contains no valid files" % full_dir
             response_code = 400
             return None, response_code, message
+        return files, None, None
+    def get_path_paste_uploaded_datasets( self, trans, cntrller, params, library_bunch, response_code, message ):
+        preserve_dirs = util.string_as_bool( params.get( 'preserve_dirs', False ) )
         uploaded_datasets = []
-        for file in files:
-            name = os.path.basename( file )
-            uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, file, 'server_dir', library_bunch ) )
+        (files_and_folders, _response_code, _message) = self._get_path_files_and_folders(params, preserve_dirs)
+        if _response_code:
+            return (uploaded_datasets, _response_code, _message)
+        for (path, name, folder) in files_and_folders:
+            uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, path, 'path_paste', library_bunch, folder ) )
         return uploaded_datasets, 200, None
-    def get_path_paste_uploaded_datasets( self, trans, cntrller, params, library_bunch, response_code, message ):
+
+    def _get_path_files_and_folders( self, params, preserve_dirs ):
+        problem_response = self._check_path_paste_params( params )
+        if problem_response:
+            return problem_response
+        files_and_folders = []
+        for (line, path) in self._paths_list( params ):
+            line_files_and_folders = self._get_single_path_files_and_folders( line, path, preserve_dirs )
+            files_and_folders.extend( line_files_and_folders )
+        return files_and_folders, None, None
+
+    def _get_single_path_files_and_folders(self, line, path, preserve_dirs):
+        files_and_folders = []
+        if os.path.isfile( path ):
+            name = os.path.basename( path )
+            files_and_folders.append((path, name, None))
+        for basedir, dirs, files in os.walk( line ):
+            for file in files:
+                file_path = os.path.abspath( os.path.join( basedir, file ) )
+                if preserve_dirs:
+                    in_folder = os.path.dirname( file_path.replace( path, '', 1 ).lstrip( '/' ) )
+                else:
+                    in_folder = None
+                files_and_folders.append((file_path, file, in_folder))
+        return files_and_folders
+    def _paths_list(self, params):
+        return [ (l.strip(), os.path.abspath(l.strip())) for l in params.filesystem_paths.splitlines() if l.strip() ]
+
+    def _check_path_paste_params(self, params):
         if params.get( 'filesystem_paths', '' ) == '':
             message = "No paths entered in the upload form"
             response_code = 400
             return None, response_code, message
-        preserve_dirs = util.string_as_bool( params.get( 'preserve_dirs', False ) )
-        # locate files
         bad_paths = []
-        uploaded_datasets = []
-        for line in [ l.strip() for l in params.filesystem_paths.splitlines() if l.strip() ]:
-            path = os.path.abspath( line )
+        for (_, path) in self._paths_list( params ):
             if not os.path.exists( path ):
                 bad_paths.append( path )
-                continue
-            # don't bother processing if we're just going to return an error
-            if not bad_paths:
-                if os.path.isfile( path ):
-                    name = os.path.basename( path )
-                    uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, path, 'path_paste', library_bunch ) )
-                for basedir, dirs, files in os.walk( line ):
-                    for file in files:
-                        file_path = os.path.abspath( os.path.join( basedir, file ) )
-                        if preserve_dirs:
-                            in_folder = os.path.dirname( file_path.replace( path, '', 1 ).lstrip( '/' ) )
-                        else:
-                            in_folder = None
-                        uploaded_datasets.append( self.make_library_uploaded_dataset( trans,
-                                                                                      cntrller,
-                                                                                      params,
-                                                                                      file,
-                                                                                      file_path,
-                                                                                      'path_paste',
-                                                                                      library_bunch,
-                                                                                      in_folder ) )
         if bad_paths:
             message = "Invalid paths:<br><ul><li>%s</li></ul>" % "</li><li>".join( bad_paths )
             response_code = 400
             return None, response_code, message
-        return uploaded_datasets, 200, None
+        return None
+
     @web.expose
     def add_history_datasets_to_library( self, trans, cntrller, library_id, folder_id, hda_ids='', **kwd ):
         params = util.Params( kwd )
https://bitbucket.org/galaxy/galaxy-central/commits/c937f80188d8/
changeset:   c937f80188d8
user:        dannon
date:        2013-03-08 20:41:10
summary:     Merge
affected #:  3 files

diff -r e1e8ddf3401ab1e028fbebd9fa8f4307b8d3ae03 -r c937f80188d8026f2074f8a897e2ff2972d8da6d lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -38,7 +38,16 @@
     '''Script that checks repositories to see if the tools contained within them have functional tests defined.'''
     parser = OptionParser()
     parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
-    parser.add_option( "-v", "--verbose", action="store_true", dest="verbose", help="verbose mode, print the name, owner, and changeset revision of each repository", default=False )
+    parser.add_option(
+        "-v", "--verbose",
+        action="count", dest="verbosity",
+        default=1,
+        help="Control the amount of detail in the log output.")
+    parser.add_option(
+        "--verbosity", action="store", dest="verbosity",
+        metavar='VERBOSITY',
+        type="int", help="Control the amount of detail in the log output. --verbosity=1 is "
+        "the same as -v")
     ( options, args ) = parser.parse_args()
     ini_file = args[0]
     config_parser = ConfigParser.ConfigParser( {'here':os.getcwd()} )
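The replacement options make -v repeatable (each occurrence raises the level by one) while --verbosity stores an explicit integer into the same destination. A standalone optparse sketch of that pattern (not the Galaxy script itself; the parse_args inputs are illustrative):

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option('-v', '--verbose', action='count', dest='verbosity', default=1,
                      help='Each -v raises the verbosity level by one.')
    parser.add_option('--verbosity', action='store', dest='verbosity', type='int',
                      metavar='VERBOSITY', help='Set an explicit verbosity level.')

    options, _ = parser.parse_args([])                    # verbosity == 1 (the default)
    options, _ = parser.parse_args(['-v', '-v'])          # verbosity == 3 (default 1, incremented twice)
    options, _ = parser.parse_args(['--verbosity', '2'])  # verbosity == 2 (stored directly)
    print(options.verbosity)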
@@ -56,12 +65,12 @@

     if options.info_only:
         print "# Displaying info only ( --info_only )"
-    if options.verbose:
-        print "# Displaying extra information ( --verbose )"
+    if options.verbosity:
+        print "# Displaying extra information ( --verbosity = %d )" % options.verbosity

-    check_and_flag_repositories( app, info_only=options.info_only, verbose=options.verbose )
+    check_and_flag_repositories( app, info_only=options.info_only, verbosity=options.verbosity )

-def check_and_flag_repositories( app, info_only=False, verbose=False ):
+def check_and_flag_repositories( app, info_only=False, verbosity=1 ):
     '''
     This method will iterate through all records in the repository_metadata table, checking each one for tool metadata,
     then checking the tool metadata for tests.
@@ -97,10 +106,15 @@
     and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script
     will also mark the revision not to be tested.
-    If any error is encountered, the script will update the repository_metadata.tool_test_errors attribute with the following structure:
+    If any error is encountered, the script will update the repository_metadata.tool_test_errors attribute following this structure:
     {
         "test_environment":
             {
+                "galaxy_revision": "9001:abcd1234",
+                "galaxy_database_version": "114",
+                "tool_shed_revision": "9001:abcd1234",
+                "tool_shed_mercurial_version": "2.3.1",
+                "tool_shed_database_version": "17",
                 "python_version": "2.7.2",
                 "architecture": "x86_64",
                 "system": "Darwin 12.2.0"
@@ -108,8 +122,28 @@
         "test_errors":
             [
                 {
-                    "test_id": "Something that will easily identify what the problem is",
-                    "stdout": "The output of the test, or a more detailed description of what was tested and why it failed."
+                    "test_id": "The test ID, generated by twill",
+                    "tool_id": "The tool ID that was tested",
+                    "tool_version": "The tool version that was tested",
+                    "stderr": "The output of the test, or a more detailed description of what was tested and what the error was."
+                    "traceback": "The traceback, if any."
+                },
+            ]
+        "passed_tests":
+            [
+                {
+                    "test_id": "The test ID, generated by twill",
+                    "tool_id": "The tool ID that was tested",
+                    "tool_version": "The tool version that was tested",
+                },
+            ]
+        "invalid_tests":
+            [
+                {
+                    "tool_id": "The tool ID that does not have functional tests defined.",
+                    "tool_version": "The version of the tool."
+                    "tool_guid": "The guid of the tool."
+                    "reason_test_is_invalid": "A short explanation of what is invalid.
+                },
+            ]
     }
@@ -129,19 +163,24 @@
     repository_status[ 'test_environment' ][ 'tool_shed_database_version' ] = get_database_version( app )
     repository_status[ 'test_environment' ][ 'tool_shed_mercurial_version' ] = __version__.version
     repository_status[ 'test_environment' ][ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
-    repository_status[ 'test_errors' ] = []
+    repository_status[ 'invalid_tests' ] = []
     metadata_records_to_check = app.sa_session.query( app.model.RepositoryMetadata ) \
                                               .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
                                                              app.model.RepositoryMetadata.table.c.do_not_test == False,
                                                              app.model.RepositoryMetadata.table.c.tools_functionally_correct == False ) ) \
                                               .all()
     for metadata_record in metadata_records_to_check:
-        repository_status[ 'test_errors' ] = []
+        if metadata_record.tool_test_errors:
+            repository_status = metadata_record.tool_test_errors
         name = metadata_record.repository.name
         owner = metadata_record.repository.user.username
         changeset_revision = str( metadata_record.changeset_revision )
+        repository_status[ 'invalid_tests' ] = []
         if metadata_record.repository.id not in checked_repository_ids:
             checked_repository_ids.append( metadata_record.repository.id )
+        if verbosity >= 1:
+            print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
+                ( changeset_revision, name, owner )
         # If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
         # only repositories that contain tools.
         if 'tools' not in metadata_record.metadata:
@@ -152,19 +191,26 @@
         for tool_metadata in metadata_record.metadata[ 'tools' ]:
             tool_count += 1
             tool_id = tool_metadata[ 'id' ]
-            if verbose:
-                print '# Checking for functional tests in changeset revision %s of %s, tool ID %s.' % \
-                    ( changeset_revision, name, tool_id )
+            tool_version = tool_metadata[ 'version' ]
+            tool_guid = tool_metadata[ 'guid' ]
+            if verbosity >= 2:
+                print "# Checking tool ID '%s' in changeset revision %s of %s." % \
+                    ( tool_id, changeset_revision, name )
             # If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
             # not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
             # automated functional test framework produces.
             if 'tests' not in tool_metadata or not tool_metadata[ 'tests' ]:
-                if verbose:
+                if verbosity >= 2:
                     print '# No functional tests defined for %s.' % tool_id
                 # The repository_metadata.tool_test_errors attribute should always have the following structure:
                 # {
                 #     "test_environment":
                 #         {
+                #             "galaxy_revision": "9001:abcd1234",
+                #             "galaxy_database_version": "114",
+                #             "tool_shed_revision": "9001:abcd1234",
+                #             "tool_shed_mercurial_version": "2.3.1",
+                #             "tool_shed_database_version": "17",
                 #             "python_version": "2.7.2",
                 #             "architecture": "x86_64",
                 #             "system": "Darwin 12.2.0"
@@ -172,22 +218,44 @@
                 #     "test_errors":
                 #         [
                 #             {
-                #                 "test_id": "Something that will easily identify what the problem is",
-                #                 "stderr": "The output of the test, or a more detailed description of what was tested and why it failed."
+                #                 "test_id": "The test ID, generated by twill",
+                #                 "tool_id": "The tool ID that was tested",
+                #                 "tool_version": "The tool version that was tested",
+                #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+                #                 "traceback": "The captured traceback."
+                #             },
+                #         ]
+                #     "passed_tests":
+                #         [
+                #             {
+                #                 "test_id": "The test ID, generated by twill",
+                #                 "tool_id": "The tool ID that was tested",
+                #                 "tool_version": "The tool version that was tested",
+                #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+                #             },
+                #         ]
+                #     "invalid_tests":
+                #         [
+                #             {
+                #                 "tool_id": "The tool ID that does not have functional tests defined.",
+                #                 "tool_version": "The version of the tool."
+                #                 "tool_guid": "The guid of the tool."
+                #                 "reason_test_is_invalid": "A short explanation of what is invalid.
                 #             },
                 #         ]
                 # }
-                # Optionally, "stdout" and "traceback" may be included in a test_errors dict, if they are relevant.
-                test_id = 'Functional tests for %s' % tool_id
-                test_errors = dict( stderr='No functional tests defined for tool %s in changeset revision %s of repository %s owned by %s.' % \
-                                        ( tool_id, changeset_revision, name, owner ) )
-                repository_status[ 'test_errors' ].append( test_errors )
-                repository_status[ 'status' ] = 'failed'
+                # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
+                # than the list relevant to what it is testing.
+                test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid )
+                repository_status[ 'invalid_tests' ].append( test_errors )
                 no_tests += 1
             else:
+                if verbosity >= 2:
+                    print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
+                        ( tool_id, changeset_revision, name )
                 has_tests += 1
-        if verbose:
-            if not repository_status[ 'test_errors' ]:
+        if verbosity >= 1:
+            if not repository_status[ 'invalid_tests' ]:
                 print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
             else:
                 print '# Some tools missing functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
@@ -211,12 +279,17 @@
         if os.path.exists( work_dir ):
             shutil.rmtree( work_dir )
         if not has_test_data:
-            if verbose:
+            if verbosity >= 1:
                 print '# Test data missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
             # The repository_metadata.tool_test_errors attribute should always have the following structure:
             # {
             #     "test_environment":
             #         {
+            #             "galaxy_revision": "9001:abcd1234",
+            #             "galaxy_database_version": "114",
+            #             "tool_shed_revision": "9001:abcd1234",
+            #             "tool_shed_mercurial_version": "2.3.1",
+            #             "tool_shed_database_version": "17",
             #             "python_version": "2.7.2",
             #             "architecture": "x86_64",
             #             "system": "Darwin 12.2.0"
@@ -224,23 +297,44 @@
             #     "test_errors":
             #         [
             #             {
-            #                 "test_id": "Something that will easily identify what the problem is",
-            #                 "stderr": "The output of the test, or a more detailed description of what was tested and why it failed."
+            #                 "test_id": "The test ID, generated by twill",
+            #                 "tool_id": "The tool ID that was tested",
+            #                 "tool_version": "The tool version that was tested",
+            #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+            #                 "traceback": "The captured traceback."
+            #             },
+            #         ]
+            #     "passed_tests":
+            #         [
+            #             {
+            #                 "test_id": "The test ID, generated by twill",
+            #                 "tool_id": "The tool ID that was tested",
+            #                 "tool_version": "The tool version that was tested",
+            #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+            #             },
+            #         ]
+            #     "invalid_tests":
+            #         [
+            #             {
+            #                 "tool_id": "The tool ID that does not have functional tests defined.",
+            #                 "tool_version": "The version of the tool."
+            #                 "tool_guid": "The guid of the tool."
+            #                 "reason_test_is_invalid": "A short explanation of what is invalid.
             #             },
             #         ]
             # }
-            # Optionally, "stdout" and "traceback" may be included in a test_errors dict, if they are relevant.
-            test_id = 'Find functional test data for %s' % metadata_record.repository.name
-            test_errors = dict( stderr='No test data found for changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
-            repository_status[ 'test_errors' ].append( test_errors )
-            repository_status[ 'status' ] = 'failed'
+            # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
+            # than the list relevant to what it is testing.
+            test_errors = dict( tool_id=None, tool_version=None, tool_guid=None,
+                                reason_test_is_invalid="Repository %s is missing a test-data directory." % name )
+            repository_status[ 'invalid_tests' ].append( test_errors )
         else:
-            if verbose:
+            if verbosity >= 1:
                 print '# Test data found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
         if not info_only:
             # If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
             # on which this script was run.
-            if repository_status[ 'test_errors' ]:
+            if repository_status[ 'invalid_tests' ]:
                 # If functional test definitions or test data are missing, set do_not_test = True if and only if:
                 # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision. In this case,
                 #    the revision will never be updated with correct data, and re-testing it would be redundant.
@@ -250,8 +344,6 @@
                 if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ):
                     metadata_record.do_not_test = True
                     metadata_record.tools_functionally_correct = False
-                else:
-                    repository_status[ 'status' ] = 'passed'
                 metadata_record.tool_test_errors = repository_status
                 metadata_record.time_last_tested = datetime.utcnow()
                 app.sa_session.add( metadata_record )

diff -r e1e8ddf3401ab1e028fbebd9fa8f4307b8d3ae03 -r c937f80188d8026f2074f8a897e2ff2972d8da6d test/base/util.py
--- a/test/base/util.py
+++ b/test/base/util.py
@@ -74,8 +74,8 @@
         return None, repository_name, changeset_revision
     return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision

-def get_test_environment():
-    rval = {}
+def get_test_environment( current_environment={} ):
+    rval = current_environment
     rval[ 'python_version' ] = platform.python_version()
     rval[ 'architecture' ] = platform.machine()
     os, hostname, os_version, uname, arch, processor = platform.uname()
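One Python detail worth flagging in the get_test_environment change above: a mutable default argument such as current_environment={} is created once, at function definition time, and shared by every call that omits the argument. A standalone illustration of the behavior and of the usual None-sentinel idiom (not Galaxy code):

    def get_env(current={}):
        # The same dict object is reused by every call that relies on the default.
        current['python_version'] = '2.7.2'
        return current

    print(get_env() is get_env())  # True: both calls mutate one shared dict

    def get_env_safe(current=None):
        # Conventional idiom: use None as the sentinel and build a fresh dict per call.
        if current is None:
            current = {}
        current['python_version'] = '2.7.2'
        return current

    print(get_env_safe() is get_env_safe())  # False: independent dicts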
diff -r e1e8ddf3401ab1e028fbebd9fa8f4307b8d3ae03 -r c937f80188d8026f2074f8a897e2ff2972d8da6d test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -60,6 +60,7 @@
 import nose.config
 import nose.loader
 import nose.plugins.manager
+from nose.plugins import Plugin

 from base.util import parse_tool_panel_config, get_database_version, get_test_environment, get_repository_current_revision

@@ -115,9 +116,36 @@
 else:
     galaxy_encode_secret = os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ]

+
+class ReportResults( Plugin ):
+    '''Simple Nose plugin to record the IDs of all tests run, regardless of success.'''
+    name = "reportresults"
+    passed = []
+
+    def options( self, parser, env=os.environ ):
+        super( ReportResults, self ).options( parser, env=env )
+
+    def configure(self, options, conf):
+        super( ReportResults, self ).configure( options, conf )
+        if not self.enabled:
+            return
+
+    def addSuccess( self, test ):
+        '''Only record test IDs that correspond to tool functional tests.'''
+        if 'TestForTool' in test.id():
+            test_id = test.id()
+            # Rearrange the test ID to match the format that is produced in test_results.failures
+            test_id_parts = test_id.split( '.' )
+            fixed_test_id = '%s (%s)' % ( test_id_parts[ -1 ], '.'.join( test_id_parts[ :-1 ] ) )
+            self.passed.append( fixed_test_id )
+
+    def getTestStatus( self ):
+        return self.passed
+
 def execute_uninstall_method( repository_dict ):
     # Delete any configured tool functional tests from the test_toolbox.__dict__, otherwise nose will find them
-    # and try to re-run the tests after uninstalling the repository.
+    # and try to re-run the tests after uninstalling the repository, which will cause false failure reports,
+    # since the test data has been deleted from disk by now.
     tests_to_delete = []
     for key in test_toolbox.__dict__:
         if key.startswith( 'TestForTool_' ):
@@ -132,7 +160,7 @@
     test_config.configure( sys.argv )
     # Run the uninstall method. This method uses the Galaxy web interface to uninstall the previously installed
     # repository and delete it from disk.
-    result = run_tests( test_config )
+    result, _ = run_tests( test_config )
     success = result.wasSuccessful()
     return success
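The addSuccess hook above rearranges nose's dotted test ID into the "method (module.class)" form that test_results.failures uses, so passed and failed tests can be reported in the same shape. A worked example of just that rearrangement (the test ID value is hypothetical):

    test_id = 'functional.test_toolbox.TestForTool_example.test_tool_000000'
    test_id_parts = test_id.split('.')
    # Move the method name to the front and parenthesize the class path.
    fixed_test_id = '%s (%s)' % (test_id_parts[-1], '.'.join(test_id_parts[:-1]))
    print(fixed_test_id)  # test_tool_000000 (functional.test_toolbox.TestForTool_example)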
@@ -191,6 +219,23 @@
     else:
         raise AssertonError( 'Unknown format %s.' % format )

+def get_tool_info_from_test_id( test_id ):
+    '''
+    Test IDs come in the form test_tool_number (functional.test_toolbox.TestForTool_toolshed_url/repos/owner/repository_name/tool_id/tool_version)
+    We want the tool ID and tool version.
+    '''
+    parts = test_id.replace( ')', '' ).split( '/' )
+    tool_version = parts[ -1 ]
+    tool_id = parts[ -2 ]
+    return tool_id, tool_version
+
+def get_tool_test_errors_from_api( tool_shed_url, metadata_dict ):
+    params = dict()
+    api_path = metadata_dict[ 'url' ].split( '/' )
+    api_url = get_api_url( base=tool_shed_url, parts=api_path )
+    repository_metadata = json_from_url( api_url )
+    return repository_metadata[ 'tool_test_errors' ]
+
 def json_from_url( url ):
     url_handle = urllib.urlopen( url )
     url_contents = url_handle.read()
@@ -201,16 +246,15 @@
     if tests_passed:
         params[ 'tools_functionally_correct' ] = 'true'
         params[ 'do_not_test' ] = 'true'
-        test_results_dict[ 'status' ] = 'passed'
     else:
         params[ 'tools_functionally_correct' ] = 'false'
         params[ 'do_not_test' ] = 'true'
-        test_results_dict[ 'status' ] = 'failed'
     params[ 'tool_test_errors' ] = test_results_dict
     return update( tool_shed_api_key, '%s' % ( url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', metadata_id ) ), params, return_formatted=False )

 def run_tests( test_config ):
     loader = nose.loader.TestLoader( config=test_config )
+    test_config.plugins.addPlugin( ReportResults() )
     plug_loader = test_config.plugins.prepareTestLoader( loader )
     if plug_loader is not None:
         loader = plug_loader
@@ -221,7 +265,8 @@
     plug_runner = test_config.plugins.prepareTestRunner( test_runner )
     if plug_runner is not None:
         test_runner = plug_runner
-    return test_runner.run( tests )
+    result = test_runner.run( tests )
+    return result, test_config.plugins._plugins

 def main():
     # ---- Configuration ------------------------------------------------------
@@ -391,10 +436,6 @@
     log.info( "The embedded Galaxy application is running on %s:%s" % ( galaxy_test_host, galaxy_test_port ) )
     log.info( "Repositories will be installed from the tool shed at %s" % galaxy_tool_shed_url )
     success = False
-    repository_status = dict()
-    test_environment = get_test_environment()
-    test_environment[ 'galaxy_database_version' ] = get_database_version( app )
-    test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
     try:
         # Iterate through a list of repository info dicts.
         log.info( "Retrieving repositories to install from the URL:\n%s\n" % str( galaxy_tool_shed_url ) )
@@ -472,7 +513,7 @@
             test_config.configure( sys.argv )
             # Run the configured install method as a test. This method uses the embedded Galaxy application's web interface to install the specified
             # repository, with tool and repository dependencies also selected for installation.
-            result = run_tests( test_config )
+            result, _ = run_tests( test_config )
             success = result.wasSuccessful()
             # If the installation succeeds, configure and run functional tests for this repository. This is equivalent to
             # sh run_functional_tests.sh -installed
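get_tool_info_from_test_id relies on the repository-qualified class name embedded in such IDs: everything after TestForTool_ is a slash-separated path whose last two components are the tool ID and version. A worked example (the tool shed host, owner, repository, and tool values are made up):

    test_id = ('test_tool_000000 (functional.test_toolbox.TestForTool_'
               'toolshed.example.org/repos/some_owner/some_repo/some_tool/1.0.0)')
    parts = test_id.replace(')', '').split('/')
    tool_version = parts[-1]  # '1.0.0'
    tool_id = parts[-2]       # 'some_tool'
    print('%s %s' % (tool_id, tool_version))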
@@ -483,14 +524,63 @@
                 # and the tools_functionally_correct flag to False, as well as updating tool_test_errors.
                 file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( dict() ) )
                 has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file, from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
+                # The repository_status dict should always have the following structure:
+                # {
+                #     "test_environment":
+                #         {
+                #             "galaxy_revision": "9001:abcd1234",
+                #             "galaxy_database_version": "114",
+                #             "tool_shed_revision": "9001:abcd1234",
+                #             "tool_shed_mercurial_version": "2.3.1",
+                #             "tool_shed_database_version": "17",
+                #             "python_version": "2.7.2",
+                #             "architecture": "x86_64",
+                #             "system": "Darwin 12.2.0"
+                #         },
+                #     "test_errors":
+                #         [
+                #             {
+                #                 "test_id": "The test ID, generated by twill",
+                #                 "tool_id": "The tool ID that was tested",
+                #                 "tool_version": "The tool version that was tested",
+                #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+                #                 "traceback": "The captured traceback."
+                #             },
+                #         ]
+                #     "passed_tests":
+                #         [
+                #             {
+                #                 "test_id": "The test ID, generated by twill",
+                #                 "tool_id": "The tool ID that was tested",
+                #                 "tool_version": "The tool version that was tested",
+                #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+                #             },
+                #         ]
+                #     "invalid_tests":
+                #         [
+                #             {
+                #                 "tool_id": "The tool ID that does not have functional tests defined.",
+                #                 "tool_version": "The version of the tool."
+                #                 "tool_guid": "The guid of the tool."
+                #                 "reason_test_is_invalid": "A short explanation of what is invalid.
+                #             },
+                #         ]
+                # }
+                repository_status = get_tool_test_errors_from_api( galaxy_tool_shed_url, repository_dict )
+                test_environment = get_test_environment( repository_status[ 'test_environment' ] )
+                test_environment[ 'galaxy_database_version' ] = get_database_version( app )
+                test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
+                repository_status[ 'test_environment' ] = test_environment
+                repository_status[ 'tests_passed' ] = []
+                repository_status[ 'test_errors' ] = []
                 if not has_test_data:
                     log.error( 'Test data is missing for this repository. Updating repository and skipping functional tests.' )
                     repository_status[ 'test_environment' ] = get_test_environment()
                     test_id = 'Find functional test data for %s' % repository_dict[ 'name' ]
-                    test_errors = dict( test_id=test_id,
-                                        stdout='No test data found for changeset revision %s of repository %s owned by %s.' % \
-                                            ( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ], repository_dict[ 'owner' ] ) )
-                    repository_status[ 'test_errors' ] = [ test_errors ]
+                    # Record the lack of test data.
+                    test_errors = dict( tool_id=None, tool_version=None, tool_guid=None,
+                                        reason_test_is_invalid="Repository %s is missing a test-data directory." % name )
+                    repository_status[ 'invalid_tests' ].append( test_errors )
                     # Record the status of this repository in the tool shed.
                     register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
                     # Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
@@ -518,12 +608,20 @@
                 test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
                 test_config.configure( sys.argv )
                 # Run the configured tests.
-                result = run_tests( test_config )
+                result, test_plugins = run_tests( test_config )
                 success = result.wasSuccessful()
                 # Record some information about the environment in which this test was run, in case a failure is specific to a certain processor
                 # architecture or operating system.
                 repository_dict[ 'test_environment' ] = test_environment
-                test_errors = []
+                for plugin in test_plugins:
+                    if hasattr( plugin, 'getTestStatus' ):
+                        tests_passed = plugin.getTestStatus()
+                        break
+                repository_status[ 'tests_passed' ] = []
+                for test_id in tests_passed:
+                    tool_id, tool_version = get_tool_info_from_test_id( test_id )
+                    test_result = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
+                    repository_status[ 'tests_passed' ].append( test_result )
                 if success:
                     # This repository's tools passed all functional tests. Update the repository_metadata table in the tool shed's database
                     # to reflect that. Call the register_test_result method, which executes a PUT request to the repository_revisions API
@@ -531,15 +629,16 @@
                     # updates the time_last_tested field to today's date.
                     repositories_passed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
                     repository_status[ 'test_environment' ] = test_environment
-                    repository_status[ 'test_errors' ] = []
                     register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=True )
                     log.debug( 'Revision %s of repository %s installed and passed functional tests.' % \
                                ( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ] ) )
                 else:
                     # If the functional tests fail, log the output and update the failed changeset revision's metadata record in the tool shed via the API.
                     for failure in result.failures:
-                        # Record the twill test identifier, so the repository owner can discover which test is failing.
-                        test_status = dict( test_id=str( failure[0] ) )
+                        # Record the twill test identifier and information about the tool, so the repository owner can discover which test is failing.
+                        test_id = str( failure[0] )
+                        tool_id, tool_version = get_tool_info_from_test_id( test_id )
+                        test_status = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
                         log_output = failure[1].replace( '\\n', '\n' )
                         # Remove debug output that the reviewer or owner doesn't need.
                         log_output = re.sub( r'control \d+:.+', r'', log_output )
@@ -547,7 +646,7 @@
                         appending_to = 'output'
                         tmp_output = {}
                         output = {}
-                        # Iterate through the functional test output and extract only the important data. Captured logging is not recorded.
+                        # Iterate through the functional test output and extract only the important data. Captured logging and stdout are not recorded.
                        for line in log_output.split( '\n' ):
                             if line.startswith( 'Traceback' ):
                                 appending_to = 'traceback'
@@ -567,17 +666,15 @@
                             if appending_to not in tmp_output:
                                 tmp_output[ appending_to ] = []
                             tmp_output[ appending_to ].append( line )
-                        for output_type in [ 'stderr', 'stdout', 'traceback' ]:
+                        for output_type in [ 'stderr', 'traceback' ]:
                             if output_type in tmp_output:
                                 test_status[ output_type ] = '\n'.join( tmp_output[ output_type ] )
-                        test_errors.append( test_status )
-                    if test_errors:
-                        # Only update test_errors for this repository if it's not empty.
-                        repository_status[ 'test_environment' ] = test_environment
-                        repository_status[ 'test_errors' ] = test_errors
+                        repository_status[ 'test_errors' ].append( test_status )
+                    log.debug( to_json_string( repository_status, indent=2, sort_keys=True ) )
                     # Call the register_test_result method, which executes a PUT request to the repository_revisions API controller with the outcome
-                    # status of the tests, and updates tool_test_errors with the relevant log data.
-                    # This also sets the do_not_test and tools_functionally correct flags, and updates the time_last_tested field to today's date.
+                    # of the tests, and updates tool_test_errors with the relevant log data.
+                    # This also sets the do_not_test and tools_functionally correct flags to the appropriate values, and updates the time_last_tested
+                    # field to today's date.
                     repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
                     register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
                     log.debug( 'Revision %s of repository %s installed successfully, but did not pass functional tests.' % \

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--
This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.