commit/galaxy-central: inithello: Update tool_test_results dict's attribute names for consistency. Only flag a changeset revision not to be tested if there are no valid tests found in that revision.
1 new commit in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/commits/41d8cdde4729/
Changeset:   41d8cdde4729
User:        inithello
Date:        2013-04-25 17:13:53
Summary:     Update tool_test_results dict's attribute names for consistency. Only flag a changeset revision not to be tested if there are no valid tests found in that revision.
Affected #:  2 files

diff -r f11a2c7a7d325deaf5cf2c2f05a513b1e1b4a2a6 -r 41d8cdde47297746aa82ce4858006bd2632331db lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -130,16 +130,6 @@
              "architecture": "x86_64",
              "system": "Darwin 12.2.0"
         },
-    "test_errors":
-        [
-            {
-                "test_id": "The test ID, generated by twill",
-                "tool_id": "The tool ID that was tested",
-                "tool_version": "The tool version that was tested",
-                "stderr": "The output of the test, or a more detailed description of what was tested and what the error was."
-                "traceback": "The traceback, if any."
-            },
-        ]
     "passed_tests":
         [
             {
@@ -147,14 +137,24 @@
                 "tool_id": "The tool ID that was tested",
                 "tool_version": "The tool version that was tested",
             },
-        ]
-    "invalid_tests":
+        ],
+    "failed_tests":
         [
             {
-                "tool_id": "The tool ID that does not have functional tests defined.",
-                "tool_version": "The version of the tool."
-                "tool_guid": "The guid of the tool."
-                "reason_test_is_invalid": "A short explanation of what is invalid.
+                "test_id": "The test ID, generated by twill",
+                "tool_id": "The tool ID that was tested",
+                "tool_version": "The tool version that was tested",
+                "stderr": "The output of the test, or a more detailed description of what was tested and what the error was.",
+                "traceback": "The traceback, if any."
+            },
+        ],
+    "missing_test_components":
+        [
+            {
+                "tool_id": "The tool ID that is missing functional test definitions and/or test data.",
+                "tool_version": "The version of the tool.",
+                "tool_guid": "The guid of the tool.",
+                "missing_components": "The components that are missing for this tool to be considered testable."
             },
         ]
     }
@@ -182,7 +182,7 @@
         repository_status = metadata_record.tool_test_results
         # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows,
         # or tests incorrectly labeled as invalid.
-        repository_status[ 'invalid_tests' ] = []
+        repository_status[ 'missing_test_components' ] = []
         if 'test_environment' in repository_status:
             repository_status[ 'test_environment' ] = get_test_environment( repository_status[ 'test_environment' ] )
         else:
@@ -204,6 +204,7 @@
                 continue
         else:
             has_test_data = False
+        testable_revision_found = False
         # Clone the repository up to the changeset revision we're checking.
         repo_dir = metadata_record.repository.repo_path( app )
         repo = hg.repository( get_configured_ui(), repo_dir )
@@ -254,12 +255,15 @@
             failure_reason = ''
             problem_found = False
             missing_test_files = []
+            has_test_files = False
             if tool_has_tests and has_test_data:
                 missing_test_files = check_for_missing_test_files( tool_metadata[ 'tests' ], test_data_path )
                 if missing_test_files:
                     if verbosity >= 2:
                         print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
                             ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
+                else:
+                    has_test_files = True
             if not has_test_data:
                 failure_reason += 'Repository does not have a test-data directory. '
                 problem_found = True
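Taken together, the renames in this file are mechanical: 'test_errors' becomes 'failed_tests', 'tests_passed' becomes 'passed_tests', 'invalid_tests' becomes 'missing_test_components', and the per-tool 'reason_test_is_invalid' field becomes 'missing_components'. As a hypothetical sketch (this helper is not part of the commit; the name is invented), code that still holds records written under the old names could normalize them like so:

    # Hypothetical migration helper -- not part of this changeset.
    # The old-name -> new-name mapping is taken from the diff above.
    KEY_RENAMES = {
        'test_errors': 'failed_tests',
        'tests_passed': 'passed_tests',
        'invalid_tests': 'missing_test_components',
    }

    def normalize_tool_test_results( repository_status ):
        # Rewrite top-level keys written under the pre-rename scheme.
        for old_key, new_key in KEY_RENAMES.items():
            if old_key in repository_status and new_key not in repository_status:
                repository_status[ new_key ] = repository_status.pop( old_key )
        # The per-tool reason field was renamed as well.
        for entry in repository_status.get( 'missing_test_components', [] ):
            if 'reason_test_is_invalid' in entry:
                entry[ 'missing_components' ] = entry.pop( 'reason_test_is_invalid' )
        return repository_status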
@@ -270,7 +274,7 @@
                 failure_reason += 'One or more test files are missing for tool %s: %s' % ( tool_id, ', '.join( missing_test_files ) )
                 problem_found = True
             test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid,
-                                reason_test_is_invalid=failure_reason )
+                                missing_components=failure_reason )
             # The repository_metadata.tool_test_results attribute should always have the following structure:
             # {
             #     "test_environment":
@@ -284,7 +288,15 @@
             #              "architecture": "x86_64",
             #              "system": "Darwin 12.2.0"
             #         },
-            #     "test_errors":
+            #     "passed_tests":
+            #         [
+            #             {
+            #                 "test_id": "The test ID, generated by twill",
+            #                 "tool_id": "The tool ID that was tested",
+            #                 "tool_version": "The tool version that was tested",
+            #             },
+            #         ],
+            #     "failed_tests":
             #         [
             #             {
             #                 "test_id": "The test ID, generated by twill",
@@ -293,23 +305,14 @@
             #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
             #                 "traceback": "The captured traceback."
             #             },
-            #         ]
-            #     "passed_tests":
-            #         [
-            #             {
-            #                 "test_id": "The test ID, generated by twill",
-            #                 "tool_id": "The tool ID that was tested",
-            #                 "tool_version": "The tool version that was tested",
-            #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
-            #             },
-            #         ]
-            #     "invalid_tests":
+            #         ],
+            #     "missing_test_components":
             #         [
             #             {
             #                 "tool_id": "The ID of the tool that does not have valid tests.",
             #                 "tool_version": "The version of the tool."
             #                 "tool_guid": "The guid of the tool."
-            #                 "reason_test_is_invalid": "A short explanation of what is invalid."
+            #                 "missing_components": "The components that are missing for this tool to be considered testable."
             #             },
             #         ]
             # }
@@ -318,12 +321,14 @@
             # than the list relevant to what it is testing.
             # Only append this error dict if it hasn't already been added.
             if problem_found:
-                if test_errors not in repository_status[ 'invalid_tests' ]:
-                    repository_status[ 'invalid_tests' ].append( test_errors )
+                if test_errors not in repository_status[ 'missing_test_components' ]:
+                    repository_status[ 'missing_test_components' ].append( test_errors )
+            if tool_has_tests and has_test_files:
+                testable_revision_found = True
         # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
         if os.path.exists( work_dir ):
             shutil.rmtree( work_dir )
-        if not repository_status[ 'invalid_tests' ]:
+        if not repository_status[ 'missing_test_components' ]:
             valid_revisions += 1
             if verbosity >= 1:
                 print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
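The hunks above boil down to a simple per-tool rule: every problem found is folded into at most one 'missing_test_components' entry per tool, while a single tool that has both test definitions and test data is enough to mark the whole revision as testable. A condensed, illustrative restatement (this wrapper function is invented; variable names follow the diff):

    # Illustrative restatement of the per-tool check above; not verbatim source.
    def record_tool_problems( repository_status, tool_id, tool_version, tool_guid,
                              failure_reason, tool_has_tests, has_test_files ):
        testable = False
        if failure_reason:
            test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid,
                                missing_components=failure_reason )
            # De-duplicate: each tool contributes at most one entry.
            if test_errors not in repository_status[ 'missing_test_components' ]:
                repository_status[ 'missing_test_components' ].append( test_errors )
        if tool_has_tests and has_test_files:
            # One tool with tests and test data keeps the revision testable.
            testable = True
        return testable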
@@ -332,22 +337,27 @@
             if verbosity >= 1:
                 print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
             if verbosity >= 2:
-                for invalid_test in repository_status[ 'invalid_tests' ]:
-                    if 'reason_test_is_invalid' in invalid_test:
-                        print '# %s' % invalid_test[ 'reason_test_is_invalid' ]
+                for invalid_test in repository_status[ 'missing_test_components' ]:
+                    if 'missing_components' in invalid_test:
+                        print '# %s' % invalid_test[ 'missing_components' ]
         if not info_only:
             # If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
             # on which this script was run.
-            if repository_status[ 'invalid_tests' ]:
-                # If functional test definitions or test data are missing, set do_not_test = True if and only if:
+            if repository_status[ 'missing_test_components' ]:
+                # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been
+                # found in this revision, and:
                 # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision.
-                #    In this case, the revision will never be updated with correct data, and re-testing it would be redundant.
-                # b) There are one or more downloadable revisions, and the revision being tested is the most recent downloadable revision.
-                #    In this case, if the repository is updated with test data or functional tests, the downloadable changeset revision
-                #    that was tested will be replaced with the new changeset revision, which will be automatically tested.
-                if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ):
+                #    In this case, the revision will never be updated with the missing components, and re-testing it would be redundant.
+                # b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable
+                #    revision. In this case, if the repository is updated with test data or functional tests, the downloadable
+                #    changeset revision that was tested will either be replaced with the new changeset revision, or a new downloadable
+                #    changeset revision will be created, either of which will be automatically checked and flagged as appropriate.
+                # In the install and test script, this behavior is slightly different, since we do want to always run functional
+                # tests on the most recent downloadable changeset revision.
+                if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ) and not testable_revision_found:
                     metadata_record.do_not_test = True
                     metadata_record.tools_functionally_correct = False
+                    metadata_record.missing_test_components = True
             metadata_record.tool_test_results = repository_status
             metadata_record.time_last_tested = datetime.utcnow()
             app.sa_session.add( metadata_record )
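The new 'and not testable_revision_found' guard is the behavioral half of the commit message: a revision is permanently excluded from testing only when the positional rules in the comment above apply and not one of its tools is testable. Expressed as a hypothetical predicate (the wrapper is invented for illustration; should_set_do_not_test_flag is the real helper defined elsewhere in this script):

    # Hypothetical wrapper, for illustration only.
    def revision_should_be_skipped( app, repository, changeset_revision, testable_revision_found ):
        # should_set_do_not_test_flag() encodes positional rules (a) and (b) from
        # the comment above; a single testable tool now vetoes the do_not_test flag.
        return ( should_set_do_not_test_flag( app, repository, changeset_revision )
                 and not testable_revision_found )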
diff -r f11a2c7a7d325deaf5cf2c2f05a513b1e1b4a2a6 -r 41d8cdde47297746aa82ce4858006bd2632331db test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -174,9 +174,9 @@
     def getTestStatus( self, test_identifier ):
         if test_identifier in self.passed:
-            tests_passed = self.passed[ test_identifier ]
+            passed_tests = self.passed[ test_identifier ]
             del self.passed[ test_identifier ]
-            return tests_passed
+            return passed_tests
         return []

 def execute_uninstall_method( repository_dict ):
@@ -278,13 +278,13 @@
     url_contents = url_handle.read()
     return from_json_string( url_contents )

-def register_test_result( url, metadata_id, test_results_dict, tests_passed=False ):
+def register_test_result( url, metadata_id, test_results_dict, passed_tests=False ):
     '''
     This script should never set do_not_test = True, because the repositories should always be re-tested against the most recent code.
     '''
     params = {}
-    if tests_passed:
+    if passed_tests:
         params[ 'tools_functionally_correct' ] = 'true'
         params[ 'do_not_test' ] = 'false'
     else:
@@ -599,7 +599,15 @@
     #              "architecture": "x86_64",
     #              "system": "Darwin 12.2.0"
     #         },
-    #     "test_errors":
+    #     "passed_tests":
+    #         [
+    #             {
+    #                 "test_id": "The test ID, generated by twill",
+    #                 "tool_id": "The tool ID that was tested",
+    #                 "tool_version": "The tool version that was tested",
+    #             },
+    #         ]
+    #     "failed_tests":
    #         [
    #             {
    #                 "test_id": "The test ID, generated by twill",
@@ -609,22 +617,13 @@
     #                 "traceback": "The captured traceback."
     #             },
     #         ]
-    #     "passed_tests":
-    #         [
-    #             {
-    #                 "test_id": "The test ID, generated by twill",
-    #                 "tool_id": "The tool ID that was tested",
-    #                 "tool_version": "The tool version that was tested",
-    #                 "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
-    #             },
-    #         ]
-    #     "invalid_tests":
+    #     "missing_test_components":
     #         [
     #             {
     #                 "tool_id": "The tool ID that does not have functional tests defined.",
     #                 "tool_version": "The version of the tool."
     #                 "tool_guid": "The guid of the tool."
-    #                 "reason_test_is_invalid": "A short explanation of what is invalid.
+    #                 "missing_components": "A short explanation of what is invalid.
     #             },
     #         ]
     # }
@@ -635,17 +634,17 @@
         test_environment[ 'galaxy_database_version' ] = get_database_version( app )
         test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
         repository_status[ 'test_environment' ] = test_environment
-        repository_status[ 'tests_passed' ] = []
-        repository_status[ 'test_errors' ] = []
-        repository_status[ 'invalid_tests' ] = []
+        repository_status[ 'passed_tests' ] = []
+        repository_status[ 'failed_tests' ] = []
+        repository_status[ 'missing_test_components' ] = []
         if not has_test_data:
             log.error( 'Test data is missing for this repository. Updating repository and skipping functional tests.' )
             # Record the lack of test data.
-            test_errors = dict( tool_id=None, tool_version=None, tool_guid=None,
-                                reason_test_is_invalid="Repository %s is missing a test-data directory." % name )
-            repository_status[ 'invalid_tests' ].append( test_errors )
+            failed_tests = dict( tool_id=None, tool_version=None, tool_guid=None,
+                                 missing_components="Repository %s is missing a test-data directory." % name )
+            repository_status[ 'missing_test_components' ].append( failed_tests )
             # Record the status of this repository in the tool shed.
-            register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
+            register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, passed_tests=False )
             # Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
             # repository using Twill.
             execute_uninstall_method( repository_info_dict )
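With the three lists initialized under their new names, a fully populated tool_test_results record now looks roughly like this (every value below is illustrative, not taken from a real test run):

    # Illustrative record only -- all values here are made up.
    repository_status = {
        'test_environment': { 'python_version': '2.7.2',
                              'architecture': 'x86_64',
                              'system': 'Darwin 12.2.0' },
        'passed_tests': [ dict( test_id='test_tool_000000 (functional.test_toolbox)',
                                tool_id='example_tool',
                                tool_version='1.0.0' ) ],
        'failed_tests': [],
        'missing_test_components': [ dict( tool_id='another_tool',
                                           tool_version='1.0.1',
                                           tool_guid='toolshed.example.org/repos/owner/repo/another_tool/1.0.1',
                                           missing_components='One or more test files are missing for tool another_tool: input.fastq' ) ],
    }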
@@ -677,20 +676,20 @@
         for plugin in test_plugins:
             if hasattr( plugin, 'getTestStatus' ):
                 test_identifier = '%s/%s' % ( owner, name )
-                tests_passed = plugin.getTestStatus( test_identifier )
+                passed_tests = plugin.getTestStatus( test_identifier )
                 break
-        repository_status[ 'tests_passed' ] = []
-        for test_id in tests_passed:
+        repository_status[ 'passed_tests' ] = []
+        for test_id in passed_tests:
             tool_id, tool_version = get_tool_info_from_test_id( test_id )
             test_result = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
-            repository_status[ 'tests_passed' ].append( test_result )
+            repository_status[ 'passed_tests' ].append( test_result )
         if success:
             # This repository's tools passed all functional tests. Update the repository_metadata table in the tool shed's database
             # to reflect that. Call the register_test_result method, which executes a PUT request to the repository_revisions API
             # controller with the status of the test. This also sets the do_not_test and tools_functionally correct flags, and
             # updates the time_last_tested field to today's date.
             repositories_passed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
-            register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=True )
+            register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, passed_tests=True )
             log.debug( 'Revision %s of repository %s installed and passed functional tests.' % ( changeset_revision, name ) )
         else:
             # If the functional tests fail, log the output and update the failed changeset revision's metadata record in the tool shed via the API.
@@ -729,13 +728,13 @@
             for output_type in [ 'stderr', 'traceback' ]:
                 if output_type in tmp_output:
                     test_status[ output_type ] = '\n'.join( tmp_output[ output_type ] )
-            repository_status[ 'test_errors' ].append( test_status )
+            repository_status[ 'failed_tests' ].append( test_status )
             # Call the register_test_result method, which executes a PUT request to the repository_revisions API controller with the outcome
             # of the tests, and updates tool_test_results with the relevant log data.
             # This also sets the do_not_test and tools_functionally correct flags to the appropriate values, and updates the time_last_tested
             # field to today's date.
             repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
-            register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
+            register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, passed_tests=False )
             log.debug( 'Revision %s of repository %s installed successfully, but did not pass functional tests.' % \
                      ( changeset_revision, name ) )
             # Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving this
because you have the service enabled, addressing the recipient of this email.