2 new commits in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/commits/b51fa0eb1524/
Changeset:   b51fa0eb1524
Branch:      next-stable
User:        Dave Bouvier
Date:        2013-05-24 20:02:14
Summary:     Fix for missing_test_components always being present, causing misleading test result display on repository pages.
Affected #:  1 file

diff -r 12978bbb5ffa91743afdf6aed29355843735f96f -r b51fa0eb15244712e2d410dc2930b8c4fb657d59 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -117,6 +117,8 @@
     and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision
     not to be tested.
 
+    TODO: Update this dict structure with the recently added components.
+
     If any error is encountered, the script will update the repository_metadata.tool_test_results attribute following this structure:
     {
         "test_environment":
@@ -137,24 +139,62 @@
                 "tool_id": "The tool ID that was tested",
                 "tool_version": "The tool version that was tested",
             },
-        ],
+        ]
         "failed_tests":
         [
             {
                 "test_id": "The test ID, generated by twill",
                 "tool_id": "The tool ID that was tested",
                 "tool_version": "The tool version that was tested",
-                "stderr": "The output of the test, or a more detailed description of what was tested and what the error was.",
-                "traceback": "The traceback, if any."
+                "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+                "traceback": "The captured traceback."
             },
-        ],
+        ]
+        "installation_errors":
+        {
+            'tool_dependencies':
+            [
+                {
+                    'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
+                    'name': 'Name of the tool dependency.',
+                    'version': 'Version if this is a package, otherwise blank.',
+                    'error_message': 'The error message returned when installation was attempted.',
+                },
+            ],
+            'repository_dependencies':
+            [
+                {
+                    'tool_shed': 'The tool shed that this repository was installed from.',
+                    'name': 'The name of the repository that failed to install.',
+                    'owner': 'Owner of the failed repository.',
+                    'changeset_revision': 'Changeset revision of the failed repository.',
+                    'error_message': 'The error message that was returned when the repository failed to install.',
+                },
+            ],
+            'current_repository':
+            [
+                {
+                    'tool_shed': 'The tool shed that this repository was installed from.',
+                    'name': 'The name of the repository that failed to install.',
+                    'owner': 'Owner of the failed repository.',
+                    'changeset_revision': 'Changeset revision of the failed repository.',
+                    'error_message': 'The error message that was returned when the repository failed to install.',
+                },
+            ],
+            {
+                "name": "The name of the repository.",
+                "owner": "The owner of the repository.",
+                "changeset_revision": "The changeset revision of the repository.",
+                "error_message": "The message stored in tool_dependency.error_message."
+            },
+        }
         "missing_test_components":
         [
             {
-                "tool_id": "The tool ID that is missing functional test definitions and/or test data.",
-                "tool_version": "The version of the tool.",
-                "tool_guid": "The guid of the tool.",
-                "missing_components": "The components that are missing for this tool to be considered testable."
+                "tool_id": "The tool ID that missing components.",
+                "tool_version": "The version of the tool."
+                "tool_guid": "The guid of the tool."
+                "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
             },
         ]
     }
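To make the schema above concrete, here is a hypothetical example of a populated record; it is not part of the changeset, all values are invented for illustration, and only the field names follow the docstring as revised here:

# Hypothetical repository_metadata.tool_test_results value for a revision
# with one untestable tool. Invented values for illustration only; the
# test_environment keys are filled in elsewhere by get_test_environment().
tool_test_results = {
    "test_environment": {},  # populated by get_test_environment()
    "passed_tests": [],
    "failed_tests": [],
    "missing_test_components": [
        {
            "tool_id": "example_filter",  # hypothetical tool
            "tool_version": "1.0.1",
            "tool_guid": "toolshed.example.org/repos/someuser/example_filter/example_filter/1.0.1",
            "missing_components": "Functional tests are defined, but the test-data directory is missing.",
        },
    ],
}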
@@ -167,22 +207,22 @@
     no_tools = 0
     valid_revisions = 0
     invalid_revisions = 0
+    records_checked = 0
     # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
     # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
     # because it's redundant to test a revision that a user can't install.
-    metadata_records_to_check = app.sa_session.query( app.model.RepositoryMetadata ) \
-                                              .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
-                                                             app.model.RepositoryMetadata.table.c.includes_tools == True,
-                                                             app.model.RepositoryMetadata.table.c.do_not_test == False ) ) \
-                                              .all()
-    for metadata_record in metadata_records_to_check:
+    for metadata_record in app.sa_session.query( app.model.RepositoryMetadata ) \
+                                         .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
+                                                        app.model.RepositoryMetadata.table.c.includes_tools == True,
+                                                        app.model.RepositoryMetadata.table.c.do_not_test == False ) ):
+        records_checked += 1
         # Initialize the repository_status dict with the test environment, but leave the test_errors empty.
         repository_status = {}
         if metadata_record.tool_test_results:
             repository_status = metadata_record.tool_test_results
         # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows,
         # or tests incorrectly labeled as invalid.
-        repository_status[ 'missing_test_components' ] = []
+        missing_test_components = []
        if 'test_environment' in repository_status:
             repository_status[ 'test_environment' ] = get_test_environment( repository_status[ 'test_environment' ] )
         else:
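The query refactoring in this hunk does two things at once: it stops materializing every matching record with .all() just to take len() afterwards, and it counts records as they stream past. A minimal sketch of the pattern, separate from the changeset itself, assuming a SQLAlchemy session and mapped class analogous to the script's; process() is a hypothetical stand-in for the per-record work:

# Before: .all() builds the complete result list in memory, solely so that
# len( metadata_records_to_check ) can be printed at the end.
metadata_records_to_check = sa_session.query( RepositoryMetadata ) \
                                      .filter( RepositoryMetadata.table.c.downloadable == True ) \
                                      .all()
for metadata_record in metadata_records_to_check:
    process( metadata_record )
print '# Checked %d changeset revisions.' % len( metadata_records_to_check )

# After: iterating the Query object executes it and yields rows without
# building the full Python list up front; a running counter replaces len().
records_checked = 0
for metadata_record in sa_session.query( RepositoryMetadata ) \
                                 .filter( RepositoryMetadata.table.c.downloadable == True ):
    records_checked += 1
    process( metadata_record )
print '# Checked %d changeset revisions.' % records_checked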
@@ -295,7 +335,7 @@
     #             "tool_id": "The tool ID that was tested",
     #             "tool_version": "The tool version that was tested",
     #         },
-    #     ],
+    #     ]
     #     "failed_tests":
     #     [
     #         {
@@ -305,14 +345,52 @@
     #             "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
     #             "traceback": "The captured traceback."
     #         },
-    #     ],
+    #     ]
+    #     "installation_errors":
+    #     {
+    #         'tool_dependencies':
+    #         [
+    #             {
+    #                 'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
+    #                 'name': 'Name of the tool dependency.',
+    #                 'version': 'Version if this is a package, otherwise blank.',
+    #                 'error_message': 'The error message returned when installation was attempted.',
+    #             },
+    #         ],
+    #         'repository_dependencies':
+    #         [
+    #             {
+    #                 'tool_shed': 'The tool shed that this repository was installed from.',
+    #                 'name': 'The name of the repository that failed to install.',
+    #                 'owner': 'Owner of the failed repository.',
+    #                 'changeset_revision': 'Changeset revision of the failed repository.',
+    #                 'error_message': 'The error message that was returned when the repository failed to install.',
+    #             },
+    #         ],
+    #         'current_repository':
+    #         [
+    #             {
+    #                 'tool_shed': 'The tool shed that this repository was installed from.',
+    #                 'name': 'The name of the repository that failed to install.',
+    #                 'owner': 'Owner of the failed repository.',
+    #                 'changeset_revision': 'Changeset revision of the failed repository.',
+    #                 'error_message': 'The error message that was returned when the repository failed to install.',
+    #             },
+    #         ],
+    #         {
+    #             "name": "The name of the repository.",
+    #             "owner": "The owner of the repository.",
+    #             "changeset_revision": "The changeset revision of the repository.",
+    #             "error_message": "The message stored in tool_dependency.error_message."
+    #         },
+    #     }
     #     "missing_test_components":
     #     [
     #         {
-    #             "tool_id": "The ID of the tool that does not have valid tests.",
+    #             "tool_id": "The tool ID that missing components.",
     #             "tool_version": "The version of the tool."
     #             "tool_guid": "The guid of the tool."
-    #             "missing_components": "The components that are missing for this tool to be considered testable."
+    #             "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
    #         },
     #     ]
     # }
@@ -321,14 +399,14 @@
                 # than the list relevant to what it is testing.
                 # Only append this error dict if it hasn't already been added.
                 if problem_found:
-                    if test_errors not in repository_status[ 'missing_test_components' ]:
-                        repository_status[ 'missing_test_components' ].append( test_errors )
+                    if test_errors not in missing_test_components:
+                        missing_test_components.append( test_errors )
                 if tool_has_tests and has_test_files:
                     testable_revision_found = True
         # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
         if os.path.exists( work_dir ):
             shutil.rmtree( work_dir )
-        if not repository_status[ 'missing_test_components' ]:
+        if not missing_test_components:
             valid_revisions += 1
             if verbosity >= 1:
                 print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
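The hunks above replace direct writes into repository_status[ 'missing_test_components' ] with a plain local list. A sketch of the accumulation pattern, again separate from the changeset, with tools_in_revision and examine_tool() as hypothetical stand-ins for the script's per-tool checks:

# Accumulate problems in a local list while scanning, instead of mutating
# the persisted status dict as the scan goes.
missing_test_components = []
for tool in tools_in_revision:          # hypothetical iterable of tool configs
    test_errors = examine_tool( tool )  # hypothetical check; returns a dict or None
    # Dicts compare by value in Python, so `not in` skips exact duplicates.
    if test_errors and test_errors not in missing_test_components:
        missing_test_components.append( test_errors )
# repository_status only gains the key at save time, in the final hunk below.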
@@ -337,13 +415,13 @@
             if verbosity >= 1:
                 print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
             if verbosity >= 2:
-                for invalid_test in repository_status[ 'missing_test_components' ]:
+                for invalid_test in missing_test_components:
                     if 'missing_components' in invalid_test:
                         print '# %s' % invalid_test[ 'missing_components' ]
         if not info_only:
             # If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
             # on which this script was run.
-            if repository_status[ 'missing_test_components' ]:
+            if missing_test_components:
                 # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been
                 # found in this revision, and:
                 # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision.
@@ -358,13 +436,14 @@
                     metadata_record.do_not_test = True
                     metadata_record.tools_functionally_correct = False
                     metadata_record.missing_test_components = True
+            repository_status[ 'missing_test_components' ] = missing_test_components
             metadata_record.tool_test_results = repository_status
             metadata_record.time_last_tested = datetime.utcnow()
             app.sa_session.add( metadata_record )
             app.sa_session.flush()
     stop = time.time()
     print '# -------------------------------------------------------------------------------------------'
-    print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len( checked_repository_ids ), tool_count, len( metadata_records_to_check ) )
+    print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len( checked_repository_ids ), tool_count, records_checked )
     print '# %d revisions found with functional tests and test data for all tools.' % valid_revisions
     print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions
     print '# Found %d tools without functional tests.' % no_tests
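Net effect of the changeset: 'missing_test_components' is no longer stamped into every record's tool_test_results at the start of each iteration, but written once, at save time. A hypothetical illustration of the display-side symptom this avoids follows; the repository page code itself is not part of this diff, and render_missing_components() is an invented helper:

# If the scan inserts the key unconditionally, a presence test like this one
# renders a "missing test components" section even for fully testable
# revisions, since an empty [] still matches:
if 'missing_test_components' in tool_test_results:
    render_missing_components( tool_test_results[ 'missing_test_components' ] )

# Testing truthiness instead ignores an empty list, and with this changeset
# the key is only written when test results are actually being persisted:
if tool_test_results.get( 'missing_test_components' ):
    render_missing_components( tool_test_results[ 'missing_test_components' ] )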
https://bitbucket.org/galaxy/galaxy-central/commits/308719efff06/
Changeset:   308719efff06
User:        Dave Bouvier
Date:        2013-05-24 20:02:41
Summary:     Merge in next-stable.
Affected #:  1 file

diff -r ffa2061034a8266d27c34dc47defb485addf469e -r 308719efff061598955f5b54fae2621c4f902ee9 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -117,6 +117,8 @@
     and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision
     not to be tested.
 
+    TODO: Update this dict structure with the recently added components.
+
     If any error is encountered, the script will update the repository_metadata.tool_test_results attribute following this structure:
     {
         "test_environment":
@@ -137,24 +139,62 @@
                 "tool_id": "The tool ID that was tested",
                 "tool_version": "The tool version that was tested",
             },
-        ],
+        ]
         "failed_tests":
         [
             {
                 "test_id": "The test ID, generated by twill",
                 "tool_id": "The tool ID that was tested",
                 "tool_version": "The tool version that was tested",
-                "stderr": "The output of the test, or a more detailed description of what was tested and what the error was.",
-                "traceback": "The traceback, if any."
+                "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+                "traceback": "The captured traceback."
             },
-        ],
+        ]
+        "installation_errors":
+        {
+            'tool_dependencies':
+            [
+                {
+                    'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
+                    'name': 'Name of the tool dependency.',
+                    'version': 'Version if this is a package, otherwise blank.',
+                    'error_message': 'The error message returned when installation was attempted.',
+                },
+            ],
+            'repository_dependencies':
+            [
+                {
+                    'tool_shed': 'The tool shed that this repository was installed from.',
+                    'name': 'The name of the repository that failed to install.',
+                    'owner': 'Owner of the failed repository.',
+                    'changeset_revision': 'Changeset revision of the failed repository.',
+                    'error_message': 'The error message that was returned when the repository failed to install.',
+                },
+            ],
+            'current_repository':
+            [
+                {
+                    'tool_shed': 'The tool shed that this repository was installed from.',
+                    'name': 'The name of the repository that failed to install.',
+                    'owner': 'Owner of the failed repository.',
+                    'changeset_revision': 'Changeset revision of the failed repository.',
+                    'error_message': 'The error message that was returned when the repository failed to install.',
+                },
+            ],
+            {
+                "name": "The name of the repository.",
+                "owner": "The owner of the repository.",
+                "changeset_revision": "The changeset revision of the repository.",
+                "error_message": "The message stored in tool_dependency.error_message."
+            },
+        }
         "missing_test_components":
         [
             {
-                "tool_id": "The tool ID that is missing functional test definitions and/or test data.",
-                "tool_version": "The version of the tool.",
-                "tool_guid": "The guid of the tool.",
-                "missing_components": "The components that are missing for this tool to be considered testable."
+                "tool_id": "The tool ID that missing components.",
+                "tool_version": "The version of the tool."
+                "tool_guid": "The guid of the tool."
+                "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
             },
         ]
     }
@@ -167,22 +207,22 @@
     no_tools = 0
     valid_revisions = 0
     invalid_revisions = 0
+    records_checked = 0
     # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
     # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
     # because it's redundant to test a revision that a user can't install.
-    metadata_records_to_check = app.sa_session.query( app.model.RepositoryMetadata ) \
-                                              .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
-                                                             app.model.RepositoryMetadata.table.c.includes_tools == True,
-                                                             app.model.RepositoryMetadata.table.c.do_not_test == False ) ) \
-                                              .all()
-    for metadata_record in metadata_records_to_check:
+    for metadata_record in app.sa_session.query( app.model.RepositoryMetadata ) \
+                                         .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
+                                                        app.model.RepositoryMetadata.table.c.includes_tools == True,
+                                                        app.model.RepositoryMetadata.table.c.do_not_test == False ) ):
+        records_checked += 1
         # Initialize the repository_status dict with the test environment, but leave the test_errors empty.
         repository_status = {}
         if metadata_record.tool_test_results:
             repository_status = metadata_record.tool_test_results
         # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows,
         # or tests incorrectly labeled as invalid.
-        repository_status[ 'missing_test_components' ] = []
+        missing_test_components = []
         if 'test_environment' in repository_status:
             repository_status[ 'test_environment' ] = get_test_environment( repository_status[ 'test_environment' ] )
         else:
@@ -295,7 +335,7 @@
     #             "tool_id": "The tool ID that was tested",
     #             "tool_version": "The tool version that was tested",
     #         },
-    #     ],
+    #     ]
     #     "failed_tests":
     #     [
     #         {
@@ -305,14 +345,52 @@
     #             "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
     #             "traceback": "The captured traceback."
     #         },
-    #     ],
+    #     ]
+    #     "installation_errors":
+    #     {
+    #         'tool_dependencies':
+    #         [
+    #             {
+    #                 'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
+    #                 'name': 'Name of the tool dependency.',
+    #                 'version': 'Version if this is a package, otherwise blank.',
+    #                 'error_message': 'The error message returned when installation was attempted.',
+    #             },
+    #         ],
+    #         'repository_dependencies':
+    #         [
+    #             {
+    #                 'tool_shed': 'The tool shed that this repository was installed from.',
+    #                 'name': 'The name of the repository that failed to install.',
+    #                 'owner': 'Owner of the failed repository.',
+    #                 'changeset_revision': 'Changeset revision of the failed repository.',
+    #                 'error_message': 'The error message that was returned when the repository failed to install.',
+    #             },
+    #         ],
+    #         'current_repository':
+    #         [
+    #             {
+    #                 'tool_shed': 'The tool shed that this repository was installed from.',
+    #                 'name': 'The name of the repository that failed to install.',
+    #                 'owner': 'Owner of the failed repository.',
+    #                 'changeset_revision': 'Changeset revision of the failed repository.',
+    #                 'error_message': 'The error message that was returned when the repository failed to install.',
+    #             },
+    #         ],
+    #         {
+    #             "name": "The name of the repository.",
+    #             "owner": "The owner of the repository.",
+    #             "changeset_revision": "The changeset revision of the repository.",
+    #             "error_message": "The message stored in tool_dependency.error_message."
+    #         },
+    #     }
     #     "missing_test_components":
     #     [
     #         {
-    #             "tool_id": "The ID of the tool that does not have valid tests.",
+    #             "tool_id": "The tool ID that missing components.",
     #             "tool_version": "The version of the tool."
     #             "tool_guid": "The guid of the tool."
-    #             "missing_components": "The components that are missing for this tool to be considered testable."
+    #             "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
     #         },
     #     ]
     # }
@@ -321,14 +399,14 @@
                 # than the list relevant to what it is testing.
                 # Only append this error dict if it hasn't already been added.
                 if problem_found:
-                    if test_errors not in repository_status[ 'missing_test_components' ]:
-                        repository_status[ 'missing_test_components' ].append( test_errors )
+                    if test_errors not in missing_test_components:
+                        missing_test_components.append( test_errors )
                 if tool_has_tests and has_test_files:
                     testable_revision_found = True
         # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
         if os.path.exists( work_dir ):
             shutil.rmtree( work_dir )
-        if not repository_status[ 'missing_test_components' ]:
+        if not missing_test_components:
             valid_revisions += 1
             if verbosity >= 1:
                 print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
@@ -337,13 +415,13 @@
             if verbosity >= 1:
                 print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
             if verbosity >= 2:
-                for invalid_test in repository_status[ 'missing_test_components' ]:
+                for invalid_test in missing_test_components:
                     if 'missing_components' in invalid_test:
                         print '# %s' % invalid_test[ 'missing_components' ]
         if not info_only:
             # If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
             # on which this script was run.
-            if repository_status[ 'missing_test_components' ]:
+            if missing_test_components:
                 # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been
                 # found in this revision, and:
                 # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision.
@@ -358,13 +436,14 @@
                     metadata_record.do_not_test = True
                     metadata_record.tools_functionally_correct = False
                     metadata_record.missing_test_components = True
+            repository_status[ 'missing_test_components' ] = missing_test_components
             metadata_record.tool_test_results = repository_status
             metadata_record.time_last_tested = datetime.utcnow()
             app.sa_session.add( metadata_record )
             app.sa_session.flush()
     stop = time.time()
     print '# -------------------------------------------------------------------------------------------'
-    print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len( checked_repository_ids ), tool_count, len( metadata_records_to_check ) )
+    print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len( checked_repository_ids ), tool_count, records_checked )
     print '# %d revisions found with functional tests and test data for all tools.' % valid_revisions
     print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions
     print '# Found %d tools without functional tests.' % no_tests

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--
This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.