1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/3d569f107f1d/
Changeset: 3d569f107f1d
User: inithello
Date: 2013-04-25 21:16:23
Summary: Fix for failing upload tool functional tests.
Affected #: 1 file
diff -r 1fa287b15af5dab0ac84545835752b4f87e87328 -r 3d569f107f1d449b028fc4d5390dd8d07fdf26ce test/base/twilltestcase.py
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -242,7 +242,20 @@
# Wait for upload processing to finish (TODO: this should be done in each test case instead)
self.wait()
+ def json_from_url( self, url ):
+ self.visit_url( url )
+ return from_json_string( self.last_page() )
+
# Functions associated with histories
+ def get_history_from_api( self, encoded_history_id=None ):
+ if encoded_history_id is None:
+ history = self.get_latest_history()
+ encoded_history_id = history[ 'id' ]
+ return self.json_from_url( '/api/histories/%s/contents' % encoded_history_id )
+
+ def get_latest_history( self ):
+ return self.json_from_url( '/api/histories' )[ 0 ]
+
def check_history_for_errors( self ):
"""Raises an exception if there are errors in a history"""
self.home()
@@ -317,43 +330,29 @@
Uses history page JSON to determine whether this history is empty
(i.e. has no undeleted datasets).
"""
- def has_no_undeleted_hdas( hda_list ):
- if not len( hda_list ):
- return True
- for hda in hda_list:
- if not( hda[ 'deleted' ] or hda[ 'purged' ] ):
- return False
- return True
- try:
- self.check_history_json( r'\bhdas\s*=\s*(.*);', has_no_undeleted_hdas )
- except AssertionError, exc:
- log.error( 'history is not empty' )
- raise exc
+ return len( self.get_history_from_api() ) == 0
def check_hda_json_for_key_value( self, hda_id, key, value, use_string_contains=False ):
"""
- Uses history page JSON to determine whether the current history:
- (1) has an hda with hda_id,
- (2) that hda has a JSON var named 'key',
- (3) that var 'key' == value
- If use_string_contains=True, this will search for value in var 'key'
- instead of testing for an entire, exact match (string only).
+ Uses the history API to determine whether the current history:
+ (1) Has a history dataset with the required ID.
+ (2) That dataset has the required key.
+ (3) The contents of that key match the provided value.
+ If use_string_contains=True, this will perform a substring match, otherwise an exact match.
"""
#TODO: multi key, value
- def hda_has_key_value( hda_list ):
- for hda in hda_list:
- # if we found the hda and there's a var in the json named key
- if( ( hda[ 'id' ] == hda_id )
- and ( key in hda ) ):
- var = hda[ key ]
- # test for partial string containment if str and requested
- if( ( type( var ) == str )
- and ( use_string_contains ) ):
- return ( value in var )
- # otherwise, test for equivalence
- return ( var == value )
- return False
- self.check_history_json( r'\bhdas\s*=\s*(.*);', hda_has_key_value )
+ hda = dict()
+ for history_item in self.get_history_from_api():
+ if history_item[ 'id' ] == hda_id:
+ hda = self.json_from_url( history_item[ 'url' ] )
+ break
+ if hda:
+ if key in hda:
+ if use_string_contains:
+ return value in hda[ key ]
+ else:
+ return value == hda[ key ]
+ return False
def clear_history( self ):
"""Empties a history of all datasets"""
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/41d8cdde4729/
Changeset: 41d8cdde4729
User: inithello
Date: 2013-04-25 17:13:53
Summary: Update tool_test_results dict's attribute names for consistency. Only flag a changeset revision not to be tested if no valid tests are found in that revision.
Affected #: 2 files
diff -r f11a2c7a7d325deaf5cf2c2f05a513b1e1b4a2a6 -r 41d8cdde47297746aa82ce4858006bd2632331db lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -130,16 +130,6 @@
"architecture": "x86_64",
"system": "Darwin 12.2.0"
},
- "test_errors":
- [
- {
- "test_id": "The test ID, generated by twill",
- "tool_id": "The tool ID that was tested",
- "tool_version": "The tool version that was tested",
- "stderr": "The output of the test, or a more detailed description of what was tested and what the error was."
- "traceback": "The traceback, if any."
- },
- ]
"passed_tests":
[
{
@@ -147,14 +137,24 @@
"tool_id": "The tool ID that was tested",
"tool_version": "The tool version that was tested",
},
- ]
- "invalid_tests":
+ ],
+ "failed_tests":
[
{
- "tool_id": "The tool ID that does not have functional tests defined.",
- "tool_version": "The version of the tool."
- "tool_guid": "The guid of the tool."
- "reason_test_is_invalid": "A short explanation of what is invalid.
+ "test_id": "The test ID, generated by twill",
+ "tool_id": "The tool ID that was tested",
+ "tool_version": "The tool version that was tested",
+ "stderr": "The output of the test, or a more detailed description of what was tested and what the error was.",
+ "traceback": "The traceback, if any."
+ },
+ ],
+ "missing_test_components":
+ [
+ {
+ "tool_id": "The tool ID that is missing functional test definitions and/or test data.",
+ "tool_version": "The version of the tool.",
+ "tool_guid": "The guid of the tool.",
+ "missing_components": "The components that are missing for this tool to be considered testable."
},
]
}
@@ -182,7 +182,7 @@
repository_status = metadata_record.tool_test_results
# Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows,
# or tests incorrectly labeled as invalid.
- repository_status[ 'invalid_tests' ] = []
+ repository_status[ 'missing_test_components' ] = []
if 'test_environment' in repository_status:
repository_status[ 'test_environment' ] = get_test_environment( repository_status[ 'test_environment' ] )
else:
@@ -204,6 +204,7 @@
continue
else:
has_test_data = False
+ testable_revision_found = False
# Clone the repository up to the changeset revision we're checking.
repo_dir = metadata_record.repository.repo_path( app )
repo = hg.repository( get_configured_ui(), repo_dir )
@@ -254,12 +255,15 @@
failure_reason = ''
problem_found = False
missing_test_files = []
+ has_test_files = False
if tool_has_tests and has_test_data:
missing_test_files = check_for_missing_test_files( tool_metadata[ 'tests' ], test_data_path )
if missing_test_files:
if verbosity >= 2:
print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
+ else:
+ has_test_files = True
if not has_test_data:
failure_reason += 'Repository does not have a test-data directory. '
problem_found = True
@@ -270,7 +274,7 @@
failure_reason += 'One or more test files are missing for tool %s: %s' % ( tool_id, ', '.join( missing_test_files ) )
problem_found = True
test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid,
- reason_test_is_invalid=failure_reason )
+ missing_components=failure_reason )
# The repository_metadata.tool_test_results attribute should always have the following structure:
# {
# "test_environment":
@@ -284,7 +288,15 @@
# "architecture": "x86_64",
# "system": "Darwin 12.2.0"
# },
- # "test_errors":
+ # "passed_tests":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # },
+ # ],
+ # "failed_tests":
# [
# {
# "test_id": "The test ID, generated by twill",
@@ -293,23 +305,14 @@
# "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
# "traceback": "The captured traceback."
# },
- # ]
- # "passed_tests":
- # [
- # {
- # "test_id": "The test ID, generated by twill",
- # "tool_id": "The tool ID that was tested",
- # "tool_version": "The tool version that was tested",
- # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
- # },
- # ]
- # "invalid_tests":
+ # ],
+ # "missing_test_components":
# [
# {
# "tool_id": "The ID of the tool that does not have valid tests.",
# "tool_version": "The version of the tool."
# "tool_guid": "The guid of the tool."
- # "reason_test_is_invalid": "A short explanation of what is invalid."
+ # "missing_components": "The components that are missing for this tool to be considered testable."
# },
# ]
# }
@@ -318,12 +321,14 @@
# than the list relevant to what it is testing.
# Only append this error dict if it hasn't already been added.
if problem_found:
- if test_errors not in repository_status[ 'invalid_tests' ]:
- repository_status[ 'invalid_tests' ].append( test_errors )
+ if test_errors not in repository_status[ 'missing_test_components' ]:
+ repository_status[ 'missing_test_components' ].append( test_errors )
+ if tool_has_tests and has_test_files:
+ testable_revision_found = True
# Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
if os.path.exists( work_dir ):
shutil.rmtree( work_dir )
- if not repository_status[ 'invalid_tests' ]:
+ if not repository_status[ 'missing_test_components' ]:
valid_revisions += 1
if verbosity >= 1:
print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
@@ -332,22 +337,27 @@
if verbosity >= 1:
print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
if verbosity >= 2:
- for invalid_test in repository_status[ 'invalid_tests' ]:
- if 'reason_test_is_invalid' in invalid_test:
- print '# %s' % invalid_test[ 'reason_test_is_invalid' ]
+ for invalid_test in repository_status[ 'missing_test_components' ]:
+ if 'missing_components' in invalid_test:
+ print '# %s' % invalid_test[ 'missing_components' ]
if not info_only:
# If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
# on which this script was run.
- if repository_status[ 'invalid_tests' ]:
- # If functional test definitions or test data are missing, set do_not_test = True if and only if:
+ if repository_status[ 'missing_test_components' ]:
+ # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been
+ # found in this revision, and:
# a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision.
- # In this case, the revision will never be updated with correct data, and re-testing it would be redundant.
- # b) There are one or more downloadable revisions, and the revision being tested is the most recent downloadable revision.
- # In this case, if the repository is updated with test data or functional tests, the downloadable changeset revision
- # that was tested will be replaced with the new changeset revision, which will be automatically tested.
- if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ):
+ # In this case, the revision will never be updated with the missing components, and re-testing it would be redundant.
+ # b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable
+ # revision. In this case, if the repository is updated with test data or functional tests, the downloadable
+ # changeset revision that was tested will either be replaced with the new changeset revision, or a new downloadable
+ # changeset revision will be created, either of which will be automatically checked and flagged as appropriate.
+ # In the install and test script, this behavior is slightly different, since we do want to always run functional
+ # tests on the most recent downloadable changeset revision.
+ if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ) and not testable_revision_found:
metadata_record.do_not_test = True
metadata_record.tools_functionally_correct = False
+ metadata_record.missing_test_components = True
metadata_record.tool_test_results = repository_status
metadata_record.time_last_tested = datetime.utcnow()
app.sa_session.add( metadata_record )
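
Condensed for reference, the tool_test_results dictionary after this commit has the following shape (field values are illustrative placeholders taken from the structure documented in the diff above, not real test output):

    tool_test_results = {
        "test_environment": {
            "architecture": "x86_64",
            "system": "Darwin 12.2.0",
        },
        "passed_tests": [
            { "test_id": "...", "tool_id": "...", "tool_version": "..." },
        ],
        "failed_tests": [
            { "test_id": "...", "tool_id": "...", "tool_version": "...",
              "stderr": "...", "traceback": "..." },
        ],
        "missing_test_components": [
            { "tool_id": "...", "tool_version": "...", "tool_guid": "...",
              "missing_components": "..." },
        ],
    }
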
diff -r f11a2c7a7d325deaf5cf2c2f05a513b1e1b4a2a6 -r 41d8cdde47297746aa82ce4858006bd2632331db test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -174,9 +174,9 @@
def getTestStatus( self, test_identifier ):
if test_identifier in self.passed:
- tests_passed = self.passed[ test_identifier ]
+ passed_tests = self.passed[ test_identifier ]
del self.passed[ test_identifier ]
- return tests_passed
+ return passed_tests
return []
def execute_uninstall_method( repository_dict ):
@@ -278,13 +278,13 @@
url_contents = url_handle.read()
return from_json_string( url_contents )
-def register_test_result( url, metadata_id, test_results_dict, tests_passed=False ):
+def register_test_result( url, metadata_id, test_results_dict, passed_tests=False ):
'''
This script should never set do_not_test = True, because the repositories should always be re-tested
against the most recent code.
'''
params = {}
- if tests_passed:
+ if passed_tests:
params[ 'tools_functionally_correct' ] = 'true'
params[ 'do_not_test' ] = 'false'
else:
@@ -599,7 +599,15 @@
# "architecture": "x86_64",
# "system": "Darwin 12.2.0"
# },
- # "test_errors":
+ # "passed_tests":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # },
+ # ]
+ # "failed_tests":
# [
# {
# "test_id": "The test ID, generated by twill",
@@ -609,22 +617,13 @@
# "traceback": "The captured traceback."
# },
# ]
- # "passed_tests":
- # [
- # {
- # "test_id": "The test ID, generated by twill",
- # "tool_id": "The tool ID that was tested",
- # "tool_version": "The tool version that was tested",
- # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
- # },
- # ]
- # "invalid_tests":
+ # "missing_test_components":
# [
# {
# "tool_id": "The tool ID that does not have functional tests defined.",
# "tool_version": "The version of the tool."
# "tool_guid": "The guid of the tool."
- # "reason_test_is_invalid": "A short explanation of what is invalid.
+ # "missing_components": "A short explanation of what is invalid.
# },
# ]
# }
@@ -635,17 +634,17 @@
test_environment[ 'galaxy_database_version' ] = get_database_version( app )
test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
repository_status[ 'test_environment' ] = test_environment
- repository_status[ 'tests_passed' ] = []
- repository_status[ 'test_errors' ] = []
- repository_status[ 'invalid_tests' ] = []
+ repository_status[ 'passed_tests' ] = []
+ repository_status[ 'failed_tests' ] = []
+ repository_status[ 'missing_test_components' ] = []
if not has_test_data:
log.error( 'Test data is missing for this repository. Updating repository and skipping functional tests.' )
# Record the lack of test data.
- test_errors = dict( tool_id=None, tool_version=None, tool_guid=None,
- reason_test_is_invalid="Repository %s is missing a test-data directory." % name )
- repository_status[ 'invalid_tests' ].append( test_errors )
+ failed_tests = dict( tool_id=None, tool_version=None, tool_guid=None,
+ missing_components="Repository %s is missing a test-data directory." % name )
+ repository_status[ 'missing_test_components' ].append( failed_tests )
# Record the status of this repository in the tool shed.
- register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
+ register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, passed_tests=False )
# Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
# repository using Twill.
execute_uninstall_method( repository_info_dict )
@@ -677,20 +676,20 @@
for plugin in test_plugins:
if hasattr( plugin, 'getTestStatus' ):
test_identifier = '%s/%s' % ( owner, name )
- tests_passed = plugin.getTestStatus( test_identifier )
+ passed_tests = plugin.getTestStatus( test_identifier )
break
- repository_status[ 'tests_passed' ] = []
- for test_id in tests_passed:
+ repository_status[ 'passed_tests' ] = []
+ for test_id in passed_tests:
tool_id, tool_version = get_tool_info_from_test_id( test_id )
test_result = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
- repository_status[ 'tests_passed' ].append( test_result )
+ repository_status[ 'passed_tests' ].append( test_result )
if success:
# This repository's tools passed all functional tests. Update the repository_metadata table in the tool shed's database
# to reflect that. Call the register_test_result method, which executes a PUT request to the repository_revisions API
# controller with the status of the test. This also sets the do_not_test and tools_functionally correct flags, and
# updates the time_last_tested field to today's date.
repositories_passed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
- register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=True )
+ register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, passed_tests=True )
log.debug( 'Revision %s of repository %s installed and passed functional tests.' % ( changeset_revision, name ) )
else:
# If the functional tests fail, log the output and update the failed changeset revision's metadata record in the tool shed via the API.
@@ -729,13 +728,13 @@
for output_type in [ 'stderr', 'traceback' ]:
if output_type in tmp_output:
test_status[ output_type ] = '\n'.join( tmp_output[ output_type ] )
- repository_status[ 'test_errors' ].append( test_status )
+ repository_status[ 'failed_tests' ].append( test_status )
# Call the register_test_result method, which executes a PUT request to the repository_revisions API controller with the outcome
# of the tests, and updates tool_test_results with the relevant log data.
# This also sets the do_not_test and tools_functionally correct flags to the appropriate values, and updates the time_last_tested
# field to today's date.
repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
- register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
+ register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, passed_tests=False )
log.debug( 'Revision %s of repository %s installed successfully, but did not pass functional tests.' % \
( changeset_revision, name ) )
# Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
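
To recap the new flagging logic in check_repositories_for_functional_tests.py, the decision condensed from the diff above reads as follows (a sketch restating the commit, not additional code):

    # A revision with missing test components is now flagged do_not_test only
    # when the existing repository-level check passes AND no tool in the
    # revision had both functional tests and test files.
    if repository_status[ 'missing_test_components' ]:
        if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ) \
                and not testable_revision_found:
            metadata_record.do_not_test = True
            metadata_record.tools_functionally_correct = False
            metadata_record.missing_test_components = True
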
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/33bec3cae5b7/
Changeset: 33bec3cae5b7
User: dannon
Date: 2013-04-25 14:13:16
Summary: Organize multi.py imports.
Affected #: 1 file
diff -r 75c5ab4e924192528a6d250d61109c05d16f0118 -r 33bec3cae5b7731916349977918c847f7b255267 lib/galaxy/jobs/splitters/multi.py
--- a/lib/galaxy/jobs/splitters/multi.py
+++ b/lib/galaxy/jobs/splitters/multi.py
@@ -1,5 +1,8 @@
-import os, logging, shutil
+import os
+import logging
+import shutil
import inspect
+
from galaxy import model, util
https://bitbucket.org/galaxy/galaxy-central/commits/a599aba4d18c/
Changeset: a599aba4d18c
User: dannon
Date: 2013-04-25 14:19:33
Summary: Patch from Peter Cock to ensure stdout/stderr are separated in dataset info.
Affected #: 1 file
diff -r 33bec3cae5b7731916349977918c847f7b255267 -r a599aba4d18c634c51d40c68c64148e45f4e083b lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -924,7 +924,13 @@
for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations: #need to update all associated output hdas, i.e. history was shared with job running
dataset.blurb = 'done'
dataset.peek = 'no peek'
- dataset.info = ( dataset.info or '' ) + context['stdout'] + context['stderr']
+ dataset.info = (dataset.info or '')
+ if context['stdout'].strip():
+ #Ensure white space between entries
+ dataset.info = dataset.info.rstrip() + "\n" + context['stdout'].strip()
+ if context['stderr'].strip():
+ #Ensure white space between entries
+ dataset.info = dataset.info.rstrip() + "\n" + context['stderr'].strip()
dataset.tool_version = self.version_string
dataset.set_size()
if 'uuid' in context:
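
An illustrative before/after of the concatenation behavior this patch changes (example values, not from the commit):

    info, stdout, stderr = 'prior info', 'tool output\n', 'warning: deprecated\n'

    # Before: streams were appended with no separator.
    old = ( info or '' ) + stdout + stderr
    # -> 'prior infotool output\nwarning: deprecated\n'

    # After: each non-empty stream lands on its own line, empty streams are skipped.
    new = ( info or '' )
    if stdout.strip():
        new = new.rstrip() + "\n" + stdout.strip()
    if stderr.strip():
        new = new.rstrip() + "\n" + stderr.strip()
    # -> 'prior info\ntool output\nwarning: deprecated'
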
Repository URL: https://bitbucket.org/galaxy/galaxy-central/