1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/5d9328262021/
changeset: 5d9328262021
user: inithello
date: 2013-03-11 19:21:32
summary: Fix for installing and testing repositories where all functional tests fail.
affected #: 1 file
diff -r c07417582dd369a02625d3b3d170718fd69bdd12 -r 5d9328262021b414c9005f79b0c97605acdd4c94 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -155,9 +155,11 @@
self.passed[ test_identifier ].append( fixed_test_id )
def getTestStatus( self, test_identifier ):
- tests_passed = self.passed[ test_identifier ]
- del self.passed[ test_identifier ]
- return tests_passed
+ if test_identifier in self.passed:
+ tests_passed = self.passed[ test_identifier ]
+ del self.passed[ test_identifier ]
+ return tests_passed
+ return []
def execute_uninstall_method( repository_dict ):
# Delete any configured tool functional tests from the test_toolbox.__dict__, otherwise nose will find them
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c07417582dd3/
changeset: c07417582dd3
user: inithello
date: 2013-03-11 18:49:12
summary: Improve structure of invalid tools dict. Clarify summary and detailed log output. Add check for required environment variables.
affected #: 3 files
diff -r a2f58a2b87876f8db9a8703782633b84f4dc13ef -r c07417582dd369a02625d3b3d170718fd69bdd12 install_and_test_tool_shed_repositories.sh
--- a/install_and_test_tool_shed_repositories.sh
+++ b/install_and_test_tool_shed_repositories.sh
@@ -2,5 +2,42 @@
# A good place to look for nose info: http://somethingaboutorange.com/mrl/projects/nose/
-python test/install_and_test_tool_shed_repositories/functional_tests.py $* -v --with-nosehtml --html-report-file ./test/install_and_test_tool_shed_repositories/run_functional_tests.html test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py test/functional/test_toolbox.py
+# The test/install_and_test_tool_shed_repositories/functional_tests.py can not be executed directly, because it must have certain functional test definitions
+# in sys.argv. Running it through this shell script is the best way to ensure that it has the required definitions.
+# This script requires the following environment variables:
+# GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY - must be set to the API key for the tool shed that is being checked.
+# GALAXY_INSTALL_TEST_TOOL_SHED_URL - must be set to a URL that the tool shed is listening on.
+# If the tool shed url is not specified in tool_sheds_conf.xml, GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF must be set to a tool sheds configuration file
+# that does specify that url, otherwise repository installation will fail.
+
+if [ -z $GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY ] ; then
+ echo "This script requires the GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY environment variable to be set and non-empty."
+ exit 1
+fi
+
+if [ -z $GALAXY_INSTALL_TEST_TOOL_SHED_URL ] ; then
+ echo "This script requires the GALAXY_INSTALL_TEST_TOOL_SHED_URL environment variable to be set and non-empty."
+ exit 1
+fi
+
+if [ -z "$GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF" ] ; then
+ if grep --quiet $GALAXY_INSTALL_TEST_TOOL_SHED_URL tool_sheds_conf.xml; then
+ echo "Tool sheds configuration tool_sheds_conf.xml ok, proceeding."
+ else
+ echo "ERROR: Tool sheds configuration tool_sheds_conf.xml does not have an entry for $GALAXY_INSTALL_TEST_TOOL_SHED_URL."
+ exit 1
+ fi
+else
+ if grep --quiet $GALAXY_INSTALL_TEST_TOOL_SHED_URL $GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF; then
+ echo "Tool sheds configuration $GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF ok, proceeding."
+ else
+ echo "ERROR: Tool sheds configuration $GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF does not have an entry for $GALAXY_INSTALL_TEST_TOOL_SHED_URL"
+ exit 1
+ fi
+fi
+
+python test/install_and_test_tool_shed_repositories/functional_tests.py $* -v --with-nosehtml --html-report-file \
+ test/install_and_test_tool_shed_repositories/run_functional_tests.html \
+ test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py \
+ test/functional/test_toolbox.py
diff -r a2f58a2b87876f8db9a8703782633b84f4dc13ef -r c07417582dd369a02625d3b3d170718fd69bdd12 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -55,7 +55,11 @@
type="int", help="Control the amount of detail in the log output. --verbosity=1 is "
"the same as -v")
( options, args ) = parser.parse_args()
- ini_file = args[0]
+ try:
+ ini_file = args[0]
+ except IndexError:
+ print "Usage: python %s <tool shed .ini file> [options]" % sys.argv[ 0 ]
+ exit( 127 )
config_parser = ConfigParser.ConfigParser( {'here':os.getcwd()} )
config_parser.read( ini_file )
config_dict = {}
@@ -161,6 +165,8 @@
has_tests = 0
no_tests = 0
no_tools = 0
+ valid_revisions = 0
+ invalid_revisions = 0
# Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test
# or tools_functionally_correct. Also filter out changeset revisions that are not downloadable, because it's redundant to test a revision that
# a user can't install.
@@ -173,6 +179,7 @@
repository_status[ 'invalid_tests' ] = []
metadata_records_to_check = app.sa_session.query( app.model.RepositoryMetadata ) \
.filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
+ app.model.RepositoryMetadata.table.c.includes_tools == True,
app.model.RepositoryMetadata.table.c.do_not_test == False,
app.model.RepositoryMetadata.table.c.tools_functionally_correct == False ) ) \
.all()
@@ -185,87 +192,14 @@
repository_status[ 'invalid_tests' ] = []
if metadata_record.repository.id not in checked_repository_ids:
checked_repository_ids.append( metadata_record.repository.id )
- if verbosity >= 1:
- print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
- ( changeset_revision, name, owner )
+ if verbosity >= 1:
+ print '# -------------------------------------------------------------------------------------------'
+ print '# Now checking revision %s of %s, owned by %s.' % ( changeset_revision, name, owner )
# If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
# only repositories that contain tools.
if 'tools' not in metadata_record.metadata:
- no_tools += 1
continue
else:
- # Loop through all the tools in this metadata record, checking each one for defined functional tests.
- for tool_metadata in metadata_record.metadata[ 'tools' ]:
- tool_count += 1
- tool_id = tool_metadata[ 'id' ]
- tool_version = tool_metadata[ 'version' ]
- tool_guid = tool_metadata[ 'guid' ]
- if verbosity >= 2:
- print "# Checking tool ID '%s' in changeset revision %s of %s." % \
- ( tool_id, changeset_revision, name )
- # If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
- # not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
- # automated functional test framework produces.
- if 'tests' not in tool_metadata or not tool_metadata[ 'tests' ]:
- if verbosity >= 2:
- print '# No functional tests defined for %s.' % tool_id
- # The repository_metadata.tool_test_errors attribute should always have the following structure:
- # {
- # "test_environment":
- # {
- # "galaxy_revision": "9001:abcd1234",
- # "galaxy_database_version": "114",
- # "tool_shed_revision": "9001:abcd1234",
- # "tool_shed_mercurial_version": "2.3.1",
- # "tool_shed_database_version": "17",
- # "python_version": "2.7.2",
- # "architecture": "x86_64",
- # "system": "Darwin 12.2.0"
- # },
- # "test_errors":
- # [
- # {
- # "test_id": "The test ID, generated by twill",
- # "tool_id": "The tool ID that was tested",
- # "tool_version": "The tool version that was tested",
- # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
- # "traceback": "The captured traceback."
- # },
- # ]
- # "passed_tests":
- # [
- # {
- # "test_id": "The test ID, generated by twill",
- # "tool_id": "The tool ID that was tested",
- # "tool_version": "The tool version that was tested",
- # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
- # },
- # ]
- # "invalid_tests":
- # [
- # {
- # "tool_id": "The tool ID that does not have functional tests defined.",
- # "tool_version": "The version of the tool."
- # "tool_guid": "The guid of the tool."
- # "reason_test_is_invalid": "A short explanation of what is invalid.
- # },
- # ]
- # }
- # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
- # than the list relevant to what it is testing.
- test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid )
- repository_status[ 'invalid_tests' ].append( test_errors )
- no_tests += 1
- else:
- if verbosity >= 2:
- print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
- ( tool_id, changeset_revision, name )
- has_tests += 1
- if verbosity >= 1:
- if not repository_status[ 'invalid_tests' ]:
- print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
- else:
- print '# Some tools missing functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
has_test_data = False
# Clone the repository up to the changeset revision we're checking.
repo_dir = metadata_record.repository.repo_path( app )
@@ -285,9 +219,47 @@
# Remove the cloned repository path.
if os.path.exists( work_dir ):
shutil.rmtree( work_dir )
- if not has_test_data:
- if verbosity >= 1:
+ if verbosity >= 1:
+ if not has_test_data:
print '# Test data missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ else:
+ print '# Test data found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
+ ( changeset_revision, name, owner )
+ # Loop through all the tools in this metadata record, checking each one for defined functional tests.
+ for tool_metadata in metadata_record.metadata[ 'tools' ]:
+ tool_count += 1
+ tool_id = tool_metadata[ 'id' ]
+ tool_version = tool_metadata[ 'version' ]
+ tool_guid = tool_metadata[ 'guid' ]
+ if verbosity >= 2:
+ print "# Checking tool ID '%s' in changeset revision %s of %s." % \
+ ( tool_id, changeset_revision, name )
+ # If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
+ # not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
+ # automated functional test framework produces.
+ tool_has_tests = True
+ if 'tests' not in tool_metadata or not tool_metadata[ 'tests' ]:
+ tool_has_tests = False
+ if verbosity >= 2:
+ print '# No functional tests defined for %s.' % tool_id
+ no_tests += 1
+ else:
+ tool_has_tests = True
+ if verbosity >= 2:
+ print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
+ ( tool_id, changeset_revision, name )
+ has_tests += 1
+ failure_reason = ''
+ problem_found = False
+ if not has_test_data:
+ failure_reason += 'Repository does not have a test-data directory. '
+ problem_found = True
+ if not tool_has_tests:
+ failure_reason += 'Functional test definitions missing for %s. ' % tool_id
+ problem_found = True
+ test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid,
+ reason_test_is_invalid=failure_reason )
# The repository_metadata.tool_test_errors attribute should always have the following structure:
# {
# "test_environment":
@@ -323,21 +295,28 @@
# "invalid_tests":
# [
# {
- # "tool_id": "The tool ID that does not have functional tests defined.",
+ # "tool_id": "The ID of the tool that does not have valid tests.",
# "tool_version": "The version of the tool."
# "tool_guid": "The guid of the tool."
- # "reason_test_is_invalid": "A short explanation of what is invalid.
+ # "reason_test_is_invalid": "A short explanation of what is invalid."
# },
# ]
# }
+ #
# Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
# than the list relevant to what it is testing.
- test_errors = dict( tool_id=None, tool_version=None, tool_guid=None,
- reason_test_is_invalid="Repository %s is missing a test-data directory." % name )
- repository_status[ 'invalid_tests' ].append( test_errors )
+ # Only append this error dict if it hasn't already been added.
+ if problem_found:
+ if test_errors not in repository_status[ 'invalid_tests' ]:
+ repository_status[ 'invalid_tests' ].append( test_errors )
+ if not repository_status[ 'invalid_tests' ]:
+ valid_revisions += 1
+ if verbosity >= 1:
+ print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
else:
+ invalid_revisions += 1
if verbosity >= 1:
- print '# Test data found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ print '# Some tools missing functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
if not info_only:
# If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
# on which this script was run.
@@ -356,8 +335,10 @@
app.sa_session.add( metadata_record )
app.sa_session.flush()
stop = time.time()
+ print '# -------------------------------------------------------------------------------------------'
print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len( checked_repository_ids ), tool_count, len( metadata_records_to_check ) )
- print '# Skipped %d changeset revisions without tools.' % no_tools
+ print '# %d revisions found with functional tests and test data for all tools.' % valid_revisions
+ print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions
print '# Found %d tools without functional tests.' % no_tests
print '# Found %d tools with functional tests.' % has_tests
if info_only:
diff -r a2f58a2b87876f8db9a8703782633b84f4dc13ef -r c07417582dd369a02625d3b3d170718fd69bdd12 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -104,11 +104,20 @@
</tables>
'''
-# Define a default location to find the list of repositories to check.
-galaxy_repository_list = os.environ.get( 'GALAXY_INSTALL_TEST_REPOSITORY_LIST_LOCATIOM', 'repository_list.json' )
-galaxy_tool_shed_url = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_URL', 'http://localhost:9009' )
+# The tool shed url and api key must be set for this script to work correctly. Additionally, if the tool shed url does not
+# point to one of the defaults, the GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF needs to point to a tool sheds configuration file
+# that contains a definition for that tool shed.
+
+galaxy_tool_shed_url = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_URL', None )
tool_shed_api_key = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY', None )
-assert tool_shed_api_key is not None, 'Unable to proceed without API key.'
+
+if tool_shed_api_key is None:
+ print "This script requires the GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY environment variable to be set and non-empty."
+ exit( 1 )
+
+if galaxy_tool_shed_url is None:
+ print "This script requires the GALAXY_INSTALL_TEST_TOOL_SHED_URL environment variable to be set and non-empty."
+ exit( 1 )
if 'GALAXY_INSTALL_TEST_SECRET' not in os.environ:
galaxy_encode_secret = 'changethisinproductiontoo'
@@ -462,8 +471,7 @@
# Get a list of repositories to test from the tool shed specified in the GALAXY_INSTALL_TEST_TOOL_SHED_URL environment variable.
log.info( "Retrieving repositories to install from the URL:\n%s\n" % str( galaxy_tool_shed_url ) )
repositories_to_install = get_repositories_to_install( galaxy_tool_shed_url, source='url' )
- log.info( "Retrieved %d repositories to install..." % len( repositories_to_install ) )
- repositories_tested = len( repositories_to_install )
+ log.info( "Retrieved %d repositories from the API." % len( repositories_to_install ) )
for repository_to_install_dict in repositories_to_install:
# We need to get some details from the tool shed API, such as repository name and owner, to pass on to the
# module that will generate the install methods.
@@ -473,13 +481,15 @@
# and therefore do not need to be checked. If they are undeleted, this script will then test them the next time it runs.
if repository_info_dict[ 'deleted' ]:
log.info( "Skipping revision %s of repository id %s (%s/%s) since the repository is deleted..." % \
- ( repository_info_dict[ 'changeset_revision' ],
- repository_info_dict[ 'repository_id' ],
+ ( repository_to_install_dict[ 'changeset_revision' ],
+ repository_to_install_dict[ 'repository_id' ],
repository_info_dict[ 'owner' ],
repository_info_dict[ 'name' ] ) )
continue
# Now merge the dict returned from /api/repository_revisions with the detailed dict we just retrieved.
detailed_repository_list.append( dict( repository_info_dict.items() + repository_to_install_dict.items() ) )
+ repositories_tested = len( detailed_repository_list )
+ log.info( 'After removing deleted repositories from the list, %d remain to be tested.' % repositories_tested )
if '-list_repositories' in sys.argv:
log.info( "The API returned the following repositories, not counting deleted:" )
for repository_info_dict in detailed_repository_list:
@@ -741,15 +751,15 @@
print "# Repository revisions tested: %d" % repositories_tested
if repositories_tested > 0:
if repositories_passed:
- print "# "
+ print '# ----------------------------------------------------------------------------------'
print "# Repositories passed:"
show_summary_output( repositories_passed )
if repositories_failed:
- print "# "
+ print '# ----------------------------------------------------------------------------------'
print "# Repositories failed:"
show_summary_output( repositories_failed )
if repositories_failed_install:
- print "# "
+ print '# ----------------------------------------------------------------------------------'
print "# Repositories not installed correctly:"
show_summary_output( repositories_failed_install )
print "####################################################################################"
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/0a6b63eca3ff/
changeset: 0a6b63eca3ff
user: inithello
date: 2013-03-11 05:47:47
summary: Refactor summary output. Improve handling of empty tool_test_errors dict. Refactor repository information retrieval.
affected #: 2 files
diff -r 2b4d6af45a04ddb49c2c87a0ead3874503d35832 -r 0a6b63eca3ff6a72cd874bf36f73a2b5905aff65 install_and_test_tool_shed_repositories.sh
--- a/install_and_test_tool_shed_repositories.sh
+++ b/install_and_test_tool_shed_repositories.sh
@@ -1,7 +1,6 @@
#!/bin/sh
# A good place to look for nose info: http://somethingaboutorange.com/mrl/projects/nose/
-#rm -f ./test/tool_shed/run_functional_tests.log
-python test/install_and_test_tool_shed_repositories/functional_tests.py -v --with-nosehtml --html-report-file ./test/install_and_test_tool_shed_repositories/run_functional_tests.html test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py test/functional/test_toolbox.py
+python test/install_and_test_tool_shed_repositories/functional_tests.py $* -v --with-nosehtml --html-report-file ./test/install_and_test_tool_shed_repositories/run_functional_tests.html test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py test/functional/test_toolbox.py
diff -r 2b4d6af45a04ddb49c2c87a0ead3874503d35832 -r 0a6b63eca3ff6a72cd874bf36f73a2b5905aff65 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -242,6 +242,8 @@
api_path = metadata_dict[ 'url' ].split( '/' )
api_url = get_api_url( base=tool_shed_url, parts=api_path )
repository_metadata = json_from_url( api_url )
+ if repository_metadata[ 'tool_test_errors' ] is None:
+ return {}
return repository_metadata[ 'tool_test_errors' ]
def json_from_url( url ):
@@ -276,16 +278,22 @@
result = test_runner.run( tests )
return result, test_config.plugins._plugins
+def show_summary_output( repository_info_dicts ):
+ repositories_by_owner = dict()
+ for repository in repository_info_dicts:
+ if repository[ 'owner' ] not in repositories_by_owner:
+ repositories_by_owner[ repository[ 'owner' ] ] = []
+ repositories_by_owner[ repository[ 'owner' ] ].append( repository )
+ for owner in repositories_by_owner:
+ print "# "
+ for repository in repositories_by_owner[ owner ]:
+ print "# %s owned by %s, changeset revision %s" % ( repository[ 'name' ], repository[ 'owner' ], repository[ 'changeset_revision' ] )
+
def main():
# ---- Configuration ------------------------------------------------------
galaxy_test_host = os.environ.get( 'GALAXY_INSTALL_TEST_HOST', default_galaxy_test_host )
galaxy_test_port = os.environ.get( 'GALAXY_INSTALL_TEST_PORT', str( default_galaxy_test_port_max ) )
- # Initialize some variables for the summary that will be printed to stdout.
- repositories_tested = 0
- repositories_passed = []
- repositories_failed = []
-
tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_PATH', 'tools' )
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales
@@ -444,38 +452,19 @@
log.info( "The embedded Galaxy application is running on %s:%s" % ( galaxy_test_host, galaxy_test_port ) )
log.info( "Repositories will be installed from the tool shed at %s" % galaxy_tool_shed_url )
success = False
+ # Initialize some variables for the summary that will be printed to stdout.
+ repositories_tested = 0
+ repositories_passed = []
+ repositories_failed = []
+ repositories_failed_install = []
try:
- # Iterate through a list of repository info dicts.
+ detailed_repository_list = []
+ # Get a list of repositories to test from the tool shed specified in the GALAXY_INSTALL_TEST_TOOL_SHED_URL environment variable.
log.info( "Retrieving repositories to install from the URL:\n%s\n" % str( galaxy_tool_shed_url ) )
repositories_to_install = get_repositories_to_install( galaxy_tool_shed_url, source='url' )
log.info( "Retrieved %d repositories to install..." % len( repositories_to_install ) )
repositories_tested = len( repositories_to_install )
- # This loop will iterate through the list of repositories returned by the above method, skipping any that are marked
- # as deleted. For each repository, it will generate a test method that will use Twill to install that repository into the
- # embedded Galaxy application that was started up, selecting to install repository and tool dependencies if they are
- # defined. If the installation completes successfully, it will then generate a test case for each functional test
- # defined for each tool in the repository, and execute the generated test cases. When this completes, it will record
- # the result of the tests, and if any failed, the traceback and captured output of the tool that was run.
- # After all tests have completed, the repository is uninstalled, so that the previous test cases don't interfere with
- # the next repository's functional tests.
for repository_to_install_dict in repositories_to_install:
- """
- Each repository_to_install_dict looks something like:
- {
- "changeset_revision": "13fa22a258b5",
- "downloadable": true,
- "id": "529fd61ab1c6cc36",
- "malicious": false,
- "repository_id": "529fd61ab1c6cc36",
- "url": "/api/repository_revisions/529fd61ab1c6cc36"
- }
- """
- repository_status = dict()
- repository_id = repository_to_install_dict.get( 'repository_id', None )
- changeset_revision = repository_to_install_dict.get( 'changeset_revision', None )
- metadata_revision_id = repository_to_install_dict.get( 'id', None )
- # Add the URL for the tool shed we're installing from, so the automated installation methods go to the right place.
- repository_to_install_dict[ 'tool_shed_url' ] = galaxy_tool_shed_url
# We need to get some details from the tool shed API, such as repository name and owner, to pass on to the
# module that will generate the install methods.
repository_info_dict = get_repository_info_from_api( galaxy_tool_shed_url, repository_to_install_dict )
@@ -483,13 +472,31 @@
# and functionally correct tools that someone has previously installed. Deleted repositories have never been installed,
# and therefore do not need to be checked. If they are undeleted, this script will then test them the next time it runs.
if repository_info_dict[ 'deleted' ]:
- log.info( "Skipping revision %s of repository id %s since the repository is deleted..." % ( str( changeset_revision ), str( repository_id ) ) )
+ log.info( "Skipping revision %s of repository id %s (%s/%s) since the repository is deleted..." % \
+ ( repository_info_dict[ 'changeset_revision' ],
+ repository_info_dict[ 'repository_id' ],
+ repository_info_dict[ 'owner' ],
+ repository_info_dict[ 'name' ] ) )
continue
- log.info( "Installing and testing revision %s of repository id %s..." % ( str( changeset_revision ), str( repository_id ) ) )
- # Add repository details to the basic repository dict.
- repository_dict = dict( repository_info_dict.items() + repository_to_install_dict.items() )
+ # Now merge the dict returned from /api/repository_revisions with the detailed dict we just retrieved.
+ detailed_repository_list.append( dict( repository_info_dict.items() + repository_to_install_dict.items() ) )
+ if '-list_repositories' in sys.argv:
+ log.info( "The API returned the following repositories, not counting deleted:" )
+ for repository_info_dict in detailed_repository_list:
+ log.info( "%s owned by %s changeset revision %s" % ( repository_info_dict.get( 'name', None ),
+ repository_info_dict.get( 'owner', None ),
+ repository_info_dict.get( 'changeset_revision', None ) ) )
+ # This loop will iterate through the list of repositories generated by the above code, having already filtered out any
+ # that were marked as deleted. For each repository, it will generate a test method that will use Twill to install that
+ # repository into the embedded Galaxy application that was started up, selecting to install repository and tool
+ # dependencies if they are defined. If the installation completes successfully, it will then generate a test case for
+ # each functional test defined for each tool in the repository, and execute the generated test cases. When this completes,
+ # it will record the result of the tests, and if any failed, the traceback and captured output of the tool that was run.
+ # After all tests have completed, the repository is uninstalled, so that the previous test cases don't interfere with
+ # the next repository's functional tests.
+ for repository_info_dict in detailed_repository_list:
"""
- After the addition of the repository details, each repository_dict should now contain something like:
+ Each repository_info_dict looks something like:
{
"changeset_revision": "13fa22a258b5",
"contents_url": "/api/repositories/529fd61ab1c6cc36/contents",
@@ -510,11 +517,19 @@
"user_id": "529fd61ab1c6cc36"
}
"""
- name = repository_dict[ 'name' ]
- owner = repository_dict[ 'owner' ]
+ repository_status = dict()
+ repository_id = repository_info_dict.get( 'repository_id', None )
+ changeset_revision = repository_info_dict.get( 'changeset_revision', None )
+ metadata_revision_id = repository_info_dict.get( 'id', None )
+ # Add the URL for the tool shed we're installing from, so the automated installation methods go to the right place.
+ repository_info_dict[ 'tool_shed_url' ] = galaxy_tool_shed_url
+ log.info( "Installing and testing revision %s of repository id %s..." % ( str( changeset_revision ), str( repository_id ) ) )
+ # Get the name and owner out of the repository info dict.
+ name = repository_info_dict[ 'name' ]
+ owner = repository_info_dict[ 'owner' ]
# Use the repository information dict to generate an install method that will install the repository into the embedded
# Galaxy application, with tool dependencies and repository dependencies, if any.
- test_install_repositories.generate_install_method( repository_dict )
+ test_install_repositories.generate_install_method( repository_info_dict )
os.environ[ 'GALAXY_INSTALL_TEST_HOST' ] = galaxy_test_host
# Configure nose to run the install method as a test.
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
@@ -526,7 +541,7 @@
# If the installation succeeds, configure and run functional tests for this repository. This is equivalent to
# sh run_functional_tests.sh -installed
if success:
- log.debug( 'Installation of %s succeeded, running all defined functional tests.' % repository_dict[ 'name' ] )
+ log.debug( 'Installation of %s succeeded, running all defined functional tests.' % name )
# Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
# does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
# and the tools_functionally_correct flag to False, as well as updating tool_test_errors.
@@ -574,17 +589,18 @@
# },
# ]
# }
- repository_status = get_tool_test_errors_from_api( galaxy_tool_shed_url, repository_dict )
+ repository_status = get_tool_test_errors_from_api( galaxy_tool_shed_url, repository_info_dict )
+ if 'test_environment' not in repository_status:
+ repository_status[ 'test_environment' ] = {}
test_environment = get_test_environment( repository_status[ 'test_environment' ] )
test_environment[ 'galaxy_database_version' ] = get_database_version( app )
test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
repository_status[ 'test_environment' ] = test_environment
repository_status[ 'tests_passed' ] = []
repository_status[ 'test_errors' ] = []
+ repository_status[ 'invalid_tests' ] = []
if not has_test_data:
log.error( 'Test data is missing for this repository. Updating repository and skipping functional tests.' )
- repository_status[ 'test_environment' ] = get_test_environment()
- test_id = 'Find functional test data for %s' % repository_dict[ 'name' ]
# Record the lack of test data.
test_errors = dict( tool_id=None, tool_version=None, tool_guid=None,
reason_test_is_invalid="Repository %s is missing a test-data directory." % name )
@@ -593,7 +609,7 @@
register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
# Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
# repository using Twill.
- execute_uninstall_method( repository_dict )
+ execute_uninstall_method( repository_info_dict )
# Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
test_toolbox.toolbox = app.toolbox
repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
@@ -618,9 +634,7 @@
# Run the configured tests.
result, test_plugins = run_tests( test_config )
success = result.wasSuccessful()
- # Record some information about the environment in which this test was run, in case a failure is specific to a certain processor
- # architecture or operating system.
- repository_dict[ 'test_environment' ] = test_environment
+ # Use the ReportResults nose plugin to get a list of tests that passed.
for plugin in test_plugins:
if hasattr( plugin, 'getTestStatus' ):
test_identifier = '%s/%s' % ( owner, name )
@@ -637,10 +651,8 @@
# controller with the status of the test. This also sets the do_not_test and tools_functionally correct flags, and
# updates the time_last_tested field to today's date.
repositories_passed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
- repository_status[ 'test_environment' ] = test_environment
register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=True )
- log.debug( 'Revision %s of repository %s installed and passed functional tests.' % \
- ( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ] ) )
+ log.debug( 'Revision %s of repository %s installed and passed functional tests.' % ( changeset_revision, name ) )
else:
# If the functional tests fail, log the output and update the failed changeset revision's metadata record in the tool shed via the API.
for failure in result.failures:
@@ -686,14 +698,15 @@
repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
log.debug( 'Revision %s of repository %s installed successfully, but did not pass functional tests.' % \
- ( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ] ) )
+ ( changeset_revision, name ) )
# Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
# repository using Twill.
- execute_uninstall_method( repository_dict )
+ execute_uninstall_method( repository_info_dict )
# Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
test_toolbox.toolbox = app.toolbox
else:
- log.debug( 'Repository %s failed to install correctly.' % repository_dict[ 'name' ] )
+ repositories_failed_install.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
+ log.debug( 'Repository %s failed to install correctly.' % name )
except:
log.exception( "Failure running tests" )
@@ -728,29 +741,17 @@
print "# Repository revisions tested: %d" % repositories_tested
if repositories_tested > 0:
if repositories_passed:
- passed_repositories_by_owner = dict()
- for repository in repositories_passed:
- if repository[ 'owner' ] not in passed_repositories_by_owner:
- passed_repositories_by_owner[ repository[ 'owner' ] ] = []
- passed_repositories_by_owner[ repository[ 'owner' ] ].append( repository )
print "# "
print "# Repositories passed:"
- for owner in passed_repositories_by_owner:
- print "# "
- for repository in passed_repositories_by_owner[ owner ]:
- print "# %s owned by %s, changeset revision %s" % ( repository[ 'name' ], repository[ 'owner' ], repository[ 'changeset_revision' ] )
+ show_summary_output( repositories_passed )
if repositories_failed:
- failed_repositories_by_owner = dict()
- for repository in repositories_failed:
- if repository[ 'owner' ] not in failed_repositories_by_owner:
- failed_repositories_by_owner[ repository[ 'owner' ] ] = []
- failed_repositories_by_owner[ repository[ 'owner' ] ].append( repository )
print "# "
print "# Repositories failed:"
- for owner in failed_repositories_by_owner:
- print "# "
- for repository in failed_repositories_by_owner[ owner ]:
- print "# %s owned by %s, changeset revision %s" % ( repository[ 'name' ], repository[ 'owner' ], repository[ 'changeset_revision' ] )
+ show_summary_output( repositories_failed )
+ if repositories_failed_install:
+ print "# "
+ print "# Repositories not installed correctly:"
+ show_summary_output( repositories_failed_install )
print "####################################################################################"
if success:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this email because you have the commit notification service enabled
for this repository.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/f25f3fee4da7/
changeset: f25f3fee4da7
user: dannon
date: 2013-03-08 22:01:25
summary: Remove unused yet exposed 'stdoutX' method of the datasets controller.
affected #: 1 file
diff -r 619995bcb99feda43d9a369a7aa563108a19f3c7 -r f25f3fee4da76fc0660ba27a1b26673d7592ddc1 lib/galaxy/webapps/galaxy/controllers/dataset.py
--- a/lib/galaxy/webapps/galaxy/controllers/dataset.py
+++ b/lib/galaxy/webapps/galaxy/controllers/dataset.py
@@ -180,16 +180,6 @@
if not hda or not self._can_access_dataset( trans, hda ):
return trans.show_error_message( "Either this dataset does not exist or you do not have permission to access it." )
return trans.fill_template( "dataset/errors.mako", hda=hda )
- @web.expose
- def stdoutX( self, trans, dataset_id=None, **kwargs ):
- trans.response.set_content_type( 'text/plain' )
- try:
- hda = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( trans.security.decode_id( dataset_id ) )
- assert hda and self._can_access_dataset( trans, hda )
- job = hda.creating_job_associations[0].job
- except:
- return "Invalid dataset ID or you are not allowed to access this dataset"
- return job.stdout
@web.expose
def stdout( self, trans, dataset_id=None, **kwargs ):
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/619995bcb99f/
changeset: 619995bcb99f
user: inithello
date: 2013-03-08 20:44:57
summary: Enhance the script that checks repositories for functional tests, adding support for multiple web frontends.
affected #: 1 file
diff -r c937f80188d8026f2074f8a897e2ff2972d8da6d -r 619995bcb99feda43d9a369a7aa563108a19f3c7 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -38,6 +38,12 @@
'''Script that checks repositories to see if the tools contained within them have functional tests defined.'''
parser = OptionParser()
parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
+ parser.add_option( "-s",
+ "--section",
+ action="store",
+ dest="section",
+ default='server:main',
+ help="which .ini file section to extract the host and port from" )
parser.add_option(
"-v", "--verbose",
action="count", dest="verbosity",
@@ -57,10 +63,11 @@
config_dict[key] = value
config = tool_shed_config.Configuration( **config_dict )
+ config_section = options.section
now = strftime( "%Y-%m-%d %H:%M:%S" )
print "#############################################################################"
print "# %s - Checking repositories for tools with functional tests." % now
- print "# This tool shed is configured to listen on %s:%s." % ( config_parser.get( 'server:main', 'host' ), config_parser.get( 'server:main', 'port' ) )
+ print "# This tool shed is configured to listen on %s:%s." % ( config_parser.get( config_section, 'host' ), config_parser.get( config_section, 'port' ) )
app = FlagRepositoriesApplication( config )
if options.info_only:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this email because you have the commit notification service enabled
for this repository.