galaxy-commits
Threads by month
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
December 2013
- 1 participant
- 207 discussions
commit/galaxy-central: greg: Eliminate the need for deactivating or uninstalling repositories in order to manage the tools loaded into app.toolbox within the tool shed's install and test framework.
by commits-noreply@bitbucket.org 20 Dec '13
by commits-noreply@bitbucket.org 20 Dec '13
20 Dec '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/0de3c7e7d632/
Changeset: 0de3c7e7d632
User: greg
Date: 2013-12-20 17:13:20
Summary: Eliminate the need for deactivating or uninstalling repositories in order to manage the tools loaded into app.toolbox within the tool shed's install and test framework.
Affected #: 5 files
diff -r 0fb6c5d7312aafece427c4e9b19dc5e5b991c6d8 -r 0de3c7e7d6327e1af295fc4d81419efd8b5f41af test/install_and_test_tool_shed_repositories/base/twilltestcase.py
--- a/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
+++ b/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
@@ -128,17 +128,6 @@
del( kwd[ field_name ] )
return kwd
- def uninstall_repository( self, repository ):
- """Uninstall a repository."""
- # A repository can be uninstalled only if no dependent repositories are installed.
- url = '/admin_toolshed/deactivate_or_uninstall_repository?id=%s' % self.security.encode_id( repository.id )
- self.visit_url( url )
- tc.fv ( 1, "remove_from_disk", 'true' )
- tc.submit( 'deactivate_or_uninstall_repository_button' )
- strings_displayed = [ 'The repository named' ]
- strings_displayed.append( 'has been uninstalled' )
- self.check_for_strings( strings_displayed, strings_not_displayed=[] )
-
def visit_url( self, url, allowed_codes=[ 200 ] ):
new_url = tc.go( url )
return_code = tc.browser.get_code()
diff -r 0fb6c5d7312aafece427c4e9b19dc5e5b991c6d8 -r 0de3c7e7d6327e1af295fc4d81419efd8b5f41af test/install_and_test_tool_shed_repositories/base/util.py
--- a/test/install_and_test_tool_shed_repositories/base/util.py
+++ b/test/install_and_test_tool_shed_repositories/base/util.py
@@ -518,8 +518,6 @@
repository_dict,
params,
can_update_tool_shed )
- # Uninstall this repository since it is missing dependencies.
- uninstall_repository_and_repository_dependencies( app, repository_dict )
def initialize_install_and_test_statistics_dict( test_framework ):
# Initialize a dictionary for the summary that will be printed to stdout.
@@ -591,7 +589,7 @@
log.exception( error_message )
return repository, error_message
-def is_excluded( exclude_list_dicts, name, owner, changeset_revision ):
+def is_excluded( exclude_list_dicts, name, owner, changeset_revision, encoded_repository_metadata_id ):
"""
Return True if the repository defined by the received name, owner, changeset_revision should
be excluded from testing for any reason.
@@ -606,7 +604,7 @@
return True, reason
# Skip this repository if it has a repository dependency that is in the exclude list.
repository_dependency_dicts, error_message = \
- get_repository_dependencies_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
+ get_repository_dependencies_for_changeset_revision( galaxy_tool_shed_url,
encoded_repository_metadata_id )
if error_message:
log.debug( 'Error getting repository dependencies for revision %s of repository %s owned by %s:' % \
@@ -800,96 +798,3 @@
test_runner = plug_runner
result = test_runner.run( tests )
return result, test_config.plugins._plugins
-
-def uninstall_repository_and_repository_dependencies( app, repository_dict ):
- """Uninstall a repository and all of its repository dependencies."""
- # This method assumes that the repositor defined by the received repository_dict is not a repository
- # dependency of another repository.
- sa_session = app.install_model.context
- # The dict contains the only repository the app should have installed at this point.
- name = str( repository_dict[ 'name' ] )
- owner = str( repository_dict[ 'owner' ] )
- changeset_revision = str( repository_dict[ 'changeset_revision' ] )
- # Since this install and test framework uninstalls repositories immediately after installing and testing
- # them, the values of repository.installed_changeset_revision and repository.changeset_revision should be
- # the same.
- repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
- if repository.can_uninstall( app ):
- # A repository can be uninstalled only if no dependent repositories are installed. So uninstallation order
- # id critical. A repository is always uninstalled first, and the each of its dependencies is checked to see
- # if it can be uninstalled.
- uninstall_repository_dict = dict( name=name,
- owner=owner,
- changeset_revision=changeset_revision )
- log.debug( 'Revision %s of repository %s owned by %s selected for uninstallation.' % ( changeset_revision, name, owner ) )
- try:
- test_install_repositories.generate_uninstall_method( uninstall_repository_dict )
- # Set up nose to run the generated uninstall method as a functional test.
- test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
- test_config.configure( sys.argv )
- # Run the uninstall method. This method uses the Galaxy web interface to uninstall the previously installed
- # repository and all of its repository dependencies, deleting each of them from disk.
- result, _ = run_tests( test_config )
- repository_uninstall_successful = result.wasSuccessful()
- except Exception, e:
- repository_uninstall_successful = False
- log.exception( 'Uninstallation of revision %s of repository %s owned by %s failed: %s.' % \
- ( rd_changeset_revision, rd_name, rd_owner, str( e ) ) )
- if repository_uninstall_successful:
- # Now that the repository is uninstalled we can attempt to uninstall each of its repository dependencies.
- # We have to do this through Twill in order to maintain app.toolbox and shed_tool_conf.xml in a state that
- # is valid for future tests. Since some of the repository's repository dependencies may require other of
- # the repository's repository dependencies, we'll keep track of the repositories we've been able to unistall.
- processed_repository_dependency_ids = []
- while len( processed_repository_dependency_ids ) < len( repository.repository_dependencies ):
- for repository_dependency in repository.repository_dependencies:
- if repository_dependency.id not in processed_repository_dependency_ids and repository_dependency.can_uninstall( app ):
- processed_repository_dependency_ids.append( repository_dependency.id )
- rd_name = str( repository_dependency.name )
- rd_owner = str( repository_dependency.owner )
- rd_changeset_revision = str( repository_dependency.changeset_revision )
- uninstall_repository_dict = dict( name=rd_name,
- owner=rd_owner,
- changeset_revision=rd_changeset_revision )
- log.debug( 'Revision %s of repository dependency %s owned by %s selected for uninstallation.' % \
- ( rd_changeset_revision, rd_name, rd_owner ) )
- # Generate a test method to uninstall the repository dependency through the embedded Galaxy application's
- # web interface.
- try:
- test_install_repositories.generate_uninstall_method( uninstall_repository_dict )
- # Set up nose to run the generated uninstall method as a functional test.
- test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
- test_config.configure( sys.argv )
- # Run the uninstall method.
- result, _ = run_tests( test_config )
- if not result.wasSuccessful():
- # We won't set ok here because we'll continue to uninstall whatever we can.
- log.debug( 'Uninstallation of revision %s of repository %s owned by %s failed.' % \
- ( rd_changeset_revision, rd_name, rd_owner ) )
- except Exception, e:
- log.exception( 'Uninstallation of revision %s of repository %s owned by %s failed: %s.' % \
- ( rd_changeset_revision, rd_name, rd_owner, str( e ) ) )
- else:
- log.debug( 'Uninstallation of revision %s of repository %s owned by %s failed.' % ( changeset_revision, name, owner ) )
- else:
- log_reason_repository_cannot_be_uninstalled( app, repository )
-
-def uninstall_tool_dependency( app, tool_dependency ):
- """Attempt to uninstall a tool dependency."""
- sa_session = app.install_model.context
- # Clean out any generated tests. This is necessary for Twill.
- tool_dependency_install_path = tool_dependency.installation_directory( app )
- uninstalled, error_message = tool_dependency_util.remove_tool_dependency( app, tool_dependency )
- if error_message:
- log.debug( 'Error attempting to remove directory: %s' % str( tool_dependency_install_path ) )
- log.debug( error_message )
- else:
- log.debug( 'Successfully removed tool dependency installation directory: %s' % str( tool_dependency_install_path ) )
- if not uninstalled or tool_dependency.status != app.model.ToolDependency.installation_status.UNINSTALLED:
- tool_dependency.status = app.model.ToolDependency.installation_status.UNINSTALLED
- sa_session.add( tool_dependency )
- sa_session.flush()
- if os.path.exists( tool_dependency_install_path ):
- log.debug( 'Uninstallation of tool dependency succeeded, but the installation path still exists on the filesystem. It is now being explicitly deleted.')
- suc.remove_dir( tool_dependency_install_path )
-
diff -r 0fb6c5d7312aafece427c4e9b19dc5e5b991c6d8 -r 0de3c7e7d6327e1af295fc4d81419efd8b5f41af test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py
--- a/test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py
+++ b/test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py
@@ -9,21 +9,6 @@
class InstallTestRepositories( InstallTestRepository ):
"""Abstract test case that installs and uninstalls a predefined list of repositories."""
- def do_deactivate( self, repository_dict ):
- self.logout()
- self.login( email='test(a)bx.psu.edu', username='test' )
- admin_user = test_db_util.get_user( 'test(a)bx.psu.edu' )
- assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
- # Get the repository defined by the received repository_dict along with all of its repository dependencies
- # from the database.
- name = repository_dict[ 'name' ]
- owner = repository_dict[ 'owner' ]
- changeset_revision = repository_dict[ 'changeset_revision' ]
- repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
- admin_user_private_role = test_db_util.get_private_role( admin_user )
- # Uninstall the repository through the web interface using twill.
- self.deactivate_repository( repository )
-
def do_install( self, repository_dict ):
self.logout()
self.login( email='test(a)bx.psu.edu', username='test' )
@@ -34,50 +19,6 @@
# actually install more than this singe repository because repository dependencies can be installed.
self.install_repository( repository_dict )
- def do_uninstall( self, repository_dict ):
- self.logout()
- self.login( email='test(a)bx.psu.edu', username='test' )
- admin_user = test_db_util.get_user( 'test(a)bx.psu.edu' )
- assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
- # Get the repository defined by the received repository_dict along with all of its repository dependencies
- # from the database.
- name = repository_dict[ 'name' ]
- owner = repository_dict[ 'owner' ]
- changeset_revision = repository_dict[ 'changeset_revision' ]
- repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
- admin_user_private_role = test_db_util.get_private_role( admin_user )
- # Uninstall the repository and all of its repository dependencies through the Galaxy web interface using twill.
- self.uninstall_repository( repository )
-
-def generate_deactivate_method( repository_dict=None ):
- """Generate abstract test cases for the received repository_dict."""
- if repository_dict is None:
- return
- # Push all the toolbox tests to module level
- G = globals()
- # Eliminate all previous tests from G.
- for key, val in G.items():
- if key.startswith( 'TestInstallRepository_' ) or key.startswith( 'TestForTool_' ):
- del G[ key ]
- # Create a new subclass with a method named install_repository_XXX that deactivates the
- # repository specified by the received repository_dict.
- name = "TestDeactivateRepository_%s_%s" % \
- ( str( repository_dict[ 'name' ] ), str( repository_dict[ 'changeset_revision' ] ) )
- baseclasses = ( InstallTestRepositories, )
- namespace = dict()
- def make_deactivate_method( repository_dict ):
- def test_deactivate_repository( self ):
- self.do_deactivate( repository_dict )
- return test_deactivate_repository
- test_method = make_deactivate_method( repository_dict )
- test_method.__doc__ = "Deactivate the repository %s." % str( repository_dict[ 'name' ] )
- namespace[ 'uninstall_repository_%s_%s' % ( str( repository_dict[ 'name' ] ),
- str( repository_dict[ 'changeset_revision' ] ) ) ] = test_method
- # The new.classobj function returns a new class object, with name name, derived
- # from baseclasses (which should be a tuple of classes) and with namespace dict.
- new_class_obj = new.classobj( str( name ), baseclasses, namespace )
- G[ name ] = new_class_obj
-
def generate_install_method( repository_dict=None ):
"""Generate abstract test cases for the defined list of repositories."""
if repository_dict is None:
@@ -104,32 +45,3 @@
# from baseclasses (which should be a tuple of classes) and with namespace dict.
new_class_obj = new.classobj( str( name ), baseclasses, namespace )
G[ name ] = new_class_obj
-
-def generate_uninstall_method( repository_dict=None ):
- """Generate abstract test cases for the received repository_dict."""
- if repository_dict is None:
- return
- # Push all the toolbox tests to module level
- G = globals()
- # Eliminate all previous tests from G.
- for key, val in G.items():
- if key.startswith( 'TestInstallRepository_' ) or key.startswith( 'TestForTool_' ):
- del G[ key ]
- # Create a new subclass with a method named install_repository_XXX that installs the repository
- # specified by the received repository_dict.
- name = "TestUninstallRepository_%s_%s" % \
- ( str( repository_dict[ 'name' ] ), str( repository_dict[ 'changeset_revision' ] ) )
- baseclasses = ( InstallTestRepositories, )
- namespace = dict()
- def make_uninstall_method( repository_dict ):
- def test_uninstall_repository( self ):
- self.do_uninstall( repository_dict )
- return test_uninstall_repository
- test_method = make_uninstall_method( repository_dict )
- test_method.__doc__ = "Uninstall the repository %s." % repository_dict[ 'name' ]
- namespace[ 'uninstall_repository_%s_%s' % ( str( repository_dict[ 'name' ] ),
- str( repository_dict[ 'changeset_revision' ] ) ) ] = test_method
- # The new.classobj function returns a new class object, with name name, derived
- # from baseclasses (which should be a tuple of classes) and with namespace dict.
- new_class_obj = new.classobj( str( name ), baseclasses, namespace )
- G[ name ] = new_class_obj
diff -r 0fb6c5d7312aafece427c4e9b19dc5e5b991c6d8 -r 0de3c7e7d6327e1af295fc4d81419efd8b5f41af test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -76,7 +76,7 @@
can_update_tool_shed = True
test_framework = install_and_test_base_util.REPOSITORIES_WITH_TOOLS
-
+
def get_failed_test_dicts( test_result, from_tool_test=True ):
"""Extract any useful data from the test_result.failures and test_result.errors attributes."""
failed_test_dicts = []
@@ -177,7 +177,11 @@
log.debug( error_message )
else:
tool_test_results_dict = install_and_test_base_util.get_tool_test_results_dict( tool_test_results_dicts )
- is_excluded, reason = install_and_test_base_util.is_excluded( exclude_list_dicts, name, owner, changeset_revision )
+ is_excluded, reason = install_and_test_base_util.is_excluded( exclude_list_dicts,
+ name,
+ owner,
+ changeset_revision,
+ encoded_repository_metadata_id )
if is_excluded:
# If this repository is being skipped, register the reason.
tool_test_results_dict[ 'not_tested' ] = dict( reason=reason )
@@ -193,28 +197,15 @@
( changeset_revision, name, owner ) )
else:
tool_test_results_dict = install_and_test_base_util.initialize_tool_tests_results_dict( app, tool_test_results_dict )
- # Explicitly clear tests from twill's test environment.
- remove_generated_tests( app )
# Proceed with installing repositories and testing contained tools.
repository, error_message = install_and_test_base_util.install_repository( app, repository_dict )
install_and_test_statistics_dict[ 'total_repositories_processed' ] += 1
if error_message:
# The repository installation failed.
- log.debug( 'Installation failed for revision %s of repository %s owned by %s.' % \
- ( changeset_revision, name, owner ) )
+ log.debug( 'Installation failed for revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
install_and_test_statistics_dict[ 'repositories_with_installation_error' ].append( repository_identifier_dict )
tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = error_message
- # Even if the repository failed to install, execute the uninstall method, in case a dependency did succeed.
- log.debug( 'Attempting to uninstall revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
- try:
- repository = \
- test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
- except Exception, e:
- error_message = 'Unable to find revision %s of repository %s owned by %s: %s.' % \
- ( changeset_revision, name, owner, str( e ) )
- log.exception( error_message )
- params = dict( test_install_error=True,
- do_not_test=False )
+ params = dict( test_install_error=True )
# TODO: do something useful with response_dict
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
@@ -222,19 +213,10 @@
repository_dict,
params,
can_update_tool_shed )
- try:
- # We are uninstalling this repository and all of its repository dependencies.
- install_and_test_base_util.uninstall_repository_and_repository_dependencies( app, repository_dict )
- except Exception, e:
- log.exception( 'Error attempting to uninstall revision %s of repository %s owned by %s: %s' % \
- ( changeset_revision, name, owner, str( e ) ) )
- # Clean out any generated tests. This is necessary for Twill.
- remove_generated_tests( app )
- install_and_test_statistics_dict[ 'repositories_with_installation_error' ].append( repository_identifier_dict )
+ else:
+ # The repository was successfully installed.
log.debug( 'Installation succeeded for revision %s of repository %s owned by %s.' % \
( changeset_revision, name, owner ) )
- else:
- # The repository was successfully installed.
params, install_and_test_statistics_dict, tool_test_results_dict = \
install_and_test_base_util.register_installed_and_missing_dependencies( app,
repository,
@@ -257,26 +239,23 @@
tool_test_results_dict=tool_test_results_dict,
params=params,
can_update_tool_shed=can_update_tool_shed )
- # Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
- test_toolbox.toolbox = app.toolbox
else:
- # This repository and all of its dependencies were successfully installed.
- # Configure and run functional tests for this repository. This is equivalent to
+ log.debug( 'Installation of %s succeeded, running all defined functional tests.' % str( repository.name ) )
+ # Generate the shed_tools_dict that specifies the location of test data contained within this repository.
+ # and configure and run functional tests for this repository. This is equivalent to
# sh run_functional_tests.sh -installed
- remove_install_tests()
- log.debug( 'Installation of %s succeeded, running all defined functional tests.' % str( repository.name ) )
- # Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
- # does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
- # and the tools_functionally_correct flag to False, as well as updating tool_test_results.
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
+ # Find the path to the test-data directory within the installed repository.
has_test_data, shed_tools_dict = \
parse_tool_panel_config( galaxy_shed_tool_conf_file,
from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
- # If the repository has a test-data directory we write the generated shed_tools_dict to a file, so the functional
- # test framework can find it.
+ # If the repository has a test-data directory we write the generated shed_tools_dict to a temporary
+ # file so the functional test framework can find it.
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
log.debug( 'Saved generated shed_tools_dict to %s\nContents: %s' % ( str( galaxy_shed_tools_dict ),
str( shed_tools_dict ) ) )
+ # Remove twills' old generated test before building the new tests for the current tools.
+ remove_tests( app )
try:
install_and_test_statistics_dict = test_repository_tools( app,
repository,
@@ -441,6 +420,8 @@
config_items.append( config_tuple )
# Write a temporary file, based on universe_wsgi.ini.sample, using the configuration options defined above.
generate_config_file( 'universe_wsgi.ini.sample', galaxy_config_file, config_items )
+ # kwargs must be a list when passed to the Galaxy webapp (i.e., UniverseApplication).
+ # The following is used to set metadata externally.
kwargs[ 'tool_config_file' ] = [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ]
# Set the global_conf[ '__file__' ] option to the location of the temporary .ini file, which gets passed to set_metadata.sh.
kwargs[ 'global_conf' ] = install_and_test_base_util.get_webapp_global_conf()
@@ -581,46 +562,45 @@
# Return a "successful" response to buildbot.
return 0
-def remove_generated_tests( app ):
+def remove_tests( app ):
"""
Delete any configured tool functional tests from the test_toolbox.__dict__, otherwise nose will find them
and try to re-run the tests after uninstalling the repository, which will cause false failure reports,
since the test data has been deleted from disk by now.
"""
tests_to_delete = []
- tools_to_delete = []
+ tools_to_delete_by_id = []
global test_toolbox
+ # Find all tests previously generated by twill.
for key in test_toolbox.__dict__:
if key.startswith( 'TestForTool_' ):
- log.debug( 'Tool test found in test_toolbox, deleting: %s' % str( key ) )
+ log.debug( 'Tool test %s discovered in test_toolbox.' % str( key ) )
# We can't delete this test just yet, we're still iterating over __dict__.
tests_to_delete.append( key )
tool_id = key.replace( 'TestForTool_', '' )
- for tool in app.toolbox.tools_by_id:
- if tool.replace( '_', ' ' ) == tool_id.replace( '_', ' ' ):
- tools_to_delete.append( tool )
+ for app_tool_id in app.toolbox.tools_by_id:
+ if app_tool_id.replace( '_', ' ' ) == tool_id.replace( '_', ' ' ):
+ tools_to_delete_by_id.append( tool_id )
+ # Delete the discovered twill-generated tests.
for key in tests_to_delete:
- # Now delete the tests found in the previous loop.
- del test_toolbox.__dict__[ key ]
- for tool in tools_to_delete:
- del app.toolbox.tools_by_id[ tool ]
-
-def remove_install_tests():
- """
- Delete any configured repository installation tests from the test_toolbox.__dict__, otherwise nose will find them
- and try to install the repository again while running tool functional tests.
- """
- tests_to_delete = []
- global test_toolbox
+ if key in test_toolbox.__dict__:
+ log.debug( 'Deleting test %s from test_toolbox.' % str( key ) )
+ del test_toolbox.__dict__[ key ]
+ for tool_id in tools_to_delete_by_id:
+ if tool_id in app.toolbox.tools_by_id:
+ log.debug( 'Deleting tool id %s from app.toolbox[ tools_by_id ].' % str( tool_id ) )
+ del app.toolbox.tools_by_id[ tool_id ]
# Push all the toolbox tests to module level
for key in test_install_repositories.__dict__:
if key.startswith( 'TestInstallRepository_' ):
- log.debug( 'Repository installation process found, deleting: %s' % str( key ) )
+ log.debug( 'Repository installation process test %s discovered.' % str( key ) )
# We can't delete this test just yet, we're still iterating over __dict__.
tests_to_delete.append( key )
+ # Delete the discovered twill-generated tests.
for key in tests_to_delete:
- # Now delete the tests found in the previous loop.
- del test_install_repositories.__dict__[ key ]
+ if key in test_install_repositories.__dict__:
+ log.debug( 'Deleting test %s from test_toolbox.' % str( key ) )
+ del test_install_repositories.__dict__[ key ]
def test_repository_tools( app, repository, repository_dict, tool_test_results_dicts, tool_test_results_dict,
install_and_test_statistics_dict ):
@@ -629,7 +609,9 @@
owner = str( repository.owner )
changeset_revision = str( repository.changeset_revision )
repository_identifier_dict = dict( name=name, owner=owner, changeset_revision=changeset_revision )
- # Set the module-level variable 'toolbox', so that test.functional.test_toolbox will generate the appropriate test methods.
+ # Set the module-level variable 'toolbox', so that test.functional.test_toolbox will generate the
+ # appropriate test methods. At this point, app.toolbox contains the upload tool and all tools contained
+ # in the repository.
test_toolbox.toolbox = app.toolbox
# Generate the test methods for this installed repository. We need to pass in True here, or it will look
# in $GALAXY_HOME/test-data for test data, which may result in missing or invalid test files.
@@ -692,15 +674,6 @@
can_update_tool_shed )
log.debug( 'Revision %s of repository %s owned by %s installed successfully but did not pass functional tests.' % \
( changeset_revision, name, owner ) )
- # Run the uninstall method. This removes tool functional test methods from the test_toolbox module and uninstalls the
- # repository using Twill.
- log.debug( 'Uninstalling changeset revision %s of repository %s' % ( str( changeset_revision ), str( name ) ) )
- # We are uninstalling this repository and all of its repository dependencies.
- install_and_test_base_util.uninstall_repository_and_repository_dependencies( app, repository_dict )
- # Clean out any generated tests. This is necessary for Twill.
- remove_generated_tests( app )
- # Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
- test_toolbox.toolbox = app.toolbox
return install_and_test_statistics_dict
if __name__ == "__main__":
diff -r 0fb6c5d7312aafece427c4e9b19dc5e5b991c6d8 -r 0de3c7e7d6327e1af295fc4d81419efd8b5f41af test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
@@ -115,7 +115,11 @@
log.debug( error_message )
else:
tool_test_results_dict = install_and_test_base_util.get_tool_test_results_dict( tool_test_results_dicts )
- is_excluded, reason = install_and_test_base_util.is_excluded( exclude_list_dicts, name, owner, changeset_revision )
+ is_excluded, reason = install_and_test_base_util.is_excluded( exclude_list_dicts,
+ name,
+ owner,
+ changeset_revision,
+ encoded_repository_metadata_id )
if this_repository_is_in_the_exclude_list or requires_excluded:
# If this repository is being skipped, register the reason.
tool_test_results_dict[ 'not_tested' ] = dict( reason=reason )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
4 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/882d74715634/
Changeset: 882d74715634
Branch: ccat3
User: BjoernGruening
Date: 2013-11-11 10:09:54
Summary: add dependency of type package to the ccat wrapper
Affected #: 1 file
diff -r ea6ff3d7224db5d218eefa91af1b08626229eae1 -r 882d7471563422315959211baf12e89ae0e94a29 tools/peak_calling/ccat_wrapper.xml
--- a/tools/peak_calling/ccat_wrapper.xml
+++ b/tools/peak_calling/ccat_wrapper.xml
@@ -10,6 +10,7 @@
'$output_peak_file' '$output_region_file' '$output_top_file' '$output_log_file'</command><requirements><requirement type="binary" version="3.0">CCAT</requirement>
+ <requirement type="package" version="3.0">CCAT</requirement></requirements><inputs><param name="input_tag_file" type="data" format="bed" label="ChIP-Seq Tag File" >
https://bitbucket.org/galaxy/galaxy-central/commits/446d410097fd/
Changeset: 446d410097fd
Branch: ccat3
User: BjoernGruening
Date: 2013-11-11 10:10:41
Summary: remove ccat_2_wrapper.xml, afaik it is not used anymore
Affected #: 1 file
diff -r 882d7471563422315959211baf12e89ae0e94a29 -r 446d410097fd99711d3ef3160f8fd4e74f9d1a94 tools/peak_calling/ccat_2_wrapper.xml
--- a/tools/peak_calling/ccat_2_wrapper.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<tool id="peakcalling_ccat2" name="CCAT" version="0.0.1">
- <description>Control-based ChIP-seq Analysis Tool</description>
- <command interpreter="python">ccat_wrapper.py '$input_tag_file' '$input_control_file' '$chromInfo'
- #if str( $options_type[ 'options_type_selector' ] ) == 'advanced':
- '$input_advanced_config_file'
- #else:
- '${ options_type.input_config_file.fields.path }'
- #end if
- 'CCAT in Galaxy'
- '$output_peak_file' '$output_region_file' '$output_top_file' '$output_log_file'</command>
- <requirements>
- <requirement type="binary">CCAT</requirement>
- </requirements>
- <inputs>
- <param name="input_tag_file" type="data" format="bed" label="ChIP-Seq Tag File" >
- <validator type="unspecified_build" />
- </param>
- <param name="input_control_file" type="data" format="bed" label="ChIP-Seq Control File" >
- <validator type="unspecified_build" />
- </param>
- <conditional name="options_type">
- <param name="options_type_selector" type="select" label="Advanced Options">
- <option value="basic" selected="True">Hide Advanced Options</option>
- <option value="advanced">Show Advanced Options</option>
- </param>
- <when value="basic">
- <param name="input_config_file" type="select" label="Select a pre-defined configuration file">
- <options from_data_table="ccat_configurations">
- <validator type="no_options" message="No configurations are available"/>
- </options>
- </param>
- </when>
- <when value="advanced">
- <param name="fragment_size" type="integer" label="Length of DNA fragment" value="200"/>
- <param name="sliding_window_size" type="integer" label="Sliding window size" value="500" help="transcription factor binding default: 300; histone modifications default: 500"/>
- <param name="moving_step" type="integer" label="Step of sliding window" value="50" help="transcription factor binding default: 10; histone modifications default: 50"/>
- <param name="is_strand_sensitive_mode" type="select" label="isStrandSensitiveMode" >
- <option value="1">Transition from sense strand to anti-sense strand</option>
- <option value="0" selected="True">Local maximum of read-enrichment profile</option>
- </param>
- <param name="min_count" type="integer" label="Minimum number of read counts at the peak" value="4"/>
- <param name="output_num" type="integer" label="Number of peaks reported in top peak file" value="100000"/>
- <param name="random_seed" type="integer" label="Random Seed" value="123456"/>
- <param name="min_score" type="float" label="Minimum score of normalized difference" value="3.0"/>
- <param name="bootstrap_pass" type="integer" label="Number of passes in the bootstrapping process" value="50"/>
- </when>
- </conditional>
- </inputs>
- <outputs>
- <data name="output_peak_file" format="interval" label="${tool.name} on ${on_string} (peaks)">
- <actions>
- <action type="metadata" name="chromCol" default="1"/>
- <action type="metadata" name="startCol" default="3"/>
- <action type="metadata" name="endCol" default="4"/>
- </actions>
- </data>
- <data name="output_region_file" format="interval" label="${tool.name} on ${on_string} (regions)">
- <actions>
- <action type="metadata" name="chromCol" default="1"/>
- <action type="metadata" name="startCol" default="3"/>
- <action type="metadata" name="endCol" default="4"/>
- </actions>
- </data>
- <data name="output_top_file" format="interval" label="${tool.name} on ${on_string} (top peaks)">
- <actions>
- <action type="metadata" name="chromCol" default="1"/>
- <action type="metadata" name="startCol" default="3"/>
- <action type="metadata" name="endCol" default="4"/>
- </actions>
- </data>
- <data name="output_log_file" format="txt" label="${tool.name} on ${on_string} (log)"/>
- </outputs>
- <configfiles>
- <configfile name="input_advanced_config_file">#if str( $options_type['options_type_selector' ] ) == 'advanced':
-fragmentSize ${options_type[ 'fragment_size' ]}
-slidingWinSize ${options_type[ 'sliding_window_size' ]}
-movingStep ${options_type[ 'moving_step' ]}
-isStrandSensitiveMode ${options_type[ 'is_strand_sensitive_mode' ]}
-minCount ${options_type[ 'min_count' ]}
-outputNum ${options_type[ 'output_num' ]}
-randomSeed ${options_type[ 'random_seed' ]}
-minScore ${options_type[ 'min_score' ]}
-bootstrapPass ${options_type[ 'bootstrap_pass' ]}
-#end if</configfile>
- </configfiles>
- <tests>
- <test>
- <param name="input_tag_file" value="chipseq_enriched.bed.gz" ftype="bed" dbkey="hg18" />
- <param name="input_control_file" value="chipseq_input.bed.gz" ftype="bed" dbkey="hg18" />
- <param name="options_type_selector" value="basic" />
- <param name="input_config_file" value="ccat_2.0_histone_config" />
- <output name="output_peak_file" file="peakcalling_ccat2/ccat2_test_peak_out_1.interval" />
- <output name="output_region_file" file="peakcalling_ccat2/ccat2_test_region_out_1.interval" />
- <output name="output_top_file" file="peakcalling_ccat2/ccat2_test_top_out_1.interval" />
- <output name="output_log_file" file="peakcalling_ccat2/ccat2_test_log_out_1.interval" />
- </test>
- <test>
- <param name="input_tag_file" value="chipseq_enriched.bed.gz" ftype="bed" dbkey="hg18" />
- <param name="input_control_file" value="chipseq_input.bed.gz" ftype="bed" dbkey="hg18" />
- <param name="options_type_selector" value="advanced" />
- <param name="fragment_size" value="200" />
- <param name="sliding_window_size" value="500" />
- <param name="moving_step" value="50" />
- <param name="is_strand_sensitive_mode" value="0" />
- <param name="min_count" value="4" />
- <param name="output_num" value="100000" />
- <param name="random_seed" value="123456" />
- <param name="min_score" value="3.0" />
- <param name="bootstrap_pass" value="50" />
- <output name="output_peak_file" file="peakcalling_ccat2/ccat2_test_peak_out_1.interval" />
- <output name="output_region_file" file="peakcalling_ccat2/ccat2_test_region_out_1.interval" />
- <output name="output_top_file" file="peakcalling_ccat2/ccat2_test_top_out_1.interval" />
- <output name="output_log_file" file="peakcalling_ccat2/ccat2_test_log_out_1.interval" />
- </test>
- </tests>
- <help>
-**What it does**
-
-This tool allows ChIP-seq peak/region calling using CCAT.
-
-View the original CCAT documentation: http://cmb.gis.a-star.edu.sg/ChIPSeq/paperCCAT.htm.
-
-------
-
-**Citation**
-
-If you use this tool in Galaxy, please cite Blankenberg D, et al. *In preparation.*
-
- </help>
-</tool>
https://bitbucket.org/galaxy/galaxy-central/commits/936b96845324/
Changeset: 936b96845324
Branch: ccat3
User: BjoernGruening
Date: 2013-11-11 15:35:45
Summary: remove binary dependency
Affected #: 1 file
diff -r 446d410097fd99711d3ef3160f8fd4e74f9d1a94 -r 936b96845324311668e5ee82761ed18a551a9fa1 tools/peak_calling/ccat_wrapper.xml
--- a/tools/peak_calling/ccat_wrapper.xml
+++ b/tools/peak_calling/ccat_wrapper.xml
@@ -9,7 +9,6 @@
'CCAT in Galaxy'
'$output_peak_file' '$output_region_file' '$output_top_file' '$output_log_file'</command><requirements>
- <requirement type="binary" version="3.0">CCAT</requirement><requirement type="package" version="3.0">CCAT</requirement></requirements><inputs>
https://bitbucket.org/galaxy/galaxy-central/commits/0fb6c5d7312a/
Changeset: 0fb6c5d7312a
User: natefoo
Date: 2013-12-20 16:23:16
Summary: Merged in BjoernGruening/galaxy-central-bgruening/ccat3 (pull request #257)
Adding requirement tag to the ccat wrapper and remove old ccat2 wrapper.
Affected #: 1 file
diff -r 2fcb2c12183df5ee7ec96d0f1a253c23448e4410 -r 0fb6c5d7312aafece427c4e9b19dc5e5b991c6d8 tools/peak_calling/ccat_2_wrapper.xml
--- a/tools/peak_calling/ccat_2_wrapper.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<tool id="peakcalling_ccat2" name="CCAT" version="0.0.1">
- <description>Control-based ChIP-seq Analysis Tool</description>
- <command interpreter="python">ccat_wrapper.py '$input_tag_file' '$input_control_file' '$chromInfo'
- #if str( $options_type[ 'options_type_selector' ] ) == 'advanced':
- '$input_advanced_config_file'
- #else:
- '${ options_type.input_config_file.fields.path }'
- #end if
- 'CCAT in Galaxy'
- '$output_peak_file' '$output_region_file' '$output_top_file' '$output_log_file'</command>
- <requirements>
- <requirement type="binary">CCAT</requirement>
- </requirements>
- <inputs>
- <param name="input_tag_file" type="data" format="bed" label="ChIP-Seq Tag File" >
- <validator type="unspecified_build" />
- </param>
- <param name="input_control_file" type="data" format="bed" label="ChIP-Seq Control File" >
- <validator type="unspecified_build" />
- </param>
- <conditional name="options_type">
- <param name="options_type_selector" type="select" label="Advanced Options">
- <option value="basic" selected="True">Hide Advanced Options</option>
- <option value="advanced">Show Advanced Options</option>
- </param>
- <when value="basic">
- <param name="input_config_file" type="select" label="Select a pre-defined configuration file">
- <options from_data_table="ccat_configurations">
- <validator type="no_options" message="No configurations are available"/>
- </options>
- </param>
- </when>
- <when value="advanced">
- <param name="fragment_size" type="integer" label="Length of DNA fragment" value="200"/>
- <param name="sliding_window_size" type="integer" label="Sliding window size" value="500" help="transcription factor binding default: 300; histone modifications default: 500"/>
- <param name="moving_step" type="integer" label="Step of sliding window" value="50" help="transcription factor binding default: 10; histone modifications default: 50"/>
- <param name="is_strand_sensitive_mode" type="select" label="isStrandSensitiveMode" >
- <option value="1">Transition from sense strand to anti-sense strand</option>
- <option value="0" selected="True">Local maximum of read-enrichment profile</option>
- </param>
- <param name="min_count" type="integer" label="Minimum number of read counts at the peak" value="4"/>
- <param name="output_num" type="integer" label="Number of peaks reported in top peak file" value="100000"/>
- <param name="random_seed" type="integer" label="Random Seed" value="123456"/>
- <param name="min_score" type="float" label="Minimum score of normalized difference" value="3.0"/>
- <param name="bootstrap_pass" type="integer" label="Number of passes in the bootstrapping process" value="50"/>
- </when>
- </conditional>
- </inputs>
- <outputs>
- <data name="output_peak_file" format="interval" label="${tool.name} on ${on_string} (peaks)">
- <actions>
- <action type="metadata" name="chromCol" default="1"/>
- <action type="metadata" name="startCol" default="3"/>
- <action type="metadata" name="endCol" default="4"/>
- </actions>
- </data>
- <data name="output_region_file" format="interval" label="${tool.name} on ${on_string} (regions)">
- <actions>
- <action type="metadata" name="chromCol" default="1"/>
- <action type="metadata" name="startCol" default="3"/>
- <action type="metadata" name="endCol" default="4"/>
- </actions>
- </data>
- <data name="output_top_file" format="interval" label="${tool.name} on ${on_string} (top peaks)">
- <actions>
- <action type="metadata" name="chromCol" default="1"/>
- <action type="metadata" name="startCol" default="3"/>
- <action type="metadata" name="endCol" default="4"/>
- </actions>
- </data>
- <data name="output_log_file" format="txt" label="${tool.name} on ${on_string} (log)"/>
- </outputs>
- <configfiles>
- <configfile name="input_advanced_config_file">#if str( $options_type['options_type_selector' ] ) == 'advanced':
-fragmentSize ${options_type[ 'fragment_size' ]}
-slidingWinSize ${options_type[ 'sliding_window_size' ]}
-movingStep ${options_type[ 'moving_step' ]}
-isStrandSensitiveMode ${options_type[ 'is_strand_sensitive_mode' ]}
-minCount ${options_type[ 'min_count' ]}
-outputNum ${options_type[ 'output_num' ]}
-randomSeed ${options_type[ 'random_seed' ]}
-minScore ${options_type[ 'min_score' ]}
-bootstrapPass ${options_type[ 'bootstrap_pass' ]}
-#end if</configfile>
- </configfiles>
- <tests>
- <test>
- <param name="input_tag_file" value="chipseq_enriched.bed.gz" ftype="bed" dbkey="hg18" />
- <param name="input_control_file" value="chipseq_input.bed.gz" ftype="bed" dbkey="hg18" />
- <param name="options_type_selector" value="basic" />
- <param name="input_config_file" value="ccat_2.0_histone_config" />
- <output name="output_peak_file" file="peakcalling_ccat2/ccat2_test_peak_out_1.interval" />
- <output name="output_region_file" file="peakcalling_ccat2/ccat2_test_region_out_1.interval" />
- <output name="output_top_file" file="peakcalling_ccat2/ccat2_test_top_out_1.interval" />
- <output name="output_log_file" file="peakcalling_ccat2/ccat2_test_log_out_1.interval" />
- </test>
- <test>
- <param name="input_tag_file" value="chipseq_enriched.bed.gz" ftype="bed" dbkey="hg18" />
- <param name="input_control_file" value="chipseq_input.bed.gz" ftype="bed" dbkey="hg18" />
- <param name="options_type_selector" value="advanced" />
- <param name="fragment_size" value="200" />
- <param name="sliding_window_size" value="500" />
- <param name="moving_step" value="50" />
- <param name="is_strand_sensitive_mode" value="0" />
- <param name="min_count" value="4" />
- <param name="output_num" value="100000" />
- <param name="random_seed" value="123456" />
- <param name="min_score" value="3.0" />
- <param name="bootstrap_pass" value="50" />
- <output name="output_peak_file" file="peakcalling_ccat2/ccat2_test_peak_out_1.interval" />
- <output name="output_region_file" file="peakcalling_ccat2/ccat2_test_region_out_1.interval" />
- <output name="output_top_file" file="peakcalling_ccat2/ccat2_test_top_out_1.interval" />
- <output name="output_log_file" file="peakcalling_ccat2/ccat2_test_log_out_1.interval" />
- </test>
- </tests>
- <help>
-**What it does**
-
-This tool allows ChIP-seq peak/region calling using CCAT.
-
-View the original CCAT documentation: http://cmb.gis.a-star.edu.sg/ChIPSeq/paperCCAT.htm.
-
-------
-
-**Citation**
-
-If you use this tool in Galaxy, please cite Blankenberg D, et al. *In preparation.*
-
- </help>
-</tool>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Refactor method to determine excluded repositories in the tool shed's install and test framework.
by commits-noreply@bitbucket.org 20 Dec '13
by commits-noreply@bitbucket.org 20 Dec '13
20 Dec '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/2fcb2c12183d/
Changeset: 2fcb2c12183d
User: greg
Date: 2013-12-20 16:05:57
Summary: Refactor method to determine excluded repositories in the tool shed's install and test framework.
Affected #: 3 files
diff -r bedb1e48e5c724d817f1a695ed068e0745f066cd -r 2fcb2c12183df5ee7ec96d0f1a253c23448e4410 test/install_and_test_tool_shed_repositories/base/util.py
--- a/test/install_and_test_tool_shed_repositories/base/util.py
+++ b/test/install_and_test_tool_shed_repositories/base/util.py
@@ -591,6 +591,41 @@
log.exception( error_message )
return repository, error_message
+def is_excluded( exclude_list_dicts, name, owner, changeset_revision ):
+ """
+ Return True if the repository defined by the received name, owner, changeset_revision should
+ be excluded from testing for any reason.
+ """
+ for exclude_dict in exclude_list_dicts:
+ reason = exclude_dict[ 'reason' ]
+ exclude_repositories = exclude_dict[ 'repositories' ]
+ # 'repositories':
+ # [( name, owner, changeset_revision if changeset_revision else None ),
+ # ( name, owner, changeset_revision if changeset_revision else None )]
+ if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
+ return True, reason
+ # Skip this repository if it has a repository dependency that is in the exclude list.
+ repository_dependency_dicts, error_message = \
+ get_repository_dependencies_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
+ encoded_repository_metadata_id )
+ if error_message:
+ log.debug( 'Error getting repository dependencies for revision %s of repository %s owned by %s:' % \
+ ( changeset_revision, name, owner ) )
+ log.debug( error_message )
+ else:
+ for repository_dependency_dict in repository_dependency_dicts:
+ rd_name = repository_dependency_dict[ 'name' ]
+ rd_owner = repository_dependency_dict[ 'owner' ]
+ rd_changeset_revision = repository_dependency_dict[ 'changeset_revision' ]
+ if ( rd_name, rd_owner, rd_changeset_revision ) in exclude_repositories or \
+ ( rd_name, rd_owner, None ) in exclude_repositories:
+ reason = 'This repository requires revision %s of repository %s owned by %s which is excluded from testing.' % \
+ ( rd_changeset_revision, rd_name, rd_owner )
+ return True, reason
+ break
+ return False, None
+
+
def is_latest_downloadable_revision( url, repository_dict ):
name = str( repository_dict[ 'name' ] )
owner = str( repository_dict[ 'owner' ] )
diff -r bedb1e48e5c724d817f1a695ed068e0745f066cd -r 2fcb2c12183df5ee7ec96d0f1a253c23448e4410 test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -151,33 +151,15 @@
# [{ 'reason': The default reason or the reason specified in this section,
# 'repositories': [( name, owner, changeset_revision if changeset_revision else None ),
# ( name, owner, changeset_revision if changeset_revision else None )]}]
- exclude_list = install_and_test_base_util.parse_exclude_list( exclude_list_file )
+ exclude_list_dicts = install_and_test_base_util.parse_exclude_list( exclude_list_file )
else:
- exclude_list = []
+ exclude_list_dicts = []
# Generate a test method that will use Twill to install each repository into the embedded Galaxy application that was
# started up, installing repository and tool dependencies. Upon successful installation, generate a test case for each
# functional test defined for each tool in the repository and execute the test cases. Record the result of the tests.
# The traceback and captured output of the tool that was run will be recored for test failures. After all tests have
# completed, the repository is uninstalled, so test cases don't interfere with the next repository's functional tests.
for repository_dict in repositories_to_install:
- # Each repository_dict looks something like:
- # { "changeset_revision": "13fa22a258b5",
- # "contents_url": "/api/repositories/529fd61ab1c6cc36/contents",
- # "deleted": false,
- # "deprecated": false,
- # "description": "Convert column case.",
- # "downloadable": true,
- # "id": "529fd61ab1c6cc36",
- # "long_description": "This tool takes the specified columns and converts them to uppercase or lowercase.",
- # "malicious": false,
- # "name": "change_case",
- # "owner": "test",
- # "private": false,
- # "repository_id": "529fd61ab1c6cc36",
- # "times_downloaded": 0,
- # "tool_shed_url": "http://toolshed.local:10001",
- # "url": "/api/repository_revisions/529fd61ab1c6cc36",
- # "user_id": "529fd61ab1c6cc36" }
encoded_repository_metadata_id = repository_dict.get( 'id', None )
# Add the URL for the tool shed we're installing from, so the automated installation methods go to the right place.
repository_dict[ 'tool_shed_url' ] = install_and_test_base_util.galaxy_tool_shed_url
@@ -195,43 +177,10 @@
log.debug( error_message )
else:
tool_test_results_dict = install_and_test_base_util.get_tool_test_results_dict( tool_test_results_dicts )
- # See if this repository should be skipped for any reason.
- this_repository_is_in_the_exclude_list = False
- requires_excluded = False
- skip_reason = None
- for exclude_dict in exclude_list:
- reason = exclude_dict[ 'reason' ]
- exclude_repositories = exclude_dict[ 'repositories' ]
- # 'repositories':
- # [( name, owner, changeset_revision if changeset_revision else None ),
- # ( name, owner, changeset_revision if changeset_revision else None )]
- if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
- this_repository_is_in_the_exclude_list = True
- skip_reason = reason
- break
- if not this_repository_is_in_the_exclude_list:
- # Skip this repository if it has a repository dependency that is in the exclude list.
- repository_dependency_dicts, error_message = \
- install_and_test_base_util.get_repository_dependencies_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
- encoded_repository_metadata_id )
- if error_message:
- log.debug( 'Error getting repository dependencies for revision %s of repository %s owned by %s:' % \
- ( changeset_revision, name, owner ) )
- log.debug( error_message )
- else:
- for repository_dependency_dict in repository_dependency_dicts:
- rd_name = repository_dependency_dict[ 'name' ]
- rd_owner = repository_dependency_dict[ 'owner' ]
- rd_changeset_revision = repository_dependency_dict[ 'changeset_revision' ]
- if ( rd_name, rd_owner, rd_changeset_revision ) in exclude_repositories or \
- ( rd_name, rd_owner, None ) in exclude_repositories:
- skip_reason = 'This repository requires revision %s of repository %s owned by %s which is excluded from testing.' % \
- ( rd_changeset_revision, rd_name, rd_owner )
- requires_excluded = True
- break
- if this_repository_is_in_the_exclude_list or requires_excluded:
+ is_excluded, reason = install_and_test_base_util.is_excluded( exclude_list_dicts, name, owner, changeset_revision )
+ if is_excluded:
# If this repository is being skipped, register the reason.
- tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
+ tool_test_results_dict[ 'not_tested' ] = dict( reason=reason )
params = dict( do_not_test=False )
# TODO: do something useful with response_dict
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
diff -r bedb1e48e5c724d817f1a695ed068e0745f066cd -r 2fcb2c12183df5ee7ec96d0f1a253c23448e4410 test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
@@ -98,24 +98,6 @@
# The traceback and captured output of the tool that was run will be recored for test failures. After all tests have
# completed, the repository is uninstalled, so test cases don't interfere with the next repository's functional tests.
for repository_dict in repositories_to_install:
- # Each repository_dict looks something like:
- # { "changeset_revision": "13fa22a258b5",
- # "contents_url": "/api/repositories/529fd61ab1c6cc36/contents",
- # "deleted": false,
- # "deprecated": false,
- # "description": "Convert column case.",
- # "downloadable": true,
- # "id": "529fd61ab1c6cc36",
- # "long_description": "This tool takes the specified columns and converts them to uppercase or lowercase.",
- # "malicious": false,
- # "name": "change_case",
- # "owner": "test",
- # "private": false,
- # "repository_id": "529fd61ab1c6cc36",
- # "times_downloaded": 0,
- # "tool_shed_url": "http://toolshed.local:10001",
- # "url": "/api/repository_revisions/529fd61ab1c6cc36",
- # "user_id": "529fd61ab1c6cc36" }
encoded_repository_metadata_id = repository_dict.get( 'id', None )
# Add the URL for the tool shed we're installing from, so the automated installation methods go to the right place.
repository_dict[ 'tool_shed_url' ] = install_and_test_base_util.galaxy_tool_shed_url
@@ -133,43 +115,10 @@
log.debug( error_message )
else:
tool_test_results_dict = install_and_test_base_util.get_tool_test_results_dict( tool_test_results_dicts )
- # See if this repository should be skipped for any reason.
- this_repository_is_in_the_exclude_list = False
- requires_excluded = False
- skip_reason = None
- for exclude_dict in exclude_list_dicts:
- reason = exclude_dict[ 'reason' ]
- exclude_repositories = exclude_dict[ 'repositories' ]
- # 'repositories':
- # [( name, owner, changeset_revision if changeset_revision else None ),
- # ( name, owner, changeset_revision if changeset_revision else None )]
- if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
- this_repository_is_in_the_exclude_list = True
- skip_reason = reason
- break
- if not this_repository_is_in_the_exclude_list:
- # Skip this repository if it has a repository dependency that is in the exclude list.
- repository_dependency_dicts, error_message = \
- install_and_test_base_util.get_repository_dependencies_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
- encoded_repository_metadata_id )
- if error_message:
- log.debug( 'Error getting repository dependencies for revision %s of repository %s owned by %s:' % \
- ( changeset_revision, name, owner ) )
- log.debug( error_message )
- else:
- for repository_dependency_dict in repository_dependency_dicts:
- rd_name = repository_dependency_dict[ 'name' ]
- rd_owner = repository_dependency_dict[ 'owner' ]
- rd_changeset_revision = repository_dependency_dict[ 'changeset_revision' ]
- if ( rd_name, rd_owner, rd_changeset_revision ) in exclude_repositories or \
- ( rd_name, rd_owner, None ) in exclude_repositories:
- skip_reason = 'This repository requires revision %s of repository %s owned by %s which is excluded from testing.' % \
- ( rd_changeset_revision, rd_name, rd_owner )
- requires_excluded = True
- break
+ is_excluded, reason = install_and_test_base_util.is_excluded( exclude_list_dicts, name, owner, changeset_revision )
if this_repository_is_in_the_exclude_list or requires_excluded:
# If this repository is being skipped, register the reason.
- tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
+ tool_test_results_dict[ 'not_tested' ] = dict( reason=reason )
params = dict( do_not_test=False )
# TODO: do something useful with response_dict
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: inithello: Fix for install_and_test_statistics_dict being set to None when a repository's tool dependencies were determined to be unsuccessfully installed.
by commits-noreply@bitbucket.org 20 Dec '13
by commits-noreply@bitbucket.org 20 Dec '13
20 Dec '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/bedb1e48e5c7/
Changeset: bedb1e48e5c7
User: inithello
Date: 2013-12-20 15:55:14
Summary: Fix for install_and_test_statistics_dict being set to None when a repository's tool dependencies were determined to be unsuccessfully installed.
Affected #: 1 file
diff -r c77a66edf25294e1743cc0a3886f8062db5c1fff -r bedb1e48e5c724d817f1a695ed068e0745f066cd test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -300,15 +300,14 @@
tool_test_results_dict[ 'missing_test_components' ] = []
missing_tool_dependencies = install_and_test_base_util.get_missing_tool_dependencies( repository )
if missing_tool_dependencies or repository.missing_repository_dependencies:
- install_and_test_statistics_dict = \
- install_and_test_base_util.handle_missing_dependencies( app=app,
- repository=repository,
- missing_tool_dependencies=missing_tool_dependencies,
- repository_dict=repository_dict,
- tool_test_results_dicts=tool_test_results_dicts,
- tool_test_results_dict=tool_test_results_dict,
- params=params,
- can_update_tool_shed=can_update_tool_shed )
+ install_and_test_base_util.handle_missing_dependencies( app=app,
+ repository=repository,
+ missing_tool_dependencies=missing_tool_dependencies,
+ repository_dict=repository_dict,
+ tool_test_results_dicts=tool_test_results_dicts,
+ tool_test_results_dict=tool_test_results_dict,
+ params=params,
+ can_update_tool_shed=can_update_tool_shed )
# Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
test_toolbox.toolbox = app.toolbox
else:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Make the tool shed's install and test exclude list locally accessible rather than stored in cfengine.
by commits-noreply@bitbucket.org 20 Dec '13
by commits-noreply@bitbucket.org 20 Dec '13
20 Dec '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c77a66edf252/
Changeset: c77a66edf252
User: greg
Date: 2013-12-20 15:50:03
Summary: Make the tool shed's install and test exclude list locally accessible rather than stored in cfengine.
Affected #: 4 files
diff -r 3e2b61de5a2322563c1ffa49e03c118144f5068c -r c77a66edf25294e1743cc0a3886f8062db5c1fff lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
--- a/lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
+++ b/lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
@@ -197,7 +197,16 @@
test_environment_dict[ 'tool_shed_mercurial_version' ] = __version__.version
test_environment_dict[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
tool_test_results_dict[ 'test_environment' ] = test_environment_dict
- repository_metadata.tool_test_results = tool_test_results_dict
+ # Store only the configured number of test runs.
+ num_tool_test_results_saved = int( app.config.num_tool_test_results_saved )
+ if len( tool_test_results_dicts ) >= num_tool_test_results_saved:
+ test_results_index = num_tool_test_results_saved - 1
+ new_tool_test_results_dicts = tool_test_results_dicts[ :test_results_index ]
+ else:
+ new_tool_test_results_dicts = [ d for d in tool_test_results_dicts ]
+ # Insert the new element into the first position in the list.
+ new_tool_test_results_dicts.insert( 0, tool_test_results_dict )
+ repository_metadata.tool_test_results = new_tool_test_results_dicts
app.sa_session.add( repository_metadata )
app.sa_session.flush()
stop = time.time()
diff -r 3e2b61de5a2322563c1ffa49e03c118144f5068c -r c77a66edf25294e1743cc0a3886f8062db5c1fff test/install_and_test_tool_shed_repositories/base/twilltestcase.py
--- a/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
+++ b/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
@@ -41,16 +41,6 @@
self.shed_tools_dict = {}
self.home()
- def deactivate_repository( self, repository ):
- """Deactivate a repository."""
- url = '/admin_toolshed/deactivate_or_uninstall_repository?id=%s' % self.security.encode_id( repository.id )
- self.visit_url( url )
- tc.fv ( 1, "remove_from_disk", 'false' )
- tc.submit( 'deactivate_or_uninstall_repository_button' )
- strings_displayed = [ 'The repository named' ]
- strings_displayed.append( 'has been deactivated' )
- self.check_for_strings( strings_displayed, strings_not_displayed=[] )
-
def initiate_installation_process( self,
install_tool_dependencies=False,
install_repository_dependencies=True,
diff -r 3e2b61de5a2322563c1ffa49e03c118144f5068c -r c77a66edf25294e1743cc0a3886f8062db5c1fff test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -63,12 +63,11 @@
# Here's the directory where everything happens. Temporary directories are created within this directory to contain
# the database, new repositories, etc.
galaxy_test_tmp_dir = os.path.join( test_home_directory, 'tmp' )
+# File containing information about problematic repositories to exclude from test runs.
+exclude_list_file = os.path.join( test_home_directory, 'exclude.xml' )
default_galaxy_locales = 'en'
default_galaxy_test_file_dir = "test-data"
os.environ[ 'GALAXY_INSTALL_TEST_TMP_DIR' ] = galaxy_test_tmp_dir
-# This file is copied to the Galaxy root directory by buildbot.
-# It is managed by cfengine and is not locally available.
-exclude_list_file = os.environ.get( 'GALAXY_INSTALL_TEST_EXCLUDE_REPOSITORIES', 'repositories_with_tools_exclude.xml' )
# This script can be run in such a way that no Tool Shed database records should be changed.
if '-info_only' in sys.argv or 'GALAXY_INSTALL_TEST_INFO_ONLY' in os.environ:
@@ -138,25 +137,20 @@
return tool_id, tool_version
def install_and_test_repositories( app, galaxy_shed_tools_dict, galaxy_shed_tool_conf_file ):
+ # Initialize a dictionary for the summary that will be printed to stdout.
install_and_test_statistics_dict = install_and_test_base_util.initialize_install_and_test_statistics_dict( test_framework )
error_message = ''
- # Initialize a dictionary for the summary that will be printed to stdout.
- total_repositories_processed = install_and_test_statistics_dict[ 'total_repositories_processed' ]
repositories_to_install, error_message = \
install_and_test_base_util.get_repositories_to_install( install_and_test_base_util.galaxy_tool_shed_url, test_framework )
if error_message:
return None, error_message
# Handle repositories not to be tested.
if os.path.exists( exclude_list_file ):
- # Entries in the exclude_list look something like this.
- # { 'reason': The default reason or the reason specified in this section,
- # 'repositories':
- # [( name, owner, changeset revision if changeset revision else None ),
- # ( name, owner, changeset revision if changeset revision else None )] }
- # If changeset revision is None, that means the entire repository is excluded from testing, otherwise only the specified
- # revision should be skipped.
- log.debug( 'Loading the list of repositories excluded from testing from the file %s...' % \
- str( exclude_list_file ) )
+ log.debug( 'Loading the list of repositories excluded from testing from the file %s...' % str( exclude_list_file ) )
+ # The following exclude_list will look something like this:
+ # [{ 'reason': The default reason or the reason specified in this section,
+ # 'repositories': [( name, owner, changeset_revision if changeset_revision else None ),
+ # ( name, owner, changeset_revision if changeset_revision else None )]}]
exclude_list = install_and_test_base_util.parse_exclude_list( exclude_list_file )
else:
exclude_list = []
diff -r 3e2b61de5a2322563c1ffa49e03c118144f5068c -r c77a66edf25294e1743cc0a3886f8062db5c1fff test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
@@ -61,12 +61,11 @@
# Here's the directory where everything happens. Temporary directories are created within this directory to contain
# the database, new repositories, etc.
galaxy_test_tmp_dir = os.path.join( test_home_directory, 'tmp' )
+# File containing information about problematic repositories to exclude from test runs.
+exclude_list_file = os.path.join( test_home_directory, 'exclude.xml' )
default_galaxy_locales = 'en'
default_galaxy_test_file_dir = "test-data"
os.environ[ 'GALAXY_INSTALL_TEST_TMP_DIR' ] = galaxy_test_tmp_dir
-# This file is copied to the Galaxy root directory by buildbot.
-# It is managed by cfengine and is not locally available.
-exclude_list_file = os.environ.get( 'GALAXY_INSTALL_TEST_EXCLUDE_REPOSITORIES', 'tool_dependency_definition_exclude.xml' )
# This script can be run in such a way that no Tool Shed database records should be changed.
if '-info_only' in sys.argv or 'GALAXY_INSTALL_TEST_INFO_ONLY' in os.environ:
@@ -77,29 +76,15 @@
test_framework = install_and_test_base_util.TOOL_DEPENDENCY_DEFINITIONS
def install_and_test_repositories( app, galaxy_shed_tools_dict, galaxy_shed_tool_conf_file ):
+ # Initialize a dictionary for the summary that will be printed to stdout.
install_and_test_statistics_dict = install_and_test_base_util.initialize_install_and_test_statistics_dict( test_framework )
error_message = ''
- # Initialize a dictionary for the summary that will be printed to stdout.
- total_repositories_processed = install_and_test_statistics_dict[ 'total_repositories_processed' ]
repositories_to_install, error_message = \
install_and_test_base_util.get_repositories_to_install( install_and_test_base_util.galaxy_tool_shed_url, test_framework )
if error_message:
return None, error_message
- # Handle repositories not to be tested.
if os.path.exists( exclude_list_file ):
- # Entries in the exclude_list look something like this.
- # { 'reason': The default reason or the reason specified in this section,
- # 'repositories':
- # [( name, owner, changeset revision if changeset revision else None ),
- # ( name, owner, changeset revision if changeset revision else None )] }
- # If changeset revision is None, that means the entire repository is excluded from testing, otherwise only the specified
- # revision should be skipped.
- # We are testing deprecated repositories because it is possible that a deprecated repository contains valid tools that
- # someone has previously installed. Deleted repositories have never been installed, so should not be tested. If they are
- # undeleted, this script will then test them the next time it runs. We don't need to check if a repository has been deleted
- # here because our call to the Tool Shed API filters by downloadable='true', in which case deleted will always be False.
- log.debug( 'Loading the list of repositories excluded from testing from the file %s...' % \
- str( exclude_list_file ) )
+ log.debug( 'Loading the list of repositories excluded from testing from the file %s...' % str( exclude_list_file ) )
# The following exclude_list will look something like this:
# [{ 'reason': The default reason or the reason specified in this section,
# 'repositories': [( name, owner, changeset_revision if changeset_revision else None ),
@@ -201,19 +186,9 @@
install_and_test_statistics_dict[ 'total_repositories_processed' ] += 1
if error_message:
# The repository installation failed.
- log.debug( 'Installation failed for revision %s of repository %s owned by %s.' % \
- ( changeset_revision, name, owner ) )
+ log.debug( 'Installation failed for revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
install_and_test_statistics_dict[ 'repositories_with_installation_error' ].append( repository_identifier_dict )
tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = error_message
- # Even if the repository failed to install, execute the uninstall method, in case a dependency did succeed.
- log.debug( 'Attempting to uninstall revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
- try:
- repository = \
- test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
- except Exception, e:
- error_message = 'Unable to find revision %s of repository %s owned by %s: %s.' % \
- ( changeset_revision, name, owner, str( e ) )
- log.exception( error_message )
params = dict( test_install_error=True,
do_not_test=False )
# TODO: do something useful with response_dict
@@ -223,14 +198,6 @@
repository_dict,
params,
can_update_tool_shed )
- try:
- # We are uninstalling this repository and all of its repository dependencies.
- install_and_test_base_util.uninstall_repository_and_repository_dependencies( app, repository_dict )
- except Exception, e:
- log.exception( 'Error attempting to uninstall revision %s of repository %s owned by %s: %s' % \
- ( changeset_revision, name, owner, str( e ) ) )
- log.debug( 'Installation failed for revision %s of repository %s owned by %s.' % \
- ( changeset_revision, name, owner ) )
else:
# The repository was successfully installed.
params, install_and_test_statistics_dict, tool_test_results_dict = \
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Fix for testing repositories with tools using the tool shed's install and test framework.
by commits-noreply@bitbucket.org 19 Dec '13
by commits-noreply@bitbucket.org 19 Dec '13
19 Dec '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/3e2b61de5a23/
Changeset: 3e2b61de5a23
User: greg
Date: 2013-12-20 03:20:57
Summary: Fix for testing repositories with tools using the tool shed's install and test framework.
Affected #: 1 file
diff -r 9e9746198fd8c49380d42a2b4657ef241dda8f6a -r 3e2b61de5a2322563c1ffa49e03c118144f5068c test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -155,10 +155,6 @@
# ( name, owner, changeset revision if changeset revision else None )] }
# If changeset revision is None, that means the entire repository is excluded from testing, otherwise only the specified
# revision should be skipped.
- # We are testing deprecated repositories because it is possible that a deprecated repository contains valid tools that
- # someone has previously installed. Deleted repositories have never been installed, so should not be tested. If they are
- # undeleted, this script will then test them the next time it runs. We don't need to check if a repository has been deleted
- # here because our call to the Tool Shed API filters by downloadable='true', in which case deleted will always be False.
log.debug( 'Loading the list of repositories excluded from testing from the file %s...' % \
str( exclude_list_file ) )
exclude_list = install_and_test_base_util.parse_exclude_list( exclude_list_file )
@@ -739,7 +735,9 @@
tool_test_results_dict[ 'failed_tests' ] = failed_test_dicts
failed_repository_dict = repository_identifier_dict
install_and_test_statistics_dict[ 'at_least_one_test_failed' ].append( failed_repository_dict )
- set_do_not_test = not is_latest_downloadable_revision( install_and_test_base_util.galaxy_tool_shed_url, repository_dict )
+ set_do_not_test = \
+ not install_and_test_base_util.is_latest_downloadable_revision( install_and_test_base_util.galaxy_tool_shed_url,
+ repository_dict )
params = dict( tools_functionally_correct=False,
test_install_error=False,
do_not_test=str( set_do_not_test ) )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Re-add changeset_revision to the information returned from the tool shed's repository_dependencies api method.
by commits-noreply@bitbucket.org 19 Dec '13
by commits-noreply@bitbucket.org 19 Dec '13
19 Dec '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/9e9746198fd8/
Changeset: 9e9746198fd8
User: greg
Date: 2013-12-19 23:30:29
Summary: Re-add changeset_revision to the information returned from the tool shed's repository_dependencies api method.
Affected #: 1 file
diff -r 7701d6b5e1b62f501e827a3cc6ee880e8955f036 -r 9e9746198fd8c49380d42a2b4657ef241dda8f6a lib/galaxy/webapps/tool_shed/api/repository_revisions.py
--- a/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
+++ b/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
@@ -91,6 +91,8 @@
tool_shed, name, owner, changeset_revision = rd_tup[ 0:4 ]
repository_dependency = suc.get_repository_by_name_and_owner( trans.app, name, owner )
repository_dependency_dict = repository_dependency.to_dict( view='element', value_mapper=value_mapper )
+ # We have to add the changeset_revision of the repository dependency.
+ repository_dependency_dict[ 'changeset_revision' ] = changeset_revision
repository_dependency_dict[ 'url' ] = web.url_for( controller='repositories',
action='show',
id=trans.security.encode_id( repository_dependency.id ) )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Code cleanup for the tool shed's new repository_revisions' repository_dependencies method.
by commits-noreply@bitbucket.org 19 Dec '13
by commits-noreply@bitbucket.org 19 Dec '13
19 Dec '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7701d6b5e1b6/
Changeset: 7701d6b5e1b6
User: greg
Date: 2013-12-19 23:22:32
Summary: Code cleanup for the tool shed's new repository_revisions' repository_dependencies method.
Affected #: 1 file
diff -r f8e9f9f0f64d853fe6db058b6c4ffaa9e497c781 -r 7701d6b5e1b62f501e827a3cc6ee880e8955f036 lib/galaxy/webapps/tool_shed/api/repository_revisions.py
--- a/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
+++ b/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
@@ -79,6 +79,8 @@
:param id: the encoded id of the `RepositoryMetadata` object
"""
# Example URL: http://localhost:9009/api/repository_revisions/repository_dependencies/bb12…
+ value_mapper = { 'id' : trans.security.encode_id,
+ 'user_id' : trans.security.encode_id }
repository_dependencies_dicts = []
try:
repository_metadata = metadata_util.get_repository_metadata_by_id( trans, id )
@@ -87,22 +89,12 @@
rd_tups = metadata[ 'repository_dependencies' ][ 'repository_dependencies' ]
for rd_tup in rd_tups:
tool_shed, name, owner, changeset_revision = rd_tup[ 0:4 ]
- repository_dependencies_dict = dict( tool_shed=str( tool_shed ),
- name=str( name ),
- owner=str( owner ),
- changeset_revision=str( changeset_revision ) )
- rd = suc.get_repository_by_name_and_owner( trans.app, name, owner )
- encoded_rd_id = trans.security.encode_id( rd.id )
- rd_repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans,
- encoded_rd_id,
- changeset_revision )
- if rd_repository_metadata is None:
- repo = hg.repository( suc.get_configured_ui(), repository.repo_path( trans.app ) )
- rd_repository_metadata = suc.get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
- repository_dependencies_dict[ 'url' ] = web.url_for( controller='repository_revisions',
- action='show',
- id=trans.security.encode_id( rd_repository_metadata.id ) )
- repository_dependencies_dicts.append( repository_dependencies_dict )
+ repository_dependency = suc.get_repository_by_name_and_owner( trans.app, name, owner )
+ repository_dependency_dict = repository_dependency.to_dict( view='element', value_mapper=value_mapper )
+ repository_dependency_dict[ 'url' ] = web.url_for( controller='repositories',
+ action='show',
+ id=trans.security.encode_id( repository_dependency.id ) )
+ repository_dependencies_dicts.append( repository_dependency_dict )
return repository_dependencies_dicts
except Exception, e:
message = "Error in the Tool Shed repository_revisions API in repository_dependencies: %s" % str( e )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: guerler: Grids: Add history refresh
by commits-noreply@bitbucket.org 19 Dec '13
by commits-noreply@bitbucket.org 19 Dec '13
19 Dec '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/f8e9f9f0f64d/
Changeset: f8e9f9f0f64d
User: guerler
Date: 2013-12-19 22:30:23
Summary: Grids: Add history refresh
Affected #: 3 files
diff -r 4ac5eb19c0bdd45b558f1c262198009e0106f49c -r f8e9f9f0f64d853fe6db058b6c4ffaa9e497c781 static/scripts/galaxy.grids.js
--- a/static/scripts/galaxy.grids.js
+++ b/static/scripts/galaxy.grids.js
@@ -158,7 +158,18 @@
.keyup(function () { $(this).css("font-style", "normal"); });
});
},
-
+
+ // refresh frames
+ handle_refresh: function (refresh_frames) {
+ if (refresh_frames) {
+ if ($.inArray('history', refresh_frames) > -1) {
+ if( top.Galaxy && top.Galaxy.currHistoryPanel ){
+ top.Galaxy.currHistoryPanel.loadCurrentHistory();
+ }
+ }
+ }
+ },
+
// Initialize
init_grid: function(grid_config)
{
@@ -167,7 +178,10 @@
// get options
var options = this.grid.attributes;
-
+
+ // handle refresh requests
+ this.handle_refresh(options.refresh_frames);
+
// strip protocol and domain
var url = this.grid.get('url_base');
url = url.replace(/^.*\/\/[^\/]+/, '');
diff -r 4ac5eb19c0bdd45b558f1c262198009e0106f49c -r f8e9f9f0f64d853fe6db058b6c4ffaa9e497c781 static/scripts/packed/galaxy.grids.js
--- a/static/scripts/packed/galaxy.grids.js
+++ b/static/scripts/packed/galaxy.grids.js
@@ -1,1 +1,1 @@
-jQuery.ajaxSettings.traditional=true;define(["mvc/ui"],function(){var a=Backbone.Model.extend({defaults:{url_base:"",async:false,async_ops:[],categorical_filters:[],filters:{},sort_key:null,show_item_checkboxes:false,advanced_search:false,cur_page:1,num_pages:1,operation:undefined,item_ids:undefined},can_async_op:function(c){return _.indexOf(this.attributes.async_ops,c)!==-1},add_filter:function(g,h,d){if(d){var e=this.attributes.filters[g],c;if(e===null||e===undefined){c=h}else{if(typeof(e)=="string"){if(e=="All"){c=h}else{var f=[];f[0]=e;f[1]=h;c=f}}else{c=e;c.push(h)}}this.attributes.filters[g]=c}else{this.attributes.filters[g]=h}},remove_filter:function(d,g){var c=this.attributes.filters[d];if(c===null||c===undefined){return false}var f=true;if(typeof(c)==="string"){if(c=="All"){f=false}else{delete this.attributes.filters[d]}}else{var e=_.indexOf(c,g);if(e!==-1){c.splice(e,1)}else{f=false}}return f},get_url_data:function(){var c={async:this.attributes.async,sort:this.attributes.sort_key,page:this.attributes.cur_page,show_item_checkboxes:this.attributes.show_item_checkboxes,advanced_search:this.attributes.advanced_search};if(this.attributes.operation){c.operation=this.attributes.operation}if(this.attributes.item_ids){c.id=this.attributes.item_ids}var d=this;_.each(_.pairs(d.attributes.filters),function(e){c["f-"+e[0]]=e[1]});return c},get_url:function(c){return this.get("url_base")+"?"+$.param(this.get_url_data())+"&"+$.param(c)}});var b=Backbone.View.extend({grid:null,initialize:function(c){this.init_grid(c);this.init_grid_controls();$("input[type=text]").each(function(){$(this).click(function(){$(this).select()}).keyup(function(){$(this).css("font-style","normal")})})},init_grid:function(e){this.grid=new a(e);var d=this.grid.attributes;var 
c=this.grid.get("url_base");c=c.replace(/^.*\/\/[^\/]+/,"");this.grid.set("url_base",c);$("#grid-table-body").html(this.template_body(d));$("#grid-table-footer").html(this.template_footer(d));if(d.message){$("#grid-message").html(this.template_message(d));setTimeout(function(){$("#grid-message").html("")},5000)}this.init_grid_elements()},init_grid_controls:function(){$(".submit-image").each(function(){$(this).mousedown(function(){$(this).addClass("gray-background")});$(this).mouseup(function(){$(this).removeClass("gray-background")})});var c=this;$(".sort-link").each(function(){$(this).click(function(){c.set_sort_condition($(this).attr("sort_key"));return false})});$(".categorical-filter > a").each(function(){$(this).click(function(){c.set_categorical_filter($(this).attr("filter_key"),$(this).attr("filter_val"));return false})});$(".text-filter-form").each(function(){$(this).submit(function(){var g=$(this).attr("column_key");var f=$("#input-"+g+"-filter");var h=f.val();f.val("");c.add_filter_condition(g,h);return false})});var d=$("#input-tags-filter");if(d.length){d.autocomplete(this.grid.history_tag_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}var e=$("#input-name-filter");if(e.length){e.autocomplete(this.grid.history_name_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}$(".advanced-search-toggle").each(function(){$(this).click(function(){$("#standard-search").slideToggle("fast");$("#advanced-search").slideToggle("fast");return false})})},init_grid_elements:function(){$(".grid").each(function(){var s=$(this).find("input.grid-row-select-checkbox");var r=$(this).find("span.grid-selected-count");var t=function(){r.text($(s).filter(":checked").length)};$(s).each(function(){$(this).change(t)});t()});if($(".community_rating_star").length!==0){$(".community_rating_star").rating({})}var q=this.grid.attributes;var p=this;$(".page-link > 
a").each(function(){$(this).click(function(){p.set_page($(this).attr("page_num"));return false})});$(".use-inbound").each(function(){$(this).click(function(r){p.execute({href:$(this).attr("href"),inbound:true});return false})});$(".use-outbound").each(function(){$(this).click(function(r){p.execute({href:$(this).attr("href")});return false})});var f=q.items.length;if(f==0){return}for(var k in q.items){var o=q.items[k];var l=$("#grid-"+k+"-popup");l.off();var d=new PopupMenu(l);for(var h in q.operations){var e=q.operations[h];var m=e.label;var c=o.operation_config[m];var g=o.encode_id;if(c.allowed&&e.allow_popup){var n={html:e.label,href:c.url_args,target:c.target,confirmation_text:e.confirm,inbound:e.inbound};n.func=function(t){t.preventDefault();var s=$(t.target).html();var r=this.findItemByHtml(s);p.execute(r)};d.addItem(n)}}}},add_filter_condition:function(e,g){if(g===""){return false}this.grid.add_filter(e,g,true);var f=$("<span>"+g+"<a href='javascript:void(0);'><span class='delete-search-icon' /></span></a>");f.addClass("text-filter-val");var d=this;f.click(function(){d.grid.remove_filter(e,g);$(this).remove();d.go_page_one();d.execute()});var c=$("#"+e+"-filtering-criteria");c.append(f);this.go_page_one();this.execute()},set_sort_condition:function(h){var g=this.grid.get("sort_key");var f=h;if(g.indexOf(h)!==-1){if(g.substring(0,1)!=="-"){f="-"+h}else{}}$(".sort-arrow").remove();var e=(f.substring(0,1)=="-")?"↑":"↓";var c=$("<span>"+e+"</span>").addClass("sort-arrow");var d=$("#"+h+"-header");d.append(c);this.grid.set("sort_key",f);this.go_page_one();this.execute()},set_categorical_filter:function(e,g){var d=this.grid.get("categorical_filters")[e],f=this.grid.get("filters")[e];var c=this;$("."+e+"-filter").each(function(){var m=$.trim($(this).text());var k=d[m];var l=k[e];if(l==g){$(this).empty();$(this).addClass("current-filter");$(this).append(m)}else{if(l==f){$(this).empty();var h=$("<a 
href='#'>"+m+"</a>");h.click(function(){c.set_categorical_filter(e,l)});$(this).removeClass("current-filter");$(this).append(h)}}});this.grid.add_filter(e,g);this.go_page_one();this.execute()},set_page:function(c){var d=this;$(".page-link").each(function(){var k=$(this).attr("id"),g=parseInt(k.split("-")[2],10),e=d.grid.get("cur_page"),h;if(g===c){h=$(this).children().text();$(this).empty();$(this).addClass("inactive-link");$(this).text(h)}else{if(g===e){h=$(this).text();$(this).empty();$(this).removeClass("inactive-link");var f=$("<a href='#'>"+h+"</a>");f.click(function(){d.set_page(g)});$(this).append(f)}}});if(c==="all"){this.grid.set("cur_page",c)}else{this.grid.set("cur_page",parseInt(c,10))}this.execute()},submit_operation:function(f,g){var e=$('input[name="id"]:checked').length;if(!e>0){return false}var d=$(f).val();var c=[];$("input[name=id]:checked").each(function(){c.push($(this).val())});this.execute({operation:d,id:c,confirmation_text:g});return true},execute:function(n){var f=null;var e=null;var g=null;var c=null;var m=null;if(n){e=n.href;g=n.operation;f=n.id;c=n.confirmation_text;m=n.inbound;if(e!==undefined&&e.indexOf("operation=")!=-1){var l=e.split("?");if(l.length>1){var k=l[1];var d=k.split("&");for(var h=0;h<d.length;h++){if(d[h].indexOf("operation")!=-1){g=d[h].split("=")[1];g=g.replace(/\+/g," ")}else{if(d[h].indexOf("id")!=-1){f=d[h].split("=")[1]}}}}}}if(g&&f){if(c&&c!=""&&c!="None"&&c!="null"){if(!confirm(c)){return false}}g=g.toLowerCase();this.grid.set({operation:g,item_ids:f});if(this.grid.can_async_op(g)){this.update_grid()}else{this.go_to(m,"")}return false}if(e){this.go_to(m,e);return false}if(this.grid.get("async")){this.update_grid()}else{this.go_to(m,"")}return false},go_to:function(f,d){var 
e=this.grid.get("async");this.grid.set("async",false);advanced_search=$("#advanced-search").is(":visible");this.grid.set("advanced_search",advanced_search);if(!d){d=this.grid.get("url_base")+"?"+$.param(this.grid.get_url_data())}this.grid.set({operation:undefined,item_ids:undefined,async:e});if(f){var c=$(".grid-header").closest(".inbound");if(c.length!==0){c.load(d);return}}window.location=d},update_grid:function(){var d=(this.grid.get("operation")?"POST":"GET");$(".loading-elt-overlay").show();var c=this;$.ajax({type:d,url:c.grid.get("url_base"),data:c.grid.get_url_data(),error:function(e){alert("Grid refresh failed")},success:function(e){c.init_grid($.parseJSON(e));$(".loading-elt-overlay").hide()},complete:function(){c.grid.set({operation:undefined,item_ids:undefined})}})},check_all_items:function(){var c=document.getElementById("check_all"),d=document.getElementsByTagName("input"),f=0,e;if(c.checked===true){for(e=0;e<d.length;e++){if(d[e].name.indexOf("id")!==-1){d[e].checked=true;f++}}}else{for(e=0;e<d.length;e++){if(d[e].name.indexOf("id")!==-1){d[e].checked=false}}}this.init_grid_elements()},go_page_one:function(){var c=this.grid.get("cur_page");if(c!==null&&c!==undefined&&c!=="all"){this.grid.set("cur_page",1)}},template_body:function(t){var m="";var u=0;var g=t.items.length;if(g==0){m+='<tr><td colspan="100"><em>No Items</em></td></tr>';u=1}for(var h in t.items){var r=t.items[h];var c=r.encode_id;var k="grid-"+h+"-popup";m+="<tr ";if(t.current_item_id==r.id){m+='class="current"'}m+=">";if(t.show_item_checkboxes){m+='<td style="width: 1.5em;"><input type="checkbox" name="id" value="'+c+'" id="'+c+'" class="grid-row-select-checkbox" /></td>'}for(j in t.columns){var f=t.columns[j];if(f.visible){var e="";if(f.nowrap){e='style="white-space:nowrap;"'}var s=r.column_config[f.label];var l=s.link;var n=s.value;var q=s.inbound;if(jQuery.type(n)==="string"){n=n.replace(/\/\//g,"/")}var d="";var 
p="";if(f.attach_popup){d="grid-"+h+"-popup";p="menubutton";if(l!=""){p+=" split"}p+=" popup"}m+="<td "+e+">";if(l){if(t.operations.length!=0){m+='<div id="'+d+'" class="'+p+'" style="float: left;">'}var o="";if(q){o="use-inbound"}else{o="use-outbound"}m+='<a class="label '+o+'" href="'+l+'" onclick="return false;">'+n+"</a>";if(t.operations.length!=0){m+="</div>"}}else{m+='<div id="'+d+'" class="'+p+'"><label id="'+f.label_id_prefix+c+'" for="'+c+'">'+n+"</label></div>"}m+="</td>"}}m+="</tr>";u++}return m},template_footer:function(q){var m="";if(q.use_paging&&q.num_pages>1){var o=q.num_page_links;var c=q.cur_page_num;var p=q.num_pages;var l=o/2;var k=c-l;var g=0;if(k==0){k=1;g=l-(c-k)}var f=l+g;var e=c+f;if(e<=p){max_offset=0}else{e=p;max_offset=f-(e+1-c)}if(max_offset!=0){k-=max_offset;if(k<1){k=1}}m+='<tr id="page-links-row">';if(q.show_item_checkboxes){m+="<td></td>"}m+='<td colspan="100"><span id="page-link-container">Page:';if(k>1){m+='<span class="page-link" id="page-link-1"><a href="'+this.grid.get_url({page:n})+'" page_num="1" onclick="return false;">1</a></span> ...'}for(var n=k;n<e+1;n++){if(n==q.cur_page_num){m+='<span class="page-link inactive-link" id="page-link-'+n+'">'+n+"</span>"}else{m+='<span class="page-link" id="page-link-'+n+'"><a href="'+this.grid.get_url({page:n})+'" onclick="return false;" page_num="'+n+'">'+n+"</a></span>"}}if(e<p){m+='...<span class="page-link" id="page-link-'+p+'"><a href="'+this.grid.get_url({page:p})+'" onclick="return false;" page_num="'+p+'">'+p+"</a></span>"}m+="</span>";m+='<span class="page-link" id="show-all-link-span"> | <a href="'+this.grid.get_url({page:"all"})+'" onclick="return false;" page_num="all">Show All</a></span></td></tr>'}if(q.show_item_checkboxes){m+='<tr><input type="hidden" id="operation" name="operation" value=""><td></td><td colspan="100">For <span class="grid-selected-count"></span> selected '+q.get_class_plural+": ";for(i in q.operations){var d=q.operations[i];if(d.allow_multiple){m+='<input 
type="button" value="'+d.label+'" class="action-button" onclick="gridView.submit_operation(this, \''+d.confirm+"')\"> "}}m+="</td></tr>"}var h=false;for(i in q.operations){if(q.operations[i].global_operation){h=true;break}}if(h){m+='<tr><td colspan="100">';for(i in q.operations){var d=q.operations[i];if(d.global_operation){m+='<a class="action-button" href="'+d.global_operation+'">'+d.label+"</a>"}}m+="</td></tr>"}if(q.legend){m+='<tr><td colspan="100">'+q.legend+"</td></tr>"}return m},template_message:function(c){return'<p><div class="'+c.status+'message transient-message">'+c.message+'</div><div style="clear: both"></div></p>'}});return{Grid:a,GridView:b}});
\ No newline at end of file
+jQuery.ajaxSettings.traditional=true;define(["mvc/ui"],function(){var a=Backbone.Model.extend({defaults:{url_base:"",async:false,async_ops:[],categorical_filters:[],filters:{},sort_key:null,show_item_checkboxes:false,advanced_search:false,cur_page:1,num_pages:1,operation:undefined,item_ids:undefined},can_async_op:function(c){return _.indexOf(this.attributes.async_ops,c)!==-1},add_filter:function(g,h,d){if(d){var e=this.attributes.filters[g],c;if(e===null||e===undefined){c=h}else{if(typeof(e)=="string"){if(e=="All"){c=h}else{var f=[];f[0]=e;f[1]=h;c=f}}else{c=e;c.push(h)}}this.attributes.filters[g]=c}else{this.attributes.filters[g]=h}},remove_filter:function(d,g){var c=this.attributes.filters[d];if(c===null||c===undefined){return false}var f=true;if(typeof(c)==="string"){if(c=="All"){f=false}else{delete this.attributes.filters[d]}}else{var e=_.indexOf(c,g);if(e!==-1){c.splice(e,1)}else{f=false}}return f},get_url_data:function(){var c={async:this.attributes.async,sort:this.attributes.sort_key,page:this.attributes.cur_page,show_item_checkboxes:this.attributes.show_item_checkboxes,advanced_search:this.attributes.advanced_search};if(this.attributes.operation){c.operation=this.attributes.operation}if(this.attributes.item_ids){c.id=this.attributes.item_ids}var d=this;_.each(_.pairs(d.attributes.filters),function(e){c["f-"+e[0]]=e[1]});return c},get_url:function(c){return this.get("url_base")+"?"+$.param(this.get_url_data())+"&"+$.param(c)}});var b=Backbone.View.extend({grid:null,initialize:function(c){this.init_grid(c);this.init_grid_controls();$("input[type=text]").each(function(){$(this).click(function(){$(this).select()}).keyup(function(){$(this).css("font-style","normal")})})},handle_refresh:function(c){if(c){if($.inArray("history",c)>-1){if(top.Galaxy&&top.Galaxy.currHistoryPanel){top.Galaxy.currHistoryPanel.loadCurrentHistory()}}}},init_grid:function(e){this.grid=new a(e);var d=this.grid.attributes;this.handle_refresh(d.refresh_frames);var 
c=this.grid.get("url_base");c=c.replace(/^.*\/\/[^\/]+/,"");this.grid.set("url_base",c);$("#grid-table-body").html(this.template_body(d));$("#grid-table-footer").html(this.template_footer(d));if(d.message){$("#grid-message").html(this.template_message(d));setTimeout(function(){$("#grid-message").html("")},5000)}this.init_grid_elements()},init_grid_controls:function(){$(".submit-image").each(function(){$(this).mousedown(function(){$(this).addClass("gray-background")});$(this).mouseup(function(){$(this).removeClass("gray-background")})});var c=this;$(".sort-link").each(function(){$(this).click(function(){c.set_sort_condition($(this).attr("sort_key"));return false})});$(".categorical-filter > a").each(function(){$(this).click(function(){c.set_categorical_filter($(this).attr("filter_key"),$(this).attr("filter_val"));return false})});$(".text-filter-form").each(function(){$(this).submit(function(){var g=$(this).attr("column_key");var f=$("#input-"+g+"-filter");var h=f.val();f.val("");c.add_filter_condition(g,h);return false})});var d=$("#input-tags-filter");if(d.length){d.autocomplete(this.grid.history_tag_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}var e=$("#input-name-filter");if(e.length){e.autocomplete(this.grid.history_name_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}$(".advanced-search-toggle").each(function(){$(this).click(function(){$("#standard-search").slideToggle("fast");$("#advanced-search").slideToggle("fast");return false})})},init_grid_elements:function(){$(".grid").each(function(){var s=$(this).find("input.grid-row-select-checkbox");var r=$(this).find("span.grid-selected-count");var t=function(){r.text($(s).filter(":checked").length)};$(s).each(function(){$(this).change(t)});t()});if($(".community_rating_star").length!==0){$(".community_rating_star").rating({})}var q=this.grid.attributes;var p=this;$(".page-link > 
a").each(function(){$(this).click(function(){p.set_page($(this).attr("page_num"));return false})});$(".use-inbound").each(function(){$(this).click(function(r){p.execute({href:$(this).attr("href"),inbound:true});return false})});$(".use-outbound").each(function(){$(this).click(function(r){p.execute({href:$(this).attr("href")});return false})});var f=q.items.length;if(f==0){return}for(var k in q.items){var o=q.items[k];var l=$("#grid-"+k+"-popup");l.off();var d=new PopupMenu(l);for(var h in q.operations){var e=q.operations[h];var m=e.label;var c=o.operation_config[m];var g=o.encode_id;if(c.allowed&&e.allow_popup){var n={html:e.label,href:c.url_args,target:c.target,confirmation_text:e.confirm,inbound:e.inbound};n.func=function(t){t.preventDefault();var s=$(t.target).html();var r=this.findItemByHtml(s);p.execute(r)};d.addItem(n)}}}},add_filter_condition:function(e,g){if(g===""){return false}this.grid.add_filter(e,g,true);var f=$("<span>"+g+"<a href='javascript:void(0);'><span class='delete-search-icon' /></span></a>");f.addClass("text-filter-val");var d=this;f.click(function(){d.grid.remove_filter(e,g);$(this).remove();d.go_page_one();d.execute()});var c=$("#"+e+"-filtering-criteria");c.append(f);this.go_page_one();this.execute()},set_sort_condition:function(h){var g=this.grid.get("sort_key");var f=h;if(g.indexOf(h)!==-1){if(g.substring(0,1)!=="-"){f="-"+h}else{}}$(".sort-arrow").remove();var e=(f.substring(0,1)=="-")?"↑":"↓";var c=$("<span>"+e+"</span>").addClass("sort-arrow");var d=$("#"+h+"-header");d.append(c);this.grid.set("sort_key",f);this.go_page_one();this.execute()},set_categorical_filter:function(e,g){var d=this.grid.get("categorical_filters")[e],f=this.grid.get("filters")[e];var c=this;$("."+e+"-filter").each(function(){var m=$.trim($(this).text());var k=d[m];var l=k[e];if(l==g){$(this).empty();$(this).addClass("current-filter");$(this).append(m)}else{if(l==f){$(this).empty();var h=$("<a 
href='#'>"+m+"</a>");h.click(function(){c.set_categorical_filter(e,l)});$(this).removeClass("current-filter");$(this).append(h)}}});this.grid.add_filter(e,g);this.go_page_one();this.execute()},set_page:function(c){var d=this;$(".page-link").each(function(){var k=$(this).attr("id"),g=parseInt(k.split("-")[2],10),e=d.grid.get("cur_page"),h;if(g===c){h=$(this).children().text();$(this).empty();$(this).addClass("inactive-link");$(this).text(h)}else{if(g===e){h=$(this).text();$(this).empty();$(this).removeClass("inactive-link");var f=$("<a href='#'>"+h+"</a>");f.click(function(){d.set_page(g)});$(this).append(f)}}});if(c==="all"){this.grid.set("cur_page",c)}else{this.grid.set("cur_page",parseInt(c,10))}this.execute()},submit_operation:function(f,g){var e=$('input[name="id"]:checked').length;if(!e>0){return false}var d=$(f).val();var c=[];$("input[name=id]:checked").each(function(){c.push($(this).val())});this.execute({operation:d,id:c,confirmation_text:g});return true},execute:function(n){var f=null;var e=null;var g=null;var c=null;var m=null;if(n){e=n.href;g=n.operation;f=n.id;c=n.confirmation_text;m=n.inbound;if(e!==undefined&&e.indexOf("operation=")!=-1){var l=e.split("?");if(l.length>1){var k=l[1];var d=k.split("&");for(var h=0;h<d.length;h++){if(d[h].indexOf("operation")!=-1){g=d[h].split("=")[1];g=g.replace(/\+/g," ")}else{if(d[h].indexOf("id")!=-1){f=d[h].split("=")[1]}}}}}}if(g&&f){if(c&&c!=""&&c!="None"&&c!="null"){if(!confirm(c)){return false}}g=g.toLowerCase();this.grid.set({operation:g,item_ids:f});if(this.grid.can_async_op(g)){this.update_grid()}else{this.go_to(m,"")}return false}if(e){this.go_to(m,e);return false}if(this.grid.get("async")){this.update_grid()}else{this.go_to(m,"")}return false},go_to:function(f,d){var 
e=this.grid.get("async");this.grid.set("async",false);advanced_search=$("#advanced-search").is(":visible");this.grid.set("advanced_search",advanced_search);if(!d){d=this.grid.get("url_base")+"?"+$.param(this.grid.get_url_data())}this.grid.set({operation:undefined,item_ids:undefined,async:e});if(f){var c=$(".grid-header").closest(".inbound");if(c.length!==0){c.load(d);return}}window.location=d},update_grid:function(){var d=(this.grid.get("operation")?"POST":"GET");$(".loading-elt-overlay").show();var c=this;$.ajax({type:d,url:c.grid.get("url_base"),data:c.grid.get_url_data(),error:function(e){alert("Grid refresh failed")},success:function(e){c.init_grid($.parseJSON(e));$(".loading-elt-overlay").hide()},complete:function(){c.grid.set({operation:undefined,item_ids:undefined})}})},check_all_items:function(){var c=document.getElementById("check_all"),d=document.getElementsByTagName("input"),f=0,e;if(c.checked===true){for(e=0;e<d.length;e++){if(d[e].name.indexOf("id")!==-1){d[e].checked=true;f++}}}else{for(e=0;e<d.length;e++){if(d[e].name.indexOf("id")!==-1){d[e].checked=false}}}this.init_grid_elements()},go_page_one:function(){var c=this.grid.get("cur_page");if(c!==null&&c!==undefined&&c!=="all"){this.grid.set("cur_page",1)}},template_body:function(t){var m="";var u=0;var g=t.items.length;if(g==0){m+='<tr><td colspan="100"><em>No Items</em></td></tr>';u=1}for(var h in t.items){var r=t.items[h];var c=r.encode_id;var k="grid-"+h+"-popup";m+="<tr ";if(t.current_item_id==r.id){m+='class="current"'}m+=">";if(t.show_item_checkboxes){m+='<td style="width: 1.5em;"><input type="checkbox" name="id" value="'+c+'" id="'+c+'" class="grid-row-select-checkbox" /></td>'}for(j in t.columns){var f=t.columns[j];if(f.visible){var e="";if(f.nowrap){e='style="white-space:nowrap;"'}var s=r.column_config[f.label];var l=s.link;var n=s.value;var q=s.inbound;if(jQuery.type(n)==="string"){n=n.replace(/\/\//g,"/")}var d="";var 
p="";if(f.attach_popup){d="grid-"+h+"-popup";p="menubutton";if(l!=""){p+=" split"}p+=" popup"}m+="<td "+e+">";if(l){if(t.operations.length!=0){m+='<div id="'+d+'" class="'+p+'" style="float: left;">'}var o="";if(q){o="use-inbound"}else{o="use-outbound"}m+='<a class="label '+o+'" href="'+l+'" onclick="return false;">'+n+"</a>";if(t.operations.length!=0){m+="</div>"}}else{m+='<div id="'+d+'" class="'+p+'"><label id="'+f.label_id_prefix+c+'" for="'+c+'">'+n+"</label></div>"}m+="</td>"}}m+="</tr>";u++}return m},template_footer:function(q){var m="";if(q.use_paging&&q.num_pages>1){var o=q.num_page_links;var c=q.cur_page_num;var p=q.num_pages;var l=o/2;var k=c-l;var g=0;if(k==0){k=1;g=l-(c-k)}var f=l+g;var e=c+f;if(e<=p){max_offset=0}else{e=p;max_offset=f-(e+1-c)}if(max_offset!=0){k-=max_offset;if(k<1){k=1}}m+='<tr id="page-links-row">';if(q.show_item_checkboxes){m+="<td></td>"}m+='<td colspan="100"><span id="page-link-container">Page:';if(k>1){m+='<span class="page-link" id="page-link-1"><a href="'+this.grid.get_url({page:n})+'" page_num="1" onclick="return false;">1</a></span> ...'}for(var n=k;n<e+1;n++){if(n==q.cur_page_num){m+='<span class="page-link inactive-link" id="page-link-'+n+'">'+n+"</span>"}else{m+='<span class="page-link" id="page-link-'+n+'"><a href="'+this.grid.get_url({page:n})+'" onclick="return false;" page_num="'+n+'">'+n+"</a></span>"}}if(e<p){m+='...<span class="page-link" id="page-link-'+p+'"><a href="'+this.grid.get_url({page:p})+'" onclick="return false;" page_num="'+p+'">'+p+"</a></span>"}m+="</span>";m+='<span class="page-link" id="show-all-link-span"> | <a href="'+this.grid.get_url({page:"all"})+'" onclick="return false;" page_num="all">Show All</a></span></td></tr>'}if(q.show_item_checkboxes){m+='<tr><input type="hidden" id="operation" name="operation" value=""><td></td><td colspan="100">For <span class="grid-selected-count"></span> selected '+q.get_class_plural+": ";for(i in q.operations){var d=q.operations[i];if(d.allow_multiple){m+='<input 
type="button" value="'+d.label+'" class="action-button" onclick="gridView.submit_operation(this, \''+d.confirm+"')\"> "}}m+="</td></tr>"}var h=false;for(i in q.operations){if(q.operations[i].global_operation){h=true;break}}if(h){m+='<tr><td colspan="100">';for(i in q.operations){var d=q.operations[i];if(d.global_operation){m+='<a class="action-button" href="'+d.global_operation+'">'+d.label+"</a>"}}m+="</td></tr>"}if(q.legend){m+='<tr><td colspan="100">'+q.legend+"</td></tr>"}return m},template_message:function(c){return'<p><div class="'+c.status+'message transient-message">'+c.message+'</div><div style="clear: both"></div></p>'}});return{Grid:a,GridView:b}});
\ No newline at end of file
diff -r 4ac5eb19c0bdd45b558f1c262198009e0106f49c -r f8e9f9f0f64d853fe6db058b6c4ffaa9e497c781 templates/grid_base.mako
--- a/templates/grid_base.mako
+++ b/templates/grid_base.mako
@@ -15,7 +15,6 @@
return '/base.mako'
%><%inherit file="${inherit(context)}"/>
-<%namespace file="/refresh_frames.mako" import="handle_refresh_frames" /><%namespace file="/display_common.mako" import="get_class_plural" /><%def name="load(embedded = False, insert = None)">
@@ -67,9 +66,14 @@
'use_panels' : context.get('use_panels'),
'insert' : insert,
'default_filter_dict' : default_filter_dict,
- 'advanced_search' : advanced_search
+ 'advanced_search' : advanced_search,
+ 'refresh_frames' : []
}
+ # add refresh frames
+ if refresh_frames:
+ self.grid_config['refresh_frames'] = refresh_frames
+
## add current item if exists
if current_item:
self.grid_config['current_item_id'] = current_item.id
@@ -243,7 +247,6 @@
});
</script>
- ${handle_refresh_frames()}
</%def><%def name="stylesheets()">
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Enhancements and fixes for registering statistics and test results in the tool shed's install and test framework.
by commits-noreply@bitbucket.org 19 Dec '13
by commits-noreply@bitbucket.org 19 Dec '13
19 Dec '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/4ac5eb19c0bd/
Changeset: 4ac5eb19c0bd
User: greg
Date: 2013-12-19 21:56:50
Summary: Enhancements and fixes for registering statistics and test results in the tool shed's install and test framework.
Affected #: 3 files
diff -r 3bbb2f8e43b109b5adc8925a52ed23d9f31cd236 -r 4ac5eb19c0bdd45b558f1c262198009e0106f49c test/install_and_test_tool_shed_repositories/base/util.py
--- a/test/install_and_test_tool_shed_repositories/base/util.py
+++ b/test/install_and_test_tool_shed_repositories/base/util.py
@@ -476,7 +476,7 @@
return global_conf
def handle_missing_dependencies( app, repository, missing_tool_dependencies, repository_dict, tool_test_results_dicts,
- tool_test_results_dict, can_update_tool_shed ):
+ tool_test_results_dict, params, can_update_tool_shed ):
"""Handle missing repository or tool dependencies for an installed repository."""
# If a tool dependency fails to install correctly, this should be considered an installation error,
# and functional tests should be skipped, since the tool dependency needs to be correctly installed
@@ -508,11 +508,9 @@
owner=owner,
changeset_revision=changeset_revision,
error_message=error_message )
- tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( missing_repository_dependency_info_dict )
+ tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ]\
+ .append( missing_repository_dependency_info_dict )
# Record the status of this repository in the tool shed.
- params = dict( tools_functionally_correct=False,
- do_not_test=False,
- test_install_error=True )
# TODO: do something useful with response_dict
response_dict = register_test_result( galaxy_tool_shed_url,
tool_test_results_dicts,
@@ -663,6 +661,73 @@
log.debug( 'Repository %s owned by %s, all revisions.' % ( str( name ), str( owner ) ) )
return exclude_list
+def register_installed_and_missing_dependencies( app, repository, repository_identifier_dict, install_and_test_statistics_dict,
+ tool_test_results_dict ):
+ # The repository was successfully installed.
+ log.debug( 'Installation succeeded for revision %s of repository %s owned by %s.' % \
+ ( str( repository.changeset_revision ), str( repository.name ), str( repository.owner ) ) )
+ install_and_test_statistics_dict[ 'successful_repository_installations' ].append( repository_identifier_dict )
+ tool_test_results_dict[ 'successful_installations' ][ 'current_repository' ].append( repository_identifier_dict )
+ params = dict( test_install_error=False,
+ do_not_test=False )
+ if repository.missing_repository_dependencies:
+ params[ 'test_install_error' ] = True
+ # Keep statistics for this repository's repository dependencies that resulted in installation errors.
+ for missing_repository_dependency in repository.missing_repository_dependencies:
+ tool_shed = str( missing_repository_dependency.tool_shed )
+ name = str( missing_repository_dependency.name )
+ owner = str( missing_repository_dependency.owner )
+ changset_revision = str( missing_repository_dependency.changeset_revision )
+ error_message = unicodify( missing_repository_dependency.error_message )
+ missing_repository_dependency_info_dict = dict( tool_shed=tool_shed,
+ name=name,
+ owner=owner,
+ changset_revision=changset_revision,
+ error_message=error_message )
+ install_and_test_statistics_dict[ 'repositories_with_installation_error' ].append( missing_repository_dependency_dict )
+ tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( missing_repository_dependency_info_dict )
+ if repository.missing_tool_dependencies:
+ params[ 'test_install_error' ] = True
+ # Keep statistics for this repository's tool dependencies that resulted in installation errors.
+ for missing_tool_dependency in repository.missing_tool_dependencies:
+ name = str( missing_tool_dependency.name )
+ type = str( missing_tool_dependency.type )
+ version = str( missing_tool_dependency.version )
+ error_message = unicodify( missing_tool_dependency.error_message )
+ missing_tool_dependency_info_dict = dict( type=type,
+ name=name,
+ version=version,
+ error_message=error_message )
+ install_and_test_statistics_dict[ 'tool_dependencies_with_installation_error' ].append( missing_tool_dependency_info_dict )
+ tool_test_results_dict[ 'installation_errors' ][ 'tool_dependencies' ].append( missing_tool_dependency_info_dict )
+ if repository.installed_repository_dependencies:
+ # Keep statistics for this repository's tool dependencies that resulted in successful installations.
+ for repository_dependency in repository.installed_repository_dependencies:
+ tool_shed = str( repository_dependency.tool_shed )
+ name = str( repository_dependency.name )
+ owner = str( repository_dependency.owner )
+ changeset_revision = str( repository_dependency.changeset_revision )
+ repository_dependency_info_dict = dict( tool_shed=tool_shed,
+ name=name,
+ owner=owner,
+ changeset_revision=changeset_revision )
+ install_and_test_statistics_dict[ 'successful_repository_installations' ].append( repository_dependency_info_dict )
+ tool_test_results_dict[ 'successful_installations' ][ 'repository_dependencies' ].append( repository_dependency_info_dict )
+ if repository.installed_tool_dependencies:
+ # Keep statistics for this repository's tool dependencies that resulted in successful installations.
+ for tool_dependency in repository.installed_tool_dependencies:
+ name = str( tool_dependency.name )
+ type = str( tool_dependency.type )
+ version = str( tool_dependency.version )
+ installation_directory = tool_dependency.installation_directory( app )
+ tool_dependency_info_dict = dict( type=type,
+ name=name,
+ version=version,
+ installation_directory=installation_directory )
+ install_and_test_statistics_dict[ 'successful_tool_dependency_installations' ].append( tool_dependency_info_dict )
+ tool_test_results_dict[ 'successful_installations' ][ 'tool_dependencies' ].append( tool_dependency_info_dict )
+ return params, install_and_test_statistics_dict, tool_test_results_dict
+
def register_test_result( url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params, can_update_tool_shed ):
"""
Update the repository metadata tool_test_results and appropriate flags using the Tool SHed API. This method
diff -r 3bbb2f8e43b109b5adc8925a52ed23d9f31cd236 -r 4ac5eb19c0bdd45b558f1c262198009e0106f49c test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -207,15 +207,40 @@
tool_test_results_dict = install_and_test_base_util.get_tool_test_results_dict( tool_test_results_dicts )
# See if this repository should be skipped for any reason.
this_repository_is_in_the_exclude_list = False
+ requires_excluded = False
skip_reason = None
for exclude_dict in exclude_list:
reason = exclude_dict[ 'reason' ]
exclude_repositories = exclude_dict[ 'repositories' ]
+ # 'repositories':
+ # [( name, owner, changeset_revision if changeset_revision else None ),
+ # ( name, owner, changeset_revision if changeset_revision else None )]
if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
this_repository_is_in_the_exclude_list = True
skip_reason = reason
break
- if this_repository_is_in_the_exclude_list:
+ if not this_repository_is_in_the_exclude_list:
+ # Skip this repository if it has a repository dependency that is in the exclude list.
+ repository_dependency_dicts, error_message = \
+ install_and_test_base_util.get_repository_dependencies_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
+ encoded_repository_metadata_id )
+ if error_message:
+ log.debug( 'Error getting repository dependencies for revision %s of repository %s owned by %s:' % \
+ ( changeset_revision, name, owner ) )
+ log.debug( error_message )
+ else:
+ for repository_dependency_dict in repository_dependency_dicts:
+ rd_name = repository_dependency_dict[ 'name' ]
+ rd_owner = repository_dependency_dict[ 'owner' ]
+ rd_changeset_revision = repository_dependency_dict[ 'changeset_revision' ]
+ if ( rd_name, rd_owner, rd_changeset_revision ) in exclude_repositories or \
+ ( rd_name, rd_owner, None ) in exclude_repositories:
+ skip_reason = 'This repository requires revision %s of repository %s owned by %s which is excluded from testing.' % \
+ ( rd_changeset_revision, rd_name, rd_owner )
+ requires_excluded = True
+ break
+ if this_repository_is_in_the_exclude_list or requires_excluded:
+ # If this repository is being skipped, register the reason.
tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
params = dict( do_not_test=False )
# TODO: do something useful with response_dict
@@ -243,7 +268,8 @@
# Even if the repository failed to install, execute the uninstall method, in case a dependency did succeed.
log.debug( 'Attempting to uninstall revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
try:
- repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
+ repository = \
+ test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
except Exception, e:
error_message = 'Unable to find revision %s of repository %s owned by %s: %s.' % \
( changeset_revision, name, owner, str( e ) )
@@ -269,19 +295,13 @@
log.debug( 'Installation succeeded for revision %s of repository %s owned by %s.' % \
( changeset_revision, name, owner ) )
else:
- log.debug( 'Installation succeeded for revision %s of repository %s owned by %s.' % \
- ( changeset_revision, name, owner ) )
- # Keep statistics for this repository's tool dependencies that resulted in installation errors.
- for missing_tool_dependency in repository.missing_tool_dependencies:
- name = str( missing_tool_dependency.name )
- type = str( missing_tool_dependency.type )
- version = str( missing_tool_dependency.version )
- error_message = unicodify( missing_tool_dependency.error_message )
- missing_tool_dependency_info_dict = dict( type=type,
- name=name,
- version=version,
- error_message=error_message )
- install_and_test_statistics_dict[ 'tool_dependencies_with_installation_error' ].append( missing_tool_dependency_info_dict )
+ # The repository was successfully installed.
+ params, install_and_test_statistics_dict, tool_test_results_dict = \
+ install_and_test_base_util.register_installed_and_missing_dependencies( app,
+ repository,
+ repository_identifier_dict,
+ install_and_test_statistics_dict,
+ tool_test_results_dict )
# Add an empty 'missing_test_results' entry if it is missing from the tool_test_results_dict. The
# ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered information in the
# 'missing_test_components' entry of the tool_test_results_dict dictionary for repositories that are
@@ -297,25 +317,28 @@
repository_dict=repository_dict,
tool_test_results_dicts=tool_test_results_dicts,
tool_test_results_dict=tool_test_results_dict,
+ params=params,
can_update_tool_shed=can_update_tool_shed )
# Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
test_toolbox.toolbox = app.toolbox
else:
# This repository and all of its dependencies were successfully installed.
- install_and_test_statistics_dict[ 'successful_repository_installations' ].append( repository_identifier_dict )
- # Configure and run functional tests for this repository. This is equivalent to sh run_functional_tests.sh -installed
+ # Configure and run functional tests for this repository. This is equivalent to
+ # sh run_functional_tests.sh -installed
remove_install_tests()
log.debug( 'Installation of %s succeeded, running all defined functional tests.' % str( repository.name ) )
# Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
# does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
# and the tools_functionally_correct flag to False, as well as updating tool_test_results.
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
- has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file,
- from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
+ has_test_data, shed_tools_dict = \
+ parse_tool_panel_config( galaxy_shed_tool_conf_file,
+ from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
# If the repository has a test-data directory we write the generated shed_tools_dict to a file, so the functional
# test framework can find it.
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
- log.debug( 'Saved generated shed_tools_dict to %s\nContents: %s' % ( str( galaxy_shed_tools_dict ), str( shed_tools_dict ) ) )
+ log.debug( 'Saved generated shed_tools_dict to %s\nContents: %s' % ( str( galaxy_shed_tools_dict ),
+ str( shed_tools_dict ) ) )
try:
install_and_test_statistics_dict = test_repository_tools( app,
repository,
@@ -327,10 +350,9 @@
exception_message = 'Error executing tests for repository %s: %s' % ( name, str( e ) )
log.exception( exception_message )
tool_test_results_dict[ 'failed_tests' ].append( exception_message )
+ install_and_test_statistics_dict[ 'at_least_one_test_failed' ].append( repository_identifier_dict )
# Record the status of this repository in the tool shed.
- params = dict( tools_functionally_correct=False,
- do_not_test=False,
- test_install_error=False )
+ params[ 'tools_functionally_correct' ] = False
# TODO: do something useful with response_dict
response_dict = \
install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
@@ -339,7 +361,6 @@
repository_dict,
params,
can_update_tool_shed )
- install_and_test_statistics_dict[ 'at_least_one_test_failed' ].append( repository_identifier_dict )
return install_and_test_statistics_dict, error_message
def main():
@@ -564,11 +585,12 @@
all_tests_passed = install_and_test_statistics_dict[ 'all_tests_passed' ]
at_least_one_test_failed = install_and_test_statistics_dict[ 'at_least_one_test_failed' ]
successful_repository_installations = install_and_test_statistics_dict[ 'successful_repository_installations' ]
+ successful_tool_dependency_installations = install_and_test_statistics_dict[ 'successful_tool_dependency_installations' ]
repositories_with_installation_error = install_and_test_statistics_dict[ 'repositories_with_installation_error' ]
tool_dependencies_with_installation_error = install_and_test_statistics_dict[ 'tool_dependencies_with_installation_error' ]
now = time.strftime( "%Y-%m-%d %H:%M:%S" )
print "####################################################################################"
- print "# %s - installation script for repositories containing tools completed." % now
+ print "# %s - installation and test script for repositories containing tools completed." % now
print "# Repository revisions processed: %s" % str( total_repositories_processed )
if successful_repository_installations:
print "# ----------------------------------------------------------------------------------"
@@ -586,6 +608,10 @@
print '# ----------------------------------------------------------------------------------'
print "# The following %d revisions have installation errors:" % len( repositories_with_installation_error )
install_and_test_base_util.display_repositories_by_owner( repositories_with_installation_error )
+ if successful_tool_dependency_installations:
+ print "# ----------------------------------------------------------------------------------"
+ print "# The following %d tool dependencies were successfully installed:" % len( successful_tool_dependency_installations )
+ install_and_test_base_util.display_tool_dependencies_by_name( successful_tool_dependency_installations )
if tool_dependencies_with_installation_error:
print "# ----------------------------------------------------------------------------------"
print "# The following %d tool dependencies have installation errors:" % len( tool_dependencies_with_installation_error )
@@ -691,7 +717,7 @@
tool_test_results_dict[ 'passed_tests' ].append( test_result )
# Update the repository_metadata table in the tool shed's database to include the passed tests.
passed_repository_dict = repository_identifier_dict
- results_dict[ 'all_tests_passed' ].append( passed_repository_dict )
+ install_and_test_statistics_dict[ 'all_tests_passed' ].append( passed_repository_dict )
params = dict( tools_functionally_correct=True,
do_not_test=False,
test_install_error=False )
diff -r 3bbb2f8e43b109b5adc8925a52ed23d9f31cd236 -r 4ac5eb19c0bdd45b558f1c262198009e0106f49c test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
@@ -182,8 +182,8 @@
( rd_changeset_revision, rd_name, rd_owner )
requires_excluded = True
break
- # Register the reason this repository is being skipped if, in fact, it is.
if this_repository_is_in_the_exclude_list or requires_excluded:
+ # If this repository is being skipped, register the reason.
tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
params = dict( do_not_test=False )
# TODO: do something useful with response_dict
@@ -233,77 +233,12 @@
( changeset_revision, name, owner ) )
else:
# The repository was successfully installed.
- log.debug( 'Installation succeeded for revision %s of repository %s owned by %s.' % \
- ( changeset_revision, name, owner ) )
- install_and_test_statistics_dict[ 'successful_repository_installations' ].append( repository_identifier_dict )
- tool_test_results_dict[ 'successful_installations' ][ 'current_repository' ]\
- .append( repository_identifier_dict )
- params = dict( test_install_error=False,
- do_not_test=False )
- if repository.missing_repository_dependencies:
- params[ 'test_install_error' ] = True
- # Keep statistics for this repository's repository dependencies that resulted in installation errors.
- for missing_repository_dependency in repository.missing_repository_dependencies:
- tool_shed = str( missing_repository_dependency.tool_shed )
- name = str( missing_repository_dependency.name )
- owner = str( missing_repository_dependency.owner )
- changset_revision = str( missing_repository_dependency.changeset_revision )
- error_message = unicodify( missing_repository_dependency.error_message )
- missing_repository_dependency_info_dict = dict( tool_shed=tool_shed,
- name=name,
- owner=owner,
- changset_revision=changset_revision,
- error_message=error_message )
- install_and_test_statistics_dict[ 'repositories_with_installation_error' ]\
- .append( missing_repository_dependency_dict )
- tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ]\
- .append( missing_repository_dependency_info_dict )
- if repository.missing_tool_dependencies:
- params[ 'test_install_error' ] = True
- # Keep statistics for this repository's tool dependencies that resulted in installation errors.
- for missing_tool_dependency in repository.missing_tool_dependencies:
- name = str( missing_tool_dependency.name )
- type = str( missing_tool_dependency.type )
- version = str( missing_tool_dependency.version )
- error_message = unicodify( missing_tool_dependency.error_message )
- missing_tool_dependency_info_dict = dict( type=type,
- name=name,
- version=version,
- error_message=error_message )
- install_and_test_statistics_dict[ 'tool_dependencies_with_installation_error' ]\
- .append( missing_tool_dependency_info_dict )
- tool_test_results_dict[ 'installation_errors' ][ 'tool_dependencies' ]\
- .append( missing_tool_dependency_info_dict )
- if repository.installed_repository_dependencies:
- # Keep statistics for this repository's tool dependencies that resulted in successful installations.
- for repository_dependency in repository.installed_repository_dependencies:
- tool_shed = str( repository_dependency.tool_shed )
- name = str( repository_dependency.name )
- owner = str( repository_dependency.owner )
- changeset_revision = str( repository_dependency.changeset_revision )
- repository_dependency_info_dict = dict( tool_shed=tool_shed,
- name=name,
- owner=owner,
- changeset_revision=changeset_revision )
- install_and_test_statistics_dict[ 'successful_repository_installations' ]\
- .append( repository_dependency_info_dict )
- tool_test_results_dict[ 'successful_installations' ][ 'repository_dependencies' ]\
- .append( repository_dependency_info_dict )
- if repository.installed_tool_dependencies:
- # Keep statistics for this repository's tool dependencies that resulted in successful installations.
- for tool_dependency in repository.installed_tool_dependencies:
- name = str( tool_dependency.name )
- type = str( tool_dependency.type )
- version = str( tool_dependency.version )
- installation_directory = tool_dependency.installation_directory( app )
- tool_dependency_info_dict = dict( type=type,
- name=name,
- version=version,
- installation_directory=installation_directory )
- install_and_test_statistics_dict[ 'successful_tool_dependency_installations' ]\
- .append( tool_dependency_info_dict )
- tool_test_results_dict[ 'successful_installations' ][ 'tool_dependencies' ]\
- .append( tool_dependency_info_dict )
+ params, install_and_test_statistics_dict, tool_test_results_dict = \
+ install_and_test_base_util.register_installed_and_missing_dependencies( app,
+ repository,
+ repository_identifier_dict,
+ install_and_test_statistics_dict,
+ tool_test_results_dict )
# TODO: do something useful with response_dict
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0