galaxy-commits
Threads by month
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
November 2013
- 1 participant
- 208 discussions
27 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7756b29bea29/
Changeset: 7756b29bea29
User: greg
Date: 2013-11-28 03:25:03
Summary: Fix a debug statement.
Affected #: 1 file
diff -r 849898bb73009a5be2387a03e44ff8afdeed12cc -r 7756b29bea292228d3472a7f613f5be5adee38a2 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -380,7 +380,7 @@
# module that will generate the install methods.
repository_dict, error_message = get_repository_dict( galaxy_tool_shed_url, baseline_repository_dict )
if error_message:
- log.debug( 'Error getting additional details about repository %s from the API: %s' % ( str( name ), error_message ) )
+ log.debug( 'Error getting additional details from the API: %s' % str( error_message ) )
else:
# Don't test empty repositories.
changeset_revision = baseline_repository_dict[ 'changeset_revision' ]
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Handle errors attempting to get the list of repositories to test for the Tool Shed's install and test framework.
by commits-noreply@bitbucket.org 27 Nov '13
by commits-noreply@bitbucket.org 27 Nov '13
27 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/849898bb7300/
Changeset: 849898bb7300
User: greg
Date: 2013-11-28 03:20:38
Summary: Handle errors attempting to get the list of repositories to test for the Tool Shed's install and test framework.
Affected #: 1 file
diff -r 3c7edca24e5aa0563e7078b56469730fd2bc27a1 -r 849898bb73009a5be2387a03e44ff8afdeed12cc test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -620,130 +620,137 @@
changeset_revision = str( repository_dict[ 'changeset_revision' ] )
# Populate the tool_test_results_dict.
tool_test_results_dicts, error_message = get_tool_test_results_dicts( galaxy_tool_shed_url, encoded_repository_metadata_id )
- # The preparation script ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered
- # information in the 'test_environment' and possibly the 'missing_test_components' entries of the first
- # tool_test_results_dict in the list of tool_test_results_dicts. We need to be careful to not lose this
- # information.
- tool_test_results_dict = tool_test_results_dicts.pop( 0 )
- # See if this repository should be skipped for any reason.
- skip_this_repository = False
- skip_reason = None
- for exclude_dict in exclude_list:
- reason = exclude_dict[ 'reason' ]
- exclude_repositories = exclude_dict[ 'repositories' ]
- if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
- skip_this_repository = True
- skip_reason = reason
- break
- if skip_this_repository:
- tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
- params = dict( tools_functionally_correct=False,
- do_not_test=False )
- # TODO: do something usefule with response_dict
- response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
- log.debug( "Not testing revision %s of repository %s owned by %s." % ( changeset_revision, name, owner ) )
+ if error_message:
+ log.debug( error_message )
else:
- if error_message:
- log.debug( error_message )
+ # The preparation script ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered
+ # information in the 'test_environment' and possibly the 'missing_test_components' entries of the first
+ # tool_test_results_dict in the list of tool_test_results_dicts. We need to be careful to not lose this
+ # information.
+ try:
+ tool_test_results_dict = tool_test_results_dicts.pop( 0 )
+ except Exception, e:
+ log.exception( "Invalid list of tool_test_results_dicts %s: %s" % ( str( tool_test_results_dicts ), str( e ) ) )
+ continue
+ # See if this repository should be skipped for any reason.
+ skip_this_repository = False
+ skip_reason = None
+ for exclude_dict in exclude_list:
+ reason = exclude_dict[ 'reason' ]
+ exclude_repositories = exclude_dict[ 'repositories' ]
+ if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
+ skip_this_repository = True
+ skip_reason = reason
+ break
+ if skip_this_repository:
+ tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
+ params = dict( tools_functionally_correct=False,
+ do_not_test=False )
+ # TODO: do something usefule with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
+ log.debug( "Not testing revision %s of repository %s owned by %s." % ( changeset_revision, name, owner ) )
else:
- test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
- test_environment_dict = get_test_environment( test_environment_dict )
- # Add the current time as the approximate time that this test run occurs. A similar value will also be
- # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
- # may be configured to store multiple test run results, so each must be associated with a time stamp.
- now = time.strftime( "%Y-%m-%d %H:%M:%S" )
- test_environment_dict[ 'time_tested' ] = now
- test_environment_dict[ 'galaxy_database_version' ] = get_database_version( app )
- test_environment_dict[ 'galaxy_revision' ] = get_repository_current_revision( os.getcwd() )
- tool_test_results_dict[ 'test_environment' ] = test_environment_dict
- tool_test_results_dict[ 'passed_tests' ] = []
- tool_test_results_dict[ 'failed_tests' ] = []
- tool_test_results_dict[ 'installation_errors' ] = dict( current_repository=[], repository_dependencies=[], tool_dependencies=[] )
- repository, error_message = install_repository( app, repository_dict )
if error_message:
- tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = error_message
- # Even if the repository failed to install, execute the uninstall method, in case a dependency did succeed.
- log.debug( 'Attempting to uninstall repository %s owned by %s.' % ( name, owner ) )
- try:
- repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
- except Exception, e:
- error_message = 'Unable to find installed repository %s owned by %s: %s.' % ( name, owner, str( e ) )
- log.exception( error_message )
- test_result = dict( tool_shed=galaxy_tool_shed_url,
- name=name,
- owner=owner,
- changeset_revision=changeset_revision,
- error_message=error_message )
- tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( test_result )
- params = dict( tools_functionally_correct=False,
- test_install_error=True,
- do_not_test=False )
- # TODO: do something usefule with response_dict
- response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
- try:
- if deactivate:
- # We are deactivating this repository and all of its repository dependencies.
- deactivate_repository( app, repository_dict )
+ log.debug( error_message )
+ else:
+ test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
+ test_environment_dict = get_test_environment( test_environment_dict )
+ # Add the current time as the approximate time that this test run occurs. A similar value will also be
+ # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
+ # may be configured to store multiple test run results, so each must be associated with a time stamp.
+ now = time.strftime( "%Y-%m-%d %H:%M:%S" )
+ test_environment_dict[ 'time_tested' ] = now
+ test_environment_dict[ 'galaxy_database_version' ] = get_database_version( app )
+ test_environment_dict[ 'galaxy_revision' ] = get_repository_current_revision( os.getcwd() )
+ tool_test_results_dict[ 'test_environment' ] = test_environment_dict
+ tool_test_results_dict[ 'passed_tests' ] = []
+ tool_test_results_dict[ 'failed_tests' ] = []
+ tool_test_results_dict[ 'installation_errors' ] = dict( current_repository=[], repository_dependencies=[], tool_dependencies=[] )
+ repository, error_message = install_repository( app, repository_dict )
+ if error_message:
+ tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = error_message
+ # Even if the repository failed to install, execute the uninstall method, in case a dependency did succeed.
+ log.debug( 'Attempting to uninstall repository %s owned by %s.' % ( name, owner ) )
+ try:
+ repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
+ except Exception, e:
+ error_message = 'Unable to find installed repository %s owned by %s: %s.' % ( name, owner, str( e ) )
+ log.exception( error_message )
+ test_result = dict( tool_shed=galaxy_tool_shed_url,
+ name=name,
+ owner=owner,
+ changeset_revision=changeset_revision,
+ error_message=error_message )
+ tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( test_result )
+ params = dict( tools_functionally_correct=False,
+ test_install_error=True,
+ do_not_test=False )
+ # TODO: do something usefule with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
+ try:
+ if deactivate:
+ # We are deactivating this repository and all of its repository dependencies.
+ deactivate_repository( app, repository_dict )
+ else:
+ # We are uninstalling this repository and all of its repository dependencies.
+ uninstall_repository( app, repository_dict )
+ except:
+ log.exception( 'Encountered error attempting to deactivate or uninstall %s.', str( repository_dict[ 'name' ] ) )
+ results_dict[ 'repositories_failed_install' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
+ log.debug( 'Repository %s failed to install correctly.' % str( name ) )
+ else:
+ # Configure and run functional tests for this repository. This is equivalent to sh run_functional_tests.sh -installed
+ remove_install_tests()
+ log.debug( 'Installation of %s succeeded, running all defined functional tests.' % str( repository.name ) )
+ # Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
+ # does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
+ # and the tools_functionally_correct flag to False, as well as updating tool_test_results.
+ file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
+ has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file,
+ from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
+ # Add an empty 'missing_test_results' entry if it is missing from the tool_test_results_dict. The
+ # ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered information in the
+ # 'missing_test_components' entry of the tool_test_results_dict dictionary for repositories that are
+ # missing test components.
+ if 'missing_test_components' not in tool_test_results_dict:
+ tool_test_results_dict[ 'missing_test_components' ] = []
+ missing_tool_dependencies = get_missing_tool_dependencies( repository )
+ if missing_tool_dependencies or repository.missing_repository_dependencies:
+ results_dict = handle_missing_dependencies( app,
+ repository,
+ missing_tool_dependencies,
+ repository_dict,
+ tool_test_results_dicts,
+ tool_test_results_dict,
+ results_dict )
else:
- # We are uninstalling this repository and all of its repository dependencies.
- uninstall_repository( app, repository_dict )
- except:
- log.exception( 'Encountered error attempting to deactivate or uninstall %s.', str( repository_dict[ 'name' ] ) )
- results_dict[ 'repositories_failed_install' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
- log.debug( 'Repository %s failed to install correctly.' % str( name ) )
- else:
- # Configure and run functional tests for this repository. This is equivalent to sh run_functional_tests.sh -installed
- remove_install_tests()
- log.debug( 'Installation of %s succeeded, running all defined functional tests.' % str( repository.name ) )
- # Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
- # does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
- # and the tools_functionally_correct flag to False, as well as updating tool_test_results.
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
- has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file,
- from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
- # Add an empty 'missing_test_results' entry if it is missing from the tool_test_results_dict. The
- # ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered information in the
- # 'missing_test_components' entry of the tool_test_results_dict dictionary for repositories that are
- # missing test components.
- if 'missing_test_components' not in tool_test_results_dict:
- tool_test_results_dict[ 'missing_test_components' ] = []
- missing_tool_dependencies = get_missing_tool_dependencies( repository )
- if missing_tool_dependencies or repository.missing_repository_dependencies:
- results_dict = handle_missing_dependencies( app,
- repository,
- missing_tool_dependencies,
- repository_dict,
- tool_test_results_dicts,
- tool_test_results_dict,
- results_dict )
- else:
- # If the repository has a test-data directory we write the generated shed_tools_dict to a file, so the functional
- # test framework can find it.
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
- log.debug( 'Saved generated shed_tools_dict to %s\nContents: %s' % ( str( galaxy_shed_tools_dict ), str( shed_tools_dict ) ) )
- try:
- results_dict = test_repository_tools( app,
- repository,
- repository_dict,
- tool_test_results_dicts,
- tool_test_results_dict,
- results_dict )
- except Exception, e:
- exception_message = 'Error executing tests for repository %s: %s' % ( name, str( e ) )
- log.exception( exception_message )
- tool_test_results_dict[ 'failed_tests' ].append( exception_message )
- # Record the status of this repository in the tool shed.
- params = dict( tools_functionally_correct=False,
- do_not_test=False,
- test_install_error=False )
- # TODO: do something usefule with response_dict
- response_dict = register_test_result( galaxy_tool_shed_url,
- tool_test_results_dicts,
- tool_test_results_dict,
- repository_dict,
- params )
- results_dict[ 'repositories_failed' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
- total_repositories_tested += 1
+ # If the repository has a test-data directory we write the generated shed_tools_dict to a file, so the functional
+ # test framework can find it.
+ file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
+ log.debug( 'Saved generated shed_tools_dict to %s\nContents: %s' % ( str( galaxy_shed_tools_dict ), str( shed_tools_dict ) ) )
+ try:
+ results_dict = test_repository_tools( app,
+ repository,
+ repository_dict,
+ tool_test_results_dicts,
+ tool_test_results_dict,
+ results_dict )
+ except Exception, e:
+ exception_message = 'Error executing tests for repository %s: %s' % ( name, str( e ) )
+ log.exception( exception_message )
+ tool_test_results_dict[ 'failed_tests' ].append( exception_message )
+ # Record the status of this repository in the tool shed.
+ params = dict( tools_functionally_correct=False,
+ do_not_test=False,
+ test_install_error=False )
+ # TODO: do something usefule with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url,
+ tool_test_results_dicts,
+ tool_test_results_dict,
+ repository_dict,
+ params )
+ results_dict[ 'repositories_failed' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
+ total_repositories_tested += 1
results_dict[ 'total_repositories_tested' ] = total_repositories_tested
return results_dict, error_message
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Minor fixes and adjustments to the tool shed's install and test framework.
by commits-noreply@bitbucket.org 27 Nov '13
by commits-noreply@bitbucket.org 27 Nov '13
27 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/3c7edca24e5a/
Changeset: 3c7edca24e5a
User: greg
Date: 2013-11-28 03:10:52
Summary: Minor fixes and adjustments to the tool shed's install and test framework.
Affected #: 3 files
diff -r 32205f911e7442fb6eed7155f8306df7aafb731e -r 3c7edca24e5aa0563e7078b56469730fd2bc27a1 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -259,6 +259,11 @@
# Create a new dictionary for the most recent test run.
tool_test_results_dict = {}
test_environment_dict = tool_test_results_dict.get( 'test_environment', {} )
+ # Add the current time as the approximate time that this test run occurs. A similar value will also be
+ # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
+ # may be configured to store multiple test run results, so each must be associated with a time stamp.
+ now = time.strftime( "%Y-%m-%d %H:%M:%S" )
+ test_environment_dict[ 'time_tested' ] = now
test_environment_dict[ 'tool_shed_database_version' ] = get_database_version( app )
test_environment_dict[ 'tool_shed_mercurial_version' ] = __version__.version
test_environment_dict[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
diff -r 32205f911e7442fb6eed7155f8306df7aafb731e -r 3c7edca24e5aa0563e7078b56469730fd2bc27a1 lib/tool_shed/util/container_util.py
--- a/lib/tool_shed/util/container_util.py
+++ b/lib/tool_shed/util/container_util.py
@@ -1052,17 +1052,22 @@
"""Return a folder hierarchy containing tool dependencies."""
# This container is displayed only in the tool shed.
if tool_test_results_dicts:
- multiple_tool_test_results_dicts = len( tool_test_results_dicts ) > 1
test_results_dict_id = 0
folder_id += 1
tool_test_results_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
+ multiple_tool_test_results_dicts = len( tool_test_results_dicts ) > 1
+ if multiple_tool_test_results_dicts:
+ folder_id += 1
+ test_runs_folder = Folder( id=folder_id, key='test_runs', label='Test runs', parent=tool_test_results_root_folder )
+ tool_test_results_root_folder.folders.append( test_runs_folder )
for index, tool_test_results_dict in enumerate( tool_test_results_dicts ):
test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
if test_environment_dict is not None:
time_tested = test_environment_dict.get( 'time_tested', 'unknown_%d' % index )
if multiple_tool_test_results_dicts:
folder_id += 1
- containing_folder = Folder( id=folder_id, key='test_results', label=time_tested, parent=tool_test_results_root_folder )
+ containing_folder = Folder( id=folder_id, key='test_results', label=time_tested, parent=test_runs_folder )
+ test_runs_folder.folders.append( containing_folder )
else:
containing_folder = tool_test_results_root_folder
test_results_dict_id += 1
@@ -1150,6 +1155,7 @@
key='installation_errors',
label='Installation errors',
parent=containing_folder )
+ containing_folder.installation_errors.append( installation_error_base_folder )
if current_repository_errors:
folder_id += 1
subfolder = Folder( id=folder_id,
@@ -1200,9 +1206,6 @@
error_message=tool_dependency_error_dict.get( 'error_message', '' ) )
subfolder.tool_dependency_installation_errors.append( tool_dependency_installation_error )
installation_error_base_folder.folders.append( subfolder )
- containing_folder.installation_errors.append( installation_error_base_folder )
- if multiple_tool_test_results_dicts:
- tool_test_results_root_folder.folders.append( containing_folder )
else:
tool_test_results_root_folder = None
return folder_id, tool_test_results_root_folder
diff -r 32205f911e7442fb6eed7155f8306df7aafb731e -r 3c7edca24e5aa0563e7078b56469730fd2bc27a1 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -1206,7 +1206,8 @@
response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
log.debug( 'Revision %s of repository %s installed and passed functional tests.' % ( str( changeset_revision ), str( name ) ) )
else:
- tool_test_results_dict[ 'failed_tests' ].append( extract_log_data( result, from_tool_test=True ) )
+ # The extract_log_data() method returns a list.
+ tool_test_results_dict[ 'failed_tests' ] = extract_log_data( result, from_tool_test=True )
results_dict[ 'repositories_failed' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
set_do_not_test = not is_latest_downloadable_revision( galaxy_tool_shed_url, repository_dict )
params = dict( tools_functionally_correct=False,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
27 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/32205f911e74/
Changeset: 32205f911e74
User: greg
Date: 2013-11-27 21:53:29
Summary: A bit of code cleanup.
Affected #: 1 file
diff -r 8b613225f1def637af4774b860578a77a7fb0fe4 -r 32205f911e7442fb6eed7155f8306df7aafb731e lib/tool_shed/util/container_util.py
--- a/lib/tool_shed/util/container_util.py
+++ b/lib/tool_shed/util/container_util.py
@@ -1066,9 +1066,6 @@
else:
containing_folder = tool_test_results_root_folder
test_results_dict_id += 1
- #folder_id += 1
- #test_results_folder = Folder( id=folder_id, key='test_results', label='Automated test environment', parent=containing_folder )
- #containing_folder.folders.append( test_results_folder )
folder_id += 1
folder = Folder( id=folder_id, key='test_environment', label='Automated test environment', parent=containing_folder )
containing_folder.folders.append( folder )
@@ -1204,7 +1201,6 @@
subfolder.tool_dependency_installation_errors.append( tool_dependency_installation_error )
installation_error_base_folder.folders.append( subfolder )
containing_folder.installation_errors.append( installation_error_base_folder )
- #containing_folder.folders.append( containing_folder )
if multiple_tool_test_results_dicts:
tool_test_results_root_folder.folders.append( containing_folder )
else:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
27 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/8b613225f1de/
Changeset: 8b613225f1de
User: Dave Bouvier
Date: 2013-11-27 21:37:02
Summary: Fix imports.
Affected #: 1 file
diff -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 -r 8b613225f1def637af4774b860578a77a7fb0fe4 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -15,6 +15,7 @@
import ConfigParser
import galaxy.webapps.tool_shed.config as tool_shed_config
import galaxy.webapps.tool_shed.model.mapping
+import tool_shed.util.shed_util_common as suc
import logging
import shutil
import tempfile
@@ -31,8 +32,6 @@
from mercurial import __version__
from optparse import OptionParser
from time import strftime
-from tool_shed.util.shed_util_common import clone_repository
-from tool_shed.util.shed_util_common import get_configured_ui
log = logging.getLogger( 'check_repositories_for_functional_tests' )
assert sys.version_info[ :2 ] >= ( 2, 6 )
@@ -129,9 +128,9 @@
testable_revision = False
# Clone the repository up to the changeset revision we're checking.
repo_dir = repository.repo_path( app )
- repo = hg.repository( get_configured_ui(), repo_dir )
+ repo = hg.repository( suc.get_configured_ui(), repo_dir )
work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-cafr" )
- cloned_ok, error_message = clone_repository( repo_dir, work_dir, changeset_revision )
+ cloned_ok, error_message = suc.clone_repository( repo_dir, work_dir, changeset_revision )
if cloned_ok:
# Iterate through all the directories in the cloned changeset revision and determine whether there's a
# directory named test-data. If this directory is not present update the metadata record for the changeset
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Fixes for the tool shed's install and test framework and enhancements to allow a configurable number of test runs to be stored and displayed.
by commits-noreply@bitbucket.org 27 Nov '13
by commits-noreply@bitbucket.org 27 Nov '13
27 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/11028b31d3f0/
Changeset: 11028b31d3f0
User: greg
Date: 2013-11-27 21:28:03
Summary: Fixes for the tool shed's install and test framework and enhancements to allow a configurable number of test runs to be stored and displayed.
Affected #: 10 files
diff -r ad38b77e96fafc96060f9ee8031094f4a15df278 -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 lib/galaxy/webapps/tool_shed/api/repositories.py
--- a/lib/galaxy/webapps/tool_shed/api/repositories.py
+++ b/lib/galaxy/webapps/tool_shed/api/repositories.py
@@ -453,5 +453,7 @@
value_mapper = { 'id' : trans.security.encode_id,
'repository_id' : trans.security.encode_id }
if repository_metadata.time_last_tested is not None:
- value_mapper[ 'time_last_tested' ] = time_ago
+ # For some reason the Dictifiable.to_dict() method in ~/galaxy/model/item_attrs.py requires
+ # a function rather than a mapped value, so just pass the time_ago function here.
+ value_mapper[ 'time_last_tested' ] = time_ago
return value_mapper
diff -r ad38b77e96fafc96060f9ee8031094f4a15df278 -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 lib/galaxy/webapps/tool_shed/api/repository_revisions.py
--- a/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
+++ b/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
@@ -166,12 +166,11 @@
repository_metadata = metadata_util.get_repository_metadata_by_id( trans, repository_metadata_id )
flush_needed = False
for key, new_value in payload.items():
- if hasattr( repository_metadata, key ):
+ if key == 'time_last_tested':
+ repository_metadata.time_last_tested = datetime.datetime.utcnow()
+ flush_needed = True
+ elif hasattr( repository_metadata, key ):
setattr( repository_metadata, key, new_value )
- if key in [ 'tools_functionally_correct', 'time_last_tested' ]:
- # Automatically update repository_metadata.time_last_tested.
- repository_metadata.time_last_tested = datetime.datetime.utcnow()
- flush_needed = True
flush_needed = True
if flush_needed:
trans.sa_session.add( repository_metadata )
@@ -192,5 +191,7 @@
value_mapper = { 'id' : trans.security.encode_id,
'repository_id' : trans.security.encode_id }
if repository_metadata.time_last_tested is not None:
+ # For some reason the Dictifiable.to_dict() method in ~/galaxy/model/item_attrs.py requires
+ # a function rather than a mapped value, so just pass the time_ago function here.
value_mapper[ 'time_last_tested' ] = time_ago
return value_mapper
diff -r ad38b77e96fafc96060f9ee8031094f4a15df278 -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 lib/galaxy/webapps/tool_shed/config.py
--- a/lib/galaxy/webapps/tool_shed/config.py
+++ b/lib/galaxy/webapps/tool_shed/config.py
@@ -53,6 +53,8 @@
self.tool_data_table_config_path = resolve_path( kwargs.get( 'tool_data_table_config_path', 'tool_data_table_conf.xml' ), self.root )
self.shed_tool_data_table_config = resolve_path( kwargs.get( 'shed_tool_data_table_config', 'shed_tool_data_table_conf.xml' ), self.root )
self.ftp_upload_dir = kwargs.get( 'ftp_upload_dir', None )
+ # Install and test framework for testing tools contained in repositories.
+ self.num_tool_test_results_saved = kwargs.get( 'num_tool_test_results_saved', 5 )
# Location for dependencies
if 'tool_dependency_dir' in kwargs:
self.tool_dependency_dir = resolve_path( kwargs.get( "tool_dependency_dir" ), self.root )
diff -r ad38b77e96fafc96060f9ee8031094f4a15df278 -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -1515,51 +1515,59 @@
tool_shed_url = web.url_for( '/', qualified=True )
functional_test_results = []
- for metadata_row in trans.sa_session.query( trans.model.RepositoryMetadata ) \
+ for repository_metadata in trans.sa_session.query( trans.model.RepositoryMetadata ) \
.filter( metadata_filter ) \
.join( trans.model.Repository ) \
.filter( and_( trans.model.Repository.table.c.deleted == False,
trans.model.Repository.table.c.private == False,
trans.model.Repository.table.c.deprecated == False,
trans.model.Repository.table.c.user_id == user.id ) ):
- if not metadata_row.tool_test_results:
- continue
- if metadata_row.changeset_revision != metadata_row.repository.tip( trans.app ):
- continue
- current_repository_errors = []
- tool_dependency_errors = []
- repository_dependency_errors = []
- description_lines = []
- # Per the RSS 2.0 specification, all dates in RSS feeds must be formatted as specified in RFC 822
- # section 5.1, e.g. Sat, 07 Sep 2002 00:00:01 UT
- time_tested = metadata_row.time_last_tested.strftime( '%a, %d %b %Y %H:%M:%S UT' )
- repository = metadata_row.repository
- # Generate a citable URL for this repository with owner and changeset revision.
- repository_citable_url = suc.url_join( tool_shed_url, 'view', user.username, repository.name, metadata_row.changeset_revision )
- passed_tests = len( metadata_row.tool_test_results.get( 'passed_tests', [] ) )
- failed_tests = len( metadata_row.tool_test_results.get( 'failed_tests', [] ) )
- missing_test_components = len( metadata_row.tool_test_results.get( 'missing_test_components', [] ) )
- installation_errors = metadata_row.tool_test_results.get( 'installation_errors', [] )
- if installation_errors:
- tool_dependency_errors = installation_errors.get( 'tool_dependencies', [] )
- repository_dependency_errors = installation_errors.get( 'repository_dependencies', [] )
- current_repository_errors = installation_errors.get( 'current_repository', [] )
- description_lines.append( '%d tests passed, %d tests failed, %d tests missing test components.' % \
- ( passed_tests, failed_tests, missing_test_components ) )
- if current_repository_errors:
- description_lines.append( '\nThis repository did not install correctly. ' )
- if tool_dependency_errors or repository_dependency_errors:
- description_lines.append( '\n%d tool dependencies and %d repository dependencies failed to install. ' % \
- ( len( tool_dependency_errors ), len( repository_dependency_errors ) ) )
- title = 'Revision %s of %s' % ( metadata_row.changeset_revision, repository.name )
- # The guid attribute in an RSS feed's list of items allows a feed reader to choose not to show an item as updated
- # if the guid is unchanged. For functional test results, the citable URL is sufficiently unique to enable
- # that behavior.
- functional_test_results.append( dict( title=title,
- guid=repository_citable_url,
- link=repository_citable_url,
- description='\n'.join( description_lines ),
- pubdate=time_tested ) )
+ repository = repository_metadata.repository
+ repo_dir = repository.repo_path( trans.app )
+ repo = hg.repository( suc.get_configured_ui(), repo_dir )
+ latest_downloadable_changeset_revsion = suc.get_latest_downloadable_changeset_revision( trans, repository, repo )
+ if repository_metadata.changeset_revision == latest_downloadable_changeset_revsion:
+ # We'll display only the test run for the latest installable revision in the rss feed.
+ tool_test_results = repository_metadata.tool_test_results
+ if tool_test_results is not None:
+ # The tool_test_results column used to contain a single dictionary, but was recently enhanced to contain
+ # a list of dictionaries, one for each install and test run. We'll display only the latest run in the rss
+ # feed for nwo.
+ if isinstance( tool_test_results, list ):
+ tool_test_results = tool_test_results[ 0 ]
+ current_repository_errors = []
+ tool_dependency_errors = []
+ repository_dependency_errors = []
+ description_lines = []
+ # Per the RSS 2.0 specification, all dates in RSS feeds must be formatted as specified in RFC 822
+ # section 5.1, e.g. Sat, 07 Sep 2002 00:00:01 UT
+ time_tested = repository_metadata.time_last_tested.strftime( '%a, %d %b %Y %H:%M:%S UT' )
+ # Generate a citable URL for this repository with owner and changeset revision.
+ repository_citable_url = suc.url_join( tool_shed_url, 'view', user.username, repository.name, repository_metadata.changeset_revision )
+ passed_tests = len( tool_test_results.get( 'passed_tests', [] ) )
+ failed_tests = len( tool_test_results.get( 'failed_tests', [] ) )
+ missing_test_components = len( tool_test_results.get( 'missing_test_components', [] ) )
+ installation_errors = tool_test_results.get( 'installation_errors', [] )
+ if installation_errors:
+ tool_dependency_errors = installation_errors.get( 'tool_dependencies', [] )
+ repository_dependency_errors = installation_errors.get( 'repository_dependencies', [] )
+ current_repository_errors = installation_errors.get( 'current_repository', [] )
+ description_lines.append( '%d tests passed, %d tests failed, %d tests missing test components.' % \
+ ( passed_tests, failed_tests, missing_test_components ) )
+ if current_repository_errors:
+ description_lines.append( '\nThis repository did not install correctly. ' )
+ if tool_dependency_errors or repository_dependency_errors:
+ description_lines.append( '\n%d tool dependencies and %d repository dependencies failed to install. ' % \
+ ( len( tool_dependency_errors ), len( repository_dependency_errors ) ) )
+ title = 'Revision %s of %s' % ( repository_metadata.changeset_revision, repository.name )
+ # The guid attribute in an RSS feed's list of items allows a feed reader to choose not to show an item as updated
+ # if the guid is unchanged. For functional test results, the citable URL is sufficiently unique to enable
+ # that behavior.
+ functional_test_results.append( dict( title=title,
+ guid=repository_citable_url,
+ link=repository_citable_url,
+ description='\n'.join( description_lines ),
+ pubdate=time_tested ) )
trans.response.set_content_type( 'application/rss+xml' )
return trans.fill_template( '/rss.mako',
title='Tool functional test results',
diff -r ad38b77e96fafc96060f9ee8031094f4a15df278 -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 lib/galaxy/webapps/tool_shed/model/__init__.py
--- a/lib/galaxy/webapps/tool_shed/model/__init__.py
+++ b/lib/galaxy/webapps/tool_shed/model/__init__.py
@@ -229,11 +229,14 @@
class RepositoryMetadata( object, Dictifiable ):
- dict_collection_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable', 'has_repository_dependencies', 'includes_datatypes',
- 'includes_tools', 'includes_tool_dependencies', 'includes_tools_for_display_in_tool_panel', 'includes_workflows', 'time_last_tested' )
- dict_element_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable', 'tools_functionally_correct', 'do_not_test',
- 'test_install_error', 'time_last_tested', 'tool_test_results', 'has_repository_dependencies', 'includes_datatypes',
- 'includes_tools', 'includes_tool_dependencies', 'includes_tools_for_display_in_tool_panel', 'includes_workflows' )
+ dict_collection_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable', 'missing_test_components',
+ 'tools_functionally_correct', 'do_not_test', 'test_install_error', 'has_repository_dependencies',
+ 'includes_datatypes', 'includes_tools', 'includes_tool_dependencies', 'includes_tools_for_display_in_tool_panel',
+ 'includes_workflows', 'time_last_tested' )
+ dict_element_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable', 'missing_test_components',
+ 'tools_functionally_correct', 'do_not_test', 'test_install_error', 'time_last_tested', 'tool_test_results',
+ 'has_repository_dependencies', 'includes_datatypes', 'includes_tools', 'includes_tool_dependencies',
+ 'includes_tools_for_display_in_tool_panel', 'includes_workflows' )
def __init__( self, id=None, repository_id=None, changeset_revision=None, metadata=None, tool_versions=None, malicious=False, downloadable=False,
missing_test_components=None, tools_functionally_correct=False, do_not_test=False, test_install_error=False, time_last_tested=None,
@@ -242,8 +245,8 @@
self.id = id
self.repository_id = repository_id
self.changeset_revision = changeset_revision
- self.metadata = metadata or dict()
- self.tool_versions = tool_versions or dict()
+ self.metadata = metadata
+ self.tool_versions = tool_versions
self.malicious = malicious
self.downloadable = downloadable
self.missing_test_components = missing_test_components
@@ -251,7 +254,7 @@
self.do_not_test = do_not_test
self.test_install_error = test_install_error
self.time_last_tested = time_last_tested
- self.tool_test_results = tool_test_results or dict()
+ self.tool_test_results = tool_test_results
self.has_repository_dependencies = has_repository_dependencies
# We don't consider the special case has_repository_dependencies_only_if_compiling_contained_td here.
self.includes_datatypes = includes_datatypes
diff -r ad38b77e96fafc96060f9ee8031094f4a15df278 -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 lib/tool_shed/scripts/api/common.py
--- a/lib/tool_shed/scripts/api/common.py
+++ b/lib/tool_shed/scripts/api/common.py
@@ -158,7 +158,7 @@
print e.read( 1024 )
sys.exit( 1 )
else:
- return 'Error. '+ str( e.read( 1024 ) )
+ return 'Error. ' + str( e.read( 1024 ) )
if not return_formatted:
return r
print 'Response'
diff -r ad38b77e96fafc96060f9ee8031094f4a15df278 -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -25,6 +25,7 @@
from galaxy.model.orm import and_
from galaxy.model.orm import not_
from galaxy.model.orm import select
+from galaxy.util import listify
from mercurial import hg
from mercurial import ui
from mercurial import __version__
@@ -41,16 +42,17 @@
"""Application that enables updating repository_metadata table records in the Tool Shed."""
def __init__( self, config ):
- if config.database_connection is False:
- config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % str( config.database )
- log.debug( 'Using database connection: %s' % str( config.database_connection ) )
+ self.config = config
+ if self.config.database_connection is False:
+ self.config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % str( config.database )
+ log.debug( 'Using database connection: %s' % str( self.config.database_connection ) )
# Setup the database engine and ORM
- self.model = galaxy.webapps.tool_shed.model.mapping.init( config.file_path,
- config.database_connection,
+ self.model = galaxy.webapps.tool_shed.model.mapping.init( self.config.file_path,
+ self.config.database_connection,
engine_options={},
create_tables=False )
self.hgweb_config_manager = self.model.hgweb_config_manager
- self.hgweb_config_manager.hgweb_config_dir = config.hgweb_config_dir
+ self.hgweb_config_manager.hgweb_config_dir = self.config.hgweb_config_dir
log.debug( 'Using hgweb.config file: %s' % str( self.hgweb_config_manager.hgweb_config ) )
@property
@@ -72,19 +74,7 @@
"id": "tool_wrapper",
"name": "Map with Tool Wrapper",
"requirements": [],
- "tests": [
- {
- "inputs": [ [ "parameter", "value" ], [ "other_parameter", "other_value" ], ],
- "name": "Test-1",
- "outputs": [
- [
- "output_field_name",
- "output_file_name.bed"
- ]
- ],
- "required_files": [ '1.bed', '2.bed', '3.bed' ]
- }
- ],
+ "tests": [],
"tool_config": "database/community_files/000/repo_1/tool_wrapper.xml",
"tool_type": "default",
"version": "1.2.3",
@@ -120,11 +110,6 @@
missing_test_components = []
repository = repository_metadata.repository
records_checked += 1
- # Create the tool_test_results_dict dictionary, using the dictionary from the previous test run if available.
- if repository_metadata.tool_test_results:
- tool_test_results_dict = repository_metadata.tool_test_results
- else:
- tool_test_results_dict = {}
# Check the next repository revision.
changeset_revision = str( repository_metadata.changeset_revision )
name = repository.name
@@ -141,7 +126,7 @@
tool_dicts = metadata.get( 'tools', None )
if tool_dicts is not None:
has_test_data = False
- testable_revision_found = False
+ testable_revision = False
# Clone the repository up to the changeset revision we're checking.
repo_dir = repository.repo_path( app )
repo = hg.repository( get_configured_ui(), repo_dir )
@@ -236,7 +221,7 @@
if test_errors not in missing_test_components:
missing_test_components.append( test_errors )
if tool_has_tests and has_test_files:
- testable_revision_found = True
+ testable_revision = True
# Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
if os.path.exists( work_dir ):
shutil.rmtree( work_dir )
@@ -253,6 +238,27 @@
if 'missing_components' in invalid_test:
print '# %s' % invalid_test[ 'missing_components' ]
if not info_only:
+ # Get or create the list of tool_test_results dictionaries.
+ if repository_metadata.tool_test_results is not None:
+ # We'll listify the column value in case it uses the old approach of storing the results of only a single test run.
+ tool_test_results_dicts = listify( repository_metadata.tool_test_results )
+ else:
+ tool_test_results_dicts = []
+ if tool_test_results_dicts:
+ # Inspect the tool_test_results_dict for the last test run in case it contains only a test_environment
+ # entry. This will occur with multiple runs of this script without running the associated
+                # install_and_test_tool_shed_repositories.sh script which will further populate the tool_test_results_dict.
+ tool_test_results_dict = tool_test_results_dicts[ 0 ]
+ if len( tool_test_results_dict ) <= 1:
+ # We can re-use the mostly empty tool_test_results_dict for this run, but we need to eliminate it from
+ # the list of tool_test_results_dicts since it will be re-inserted later.
+ tool_test_results_dict = tool_test_results_dicts.pop( 0 )
+ else:
+ # The latest tool_test_results_dict has been populated with the results of a test run, so it cannot be used.
+ tool_test_results_dict = {}
+ else:
+ # Create a new dictionary for the most recent test run.
+ tool_test_results_dict = {}
test_environment_dict = tool_test_results_dict.get( 'test_environment', {} )
test_environment_dict[ 'tool_shed_database_version' ] = get_database_version( app )
test_environment_dict[ 'tool_shed_mercurial_version' ] = __version__.version
@@ -270,12 +276,24 @@
# changeset revision will be created, either of which will be automatically checked and flagged as appropriate.
# In the install and test script, this behavior is slightly different, since we do want to always run functional
# tests on the most recent downloadable changeset revision.
- if should_set_do_not_test_flag( app, repository_metadata.repository, changeset_revision ) and not testable_revision_found:
+ if should_set_do_not_test_flag( app, repository, changeset_revision, testable_revision ):
+ print "# Setting do_not_test to True on revision %s of repository %s because it is missing test components" % \
+ ( changeset_revision, name )
+ print "# and it is not the latest downloadable revision."
repository_metadata.do_not_test = True
repository_metadata.tools_functionally_correct = False
repository_metadata.missing_test_components = True
tool_test_results_dict[ 'missing_test_components' ] = missing_test_components
- repository_metadata.tool_test_results = tool_test_results_dict
+ # Store only the configured number of test runs.
+ num_tool_test_results_saved = int( app.config.num_tool_test_results_saved )
+ if len( tool_test_results_dicts ) >= num_tool_test_results_saved:
+ test_results_index = num_tool_test_results_saved - 1
+ new_tool_test_results_dicts = tool_test_results_dicts[ :test_results_index ]
+ else:
+ new_tool_test_results_dicts = [ d for d in tool_test_results_dicts ]
+ # Insert the new element into the first position in the list.
+ new_tool_test_results_dicts.insert( 0, tool_test_results_dict )
+ repository_metadata.tool_test_results = new_tool_test_results_dicts
app.sa_session.add( repository_metadata )
app.sa_session.flush()
stop = time.time()
@@ -313,19 +331,6 @@
changelog_tuples.append( ( ctx.rev(), str( ctx ) ) )
return changelog_tuples
-def is_most_recent_downloadable_revision( app, repository, changeset_revision, downloadable_revisions ):
- # Get a list of ( numeric revision, changeset hash ) tuples from the changelog.
- changelog = get_repo_changelog_tuples( repository.repo_path( app ) )
- latest_downloadable_revision = None
- for ctx_rev, changeset_hash in changelog:
- if changeset_hash in downloadable_revisions:
- # The last changeset hash in the changelog that is present in the list of downloadable revisions will always be the most
- # recent downloadable revision, since the changelog tuples are ordered from earliest to most recent.
- latest_downloadable_revision = changeset_hash
- if latest_downloadable_revision == changeset_revision:
- return True
- return False
-
def main():
'''Script that checks repositories to see if the tools contained within them have functional tests defined.'''
parser = OptionParser()
@@ -345,7 +350,7 @@
config_parser.read( ini_file )
config_dict = {}
for key, value in config_parser.items( "app:main" ):
- config_dict[key] = value
+ config_dict[ key ] = value
config = tool_shed_config.Configuration( **config_dict )
config_section = options.section
now = strftime( "%Y-%m-%d %H:%M:%S" )
@@ -361,34 +366,27 @@
print "# Displaying extra information ( --verbosity = %d )" % options.verbosity
check_and_update_repository_metadata( app, info_only=options.info_only, verbosity=options.verbosity )
-def should_set_do_not_test_flag( app, repository, changeset_revision ):
- '''
- Returns True if:
- a) There are multiple downloadable revisions, and the provided changeset revision is not the most recent downloadable revision. In this case,
- the revision will never be updated with correct data, and re-testing it would be redundant.
- b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable revision. In this case, if
- the repository is updated with test data or functional tests, the downloadable changeset revision that was tested will either be replaced
- with the new changeset revision, or a new downloadable changeset revision will be created, either of which will be automatically checked and
- flagged as appropriate. In the install and test script, this behavior is slightly different, since we do want to always run functional tests
- on the most recent downloadable changeset revision.
- '''
- repository_revisions = app.sa_session.query( app.model.RepositoryMetadata ) \
- .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
- app.model.RepositoryMetadata.table.c.repository_id == repository.id ) ) \
- .all()
- downloadable_revisions = [ repository_metadata.changeset_revision for repository_metadata in repository_revisions ]
- is_latest_revision = is_most_recent_downloadable_revision( app, repository, changeset_revision, downloadable_revisions )
- if len( downloadable_revisions ) == 1:
- return True
- elif len( downloadable_revisions ) > 1 and is_latest_revision:
- return True
- elif len( downloadable_revisions ) > 1 and not is_latest_revision:
- return True
- else:
- return False
+def should_set_do_not_test_flag( app, repository, changeset_revision, testable_revision ):
+ """
+    The received testable_revision is True if the tool has defined tests and test files are in the repository.
+ This method returns True if the received repository has multiple downloadable revisions and the received
+ changeset_revision is not the most recent downloadable revision and the received testable_revision is False.
+ In this case, the received changeset_revision will never be updated with correct data, and re-testing it
+ would be redundant.
+ """
+ if not testable_revision:
+ repo_dir = repository.repo_path( app )
+ repo = hg.repository( suc.get_configured_ui(), repo_dir )
+ changeset_revisions = suc.get_ordered_metadata_changeset_revisions( repository, repo, downloadable=True )
+ if len( changeset_revisions ) > 1:
+ latest_downloadable_revision = changeset_revisions[ -1 ]
+ if changeset_revision != latest_downloadable_revision:
+ return True
+ return False
if __name__ == "__main__":
- # The repository_metadata.tool_test_results json value should have the following structure:
+ # The repository_metadata.tool_test_results json value should have the following list structure:
+ # [
# {
# "test_environment":
# {
@@ -467,6 +465,7 @@
# },
# ]
# }
+ # ]
#
# Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
# than the list relevant to what it is testing.
diff -r ad38b77e96fafc96060f9ee8031094f4a15df278 -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 lib/tool_shed/util/container_util.py
--- a/lib/tool_shed/util/container_util.py
+++ b/lib/tool_shed/util/container_util.py
@@ -1,7 +1,7 @@
import logging
import os
import threading
-from galaxy.util import asbool
+import galaxy.util
from galaxy.web.framework.helpers import time_ago
from tool_shed.util import common_util
from tool_shed.util import readme_util
@@ -72,8 +72,8 @@
repository_name=name,
repository_owner=owner,
changeset_revision=changeset_revision,
- prior_installation_required=asbool( prior_installation_required ),
- only_if_compiling_contained_td=asbool( only_if_compiling_contained_td ) )
+ prior_installation_required=galaxy.util.asbool( prior_installation_required ),
+ only_if_compiling_contained_td=galaxy.util.asbool( only_if_compiling_contained_td ) )
class DataManager( object ):
@@ -497,8 +497,8 @@
repository_name=name,
repository_owner=owner,
changeset_revision=changeset_revision,
- prior_installation_required=asbool( prior_installation_required ),
- only_if_compiling_contained_td=asbool( only_if_compiling_contained_td ),
+ prior_installation_required=galaxy.util.asbool( prior_installation_required ),
+ only_if_compiling_contained_td=galaxy.util.asbool( only_if_compiling_contained_td ),
error=error )
folder.invalid_repository_dependencies.append( ird )
invalid_repository_dependencies_folder.folders.append( folder )
@@ -831,16 +831,12 @@
label='Valid tools' )
containers_dict[ 'valid_tools' ] = valid_tools_root_folder
# Tool test results container.
- tool_test_results = repository_metadata.tool_test_results
+ tool_test_results = galaxy.util.listify( repository_metadata.tool_test_results )
# Only create and populate this folder if there are actual tool test results to display.
if can_display_tool_test_results( tool_test_results, exclude=exclude ):
- time_tested = repository_metadata.time_last_tested
- if time_tested is not None:
- time_tested = time_ago( time_tested )
folder_id, tool_test_results_root_folder = build_tool_test_results_folder( trans,
folder_id,
tool_test_results,
- time_tested,
label='Tool test results' )
containers_dict[ 'tool_test_results' ] = tool_test_results_root_folder
# Workflows container.
@@ -1052,153 +1048,165 @@
tool_dependencies_root_folder = None
return folder_id, tool_dependencies_root_folder
-def build_tool_test_results_folder( trans, folder_id, tool_test_results_dict, time_tested, label='Tool test results' ):
+def build_tool_test_results_folder( trans, folder_id, tool_test_results_dicts, label='Tool test results' ):
"""Return a folder hierarchy containing tool dependencies."""
# This container is displayed only in the tool shed.
- if tool_test_results_dict:
+ if tool_test_results_dicts:
+ multiple_tool_test_results_dicts = len( tool_test_results_dicts ) > 1
+ test_results_dict_id = 0
folder_id += 1
tool_test_results_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
- test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
- if test_environment_dict is not None:
- folder_id += 1
- test_results_folder = Folder( id=folder_id, key='test_results', label=label, parent=tool_test_results_root_folder )
- tool_test_results_root_folder.folders.append( test_results_folder )
- folder_id += 1
- folder = Folder( id=folder_id, key='test_environment', label='Automated test environment', parent=test_results_folder )
- test_results_folder.folders.append( folder )
- architecture = test_environment_dict.get( 'architecture', '' )
- galaxy_database_version = test_environment_dict.get( 'galaxy_database_version', '' )
- galaxy_revision = test_environment_dict.get( 'galaxy_revision', '' )
- python_version = test_environment_dict.get( 'python_version', '' )
- system = test_environment_dict.get( 'system', '' )
- time_tested = time_tested
- tool_shed_database_version = test_environment_dict.get( 'tool_shed_database_version', '' )
- tool_shed_mercurial_version = test_environment_dict.get( 'tool_shed_mercurial_version', '' )
- tool_shed_revision = test_environment_dict.get( 'tool_shed_revision', '' )
- test_environment = TestEnvironment( id=1,
- architecture=architecture,
- galaxy_database_version=galaxy_database_version,
- galaxy_revision=galaxy_revision,
- python_version=python_version,
- system=system,
- time_tested=time_tested,
- tool_shed_database_version=tool_shed_database_version,
- tool_shed_mercurial_version=tool_shed_mercurial_version,
- tool_shed_revision=tool_shed_revision )
- folder.test_environments.append( test_environment )
- not_tested_dict = tool_test_results_dict.get( 'not_tested', {} )
- if not_tested_dict:
- folder_id += 1
- folder = Folder( id=folder_id, key='not_tested', label='Not tested', parent=test_results_folder )
- test_results_folder.folders.append( folder )
- not_tested_id = 0
- not_tested = NotTested( id=not_tested_id,
- reason=not_tested_dict.get( 'reason', '' ) )
- folder.not_tested.append( not_tested )
- passed_tests_dicts = tool_test_results_dict.get( 'passed_tests', [] )
- if passed_tests_dicts:
- folder_id += 1
- folder = Folder( id=folder_id, key='passed_tests', label='Tests that passed successfully', parent=test_results_folder )
- test_results_folder.folders.append( folder )
- passed_test_id = 0
- for passed_tests_dict in passed_tests_dicts:
- passed_test_id += 1
- passed_test = PassedTest( id=passed_test_id,
- test_id=passed_tests_dict.get( 'test_id' '' ),
- tool_id=passed_tests_dict.get( 'tool_id', '' ),
- tool_version=passed_tests_dict.get( 'tool_version', '' ) )
- folder.passed_tests.append( passed_test )
- failed_tests_dicts = tool_test_results_dict.get( 'failed_tests', [] )
- if failed_tests_dicts:
- folder_id += 1
- folder = Folder( id=folder_id, key='failed_tests', label='Tests that failed', parent=test_results_folder )
- test_results_folder.folders.append( folder )
- failed_test_id = 0
- for failed_tests_dict in failed_tests_dicts:
- failed_test_id += 1
- failed_test = FailedTest( id=failed_test_id,
- stderr=failed_tests_dict.get( 'stderr', '' ),
- test_id=failed_tests_dict.get( 'test_id', '' ),
- tool_id=failed_tests_dict.get( 'tool_id', '' ),
- tool_version=failed_tests_dict.get( 'tool_version', '' ),
- traceback=failed_tests_dict.get( 'traceback', '' ) )
- folder.failed_tests.append( failed_test )
- missing_test_components_dicts = tool_test_results_dict.get( 'missing_test_components', [] )
- if missing_test_components_dicts:
- folder_id += 1
- folder = Folder( id=folder_id, key='missing_test_components', label='Tools missing tests or test data', parent=test_results_folder )
- test_results_folder.folders.append( folder )
- missing_test_component_id = 0
- for missing_test_components_dict in missing_test_components_dicts:
- missing_test_component_id += 1
- missing_test_component = MissingTestComponent( id=missing_test_component_id,
- missing_components=missing_test_components_dict.get( 'missing_components', '' ),
- tool_guid=missing_test_components_dict.get( 'tool_guid', '' ),
- tool_id=missing_test_components_dict.get( 'tool_id', '' ),
- tool_version=missing_test_components_dict.get( 'tool_version', '' ) )
- folder.missing_test_components.append( missing_test_component )
- installation_error_dicts = tool_test_results_dict.get( 'installation_errors', {} )
- if installation_error_dicts:
- current_repository_errors = installation_error_dicts.get( 'current_repository', [] )
- repository_dependency_errors = installation_error_dicts.get( 'repository_dependencies', [] )
- tool_dependency_errors = installation_error_dicts.get( 'tool_dependencies', [] )
- if current_repository_errors or repository_dependency_errors or tool_dependency_errors:
+ for index, tool_test_results_dict in enumerate( tool_test_results_dicts ):
+ test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
+ if test_environment_dict is not None:
+ time_tested = test_environment_dict.get( 'time_tested', 'unknown_%d' % index )
+ if multiple_tool_test_results_dicts:
+ folder_id += 1
+ containing_folder = Folder( id=folder_id, key='test_results', label=time_tested, parent=tool_test_results_root_folder )
+ else:
+ containing_folder = tool_test_results_root_folder
+ test_results_dict_id += 1
+ #folder_id += 1
+ #test_results_folder = Folder( id=folder_id, key='test_results', label='Automated test environment', parent=containing_folder )
+ #containing_folder.folders.append( test_results_folder )
folder_id += 1
- installation_error_base_folder = Folder( id=folder_id,
- key='installation_errors',
- label='Installation errors',
- parent=test_results_folder )
- if current_repository_errors:
+ folder = Folder( id=folder_id, key='test_environment', label='Automated test environment', parent=containing_folder )
+ containing_folder.folders.append( folder )
+ architecture = test_environment_dict.get( 'architecture', '' )
+ galaxy_database_version = test_environment_dict.get( 'galaxy_database_version', '' )
+ galaxy_revision = test_environment_dict.get( 'galaxy_revision', '' )
+ python_version = test_environment_dict.get( 'python_version', '' )
+ system = test_environment_dict.get( 'system', '' )
+ tool_shed_database_version = test_environment_dict.get( 'tool_shed_database_version', '' )
+ tool_shed_mercurial_version = test_environment_dict.get( 'tool_shed_mercurial_version', '' )
+ tool_shed_revision = test_environment_dict.get( 'tool_shed_revision', '' )
+ test_environment = TestEnvironment( id=1,
+ architecture=architecture,
+ galaxy_database_version=galaxy_database_version,
+ galaxy_revision=galaxy_revision,
+ python_version=python_version,
+ system=system,
+ time_tested=time_tested,
+ tool_shed_database_version=tool_shed_database_version,
+ tool_shed_mercurial_version=tool_shed_mercurial_version,
+ tool_shed_revision=tool_shed_revision )
+ folder.test_environments.append( test_environment )
+ not_tested_dict = tool_test_results_dict.get( 'not_tested', {} )
+ if not_tested_dict:
+ folder_id += 1
+ folder = Folder( id=folder_id, key='not_tested', label='Not tested', parent=containing_folder )
+ containing_folder.folders.append( folder )
+ not_tested_id = 0
+ not_tested = NotTested( id=not_tested_id,
+ reason=not_tested_dict.get( 'reason', '' ) )
+ folder.not_tested.append( not_tested )
+ passed_tests_dicts = tool_test_results_dict.get( 'passed_tests', [] )
+ if passed_tests_dicts:
+ folder_id += 1
+ folder = Folder( id=folder_id, key='passed_tests', label='Tests that passed successfully', parent=containing_folder )
+ containing_folder.folders.append( folder )
+ passed_test_id = 0
+ for passed_tests_dict in passed_tests_dicts:
+ passed_test_id += 1
+ passed_test = PassedTest( id=passed_test_id,
+ test_id=passed_tests_dict.get( 'test_id' '' ),
+ tool_id=passed_tests_dict.get( 'tool_id', '' ),
+ tool_version=passed_tests_dict.get( 'tool_version', '' ) )
+ folder.passed_tests.append( passed_test )
+ failed_tests_dicts = tool_test_results_dict.get( 'failed_tests', [] )
+ if failed_tests_dicts:
+ folder_id += 1
+ folder = Folder( id=folder_id, key='failed_tests', label='Tests that failed', parent=containing_folder )
+ containing_folder.folders.append( folder )
+ failed_test_id = 0
+ for failed_tests_dict in failed_tests_dicts:
+ failed_test_id += 1
+ failed_test = FailedTest( id=failed_test_id,
+ stderr=failed_tests_dict.get( 'stderr', '' ),
+ test_id=failed_tests_dict.get( 'test_id', '' ),
+ tool_id=failed_tests_dict.get( 'tool_id', '' ),
+ tool_version=failed_tests_dict.get( 'tool_version', '' ),
+ traceback=failed_tests_dict.get( 'traceback', '' ) )
+ folder.failed_tests.append( failed_test )
+ missing_test_components_dicts = tool_test_results_dict.get( 'missing_test_components', [] )
+ if missing_test_components_dicts:
+ folder_id += 1
+ folder = Folder( id=folder_id, key='missing_test_components', label='Tools missing tests or test data', parent=containing_folder )
+ containing_folder.folders.append( folder )
+ missing_test_component_id = 0
+ for missing_test_components_dict in missing_test_components_dicts:
+ missing_test_component_id += 1
+ missing_test_component = MissingTestComponent( id=missing_test_component_id,
+ missing_components=missing_test_components_dict.get( 'missing_components', '' ),
+ tool_guid=missing_test_components_dict.get( 'tool_guid', '' ),
+ tool_id=missing_test_components_dict.get( 'tool_id', '' ),
+ tool_version=missing_test_components_dict.get( 'tool_version', '' ) )
+ folder.missing_test_components.append( missing_test_component )
+ installation_error_dicts = tool_test_results_dict.get( 'installation_errors', {} )
+ if installation_error_dicts:
+ current_repository_errors = installation_error_dicts.get( 'current_repository', [] )
+ repository_dependency_errors = installation_error_dicts.get( 'repository_dependencies', [] )
+ tool_dependency_errors = installation_error_dicts.get( 'tool_dependencies', [] )
+ if current_repository_errors or repository_dependency_errors or tool_dependency_errors:
folder_id += 1
- subfolder = Folder( id=folder_id,
- key='current_repository_errors',
- label='This repository',
- parent=installation_error_base_folder )
- repository_error_id = 0
- for repository_error_dict in current_repository_errors:
- repository_error_id += 1
- repository_installation_error = RepositoryInstallationError( id=repository_error_id,
- tool_shed=repository_error_dict.get( 'tool_shed', '' ),
- name=repository_error_dict.get( 'name', '' ),
- owner=repository_error_dict.get( 'owner', '' ),
- changeset_revision=repository_error_dict.get( 'changeset_revision', '' ),
- error_message=repository_error_dict.get( 'error_message', '' ) )
- subfolder.current_repository_installation_errors.append( repository_installation_error )
- installation_error_base_folder.folders.append( subfolder )
- if repository_dependency_errors:
- folder_id += 1
- subfolder = Folder( id=folder_id,
- key='repository_dependency_errors',
- label='Repository dependencies',
- parent=installation_error_base_folder )
- repository_error_id = 0
- for repository_error_dict in repository_dependency_errors:
- repository_error_id += 1
- repository_installation_error = RepositoryInstallationError( id=repository_error_id,
- tool_shed=repository_error_dict.get( 'tool_shed', '' ),
- name=repository_error_dict.get( 'name', '' ),
- owner=repository_error_dict.get( 'owner', '' ),
- changeset_revision=repository_error_dict.get( 'changeset_revision', '' ),
- error_message=repository_error_dict.get( 'error_message', '' ) )
- subfolder.repository_installation_errors.append( repository_installation_error )
- installation_error_base_folder.folders.append( subfolder )
- if tool_dependency_errors:
- folder_id += 1
- subfolder = Folder( id=folder_id,
- key='tool_dependency_errors',
- label='Tool dependencies',
- parent=installation_error_base_folder )
- tool_dependency_error_id = 0
- for tool_dependency_error_dict in tool_dependency_errors:
- tool_dependency_error_id += 1
- tool_dependency_installation_error = ToolDependencyInstallationError( id=tool_dependency_error_id,
- type=tool_dependency_error_dict.get( 'type', '' ),
- name=tool_dependency_error_dict.get( 'name', '' ),
- version=tool_dependency_error_dict.get( 'version', '' ),
- error_message=tool_dependency_error_dict.get( 'error_message', '' ) )
- subfolder.tool_dependency_installation_errors.append( tool_dependency_installation_error )
- installation_error_base_folder.folders.append( subfolder )
- test_results_folder.installation_errors.append( installation_error_base_folder )
+ installation_error_base_folder = Folder( id=folder_id,
+ key='installation_errors',
+ label='Installation errors',
+ parent=containing_folder )
+ if current_repository_errors:
+ folder_id += 1
+ subfolder = Folder( id=folder_id,
+ key='current_repository_errors',
+ label='This repository',
+ parent=installation_error_base_folder )
+ repository_error_id = 0
+ for repository_error_dict in current_repository_errors:
+ repository_error_id += 1
+ repository_installation_error = RepositoryInstallationError( id=repository_error_id,
+ tool_shed=repository_error_dict.get( 'tool_shed', '' ),
+ name=repository_error_dict.get( 'name', '' ),
+ owner=repository_error_dict.get( 'owner', '' ),
+ changeset_revision=repository_error_dict.get( 'changeset_revision', '' ),
+ error_message=repository_error_dict.get( 'error_message', '' ) )
+ subfolder.current_repository_installation_errors.append( repository_installation_error )
+ installation_error_base_folder.folders.append( subfolder )
+ if repository_dependency_errors:
+ folder_id += 1
+ subfolder = Folder( id=folder_id,
+ key='repository_dependency_errors',
+ label='Repository dependencies',
+ parent=installation_error_base_folder )
+ repository_error_id = 0
+ for repository_error_dict in repository_dependency_errors:
+ repository_error_id += 1
+ repository_installation_error = RepositoryInstallationError( id=repository_error_id,
+ tool_shed=repository_error_dict.get( 'tool_shed', '' ),
+ name=repository_error_dict.get( 'name', '' ),
+ owner=repository_error_dict.get( 'owner', '' ),
+ changeset_revision=repository_error_dict.get( 'changeset_revision', '' ),
+ error_message=repository_error_dict.get( 'error_message', '' ) )
+ subfolder.repository_installation_errors.append( repository_installation_error )
+ installation_error_base_folder.folders.append( subfolder )
+ if tool_dependency_errors:
+ folder_id += 1
+ subfolder = Folder( id=folder_id,
+ key='tool_dependency_errors',
+ label='Tool dependencies',
+ parent=installation_error_base_folder )
+ tool_dependency_error_id = 0
+ for tool_dependency_error_dict in tool_dependency_errors:
+ tool_dependency_error_id += 1
+ tool_dependency_installation_error = ToolDependencyInstallationError( id=tool_dependency_error_id,
+ type=tool_dependency_error_dict.get( 'type', '' ),
+ name=tool_dependency_error_dict.get( 'name', '' ),
+ version=tool_dependency_error_dict.get( 'version', '' ),
+ error_message=tool_dependency_error_dict.get( 'error_message', '' ) )
+ subfolder.tool_dependency_installation_errors.append( tool_dependency_installation_error )
+ installation_error_base_folder.folders.append( subfolder )
+ containing_folder.installation_errors.append( installation_error_base_folder )
+ #containing_folder.folders.append( containing_folder )
+ if multiple_tool_test_results_dicts:
+ tool_test_results_root_folder.folders.append( containing_folder )
else:
tool_test_results_root_folder = None
return folder_id, tool_test_results_root_folder
@@ -1247,13 +1255,13 @@
workflows_root_folder = None
return folder_id, workflows_root_folder
-def can_display_tool_test_results( tool_test_results_dict, exclude=None ):
+def can_display_tool_test_results( tool_test_results_dicts, exclude=None ):
# Only create and populate the tool_test_results container if there are actual tool test results to display.
if exclude is None:
exclude = []
if 'tool_test_results' in exclude:
return False
- if tool_test_results_dict:
+ for tool_test_results_dict in tool_test_results_dicts:
# We check for more than a single entry in the tool_test_results dictionary because it may have
# only the "test_environment" entry, but we want at least 1 of "passed_tests", "failed_tests",
# "installation_errors", "missing_test_components" "skipped_tests", "not_tested" or any other
@@ -1288,7 +1296,7 @@
if key_is_current_repositorys_key( repository_name, repository_owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td, key ):
label = 'Repository dependencies'
else:
- if asbool( prior_installation_required ):
+ if galaxy.util.asbool( prior_installation_required ):
prior_installation_required_str = " <i>(prior install required)</i>"
else:
prior_installation_required_str = ""
@@ -1432,8 +1440,8 @@
repository_name=repository_name,
repository_owner=repository_owner,
changeset_revision=changeset_revision,
- prior_installation_required=asbool( prior_installation_required ),
- only_if_compiling_contained_td=asbool( only_if_compiling_contained_td ),
+ prior_installation_required=galaxy.util.asbool( prior_installation_required ),
+ only_if_compiling_contained_td=galaxy.util.asbool( only_if_compiling_contained_td ),
installation_status=installation_status,
tool_shed_repository_id=tool_shed_repository_id )
# Insert the repository_dependency into the folder.
diff -r ad38b77e96fafc96060f9ee8031094f4a15df278 -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -58,7 +58,6 @@
from mercurial import __version__
from nose.plugins import Plugin
from paste import httpserver
-from time import strftime
from tool_shed.util import tool_dependency_util
from tool_shed.util.xml_util import parse_xml
@@ -80,6 +79,12 @@
default_galaxy_test_host = '127.0.0.1'
default_galaxy_master_api_key = None
+# This script can be run in such a way that no Tool Shed database records should be changed.
+if '-info_only' in sys.argv or 'GALAXY_INSTALL_TEST_INFO_ONLY' in os.environ:
+ can_update_tool_shed = False
+else:
+ can_update_tool_shed = True
+
# Should this serve static resources (scripts, images, styles, etc.)?
STATIC_ENABLED = True
@@ -157,7 +162,7 @@
galaxy_tool_shed_url = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_URL', None )
tool_shed_api_key = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY', None )
exclude_list_file = os.environ.get( 'GALAXY_INSTALL_TEST_EXCLUDE_REPOSITORIES', 'install_test_exclude.xml' )
-
+
if 'GALAXY_INSTALL_TEST_SECRET' not in os.environ:
galaxy_encode_secret = 'changethisinproductiontoo'
os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ] = galaxy_encode_secret
@@ -325,16 +330,16 @@
missing_tool_dependencies.extend( get_missing_tool_dependencies( repository_dependency ) )
return missing_tool_dependencies
-def get_repository_info_from_api( url, repository_dict ):
+def get_repository_dict( url, repository_dict ):
error_message = ''
parts = [ 'api', 'repositories', repository_dict[ 'repository_id' ] ]
api_url = get_api_url( base=url, parts=parts )
extended_dict, error_message = json_from_url( api_url )
if error_message:
return None, error_message
- latest_changeset_revision, error_message = get_latest_downloadable_changeset_revision( url,
- extended_dict[ 'name' ],
- extended_dict[ 'owner' ] )
+ name = str( extended_dict[ 'name' ] )
+ owner = str( extended_dict[ 'owner' ] )
+ latest_changeset_revision, error_message = get_latest_downloadable_changeset_revision( url, name, owner )
if error_message:
return None, error_message
extended_dict[ 'latest_revision' ] = str( latest_changeset_revision )
@@ -361,8 +366,9 @@
repository_dicts = []
params = urllib.urlencode( dict( do_not_test='false',
downloadable='true',
+ includes_tools='true',
malicious='false',
- includes_tools='true',
+ missing_test_components='false',
skip_tool_test='false' ) )
api_url = get_api_url( base=tool_shed_url, parts=[ 'repository_revisions' ], params=params )
baseline_repository_dicts, error_message = json_from_url( api_url )
@@ -372,7 +378,7 @@
for baseline_repository_dict in baseline_repository_dicts:
# We need to get some details from the tool shed API, such as repository name and owner, to pass on to the
# module that will generate the install methods.
- repository_dict, error_message = get_repository_info_from_api( galaxy_tool_shed_url, baseline_repository_dict )
+ repository_dict, error_message = get_repository_dict( galaxy_tool_shed_url, baseline_repository_dict )
if error_message:
log.debug( 'Error getting additional details about repository %s from the API: %s' % ( str( name ), error_message ) )
else:
@@ -447,16 +453,18 @@
tool_id = parts[ -2 ]
return tool_id, tool_version
-def get_tool_test_results_dict( tool_shed_url, encoded_repository_metadata_id ):
+def get_tool_test_results_dicts( tool_shed_url, encoded_repository_metadata_id ):
+ """
+ Return the list of dictionaries contained in the Tool Shed's repository_metadata.tool_test_results
+ column via the Tool Shed API.
+ """
error_message = ''
api_path = [ 'api', 'repository_revisions', encoded_repository_metadata_id ]
api_url = get_api_url( base=tool_shed_url, parts=api_path )
repository_metadata, error_message = json_from_url( api_url )
if error_message:
return None, error_message
- tool_test_results = repository_metadata.get( 'tool_test_results', {} )
- if tool_test_results is None:
- return None, error_message
+ tool_test_results = repository_metadata.get( 'tool_test_results', [] )
return tool_test_results, error_message
def get_webapp_global_conf():
@@ -466,7 +474,8 @@
global_conf.update( get_static_settings() )
return global_conf
-def handle_missing_dependencies( app, repository, missing_tool_dependencies, repository_dict, tool_test_results_dict, results_dict ):
+def handle_missing_dependencies( app, repository, missing_tool_dependencies, repository_dict,
+ tool_test_results_dicts, tool_test_results_dict, results_dict ):
"""Handle missing repository or tool dependencies for an installed repository."""
# If a tool dependency fails to install correctly, this should be considered an installation error,
# and functional tests should be skipped, since the tool dependency needs to be correctly installed
@@ -494,7 +503,8 @@
params = dict( tools_functionally_correct=False,
do_not_test=False,
test_install_error=True )
- register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
+        # TODO: do something useful with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
# Since this repository is missing components, we do not want to test it, so deactivate it or uninstall it.
# The deactivate flag is set to True if the environment variable GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES
# is set to 'true'.
@@ -608,6 +618,13 @@
name = str( repository_dict[ 'name' ] )
owner = str( repository_dict[ 'owner' ] )
changeset_revision = str( repository_dict[ 'changeset_revision' ] )
+ # Populate the tool_test_results_dict.
+ tool_test_results_dicts, error_message = get_tool_test_results_dicts( galaxy_tool_shed_url, encoded_repository_metadata_id )
+ # The preparation script ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered
+ # information in the 'test_environment' and possibly the 'missing_test_components' entries of the first
+ # tool_test_results_dict in the list of tool_test_results_dicts. We need to be careful to not lose this
+ # information.
+ tool_test_results_dict = tool_test_results_dicts.pop( 0 )
# See if this repository should be skipped for any reason.
skip_this_repository = False
skip_reason = None
@@ -622,21 +639,20 @@
tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
params = dict( tools_functionally_correct=False,
do_not_test=False )
- register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
+            # TODO: do something useful with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
log.debug( "Not testing revision %s of repository %s owned by %s." % ( changeset_revision, name, owner ) )
else:
- # Populate the tool_test_results_dict.
- tool_test_results_dict, error_message = get_tool_test_results_dict( galaxy_tool_shed_url, encoded_repository_metadata_id )
if error_message:
log.debug( error_message )
else:
- # The preparation script ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered
- # information in the 'missing_test_components' entry of the tool_test_results_dict dictionary for repositories
- # that are missing test components. We need to be careful to not lose this information. For all other repositories,
- # no changes will have been made to this dictionary by the preparation script, and tool_test_results_dict will be None.
- # Initialize the tool_test_results_dict dictionary with the information about the current test environment.
test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
test_environment_dict = get_test_environment( test_environment_dict )
+ # Add the current time as the approximate time that this test run occurs. A similar value will also be
+ # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
+ # may be configured to store multiple test run results, so each must be associated with a time stamp.
+ now = time.strftime( "%Y-%m-%d %H:%M:%S" )
+ test_environment_dict[ 'time_tested' ] = now
test_environment_dict[ 'galaxy_database_version' ] = get_database_version( app )
test_environment_dict[ 'galaxy_revision' ] = get_repository_current_revision( os.getcwd() )
tool_test_results_dict[ 'test_environment' ] = test_environment_dict
@@ -662,8 +678,8 @@
params = dict( tools_functionally_correct=False,
test_install_error=True,
do_not_test=False )
-
- register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
+                            # TODO: do something useful with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
try:
if deactivate:
# We are deactivating this repository and all of its repository dependencies.
@@ -697,6 +713,7 @@
repository,
missing_tool_dependencies,
repository_dict,
+ tool_test_results_dicts,
tool_test_results_dict,
results_dict )
else:
@@ -705,7 +722,12 @@
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
log.debug( 'Saved generated shed_tools_dict to %s\nContents: %s' % ( str( galaxy_shed_tools_dict ), str( shed_tools_dict ) ) )
try:
- results_dict = test_repository_tools( app, repository, repository_dict, tool_test_results_dict, results_dict )
+ results_dict = test_repository_tools( app,
+ repository,
+ repository_dict,
+ tool_test_results_dicts,
+ tool_test_results_dict,
+ results_dict )
except Exception, e:
exception_message = 'Error executing tests for repository %s: %s' % ( name, str( e ) )
log.exception( exception_message )
@@ -714,7 +736,12 @@
params = dict( tools_functionally_correct=False,
do_not_test=False,
test_install_error=False )
- register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
+                # TODO: do something useful with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url,
+ tool_test_results_dicts,
+ tool_test_results_dict,
+ repository_dict,
+ params )
results_dict[ 'repositories_failed' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
total_repositories_tested += 1
results_dict[ 'total_repositories_tested' ] = total_repositories_tested
@@ -937,7 +964,7 @@
tool_data_path=additional_tool_data_path,
shed_tool_data_table_config=None,
persist=False )
- now = strftime( "%Y-%m-%d %H:%M:%S" )
+ now = time.strftime( "%Y-%m-%d %H:%M:%S" )
print "####################################################################################"
print "# %s - running repository installation and testing script." % now
print "####################################################################################"
@@ -949,12 +976,12 @@
repositories_passed = results_dict[ 'repositories_passed' ]
repositories_failed = results_dict[ 'repositories_failed' ]
repositories_failed_install = results_dict[ 'repositories_failed_install' ]
- now = strftime( "%Y-%m-%d %H:%M:%S" )
+ now = time.strftime( "%Y-%m-%d %H:%M:%S" )
print "####################################################################################"
print "# %s - repository installation and testing script completed." % now
print "# Repository revisions tested: %s" % str( total_repositories_tested )
- if '-info_only' in sys.argv:
- print "# -info_only set, not updating the tool shed."
+ if not can_update_tool_shed:
+ print "# This run will not update the Tool Shed database."
if total_repositories_tested > 0:
if repositories_passed:
print '# ----------------------------------------------------------------------------------'
@@ -1048,26 +1075,26 @@
log.debug( 'Repository %s owned by %s, all revisions.' % ( str( name ), str( owner ) ) )
return exclude_list
-def register_test_result( url, test_results_dict, repository_dict, params ):
+def register_test_result( url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params ):
"""
    Update the repository metadata tool_test_results and appropriate flags using the Tool Shed API. This method
    updates tool_test_results with the relevant data, sets the do_not_test and tools_functionally_correct flags
to the appropriate values and updates the time_last_tested field to the value of the received time_tested.
"""
- if '-info_only' in sys.argv or 'GALAXY_INSTALL_TEST_INFO_ONLY' in os.environ:
- return {}
- else:
+ if can_update_tool_shed:
metadata_revision_id = repository_dict.get( 'id', None )
if metadata_revision_id is not None:
+ tool_test_results_dicts.insert( 0, tool_test_results_dict )
+ params[ 'tool_test_results' ] = tool_test_results_dicts
# Set the time_last_tested entry so that the repository_metadata.time_last_tested will be set in the tool shed.
- time_tested = datetime.utcnow()
- test_results_dict[ 'time_last_tested' ] = time_ago( time_tested )
- params[ 'tool_test_results' ] = test_results_dict
+ params[ 'time_last_tested' ] = 'This entry will result in this value being set via the Tool Shed API.'
url = '%s' % ( suc.url_join( galaxy_tool_shed_url,'api', 'repository_revisions', str( metadata_revision_id ) ) )
try:
return update( tool_shed_api_key, url, params, return_formatted=False )
except Exception, e:
log.exception( 'Error attempting to register test results: %s' % str( e ) )
+ return {}
+ else:
return {}
def remove_generated_tests( app ):
@@ -1138,7 +1165,7 @@
for repository in repositories_by_owner[ owner ]:
print "# %s owned by %s, changeset revision %s" % ( repository[ 'name' ], repository[ 'owner' ], repository[ 'changeset_revision' ] )
-def test_repository_tools( app, repository, repository_dict, tool_test_results_dict, results_dict ):
+def test_repository_tools( app, repository, repository_dict, tool_test_results_dicts, tool_test_results_dict, results_dict ):
"""Test tools contained in the received repository."""
name = str( repository.name )
owner = str( repository.owner )
@@ -1175,7 +1202,8 @@
params = dict( tools_functionally_correct=True,
do_not_test=False,
test_install_error=False )
- register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
+                # TODO: do something useful with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
log.debug( 'Revision %s of repository %s installed and passed functional tests.' % ( str( changeset_revision ), str( name ) ) )
else:
tool_test_results_dict[ 'failed_tests' ].append( extract_log_data( result, from_tool_test=True ) )
@@ -1184,7 +1212,8 @@
params = dict( tools_functionally_correct=False,
test_install_error=False,
do_not_test=str( set_do_not_test ) )
- register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
+                # TODO: do something useful with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
log.debug( 'Revision %s of repository %s installed successfully but did not pass functional tests.' % \
( str( changeset_revision ), str( name ) ) )
# Run the uninstall method. This removes tool functional test methods from the test_toolbox module and uninstalls the
diff -r ad38b77e96fafc96060f9ee8031094f4a15df278 -r 11028b31d3f024ef920c3168d3f752bc13ba3ad8 tool_shed_wsgi.ini.sample
--- a/tool_shed_wsgi.ini.sample
+++ b/tool_shed_wsgi.ini.sample
@@ -74,6 +74,9 @@
# path to sendmail
sendmail_path = /usr/sbin/sendmail
+# Number of saved tool test results produced by the install and test framework for each repository.
+#num_tool_test_results_saved = 5
+
# For use by email messages sent from the tool shed
#smtp_server = smtp.your_tool_shed_server
#email_from = your_tool_shed_email@server
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/ad38b77e96fa/
Changeset: ad38b77e96fa
User: guerler
Date: 2013-11-27 20:06:06
Summary: Pack scripts
Affected #: 1 file
diff -r 9be62566dbb6f0a0bfc20f6a8027832399838861 -r ad38b77e96fafc96060f9ee8031094f4a15df278 static/scripts/packed/galaxy.menu.js
--- a/static/scripts/packed/galaxy.menu.js
+++ b/static/scripts/packed/galaxy.menu.js
@@ -1,1 +1,1 @@
-define(["galaxy.masthead"],function(b){var a=Backbone.Model.extend({options:null,masthead:null,initialize:function(c){this.options=c.config;this.masthead=c.masthead;this.create()},create:function(){var d=new b.GalaxyMastheadTab({title:"Analyze Data",content:"root/index"});this.masthead.append(d);var c=new b.GalaxyMastheadTab({title:"Workflow",content:"workflow"});this.masthead.append(c);var g=new b.GalaxyMastheadTab({title:"Shared Data",content:"library/index"});g.addMenu({title:"Data Libraries",content:"library/index",divider:true});g.addMenu({title:"Published Histories",content:"history/list_published"});g.addMenu({title:"Published Workflows",content:"workflow/list_published"});g.addMenu({title:"Published Visualizations",content:"visualization/list_published"});g.addMenu({title:"Published Pages",content:"page/list_published"});this.masthead.append(g);if(this.options.user.requests){var h=new b.GalaxyMastheadTab({title:"Lab"});h.addMenu({title:"Sequencing Requests",content:"requests/index"});h.addMenu({title:"Find Samples",content:"requests/find_samples_index"});h.addMenu({title:"Help",content:this.options.lims_doc_url});this.masthead.append(h)}var k=new b.GalaxyMastheadTab({title:"Visualization",content:"visualization/list"});k.addMenu({title:"New Track Browser",content:"visualization/trackster",target:"_frame"});k.addMenu({title:"Saved Visualizations",content:"visualization/list",target:"_frame"});this.masthead.append(k);if(this.options.enable_cloud_launch){var e=new b.GalaxyMastheadTab({title:"Cloud",content:"cloudlaunch/index"});e.addMenu({title:"New Cloud Cluster",content:"cloudlaunch/index"});this.masthead.append(e)}if(this.options.is_admin_user){var f=new b.GalaxyMastheadTab({title:"Admin",content:"admin/index",extra_class:"admin-only"});this.masthead.append(f)}var j=new b.GalaxyMastheadTab({title:"Help"});if(this.options.biostar_url){j.addMenu({title:"Galaxy Q&A Site",content:this.options.biostar_url_redirect,target:"_blank"});j.addMenu({title:"Ask a 
question",content:"biostar/biostar_question_redirect",target:"_blank"})}j.addMenu({title:"Support",content:this.options.support_url,target:"_blank"});j.addMenu({title:"Search",content:this.options.search_url,target:"_blank"});j.addMenu({title:"Mailing Lists",content:this.options.mailing_lists,target:"_blank"});j.addMenu({title:"Videos",content:this.options.screencasts_url,target:"_blank"});j.addMenu({title:"Wiki",content:this.options.wiki_url,target:"_blank"});j.addMenu({title:"How to Cite Galaxy",content:this.options.citation_url,target:"_blank"});if(!this.options.terms_url){j.addMenu({title:"Terms and Conditions",content:this.options.terms_url,target:"_blank"})}this.masthead.append(j);if(!this.options.user.valid){var i=new b.GalaxyMastheadTab({title:"User",extra_class:"loggedout-only"});if(this.options.allow_user_creation){i.addMenu({title:"Login",content:"user/login",target:"galaxy_main"})}if(this.options.allow_user_creation){i.addMenu({title:"Register",content:"user/create",target:"galaxy_main"})}this.masthead.append(i)}else{var i=new b.GalaxyMastheadTab({title:"User",extra_class:"loggedin-only"});i.addMenu({title:"Logged in as "+this.options.user.email});if(this.options.use_remote_user&&this.options.remote_user_logout_href){i.addMenu({title:"Logout",content:this.options.remote_user_logout_href,target:"_top"})}else{i.addMenu({title:"Preferences",content:"user?cntrller=user",target:"galaxy_main"});i.addMenu({title:"Custom Builds",content:"user/dbkeys",target:"galaxy_main"});i.addMenu({title:"Logout",content:"user/logout",target:"_top",divider:true})}i.addMenu({title:"Saved Histories",content:"history/list",target:"galaxy_main"});i.addMenu({title:"Saved Datasets",content:"dataset/list",target:"galaxy_main"});i.addMenu({title:"Saved Pages",content:"page/list",target:"_top"});i.addMenu({title:"API Keys",content:"user/api_keys?cntrller=user",target:"galaxy_main"});if(this.options.use_remote_user){i.addMenu({title:"Public 
Name",content:"user/edit_username?cntrller=user",target:"galaxy_main"})}this.masthead.append(i)}}});return{GalaxyMenu:a}});
\ No newline at end of file
+define(["galaxy.masthead"],function(b){var a=Backbone.Model.extend({options:null,masthead:null,initialize:function(c){this.options=c.config;this.masthead=c.masthead;this.create()},create:function(){var d=new b.GalaxyMastheadTab({title:"Analyze Data",content:"root/index"});this.masthead.append(d);var c=new b.GalaxyMastheadTab({title:"Workflow",content:"workflow"});this.masthead.append(c);var g=new b.GalaxyMastheadTab({title:"Shared Data",content:"library/index"});g.addMenu({title:"Data Libraries",content:"library/index",divider:true});g.addMenu({title:"Published Histories",content:"history/list_published"});g.addMenu({title:"Published Workflows",content:"workflow/list_published"});g.addMenu({title:"Published Visualizations",content:"visualization/list_published"});g.addMenu({title:"Published Pages",content:"page/list_published"});this.masthead.append(g);if(this.options.user.requests){var h=new b.GalaxyMastheadTab({title:"Lab"});h.addMenu({title:"Sequencing Requests",content:"requests/index"});h.addMenu({title:"Find Samples",content:"requests/find_samples_index"});h.addMenu({title:"Help",content:this.options.lims_doc_url});this.masthead.append(h)}var k=new b.GalaxyMastheadTab({title:"Visualization",content:"visualization/list"});k.addMenu({title:"New Track Browser",content:"visualization/trackster",target:"_frame"});k.addMenu({title:"Saved Visualizations",content:"visualization/list",target:"_frame"});this.masthead.append(k);if(this.options.enable_cloud_launch){var e=new b.GalaxyMastheadTab({title:"Cloud",content:"cloudlaunch/index"});e.addMenu({title:"New Cloud Cluster",content:"cloudlaunch/index"});this.masthead.append(e)}if(this.options.is_admin_user){var f=new b.GalaxyMastheadTab({title:"Admin",content:"admin/index",extra_class:"admin-only"});this.masthead.append(f)}var j=new b.GalaxyMastheadTab({title:"Help"});if(this.options.biostar_url){j.addMenu({title:"Galaxy Q&A Site",content:this.options.biostar_url_redirect,target:"_blank"});j.addMenu({title:"Ask a 
question",content:"biostar/biostar_question_redirect",target:"_blank"})}j.addMenu({title:"Support",content:this.options.support_url,target:"_blank"});j.addMenu({title:"Search",content:this.options.search_url,target:"_blank"});j.addMenu({title:"Mailing Lists",content:this.options.mailing_lists,target:"_blank"});j.addMenu({title:"Videos",content:this.options.screencasts_url,target:"_blank"});j.addMenu({title:"Wiki",content:this.options.wiki_url,target:"_blank"});j.addMenu({title:"How to Cite Galaxy",content:this.options.citation_url,target:"_blank"});if(!this.options.terms_url){j.addMenu({title:"Terms and Conditions",content:this.options.terms_url,target:"_blank"})}this.masthead.append(j);if(!this.options.user.valid){var i=new b.GalaxyMastheadTab({title:"User",extra_class:"loggedout-only"});i.addMenu({title:"Login",content:"user/login",target:"galaxy_main"});if(this.options.allow_user_creation){i.addMenu({title:"Register",content:"user/create",target:"galaxy_main"})}this.masthead.append(i)}else{var i=new b.GalaxyMastheadTab({title:"User",extra_class:"loggedin-only"});i.addMenu({title:"Logged in as "+this.options.user.email});if(this.options.use_remote_user&&this.options.remote_user_logout_href){i.addMenu({title:"Logout",content:this.options.remote_user_logout_href,target:"_top"})}else{i.addMenu({title:"Preferences",content:"user?cntrller=user",target:"galaxy_main"});i.addMenu({title:"Custom Builds",content:"user/dbkeys",target:"galaxy_main"});i.addMenu({title:"Logout",content:"user/logout",target:"_top",divider:true})}i.addMenu({title:"Saved Histories",content:"history/list",target:"galaxy_main"});i.addMenu({title:"Saved Datasets",content:"dataset/list",target:"galaxy_main"});i.addMenu({title:"Saved Pages",content:"page/list",target:"_top"});i.addMenu({title:"API Keys",content:"user/api_keys?cntrller=user",target:"galaxy_main"});if(this.options.use_remote_user){i.addMenu({title:"Public 
Name",content:"user/edit_username?cntrller=user",target:"galaxy_main"})}this.masthead.append(i)}}});return{GalaxyMenu:a}});
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
27 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/9be62566dbb6/
Changeset: 9be62566dbb6
User: guerler
Date: 2013-11-27 19:44:25
Summary: Masthead: Fix login
Affected #: 1 file
diff -r 97940f59772dd8567c68cb8f895d00d99487fdb9 -r 9be62566dbb6f0a0bfc20f6a8027832399838861 static/scripts/galaxy.menu.js
--- a/static/scripts/galaxy.menu.js
+++ b/static/scripts/galaxy.menu.js
@@ -223,14 +223,11 @@
});
// login
- if (this.options.allow_user_creation)
- {
- tab_user.addMenu({
- title : "Login",
- content : "user/login",
- target : "galaxy_main"
- });
- }
+ tab_user.addMenu({
+ title : "Login",
+ content : "user/login",
+ target : "galaxy_main"
+ });
// register
if (this.options.allow_user_creation)
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: jmchilton: Mimic Galaxy functional test handling of GALAXY_TEST_DB_TEMPLATE for tool shed functional tests.
by commits-noreply@bitbucket.org 27 Nov '13
by commits-noreply@bitbucket.org 27 Nov '13
27 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/97940f59772d/
Changeset: 97940f59772d
User: jmchilton
Date: 2013-11-27 15:37:30
Summary: Mimic Galaxy functional test handling of GALAXY_TEST_DB_TEMPLATE for tool shed functional tests.
See 83a95ea606d9fcd15f7554b71804625e203eb1ed for full details (essentially GALAXY_TEST_DB_TEMPLATE can be a sqlite file path or URL — it will be copied over so the original is left in a pristine state and auto-upgraded as needed).
Affected #: 1 file
diff -r 0712180dc77d6df0f19870046562cc34317eb3cc -r 97940f59772dd8567c68cb8f895d00d99487fdb9 test/tool_shed/functional_tests.py
--- a/test/tool_shed/functional_tests.py
+++ b/test/tool_shed/functional_tests.py
@@ -32,6 +32,7 @@
import twill, unittest, time
import sys, threading, random
import httplib, socket
+import urllib
from paste import httpserver
# This is for the tool shed application.
import galaxy.webapps.tool_shed.app
@@ -186,10 +187,20 @@
toolshed_database_connection = os.environ[ 'TOOL_SHED_TEST_DBURI' ]
else:
toolshed_database_connection = 'sqlite:///' + os.path.join( shed_db_path, 'community_test.sqlite' )
+ galaxy_database_auto_migrate = False
if 'GALAXY_TEST_DBURI' in os.environ:
galaxy_database_connection = os.environ[ 'GALAXY_TEST_DBURI' ]
else:
- galaxy_database_connection = 'sqlite:///' + os.path.join( galaxy_db_path, 'universe_test.sqlite' )
+ db_path = os.path.join( galaxy_db_path, 'universe.sqlite' )
+ if 'GALAXY_TEST_DB_TEMPLATE' in os.environ:
+ # Middle ground between recreating a completely new
+ # database and pointing at existing database with
+ # GALAXY_TEST_DBURI. The former requires a lot of setup
+ # time, the latter results in test failures in certain
+ # cases (namely tool shed tests expecting clean database).
+ __copy_database_template(os.environ['GALAXY_TEST_DB_TEMPLATE'], db_path)
+ galaxy_database_auto_migrate = True
+ galaxy_database_connection = 'sqlite:///%s' % db_path
tool_shed_global_conf = get_webapp_global_conf()
tool_shed_global_conf[ '__file__' ] = 'tool_shed_wsgi.ini.sample'
kwargs = dict( admin_users = 'test(a)bx.psu.edu',
@@ -311,6 +322,7 @@
admin_users = 'test(a)bx.psu.edu',
allow_library_path_paste = True,
database_connection = galaxy_database_connection,
+ database_auto_migrate = galaxy_database_auto_migrate,
datatype_converters_config_file = "datatype_converters_conf.xml.sample",
enable_tool_shed_check = True,
file_path = galaxy_file_path,
@@ -449,6 +461,22 @@
else:
return 1
+
+def __copy_database_template( source, db_path ):
+ """
+ Copy a 'clean' sqlite template database (from file or URL) to specified
+ database path.
+ """
+ os.makedirs( os.path.dirname( db_path ) )
+ if os.path.exists( source ):
+ shutil.copy( source, db_path )
+ assert os.path.exists( db_path )
+ elif source.startswith("http"):
+ urllib.urlretrieve( source, db_path )
+ else:
+ raise Exception( "Failed to copy database template from source %s" % source )
+
+
if __name__ == "__main__":
try:
sys.exit( main() )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: carlfeberhard: UI: temporary workaround for popupmenus v. XSS security
by commits-noreply@bitbucket.org 26 Nov '13
by commits-noreply@bitbucket.org 26 Nov '13
26 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/0712180dc77d/
Changeset: 0712180dc77d
User: carlfeberhard
Date: 2013-11-27 00:08:42
Summary: UI: temporary workaround for popupmenus v. XSS security
Affected #: 2 files
diff -r 451af16e671a1be729d0cae6e7ecb1c838af48bd -r 0712180dc77d6df0f19870046562cc34317eb3cc static/scripts/mvc/ui.js
--- a/static/scripts/mvc/ui.js
+++ b/static/scripts/mvc/ui.js
@@ -297,18 +297,26 @@
function closePopup( event ){
$( document ).off( 'click.close_popup' );
if( window.parent !== window ){
- $( window.parent.document ).off( "click.close_popup" );
+ try {
+ $( window.parent.document ).off( "click.close_popup" );
+ } catch( err ){}
} else {
- $( 'iframe#galaxy_main' ).contents().off( "click.close_popup" );
+ try {
+ $( 'iframe#galaxy_main' ).contents().off( "click.close_popup" );
+ } catch( err ){}
}
menu.remove();
}
$( 'html' ).one( "click.close_popup", closePopup );
if( window.parent !== window ){
- $( window.parent.document ).find( 'html' ).one( "click.close_popup", closePopup );
+ try {
+ $( window.parent.document ).find( 'html' ).one( "click.close_popup", closePopup );
+ } catch( err ){}
} else {
- $( 'iframe#galaxy_main' ).contents().one( "click.close_popup", closePopup );
+ try {
+ $( 'iframe#galaxy_main' ).contents().one( "click.close_popup", closePopup );
+ } catch( err ){}
}
},
diff -r 451af16e671a1be729d0cae6e7ecb1c838af48bd -r 0712180dc77d6df0f19870046562cc34317eb3cc static/scripts/packed/mvc/ui.js
--- a/static/scripts/packed/mvc/ui.js
+++ b/static/scripts/packed/mvc/ui.js
@@ -1,1 +1,1 @@
-var IconButton=Backbone.Model.extend({defaults:{title:"",icon_class:"",on_click:null,menu_options:null,is_menu_button:true,id:null,href:null,target:null,enabled:true,visible:true,tooltip_config:{}}});var IconButtonView=Backbone.View.extend({initialize:function(){this.model.attributes.tooltip_config={placement:"bottom"};this.model.bind("change",this.render,this)},render:function(){this.$el.tooltip("hide");var a=this.template(this.model.toJSON());a.tooltip(this.model.get("tooltip_config"));this.$el.replaceWith(a);this.setElement(a);return this},events:{click:"click"},click:function(a){if(_.isFunction(this.model.get("on_click"))){this.model.get("on_click")(a);return false}return true},template:function(b){var a='title="'+b.title+'" class="icon-button';if(b.is_menu_button){a+=" menu-button"}a+=" "+b.icon_class;if(!b.enabled){a+="_disabled"}a+='"';if(b.id){a+=' id="'+b.id+'"'}a+=' href="'+b.href+'"';if(b.target){a+=' target="'+b.target+'"'}if(!b.visible){a+=' style="display: none;"'}if(b.enabled){a="<a "+a+"/>"}else{a="<span "+a+"/>"}return $(a)}});var IconButtonCollection=Backbone.Collection.extend({model:IconButton});var IconButtonMenuView=Backbone.View.extend({tagName:"div",initialize:function(){this.render()},render:function(){var a=this;this.collection.each(function(d){var b=$("<a/>").attr("href","javascript:void(0)").attr("title",d.attributes.title).addClass("icon-button menu-button").addClass(d.attributes.icon_class).appendTo(a.$el).click(d.attributes.on_click);if(d.attributes.tooltip_config){b.tooltip(d.attributes.tooltip_config)}var c=d.get("options");if(c){make_popupmenu(b,c)}});return this}});var create_icon_buttons_menu=function(b,a){if(!a){a={}}var c=new IconButtonCollection(_.map(b,function(d){return new IconButton(_.extend(d,a))}));return new IconButtonMenuView({collection:c})};var Grid=Backbone.Collection.extend({});var GridView=Backbone.View.extend({});var 
PopupMenu=Backbone.View.extend({initialize:function(b,a){this.$button=b||$("<div/>");this.options=a||[];var c=this;this.$button.click(function(d){$(".popmenu-wrapper").remove();c._renderAndShow(d);return false})},_renderAndShow:function(a){this.render();this.$el.appendTo("body").css(this._getShownPosition(a)).show();this._setUpCloseBehavior()},render:function(){this.$el.addClass("popmenu-wrapper").hide().css({position:"absolute"}).html(this.template(this.$button.attr("id"),this.options));if(this.options.length){var a=this;this.$el.find("li").each(function(c,b){var d=a.options[c];if(d.func){$(this).children("a.popupmenu-option").click(function(e){d.func.call(a,e,d)})}})}return this},template:function(b,a){return['<ul id="',b,'-menu" class="dropdown-menu">',this._templateOptions(a),"</ul>"].join("")},_templateOptions:function(a){if(!a.length){return"<li>(no options)</li>"}return _.map(a,function(d){if(d.divider){return'<li class="divider"></li>'}else{if(d.header){return['<li class="head"><a href="javascript:void(0);">',d.html,"</a></li>"].join("")}}var c=d.href||"javascript:void(0);",e=(d.target)?(' target="'+d.target+'"'):(""),b=(d.checked)?('<span class="fa fa-check"></span>'):("");return['<li><a class="popupmenu-option" href="',c,'"',e,">",b,d.html,"</a></li>"].join("")}).join("")},_getShownPosition:function(b){var c=this.$el.width();var a=b.pageX-c/2;a=Math.min(a,$(document).scrollLeft()+$(window).width()-c-5);a=Math.max(a,$(document).scrollLeft()+5);return{top:b.pageY,left:a}},_setUpCloseBehavior:function(){var b=this;function 
a(c){$(document).off("click.close_popup");if(window.parent!==window){$(window.parent.document).off("click.close_popup")}else{$("iframe#galaxy_main").contents().off("click.close_popup")}b.remove()}$("html").one("click.close_popup",a);if(window.parent!==window){$(window.parent.document).find("html").one("click.close_popup",a)}else{$("iframe#galaxy_main").contents().one("click.close_popup",a)}},addItem:function(b,a){a=(a>=0)?a:this.options.length;this.options.splice(a,0,b);return this},removeItem:function(a){if(a>=0){this.options.splice(a,1)}return this},findIndexByHtml:function(b){for(var a=0;a<this.options.length;a++){if(_.has(this.options[a],"html")&&(this.options[a].html===b)){return a}}return null},findItemByHtml:function(a){return this.options[(this.findIndexByHtml(a))]},toString:function(){return"PopupMenu"}});PopupMenu.make_popupmenu=function(b,c){var a=[];_.each(c,function(f,d){var e={html:d};if(f===null){e.header=true}else{if(jQuery.type(f)==="function"){e.func=f}}a.push(e)});return new PopupMenu($(b),a)};PopupMenu.convertLinksToOptions=function(c,a){c=$(c);a=a||"a";var b=[];c.find(a).each(function(g,e){var f={},d=$(g);f.html=d.text();if(d.attr("href")){var j=d.attr("href"),k=d.attr("target"),h=d.attr("confirm");f.func=function(){if((h)&&(!confirm(h))){return}switch(k){case"_parent":window.parent.location=j;break;case"_top":window.top.location=j;break;default:window.location=j}}}b.push(f)});return b};PopupMenu.fromExistingDom=function(d,c,a){d=$(d);c=$(c);var b=PopupMenu.convertLinksToOptions(c,a);c.remove();return new PopupMenu(d,b)};PopupMenu.make_popup_menus=function(c,b,d){c=c||document;b=b||"div[popupmenu]";d=d||function(e,f){return"#"+e.attr("popupmenu")};var a=[];$(c).find(b).each(function(){var e=$(this),f=$(c).find(d(e,c));a.push(PopupMenu.fromDom(f,e));f.addClass("popup")});return a};var 
faIconButton=function(a){a=a||{};a.tooltipConfig=a.tooltipConfig||{placement:"bottom"};a.classes=["icon-btn"].concat(a.classes||[]);if(a.disabled){a.classes.push("disabled")}var b=['<a class="',a.classes.join(" "),'"',((a.title)?(' title="'+a.title+'"'):("")),((a.target)?(' target="'+a.target+'"'):("")),' href="',((a.href)?(a.href):("javascript:void(0);")),'">','<span class="fa ',a.faIcon,'"></span>',"</a>"].join("");var c=$(b).tooltip(a.tooltipConfig);if(_.isFunction(a.onclick)){c.click(a.onclick)}return c};var searchInput=function(k){var a=27,h=13,i=$("<div/>"),b={initialVal:"",name:"search",placeholder:"search",classes:"",onclear:function(){},onsearch:function(l){},minSearchLen:0,escWillClear:true,oninit:function(){}};if(jQuery.type(k)==="object"){k=jQuery.extend(true,b,k)}function d(l){var m=$(this).parent().children("input");m.val("");m.trigger("clear:searchInput");k.onclear()}function j(m,l){$(this).trigger("search:searchInput",l);k.onsearch(l)}function c(){return['<input type="text" name="',k.name,'" placeholder="',k.placeholder,'" ','class="search-query ',k.classes,'" ',"/>"].join("")}function g(){return $(c()).css({width:"100%","padding-right":"24px"}).focus(function(l){$(this).select()}).keyup(function(m){if(m.which===a&&k.escWillClear){d.call(this,m)}else{var l=$(this).val();if((m.which===h)||(k.minSearchLen&&l.length>=k.minSearchLen)){j.call(this,m,l)}else{if(!l.length){d.call(this,m)}}}}).val(k.initialVal)}function f(){return'<span class="search-clear fa fa-times-circle"></span>'}function e(){return $(f()).css({position:"absolute",right:"15px","font-size":"1.4em","line-height":"23px",color:"grey"}).click(function(l){d.call(this,l)})}return i.append([g(),e()])};function LoadingIndicator(a,c){var b=this;c=jQuery.extend({cover:false},c||{});function d(){var e=['<div class="loading-indicator">','<div class="loading-indicator-text">','<span class="fa fa-spinner fa-spin fa-lg"></span>','<span 
class="loading-indicator-message">loading...</span>',"</div>","</div>"].join("\n");var g=$(e).hide().css(c.css||{position:"fixed"}),f=g.children(".loading-indicator-text");if(c.cover){g.css({"z-index":2,top:a.css("top"),bottom:a.css("bottom"),left:a.css("left"),right:a.css("right"),opacity:0.5,"background-color":"white","text-align":"center"});f=g.children(".loading-indicator-text").css({"margin-top":"20px"})}else{f=g.children(".loading-indicator-text").css({margin:"12px 0px 0px 10px",opacity:"0.85",color:"grey"});f.children(".loading-indicator-message").css({margin:"0px 8px 0px 0px","font-style":"italic"})}return g}b.show=function(f,e,g){f=f||"loading...";e=e||"fast";b.$indicator=d().insertBefore(a);b.message(f);b.$indicator.fadeIn(e,g);return b};b.message=function(e){b.$indicator.find("i").text(e)};b.hide=function(e,f){e=e||"fast";if(b.$indicator&&b.$indicator.size()){b.$indicator.fadeOut(e,function(){b.$indicator.remove();if(f){f()}})}else{if(f){f()}}return b};return b};
\ No newline at end of file
+var IconButton=Backbone.Model.extend({defaults:{title:"",icon_class:"",on_click:null,menu_options:null,is_menu_button:true,id:null,href:null,target:null,enabled:true,visible:true,tooltip_config:{}}});var IconButtonView=Backbone.View.extend({initialize:function(){this.model.attributes.tooltip_config={placement:"bottom"};this.model.bind("change",this.render,this)},render:function(){this.$el.tooltip("hide");var a=this.template(this.model.toJSON());a.tooltip(this.model.get("tooltip_config"));this.$el.replaceWith(a);this.setElement(a);return this},events:{click:"click"},click:function(a){if(_.isFunction(this.model.get("on_click"))){this.model.get("on_click")(a);return false}return true},template:function(b){var a='title="'+b.title+'" class="icon-button';if(b.is_menu_button){a+=" menu-button"}a+=" "+b.icon_class;if(!b.enabled){a+="_disabled"}a+='"';if(b.id){a+=' id="'+b.id+'"'}a+=' href="'+b.href+'"';if(b.target){a+=' target="'+b.target+'"'}if(!b.visible){a+=' style="display: none;"'}if(b.enabled){a="<a "+a+"/>"}else{a="<span "+a+"/>"}return $(a)}});var IconButtonCollection=Backbone.Collection.extend({model:IconButton});var IconButtonMenuView=Backbone.View.extend({tagName:"div",initialize:function(){this.render()},render:function(){var a=this;this.collection.each(function(d){var b=$("<a/>").attr("href","javascript:void(0)").attr("title",d.attributes.title).addClass("icon-button menu-button").addClass(d.attributes.icon_class).appendTo(a.$el).click(d.attributes.on_click);if(d.attributes.tooltip_config){b.tooltip(d.attributes.tooltip_config)}var c=d.get("options");if(c){make_popupmenu(b,c)}});return this}});var create_icon_buttons_menu=function(b,a){if(!a){a={}}var c=new IconButtonCollection(_.map(b,function(d){return new IconButton(_.extend(d,a))}));return new IconButtonMenuView({collection:c})};var Grid=Backbone.Collection.extend({});var GridView=Backbone.View.extend({});var 
PopupMenu=Backbone.View.extend({initialize:function(b,a){this.$button=b||$("<div/>");this.options=a||[];var c=this;this.$button.click(function(d){$(".popmenu-wrapper").remove();c._renderAndShow(d);return false})},_renderAndShow:function(a){this.render();this.$el.appendTo("body").css(this._getShownPosition(a)).show();this._setUpCloseBehavior()},render:function(){this.$el.addClass("popmenu-wrapper").hide().css({position:"absolute"}).html(this.template(this.$button.attr("id"),this.options));if(this.options.length){var a=this;this.$el.find("li").each(function(c,b){var d=a.options[c];if(d.func){$(this).children("a.popupmenu-option").click(function(e){d.func.call(a,e,d)})}})}return this},template:function(b,a){return['<ul id="',b,'-menu" class="dropdown-menu">',this._templateOptions(a),"</ul>"].join("")},_templateOptions:function(a){if(!a.length){return"<li>(no options)</li>"}return _.map(a,function(d){if(d.divider){return'<li class="divider"></li>'}else{if(d.header){return['<li class="head"><a href="javascript:void(0);">',d.html,"</a></li>"].join("")}}var c=d.href||"javascript:void(0);",e=(d.target)?(' target="'+d.target+'"'):(""),b=(d.checked)?('<span class="fa fa-check"></span>'):("");return['<li><a class="popupmenu-option" href="',c,'"',e,">",b,d.html,"</a></li>"].join("")}).join("")},_getShownPosition:function(b){var c=this.$el.width();var a=b.pageX-c/2;a=Math.min(a,$(document).scrollLeft()+$(window).width()-c-5);a=Math.max(a,$(document).scrollLeft()+5);return{top:b.pageY,left:a}},_setUpCloseBehavior:function(){var c=this;function 
a(e){$(document).off("click.close_popup");if(window.parent!==window){try{$(window.parent.document).off("click.close_popup")}catch(d){}}else{try{$("iframe#galaxy_main").contents().off("click.close_popup")}catch(d){}}c.remove()}$("html").one("click.close_popup",a);if(window.parent!==window){try{$(window.parent.document).find("html").one("click.close_popup",a)}catch(b){}}else{try{$("iframe#galaxy_main").contents().one("click.close_popup",a)}catch(b){}}},addItem:function(b,a){a=(a>=0)?a:this.options.length;this.options.splice(a,0,b);return this},removeItem:function(a){if(a>=0){this.options.splice(a,1)}return this},findIndexByHtml:function(b){for(var a=0;a<this.options.length;a++){if(_.has(this.options[a],"html")&&(this.options[a].html===b)){return a}}return null},findItemByHtml:function(a){return this.options[(this.findIndexByHtml(a))]},toString:function(){return"PopupMenu"}});PopupMenu.make_popupmenu=function(b,c){var a=[];_.each(c,function(f,d){var e={html:d};if(f===null){e.header=true}else{if(jQuery.type(f)==="function"){e.func=f}}a.push(e)});return new PopupMenu($(b),a)};PopupMenu.convertLinksToOptions=function(c,a){c=$(c);a=a||"a";var b=[];c.find(a).each(function(g,e){var f={},d=$(g);f.html=d.text();if(d.attr("href")){var j=d.attr("href"),k=d.attr("target"),h=d.attr("confirm");f.func=function(){if((h)&&(!confirm(h))){return}switch(k){case"_parent":window.parent.location=j;break;case"_top":window.top.location=j;break;default:window.location=j}}}b.push(f)});return b};PopupMenu.fromExistingDom=function(d,c,a){d=$(d);c=$(c);var b=PopupMenu.convertLinksToOptions(c,a);c.remove();return new PopupMenu(d,b)};PopupMenu.make_popup_menus=function(c,b,d){c=c||document;b=b||"div[popupmenu]";d=d||function(e,f){return"#"+e.attr("popupmenu")};var a=[];$(c).find(b).each(function(){var e=$(this),f=$(c).find(d(e,c));a.push(PopupMenu.fromDom(f,e));f.addClass("popup")});return a};var 
faIconButton=function(a){a=a||{};a.tooltipConfig=a.tooltipConfig||{placement:"bottom"};a.classes=["icon-btn"].concat(a.classes||[]);if(a.disabled){a.classes.push("disabled")}var b=['<a class="',a.classes.join(" "),'"',((a.title)?(' title="'+a.title+'"'):("")),((a.target)?(' target="'+a.target+'"'):("")),' href="',((a.href)?(a.href):("javascript:void(0);")),'">','<span class="fa ',a.faIcon,'"></span>',"</a>"].join("");var c=$(b).tooltip(a.tooltipConfig);if(_.isFunction(a.onclick)){c.click(a.onclick)}return c};var searchInput=function(k){var a=27,h=13,i=$("<div/>"),b={initialVal:"",name:"search",placeholder:"search",classes:"",onclear:function(){},onsearch:function(l){},minSearchLen:0,escWillClear:true,oninit:function(){}};if(jQuery.type(k)==="object"){k=jQuery.extend(true,b,k)}function d(l){var m=$(this).parent().children("input");m.val("");m.trigger("clear:searchInput");k.onclear()}function j(m,l){$(this).trigger("search:searchInput",l);k.onsearch(l)}function c(){return['<input type="text" name="',k.name,'" placeholder="',k.placeholder,'" ','class="search-query ',k.classes,'" ',"/>"].join("")}function g(){return $(c()).css({width:"100%","padding-right":"24px"}).focus(function(l){$(this).select()}).keyup(function(m){if(m.which===a&&k.escWillClear){d.call(this,m)}else{var l=$(this).val();if((m.which===h)||(k.minSearchLen&&l.length>=k.minSearchLen)){j.call(this,m,l)}else{if(!l.length){d.call(this,m)}}}}).val(k.initialVal)}function f(){return'<span class="search-clear fa fa-times-circle"></span>'}function e(){return $(f()).css({position:"absolute",right:"15px","font-size":"1.4em","line-height":"23px",color:"grey"}).click(function(l){d.call(this,l)})}return i.append([g(),e()])};function LoadingIndicator(a,c){var b=this;c=jQuery.extend({cover:false},c||{});function d(){var e=['<div class="loading-indicator">','<div class="loading-indicator-text">','<span class="fa fa-spinner fa-spin fa-lg"></span>','<span 
class="loading-indicator-message">loading...</span>',"</div>","</div>"].join("\n");var g=$(e).hide().css(c.css||{position:"fixed"}),f=g.children(".loading-indicator-text");if(c.cover){g.css({"z-index":2,top:a.css("top"),bottom:a.css("bottom"),left:a.css("left"),right:a.css("right"),opacity:0.5,"background-color":"white","text-align":"center"});f=g.children(".loading-indicator-text").css({"margin-top":"20px"})}else{f=g.children(".loading-indicator-text").css({margin:"12px 0px 0px 10px",opacity:"0.85",color:"grey"});f.children(".loading-indicator-message").css({margin:"0px 8px 0px 0px","font-style":"italic"})}return g}b.show=function(f,e,g){f=f||"loading...";e=e||"fast";b.$indicator=d().insertBefore(a);b.message(f);b.$indicator.fadeIn(e,g);return b};b.message=function(e){b.$indicator.find("i").text(e)};b.hide=function(e,f){e=e||"fast";if(b.$indicator&&b.$indicator.size()){b.$indicator.fadeOut(e,function(){b.$indicator.remove();if(f){f()}})}else{if(f){f()}}return b};return b};
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0