1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/011df6fa309e/
Changeset: 011df6fa309e
User: greg
Date: 2013-11-25 22:53:40
Summary: Fix for rendering the "Skip tool tests" form on the manage_repository page in the tool shed.
Affected #: 1 file
diff -r 11eb2e028791aca01c206fa5e898e1feb28f4651 -r 011df6fa309e9b4a052d5342218e888deabd063f templates/webapps/tool_shed/repository/manage_repository.mako
--- a/templates/webapps/tool_shed/repository/manage_repository.mako
+++ b/templates/webapps/tool_shed/repository/manage_repository.mako
@@ -209,7 +209,7 @@
<div class="toolFormTitle">Automated tool tests</div>
%endif
<div class="toolFormBody">
- <form name="skip_tool_tests" id="skip_tool_tests" action="${h.url_for( controller='repository', action='manage_repository', id=trans.security.encode_id( repository.id ), changeset_revision=repository_metadata.changeset_revision )}" method="post" >
+ <form name="skip_tool_tests" id="skip_tool_tests" action="${h.url_for( controller='repository', action='manage_repository', id=trans.security.encode_id( repository.id ), changeset_revision=changeset_revision )}" method="post" ><div class="form-row">
%if repository.type == TOOL_DEPENDENCY_DEFINITION:
<label>Skip automated testing of this tool dependency recipe</label>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/11eb2e028791/
Changeset: 11eb2e028791
User: greg
Date: 2013-11-25 22:43:01
Summary: Minor fixes for the prep script for installing and testing tool shed repositories of type tool_dependency_definition.
Affected #: 1 file
diff -r ec2a03eea18ee82c71618d291394da81414cfb45 -r 11eb2e028791aca01c206fa5e898e1feb28f4651 lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
--- a/lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
+++ b/lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
@@ -28,7 +28,9 @@
from base.util import get_database_version
from base.util import get_repository_current_revision
from base.util import get_test_environment
-from galaxy.model.orm import and_, not_, select
+from galaxy.model.orm import and_
+from galaxy.model.orm import not_
+from galaxy.model.orm import select
from galaxy.web import url_for
from tool_shed.repository_types.util import TOOL_DEPENDENCY_DEFINITION
@@ -138,21 +140,6 @@
app.model.RepositoryMetadata.table.c.repository_id.in_( tool_dependency_defintion_repository_ids ),
not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
records_checked += 1
- # Create the repository_status dictionary, using the dictionary from the previous test run if available.
- if repository_metadata.tool_test_results:
- repository_status = repository_metadata.tool_test_results
- else:
- repository_status = {}
- # Initialize the repository_status dictionary with the information about the current test environment.
- last_test_environment = repository_status.get( 'test_environment', None )
- if last_test_environment is None:
- test_environment = get_test_environment()
- else:
- test_environment = get_test_environment( last_test_environment )
- test_environment[ 'tool_shed_database_version' ] = get_database_version( app )
- test_environment[ 'tool_shed_mercurial_version' ] = __version__.version
- test_environment[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
- repository_status[ 'test_environment' ] = test_environment
# Check the next repository revision.
changeset_revision = str( repository_metadata.changeset_revision )
name = repository.name
@@ -174,7 +161,18 @@
print 'Revision %s of %s owned by %s has invalid metadata.' % ( changeset_revision, name, owner )
invalid_metadata += 1
if not info_only:
- repository_metadata.tool_test_results = repository_status
+ # Create the tool_test_results_dict dictionary, using the dictionary from the previous test run if available.
+ if repository_metadata.tool_test_results:
+ tool_test_results_dict = repository_metadata.tool_test_results
+ else:
+ tool_test_results_dict = {}
+ # Initialize the tool_test_results_dict dictionary with the information about the current test environment.
+ test_environment_dict = tool_test_results_dict.get( 'test_environment', {} )
+ test_environment_dict[ 'tool_shed_database_version' ] = get_database_version( app )
+ test_environment_dict[ 'tool_shed_mercurial_version' ] = __version__.version
+ test_environment_dict[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
+ tool_test_results_dict[ 'test_environment' ] = test_environment_dict
+ repository_metadata.tool_test_results = tool_test_results_dict
app.sa_session.add( repository_metadata )
app.sa_session.flush()
stop = time.time()
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d0a3879c0708/
Changeset: d0a3879c0708
User: guerler
Date: 2013-11-25 21:37:46
Summary: UI: Refresh history on history switch
Affected #: 1 file
diff -r 61eacb9ce6d54a3cc217e7d826963a9a7370adf7 -r d0a3879c0708983fccb24b809bfe29d96a8dbb59 templates/grid_base.mako
--- a/templates/grid_base.mako
+++ b/templates/grid_base.mako
@@ -221,8 +221,7 @@
<%def name="grid_javascripts()">
${h.js("libs/jquery/jquery.autocomplete", "galaxy.autocom_tagging", "libs/jquery/jquery.rating" )}
- ${handle_refresh_frames()}
-
+
<script type="text/javascript">
var gridView = null;
function add_tag_to_grid_filter (tag_name, tag_value)
@@ -470,6 +469,8 @@
});
});
</script>
+
+ ${handle_refresh_frames()}
</%def>
## Render grid table footer contents.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/61eacb9ce6d5/
Changeset: 61eacb9ce6d5
User: greg
Date: 2013-11-25 21:09:50
Summary: More fixes for the tool shed's install and test framework - the test environment including time last tested should now display correctly again (among other fixes).
Affected #: 5 files
diff -r 1fbb0cb8dd5865185b186e609d747668510a25a8 -r 61eacb9ce6d54a3cc217e7d826963a9a7370adf7 lib/galaxy/webapps/tool_shed/api/repository_revisions.py
--- a/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
+++ b/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
@@ -170,6 +170,7 @@
if key in [ 'tools_functionally_correct', 'time_last_tested' ]:
# Automatically update repository_metadata.time_last_tested.
repository_metadata.time_last_tested = datetime.datetime.utcnow()
+ flush_needed = True
flush_needed = True
if flush_needed:
trans.sa_session.add( repository_metadata )
diff -r 1fbb0cb8dd5865185b186e609d747668510a25a8 -r 61eacb9ce6d54a3cc217e7d826963a9a7370adf7 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -7,7 +7,6 @@
import tempfile
import time
-from datetime import datetime
from optparse import OptionParser
from time import strftime
@@ -119,21 +118,11 @@
missing_test_components = []
repository = repository_metadata.repository
records_checked += 1
- # Create the repository_status dictionary, using the dictionary from the previous test run if available.
+ # Create the tool_test_results_dict dictionary, using the dictionary from the previous test run if available.
if repository_metadata.tool_test_results:
- repository_status = repository_metadata.tool_test_results
+ tool_test_results_dict = repository_metadata.tool_test_results
else:
- repository_status = {}
- # Initialize the repository_status dictionary with the information about the current test environment.
- last_test_environment = repository_status.get( 'test_environment', None )
- if last_test_environment is None:
- test_environment = get_test_environment()
- else:
- test_environment = get_test_environment( last_test_environment )
- test_environment[ 'tool_shed_database_version' ] = get_database_version( app )
- test_environment[ 'tool_shed_mercurial_version' ] = __version__.version
- test_environment[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
- repository_status[ 'test_environment' ] = test_environment
+ tool_test_results_dict = {}
# Check the next repository revision.
changeset_revision = str( repository_metadata.changeset_revision )
name = repository.name
@@ -262,6 +251,11 @@
if 'missing_components' in invalid_test:
print '# %s' % invalid_test[ 'missing_components' ]
if not info_only:
+ test_environment_dict = tool_test_results_dict.get( 'test_environment', {} )
+ test_environment_dict[ 'tool_shed_database_version' ] = get_database_version( app )
+ test_environment_dict[ 'tool_shed_mercurial_version' ] = __version__.version
+ test_environment_dict[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
+ tool_test_results_dict[ 'test_environment' ] = test_environment_dict
# The repository_metadata.time_last_tested column is not changed by this script since no testing is performed here.
if missing_test_components:
# If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been
@@ -278,8 +272,8 @@
repository_metadata.do_not_test = True
repository_metadata.tools_functionally_correct = False
repository_metadata.missing_test_components = True
- repository_status[ 'missing_test_components' ] = missing_test_components
- repository_metadata.tool_test_results = repository_status
+ tool_test_results_dict[ 'missing_test_components' ] = missing_test_components
+ repository_metadata.tool_test_results = tool_test_results_dict
app.sa_session.add( repository_metadata )
app.sa_session.flush()
stop = time.time()
diff -r 1fbb0cb8dd5865185b186e609d747668510a25a8 -r 61eacb9ce6d54a3cc217e7d826963a9a7370adf7 lib/tool_shed/util/container_util.py
--- a/lib/tool_shed/util/container_util.py
+++ b/lib/tool_shed/util/container_util.py
@@ -235,7 +235,7 @@
class TestEnvironment( object ):
"""Tool test environment object"""
- def __init__( self, id=None, architecture=None, galaxy_database_version=None, galaxy_revision=None, python_version=None, system=None, time_last_tested=None,
+ def __init__( self, id=None, architecture=None, galaxy_database_version=None, galaxy_revision=None, python_version=None, system=None, time_tested=None,
tool_shed_database_version=None, tool_shed_mercurial_version=None, tool_shed_revision=None ):
self.id = id
self.architecture = architecture
@@ -243,7 +243,7 @@
self.galaxy_revision = galaxy_revision
self.python_version = python_version
self.system = system
- self.time_last_tested = time_last_tested
+ self.time_tested = time_tested
self.tool_shed_database_version = tool_shed_database_version
self.tool_shed_mercurial_version = tool_shed_mercurial_version
self.tool_shed_revision = tool_shed_revision
@@ -743,11 +743,6 @@
)
if repository_metadata:
metadata = repository_metadata.metadata
- tool_test_results = repository_metadata.tool_test_results
- try:
- time_last_tested = time_ago( repository_metadata.time_last_tested )
- except:
- time_last_tested = None
lock = threading.Lock()
lock.acquire( True )
try:
@@ -836,12 +831,17 @@
label='Valid tools' )
containers_dict[ 'valid_tools' ] = valid_tools_root_folder
# Tool test results container.
- if 'tool_test_results' not in exclude and tool_test_results and len( tool_test_results ) > 1:
- # Only create and populate this folder if there are actual tool test results to display, since the display of the 'Test environment'
- # folder by itself can be misleading. We check for more than a single entry in the tool_test_results dictionary because it may have
- # only the "test_environment" entry, but we want at least 1 of "passed_tests", "failed_tests", "installation_errors", "missing_test_components"
- # "skipped_tests", "not_tested" or any other entry that may be added in the future.
- folder_id, tool_test_results_root_folder = build_tool_test_results_folder( trans, folder_id, tool_test_results, time_last_tested=time_last_tested )
+ tool_test_results = repository_metadata.tool_test_results
+ # Only create and populate this folder if there are actual tool test results to display.
+ if can_display_tool_test_results( tool_test_results, exclude=exclude ):
+ time_tested = repository_metadata.time_last_tested
+ if time_tested is not None:
+ time_tested = time_ago( time_tested )
+ folder_id, tool_test_results_root_folder = build_tool_test_results_folder( trans,
+ folder_id,
+ tool_test_results,
+ time_tested,
+ label='Tool test results' )
containers_dict[ 'tool_test_results' ] = tool_test_results_root_folder
# Workflows container.
if metadata:
@@ -1052,30 +1052,39 @@
tool_dependencies_root_folder = None
return folder_id, tool_dependencies_root_folder
-def build_tool_test_results_folder( trans, folder_id, tool_test_results_dict, label='Tool test results', time_last_tested=None ):
+def build_tool_test_results_folder( trans, folder_id, tool_test_results_dict, time_tested, label='Tool test results' ):
"""Return a folder hierarchy containing tool dependencies."""
# This container is displayed only in the tool shed.
if tool_test_results_dict:
folder_id += 1
tool_test_results_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
- if test_environment_dict:
+ if test_environment_dict is not None:
folder_id += 1
test_results_folder = Folder( id=folder_id, key='test_results', label=label, parent=tool_test_results_root_folder )
tool_test_results_root_folder.folders.append( test_results_folder )
folder_id += 1
folder = Folder( id=folder_id, key='test_environment', label='Automated test environment', parent=test_results_folder )
test_results_folder.folders.append( folder )
+ architecture = test_environment_dict.get( 'architecture', '' )
+ galaxy_database_version = test_environment_dict.get( 'galaxy_database_version', '' )
+ galaxy_revision = test_environment_dict.get( 'galaxy_revision', '' )
+ python_version = test_environment_dict.get( 'python_version', '' )
+ system = test_environment_dict.get( 'system', '' )
+ time_tested = time_tested
+ tool_shed_database_version = test_environment_dict.get( 'tool_shed_database_version', '' )
+ tool_shed_mercurial_version = test_environment_dict.get( 'tool_shed_mercurial_version', '' )
+ tool_shed_revision = test_environment_dict.get( 'tool_shed_revision', '' )
test_environment = TestEnvironment( id=1,
- architecture=test_environment_dict.get( 'architecture', '' ),
- galaxy_database_version=test_environment_dict.get( 'galaxy_database_version', '' ),
- galaxy_revision=test_environment_dict.get( 'galaxy_revision', '' ),
- python_version=test_environment_dict.get( 'python_version', '' ),
- system=test_environment_dict.get( 'system', '' ),
- time_last_tested=time_last_tested,
- tool_shed_database_version=test_environment_dict.get( 'tool_shed_database_version', '' ),
- tool_shed_mercurial_version=test_environment_dict.get( 'tool_shed_mercurial_version', '' ),
- tool_shed_revision=test_environment_dict.get( 'tool_shed_revision', '' ) )
+ architecture=architecture,
+ galaxy_database_version=galaxy_database_version,
+ galaxy_revision=galaxy_revision,
+ python_version=python_version,
+ system=system,
+ time_tested=time_tested,
+ tool_shed_database_version=tool_shed_database_version,
+ tool_shed_mercurial_version=tool_shed_mercurial_version,
+ tool_shed_revision=tool_shed_revision )
folder.test_environments.append( test_environment )
not_tested_dict = tool_test_results_dict.get( 'not_tested', {} )
if not_tested_dict:
@@ -1238,6 +1247,26 @@
workflows_root_folder = None
return folder_id, workflows_root_folder
+def can_display_tool_test_results( tool_test_results_dict, exclude=None ):
+ # Only create and populate the tool_test_results container if there are actual tool test results to display.
+ if exclude is None:
+ exclude = []
+ if 'tool_test_results' in exclude:
+ return False
+ if tool_test_results_dict:
+ # We check for more than a single entry in the tool_test_results dictionary because it may have
+ # only the "test_environment" entry, but we want at least 1 of "passed_tests", "failed_tests",
+ # "installation_errors", "missing_test_components" "skipped_tests", "not_tested" or any other
+ # entry that may be added in the future.
+ display_entries = [ 'failed_tests', 'installation_errors', 'missing_test_components', 'not_tested', 'passed_tests', 'skipped_tests' ]
+ for k, v in tool_test_results_dict.items():
+ if k in display_entries:
+ # We've discovered an entry that can be displayed, so see if it has a value since displaying
+ # empty lists is not desired.
+ if v:
+ return True
+ return False
+
def cast_empty_repository_dependency_folders( folder, repository_dependency_id ):
"""
Change any empty folders contained within the repository dependencies container into a repository dependency since it has no repository dependencies
diff -r 1fbb0cb8dd5865185b186e609d747668510a25a8 -r 61eacb9ce6d54a3cc217e7d826963a9a7370adf7 templates/webapps/tool_shed/repository/common.mako
--- a/templates/webapps/tool_shed/repository/common.mako
+++ b/templates/webapps/tool_shed/repository/common.mako
@@ -909,7 +909,7 @@
id="libraryItem-rte-${encoded_id}"><td style="padding-left: ${pad+20}px;"><table class="grid" id="test_environment">
- <tr><td><b>Time tested:</b> ${test_environment.time_last_tested | h}</td></tr>
+ <tr><td><b>Time tested:</b> ${test_environment.time_tested | h}</td></tr><tr><td><b>System:</b> ${test_environment.system | h}</td></tr><tr><td><b>Architecture:</b> ${test_environment.architecture | h}</td></tr><tr><td><b>Python version:</b> ${test_environment.python_version | h}</td></tr>
diff -r 1fbb0cb8dd5865185b186e609d747668510a25a8 -r 61eacb9ce6d54a3cc217e7d826963a9a7370adf7 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -21,7 +21,6 @@
import threading
import unittest
import urllib
-import urllib2
from time import strftime
# Assume we are run from the galaxy root directory, add lib to the python path
@@ -42,8 +41,8 @@
sys.path = new_path
from functional_tests import generate_config_file
+
from galaxy import eggs
-from galaxy.util import unicodify
eggs.require( "nose" )
eggs.require( "NoseHTML" )
@@ -53,10 +52,14 @@
eggs.require( "PasteDeploy" )
eggs.require( "Cheetah" )
eggs.require( "simplejson" )
+eggs.require( 'mercurial' )
import simplejson
import twill
+from datetime import datetime
+from mercurial import __version__
+
# This should not be required, but it is under certain conditions, thanks to this bug: http://code.google.com/p/python-nose/issues/detail?id=284
eggs.require( "pysqlite" )
@@ -76,13 +79,20 @@
import tool_shed.util.shed_util_common as suc
from tool_shed.util import tool_dependency_util
+from galaxy.web.framework.helpers import time_ago
+
import nose.core
import nose.config
import nose.loader
import nose.plugins.manager
from nose.plugins import Plugin
-from base.util import parse_tool_panel_config, get_database_version, get_test_environment, get_repository_current_revision
+from base.util import get_database_version
+from base.util import get_repository_current_revision
+from base.util import get_test_environment
+from base.util import parse_tool_panel_config
+
+from galaxy.util import unicodify
from common import update
@@ -472,29 +482,27 @@
return repository_dicts, error_message
def get_tool_info_from_test_id( test_id ):
- '''
- Test IDs come in the form test_tool_number (functional.test_toolbox.TestForTool_toolshed_url/repos/owner/repository_name/tool_id/tool_version)
+ """
+ Test IDs come in the form test_tool_number
+ (functional.test_toolbox.TestForTool_toolshed_url/repos/owner/repository_name/tool_id/tool_version)
We want the tool ID and tool version.
- '''
+ """
parts = test_id.replace( ')', '' ).split( '/' )
tool_version = parts[ -1 ]
tool_id = parts[ -2 ]
return tool_id, tool_version
-def get_tool_test_results_from_api( tool_shed_url, metadata_revision_id ):
+def get_tool_test_results_dict( tool_shed_url, encoded_repository_metadata_id ):
error_message = ''
- api_path = [ 'api', 'repository_revisions', metadata_revision_id ]
+ api_path = [ 'api', 'repository_revisions', encoded_repository_metadata_id ]
api_url = get_api_url( base=tool_shed_url, parts=api_path )
repository_metadata, error_message = json_from_url( api_url )
if error_message:
return None, error_message
tool_test_results = repository_metadata.get( 'tool_test_results', {} )
- # If, for some reason, the script that checks for functional tests has not run, tool_test_results will be None.
- if tool_test_results is None:
- return {}, error_message
return tool_test_results, error_message
-def handle_missing_dependencies( app, repository, missing_tool_dependencies, repository_dict, repository_status_dict, results_dict ):
+def handle_missing_dependencies( app, repository, missing_tool_dependencies, repository_dict, tool_test_results_dict, results_dict ):
"""Handle missing repository or tool dependencies for an installed repository."""
# If a tool dependency fails to install correctly, this should be considered an installation error,
# and functional tests should be skipped, since the tool dependency needs to be correctly installed
@@ -508,7 +516,7 @@
name=dependency.name,
version=dependency.version,
error_message=dependency.error_message )
- repository_status_dict[ 'installation_errors' ][ 'tool_dependencies' ].append( test_result )
+ tool_test_results_dict[ 'installation_errors' ][ 'tool_dependencies' ].append( test_result )
for dependency in repository.missing_repository_dependencies:
log.debug( 'Missing repository dependency %s changeset revision %s owned by %s: %s' % \
( str( dependency.name ), str( dependency.changeset_revision ), str( dependency.owner ), unicodify( dependency.error_message ) ) )
@@ -517,12 +525,12 @@
owner=dependency.owner,
changeset_revision=dependency.changeset_revision,
error_message=dependency.error_message )
- repository_status_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( test_result )
+ tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( test_result )
# Record the status of this repository in the tool shed.
params = dict( tools_functionally_correct=False,
do_not_test=False,
test_install_error=True )
- register_test_result( galaxy_tool_shed_url, repository_status_dict, repository_dict, params )
+ register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
# Since this repository is missing components, we do not want to test it, so deactivate it or uninstall it.
# The deactivate flag is set to True if the environment variable GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES
# is set to 'true'.
@@ -629,27 +637,13 @@
# "tool_shed_url": "http://toolshed.local:10001",
# "url": "/api/repository_revisions/529fd61ab1c6cc36",
# "user_id": "529fd61ab1c6cc36" }
- metadata_revision_id = repository_dict.get( 'id', None )
+ encoded_repository_metadata_id = repository_dict.get( 'id', None )
# Add the URL for the tool shed we're installing from, so the automated installation methods go to the right place.
repository_dict[ 'tool_shed_url' ] = galaxy_tool_shed_url
# Get the name and owner out of the repository info dict.
name = str( repository_dict[ 'name' ] )
owner = str( repository_dict[ 'owner' ] )
changeset_revision = str( repository_dict[ 'changeset_revision' ] )
- # Populate the repository_status_dict.
- repository_status_dict, error_message = get_tool_test_results_from_api( galaxy_tool_shed_url, metadata_revision_id )
- if error_message:
- return None, error_message
- if 'test_environment' not in repository_status_dict:
- repository_status_dict[ 'test_environment' ] = {}
- test_environment = get_test_environment( repository_status_dict[ 'test_environment' ] )
- test_environment[ 'galaxy_database_version' ] = get_database_version( app )
- test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
- repository_status_dict[ 'test_environment' ] = test_environment
- repository_status_dict[ 'passed_tests' ] = []
- repository_status_dict[ 'failed_tests' ] = []
- repository_status_dict[ 'skip_reason' ] = None
- repository_status_dict[ 'installation_errors' ] = dict( current_repository=[], repository_dependencies=[], tool_dependencies=[] )
# See if this repository should be skipped for any reason.
skip_this_repository = False
skip_reason = None
@@ -661,82 +655,104 @@
skip_reason = reason
break
if skip_this_repository:
- repository_status_dict[ 'not_tested' ] = dict( reason=skip_reason )
+ tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
params = dict( tools_functionally_correct=False,
do_not_test=False )
- register_test_result( galaxy_tool_shed_url, repository_status_dict, repository_dict, params )
+ register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
log.debug( "Not testing revision %s of repository %s owned by %s." % ( changeset_revision, name, owner ) )
else:
- repository, error_message = install_repository( app, repository_dict )
+ # Populate the tool_test_results_dict.
+ tool_test_results_dict, error_message = get_tool_test_results_dict( galaxy_tool_shed_url, encoded_repository_metadata_id )
if error_message:
- repository_status_dict[ 'installation_errors' ][ 'current_repository' ] = error_message
-
- # Even if the repository failed to install, execute the uninstall method, in case a dependency did succeed.
- log.debug( 'Attempting to uninstall repository %s owned by %s.' % ( name, owner ) )
- try:
- repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
- except Exception, e:
- error_message = 'Unable to find installed repository %s owned by %s: %s.' % ( name, owner, str( e ) )
- log.exception( error_message )
- test_result = dict( tool_shed=galaxy_tool_shed_url,
- name=name,
- owner=owner,
- changeset_revision=changeset_revision,
- error_message=error_message )
- repository_status_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( test_result )
- params = dict( tools_functionally_correct=False,
- test_install_error=True,
- do_not_test=False )
- register_test_result( galaxy_tool_shed_url, repository_status_dict, repository_dict, params )
- try:
- if deactivate:
- # We are deactivating this repository and all of its repository dependencies.
- deactivate_repository( app, repository_dict )
+ log.debug( error_message )
+ else:
+ # The preparation script ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered
+ # information in the 'missing_test_components' entry of the tool_test_results_dict dictionary for repositories
+ # that are missing test components. We need to be careful to not lose this information. For all other repositories,
+ # no changes will have been made to this dictionary by the preparation script, and tool_test_results_dict will be None.
+ # Initialize the tool_test_results_dict dictionary with the information about the current test environment.
+ test_environment_dict = tool_test_results_dict.get( 'test_environent', None )
+ test_environment_dict = get_test_environment( test_environment_dict )
+ test_environment_dict[ 'galaxy_database_version' ] = get_database_version( app )
+ test_environment_dict[ 'galaxy_revision' ] = get_repository_current_revision( os.getcwd() )
+ tool_test_results_dict[ 'test_environment' ] = test_environment_dict
+ tool_test_results_dict[ 'passed_tests' ] = []
+ tool_test_results_dict[ 'failed_tests' ] = []
+ tool_test_results_dict[ 'installation_errors' ] = dict( current_repository=[], repository_dependencies=[], tool_dependencies=[] )
+ repository, error_message = install_repository( app, repository_dict )
+ if error_message:
+ tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = error_message
+ # Even if the repository failed to install, execute the uninstall method, in case a dependency did succeed.
+ log.debug( 'Attempting to uninstall repository %s owned by %s.' % ( name, owner ) )
+ try:
+ repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
+ except Exception, e:
+ error_message = 'Unable to find installed repository %s owned by %s: %s.' % ( name, owner, str( e ) )
+ log.exception( error_message )
+ test_result = dict( tool_shed=galaxy_tool_shed_url,
+ name=name,
+ owner=owner,
+ changeset_revision=changeset_revision,
+ error_message=error_message )
+ tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( test_result )
+ params = dict( tools_functionally_correct=False,
+ test_install_error=True,
+ do_not_test=False )
+
+ register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
+ try:
+ if deactivate:
+ # We are deactivating this repository and all of its repository dependencies.
+ deactivate_repository( app, repository_dict )
+ else:
+ # We are uninstalling this repository and all of its repository dependencies.
+ uninstall_repository( app, repository_dict )
+ except:
+ log.exception( 'Encountered error attempting to deactivate or uninstall %s.', str( repository_dict[ 'name' ] ) )
+ results_dict[ 'repositories_failed_install' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
+ log.debug( 'Repository %s failed to install correctly.' % str( name ) )
+ else:
+ # Configure and run functional tests for this repository. This is equivalent to sh run_functional_tests.sh -installed
+ remove_install_tests()
+ log.debug( 'Installation of %s succeeded, running all defined functional tests.' % str( repository.name ) )
+ # Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
+ # does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
+ # and the tools_functionally_correct flag to False, as well as updating tool_test_results.
+ file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
+ has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file,
+ from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
+ # Add an empty 'missing_test_results' entry if it is missing from the tool_test_results_dict. The
+ # ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered information in the
+ # 'missing_test_components' entry of the tool_test_results_dict dictionary for repositories that are
+ # missing test components.
+ if 'missing_test_components' not in tool_test_results_dict:
+ tool_test_results_dict[ 'missing_test_components' ] = []
+ missing_tool_dependencies = get_missing_tool_dependencies( repository )
+ if missing_tool_dependencies or repository.missing_repository_dependencies:
+ results_dict = handle_missing_dependencies( app,
+ repository,
+ missing_tool_dependencies,
+ repository_dict,
+ tool_test_results_dict,
+ results_dict )
else:
- # We are uninstalling this repository and all of its repository dependencies.
- uninstall_repository( app, repository_dict )
- except:
- log.exception( 'Encountered error attempting to deactivate or uninstall %s.', str( repository_dict[ 'name' ] ) )
- results_dict[ 'repositories_failed_install' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
- log.debug( 'Repository %s failed to install correctly.' % str( name ) )
- else:
- # Configure and run functional tests for this repository. This is equivalent to sh run_functional_tests.sh -installed
- remove_install_tests()
- log.debug( 'Installation of %s succeeded, running all defined functional tests.' % str( repository.name ) )
- # Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
- # does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
- # and the tools_functionally_correct flag to False, as well as updating tool_test_results.
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
- has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file,
- from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
- if 'missing_test_components' not in repository_status_dict:
- repository_status_dict[ 'missing_test_components' ] = []
- missing_tool_dependencies = get_missing_tool_dependencies( repository )
- if missing_tool_dependencies or repository.missing_repository_dependencies:
- results_dict = handle_missing_dependencies( app,
- repository,
- missing_tool_dependencies,
- repository_dict,
- repository_status_dict,
- results_dict )
- else:
- # If the repository has a test-data directory we write the generated shed_tools_dict to a file, so the functional
- # test framework can find it.
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
- log.debug( 'Saved generated shed_tools_dict to %s\nContents: %s' % ( str( galaxy_shed_tools_dict ), str( shed_tools_dict ) ) )
- try:
- results_dict = test_repository_tools( app, repository, repository_dict, repository_status_dict, results_dict )
- except Exception, e:
- exception_message = 'Error executing tests for repository %s: %s' % ( name, str( e ) )
- log.exception( exception_message )
- repository_status_dict[ 'failed_tests' ].append( exception_message )
- # Record the status of this repository in the tool shed.
- params = dict( tools_functionally_correct=False,
- do_not_test=False,
- test_install_error=False )
- register_test_result( galaxy_tool_shed_url, repository_status_dict, repository_dict, params )
- results_dict[ 'repositories_failed' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
- total_repositories_tested += 1
+ # If the repository has a test-data directory we write the generated shed_tools_dict to a file, so the functional
+ # test framework can find it.
+ file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
+ log.debug( 'Saved generated shed_tools_dict to %s\nContents: %s' % ( str( galaxy_shed_tools_dict ), str( shed_tools_dict ) ) )
+ try:
+ results_dict = test_repository_tools( app, repository, repository_dict, tool_test_results_dict, results_dict )
+ except Exception, e:
+ exception_message = 'Error executing tests for repository %s: %s' % ( name, str( e ) )
+ log.exception( exception_message )
+ tool_test_results_dict[ 'failed_tests' ].append( exception_message )
+ # Record the status of this repository in the tool shed.
+ params = dict( tools_functionally_correct=False,
+ do_not_test=False,
+ test_install_error=False )
+ register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
+ results_dict[ 'repositories_failed' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
+ total_repositories_tested += 1
results_dict[ 'total_repositories_tested' ] = total_repositories_tested
return results_dict, error_message
@@ -794,12 +810,20 @@
return exclude_list
def register_test_result( url, test_results_dict, repository_dict, params ):
- """Update the repository metadata tool_test_results and appropriate flags using the API."""
+ """
Update the repository metadata tool_test_results and appropriate flags using the Tool Shed API. This method
updates tool_test_results with the relevant data, sets the do_not_test and tools_functionally_correct flags
to the appropriate values and updates the time_last_tested field to the value of the received time_tested.
+ """
if '-info_only' in sys.argv or 'GALAXY_INSTALL_TEST_INFO_ONLY' in os.environ:
return {}
else:
metadata_revision_id = repository_dict.get( 'id', None )
+ log.debug("RRR In register_test_result, metadata_revision_id: %s" % str( metadata_revision_id ))
if metadata_revision_id is not None:
+ # Set the time_last_tested entry so that the repository_metadata.time_last_tested will be set in the tool shed.
+ time_tested = datetime.utcnow()
+ test_results_dict[ 'time_last_tested' ] = time_ago( time_tested )
params[ 'tool_test_results' ] = test_results_dict
url = '%s' % ( suc.url_join( galaxy_tool_shed_url,'api', 'repository_revisions', str( metadata_revision_id ) ) )
try:
@@ -876,7 +900,7 @@
for repository in repositories_by_owner[ owner ]:
print "# %s owned by %s, changeset revision %s" % ( repository[ 'name' ], repository[ 'owner' ], repository[ 'changeset_revision' ] )
-def test_repository_tools( app, repository, repository_dict, repository_status_dict, results_dict ):
+def test_repository_tools( app, repository, repository_dict, tool_test_results_dict, results_dict ):
"""Test tools contained in the received repository."""
name = str( repository.name )
owner = str( repository.owner )
@@ -898,12 +922,12 @@
test_identifier = '%s/%s' % ( owner, name )
passed_tests = plugin.getTestStatus( test_identifier )
break
- repository_status_dict[ 'passed_tests' ] = []
+ tool_test_results_dict[ 'passed_tests' ] = []
for test_id in passed_tests:
# Normalize the tool ID and version display.
tool_id, tool_version = get_tool_info_from_test_id( test_id )
test_result = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
- repository_status_dict[ 'passed_tests' ].append( test_result )
+ tool_test_results_dict[ 'passed_tests' ].append( test_result )
if success:
# This repository's tools passed all functional tests. Update the repository_metadata table in the tool shed's database
# to reflect that. Call the register_test_result method, which executes a PUT request to the repository_revisions API
@@ -913,20 +937,16 @@
params = dict( tools_functionally_correct=True,
do_not_test=False,
test_install_error=False )
- register_test_result( galaxy_tool_shed_url, repository_status_dict, repository_dict, params )
+ register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
log.debug( 'Revision %s of repository %s installed and passed functional tests.' % ( str( changeset_revision ), str( name ) ) )
else:
- repository_status_dict[ 'failed_tests' ].append( extract_log_data( result, from_tool_test=True ) )
- # Call the register_test_result method, which executes a PUT request to the repository_revisions API controller with the outcome
- # of the tests, and updates tool_test_results with the relevant log data.
- # This also sets the do_not_test and tools_functionally correct flags to the appropriate values, and updates the time_last_tested
- # field to today's date.
+ tool_test_results_dict[ 'failed_tests' ].append( extract_log_data( result, from_tool_test=True ) )
results_dict[ 'repositories_failed' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
set_do_not_test = not is_latest_downloadable_revision( galaxy_tool_shed_url, repository_dict )
params = dict( tools_functionally_correct=False,
test_install_error=False,
do_not_test=str( set_do_not_test ) )
- register_test_result( galaxy_tool_shed_url, repository_status_dict, repository_dict, params )
+ register_test_result( galaxy_tool_shed_url, tool_test_results_dict, repository_dict, params )
log.debug( 'Revision %s of repository %s installed successfully but did not pass functional tests.' % \
( str( changeset_revision ), str( name ) ) )
# Run the uninstall method. This removes tool functional test methods from the test_toolbox module and uninstalls the
@@ -1256,7 +1276,7 @@
return 1
if __name__ == "__main__":
- # The repository_status_dict should always have the following structure:
+ # The tool_test_results_dict should always have the following structure:
# {
# "test_environment":
# {
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/55d684093172/
Changeset: 55d684093172
User: Dave Bouvier
Date: 2013-11-25 19:01:44
Summary: Fix for importing repositories.
Affected #: 1 file
diff -r 8ae8e19ea0762af833558f75b84fe19840c6e783 -r 55d684093172360d6329349337313da202e94108 lib/tool_shed/util/import_util.py
--- a/lib/tool_shed/util/import_util.py
+++ b/lib/tool_shed/util/import_util.py
@@ -164,7 +164,7 @@
# No repository with the specified name and owner currently exists, so make sure the current user can create one.
if trans.user_is_admin():
repository_info_dict[ 'status' ] = None
- elif trans.app.security_agent.user_can_import_repository_archive( trans.user, owner ):
+ elif trans.app.security_agent.user_can_import_repository_archive( trans.user, repository_info_dict[ 'owner' ] ):
repository_info_dict[ 'status' ] = None
else:
repository_info_dict[ 'status' ] = 'Not authorized to import'
https://bitbucket.org/galaxy/galaxy-central/commits/398287552d91/
Changeset: 398287552d91
User: Dave Bouvier
Date: 2013-11-25 19:02:09
Summary: Functional tests for importing and exporting repositories.
Affected #: 3 files
diff -r 55d684093172360d6329349337313da202e94108 -r 398287552d91093721ea0de635ed6b0d7c7cd147 test/tool_shed/test_data/repository_capsules/0490_filtering.tar.gz
Binary file test/tool_shed/test_data/repository_capsules/0490_filtering.tar.gz has changed
diff -r 55d684093172360d6329349337313da202e94108 -r 398287552d91093721ea0de635ed6b0d7c7cd147 test/tool_shed/test_data/repository_capsules/0500_emboss_5.tar.gz
Binary file test/tool_shed/test_data/repository_capsules/0500_emboss_5.tar.gz has changed
diff -r 55d684093172360d6329349337313da202e94108 -r 398287552d91093721ea0de635ed6b0d7c7cd147 test/tool_shed/test_data/repository_capsules/0510_trans_proteomic_pipeline.tar.gz
Binary file test/tool_shed/test_data/repository_capsules/0510_trans_proteomic_pipeline.tar.gz has changed
https://bitbucket.org/galaxy/galaxy-central/commits/d37d6e711d67/
Changeset: d37d6e711d67
User: Dave Bouvier
Date: 2013-11-25 19:02:09
Summary: Functional tests for importing and exporting repositories.
Affected #: 5 files
diff -r 398287552d91093721ea0de635ed6b0d7c7cd147 -r d37d6e711d671763d725ffb4931f0acc379964dc test/tool_shed/base/twilltestcase.py
--- a/test/tool_shed/base/twilltestcase.py
+++ b/test/tool_shed/base/twilltestcase.py
@@ -4,6 +4,7 @@
import re
import test_db_util
import simplejson
+import shutil
import logging
import time
import tempfile
@@ -12,6 +13,7 @@
import galaxy.model as galaxy_model
import galaxy.util as util
from tool_shed.util import shed_util_common as suc
+from tool_shed.util import xml_util
from base.twilltestcase import tc, from_json_string, TwillTestCase, security, urllib
from tool_shed.util.encoding_util import tool_shed_encode, tool_shed_decode
@@ -98,6 +100,16 @@
self.check_repository_changelog( repository )
self.check_string_count_in_page( 'Repository metadata is associated with this change set.', metadata_count )
+ def check_exported_repository_dependency( self, dependency_filename, repository_name, repository_owner ):
+ root, error_message = xml_util.parse_xml( dependency_filename )
+ for elem in root.findall( 'repository' ):
+ if 'changeset_revision' in elem:
+ raise AssertionError( 'Exported repository %s with owner %s has a dependency with a defined changeset revision.' % \
+ ( repository_name, repository_owner ) )
+ if 'toolshed' in elem:
+ raise AssertionError( 'Exported repository %s with owner %s has a dependency with a defined tool shed.' % \
+ ( repository_name, repository_owner ) )
+
def check_for_valid_tools( self, repository, strings_displayed=[], strings_not_displayed=[] ):
strings_displayed.append( 'Valid tools' )
self.display_manage_repository_page( repository, strings_displayed, strings_not_displayed )
@@ -144,6 +156,24 @@
self.visit_galaxy_url( url )
self.check_for_strings( strings_displayed, strings_not_displayed )
+ def check_manifest( self, manifest_filepath, owner=None ):
+ root, error_message = xml_util.parse_xml( manifest_filepath )
+ for elem in root.findall( 'repository' ):
+ repository_name = elem.get( 'name' )
+ manifest_owner = elem.get( 'username' )
+ if owner is not None:
+ assert manifest_owner == owner, 'Expected repository %s to be owned by %s, but found %s' % \
+ ( elem.get( 'name' ), owner, manifest_owner )
+ toolshed = elem.get( 'toolshed' )
+ changeset_revision = elem.get( 'changeset_revision' )
+ assert toolshed is None, 'Repository definition %s has a tool shed attribute %s.' % ( repository_name, toolshed )
+ assert changeset_revision is None, 'Repository definition %s specifies a changeset revision %s.' % \
+ ( repository_name, changeset_revision )
+ repository_archive = elem.find( 'archive' ).text
+ filepath, filename = os.path.split( manifest_filepath )
+ repository_path = os.path.join( filepath, repository_archive )
+ self.verify_repository_in_capsule( repository_path, repository_name, owner )
+
def check_repository_changelog( self, repository, strings_displayed=[], strings_not_displayed=[] ):
url = '/repository/view_changelog?id=%s' % self.security.encode_id( repository.id )
self.visit_url( url )
@@ -1207,6 +1237,15 @@
tc.submit( "upload_button" )
self.check_for_strings( strings_displayed, strings_not_displayed )
+ def verify_capsule_contents( self, capsule_filepath, owner ):
+ tar_object = tarfile.open( capsule_filepath, 'r:*' )
+ extraction_path = tempfile.mkdtemp()
+ tar_object.extractall( extraction_path )
+ for root, dirs, files in os.walk( extraction_path ):
+ if 'manifest.xml' in files:
+ self.check_manifest( os.path.join( root, 'manifest.xml' ), owner=owner )
+ shutil.rmtree( extraction_path )
+
def verify_installed_repositories( self, installed_repositories=[], uninstalled_repositories=[] ):
for repository_name, repository_owner in installed_repositories:
galaxy_repository = test_db_util.get_installed_repository_by_name_owner( repository_name, repository_owner )
@@ -1279,6 +1318,17 @@
# or we know that the repository was not correctly installed!
assert found, 'No entry for %s in %s.' % ( required_data_table_entry, self.shed_tool_data_table_conf )
+ def verify_repository_in_capsule( self, repository_archive, repository_name, repository_owner ):
+ repository_extraction_dir = tempfile.mkdtemp()
+ repository_tar_object = tarfile.open( repository_archive, 'r:*' )
+ repository_tar_object.extractall( repository_extraction_dir )
+ for root, dirs, files in os.walk( repository_extraction_dir ):
+ for filename in files:
+ if filename in [ 'tool_dependencies.xml', 'repository_dependencies.xml' ]:
+ dependency_filepath = os.path.join( root, filename )
+ self.check_exported_repository_dependency( dependency_filepath, repository_name, repository_owner )
+ shutil.rmtree( repository_extraction_dir )
+
def verify_repository_reviews( self, repository, reviewer=None, strings_displayed=[], strings_not_displayed=[] ):
changeset_revision = self.get_repository_tip( repository )
# Verify that the currently logged in user has a repository review for the specified repository, reviewer, and changeset revision.
diff -r 398287552d91093721ea0de635ed6b0d7c7cd147 -r d37d6e711d671763d725ffb4931f0acc379964dc test/tool_shed/functional/test_0490_export_import_repositories.py
--- /dev/null
+++ b/test/tool_shed/functional/test_0490_export_import_repositories.py
@@ -0,0 +1,90 @@
+from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
+import tool_shed.base.test_db_util as test_db_util
+
+import logging
+log = logging.getLogger( __name__ )
+
+repository_name = 'filtering_0490'
+repository_description = "Galaxy's filtering tool for test 0490"
+repository_long_description = "Long description of Galaxy's filtering tool for test 0490"
+
+category_name = 'Test 0490 Repository Import Export'
+category_description = 'Test script 0490 for importing and exporting single repositories.'
+
+'''
+First test:
+
+1. Import a repository capsule containing a repository with no dependencies, e.g. filter1.
+2. Check that the repository to be imported is not marked as preexisting. The word ' Exists' should not be displayed, but '<b>Exists</b>' will.
+3. Export that repository. Check the capsule's contents, verify that changeset revision and tool shed are not set.
+4. Import the capsule again. Check that the repository to be imported is marked as preexisting. The word ' Exists' should be
+ displayed, as will '<b>Exists</b>'.
+
+'''
+
+class TestExportImportRepository( ShedTwillTestCase ):
+ '''Test exporting and importing repositories.'''
+
+ def test_0000_initiate_users( self ):
+ """Create necessary user accounts and login as an admin user."""
+ self.logout()
+ self.login( email=common.test_user_1_email, username=common.test_user_1_name )
+ test_user_1 = test_db_util.get_user( common.test_user_1_email )
+ assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
+ test_user_1_private_role = test_db_util.get_private_role( test_user_1 )
+ self.logout()
+ self.login( email=common.admin_email, username=common.admin_username )
+ admin_user = test_db_util.get_user( common.admin_email )
+ assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
+ admin_user_private_role = test_db_util.get_private_role( admin_user )
+
+ def test_0005_import_repository_capsule( self ):
+ """Import the filter_0490 repository capsule."""
+ '''
+ This is step 1 - Import a repository capsule containing a repository with no dependencies, e.g. filter1.
+ Check that the repository to be imported is not marked as preexisting. The string ' Exists' should not
+ be displayed, but '<b>Exists</b>' should.
+ '''
+ self.logout()
+ self.login( email=common.admin_email, username=common.admin_username )
+ self.create_category( name=category_name, description=category_description )
+ self.logout()
+ self.login( email=common.test_user_1_email, username=common.test_user_1_name )
+ self.import_capsule( self.get_filename( 'repository_capsules/0490_filtering.tar.gz' ),
+ strings_displayed=[ repository_name, '<b>Exists' ],
+ strings_not_displayed=[ ' Exists' ],
+ strings_displayed_after_submit=[ 'Repository <b>filtering_0490</b> has been created.' ],
+ strings_not_displayed_after_submit=[ 'Import not necessary' ] )
+
+ def test_0010_export_repository_capsule( self ):
+ '''Export the repository that was imported in the previous step.'''
+ '''
+ This is step 2 - Export that repository.
+ Export the repository to a temporary location.
+ '''
+ global capsule_filepath
+ repository = test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
+ capsule_filepath = self.export_capsule( repository )
+ log.debug( os.path.exists( capsule_filepath ) )
+
+ def test_0015_verify_exported_capsule( self ):
+ '''Verify the exported capsule contents.'''
+ '''
+ This is step 3 - Check the capsule's contents, verify that changeset revision and tool shed are not set.
+ Extract the exported capsule tarball to a temporary path, and confirm that the manifest does not specify
+ a tool shed or changeset revision.
+ '''
+ global capsule_filepath
+ self.verify_capsule_contents( capsule_filepath, owner=common.test_user_1_name )
+
+ def test_0020_import_repository_capsule( self ):
+ '''Import the exported repository capsule.'''
+ '''
+ This is step 4 - Import the capsule again. Check that the repository to be imported is marked as preexisting.
+ The string ' Exists' should be displayed, as should '<b>Exists</b>'.
+ '''
+ global capsule_filepath
+ self.import_capsule( capsule_filepath,
+ strings_displayed=[ repository_name, ' Exists', self.url ],
+ strings_not_displayed_after_submit=[ 'Repository <b>filtering_0490</b> has been created.' ],
+ strings_displayed_after_submit=[ 'Import not necessary', 'Exists' ] )
diff -r 398287552d91093721ea0de635ed6b0d7c7cd147 -r d37d6e711d671763d725ffb4931f0acc379964dc test/tool_shed/functional/test_0500_export_repository_simple_dependency.py
--- /dev/null
+++ b/test/tool_shed/functional/test_0500_export_repository_simple_dependency.py
@@ -0,0 +1,89 @@
+from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
+import tool_shed.base.test_db_util as test_db_util
+
+import logging
+log = logging.getLogger( __name__ )
+
+emboss_repository_name = 'emboss_5_0500'
+emboss_repository_description = "Galaxy wrappers for Emboss version 5.0.0 tools"
+emboss_repository_long_description = "Galaxy wrappers for Emboss version 5.0.0 tools"
+datatypes_repository_name = 'emboss_datatypes_0500'
+datatypes_repository_description = 'Galaxy applicable data formats used by Emboss tools.'
+datatypes_repository_long_description = 'Galaxy applicable data formats used by Emboss tools. This repository contains no tools.'
+
+category_name = 'Test 0500 Repository Dependency Import Export'
+category_description = 'Test script 0500 for importing and exporting repositories with simple repository dependencies.'
+
+'''
+1. Export a repository with no dependencies, e.g. filter1.
+2. Temporarily extract the repository capsule.
+2a. For every owner in the manifest, set to a different user.
+3. Import it into the same tool shed.
+4. Check that the repository to be imported has no status in the status column.
+5. Click the import button.
+6. Verify the resulting page is correct.
+'''
+
+class TestExportImportRepository( ShedTwillTestCase ):
+ '''Test exporting and importing repositories.'''
+
+ def test_0000_initiate_users( self ):
+ """Create necessary user accounts and login as an admin user."""
+ self.logout()
+ self.login( email=common.test_user_1_email, username=common.test_user_1_name )
+ test_user_1 = test_db_util.get_user( common.test_user_1_email )
+ assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
+ test_user_1_private_role = test_db_util.get_private_role( test_user_1 )
+ self.logout()
+ self.login( email=common.admin_email, username=common.admin_username )
+ admin_user = test_db_util.get_user( common.admin_email )
+ assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
+ admin_user_private_role = test_db_util.get_private_role( admin_user )
+
+ def test_0005_create_category_and_repository( self ):
+ """Create categories for this test suite"""
+ self.logout()
+ self.login( email=common.admin_email, username=common.admin_username )
+ self.create_category( name=category_name, description=category_description )
+ self.logout()
+ self.login( email=common.test_user_1_email, username=common.test_user_1_name )
+ self.import_capsule( self.get_filename( 'repository_capsules/0500_emboss_5.tar.gz' ),
+ strings_displayed=[ emboss_repository_name, datatypes_repository_name, '<b>Exists' ],
+ strings_not_displayed=[ ' Exists' ],
+ strings_displayed_after_submit=[ 'Repository <b>emboss_5_0500</b> has been created.',
+ 'Repository <b>emboss_datatypes_0500</b> has been created.' ],
+ strings_not_displayed_after_submit=[ 'Import not necessary' ] )
+
+ def test_0010_export_repository_capsule( self ):
+ '''Export the repository that was imported in the previous step.'''
+ '''
+ This is step 2 - Export that repository.
+ Export the repository to a temporary location.
+ '''
+ global capsule_filepath
+ repository = test_db_util.get_repository_by_name_and_owner( emboss_repository_name, common.test_user_1_name )
+ capsule_filepath = self.export_capsule( repository )
+ log.debug( os.path.exists( capsule_filepath ) )
+
+ def test_0015_verify_exported_capsule( self ):
+ '''Verify the exported capsule contents.'''
+ '''
+ This is step 3 - Check the capsule's contents, verify that changeset revision and tool shed are not set.
+ Extract the exported capsule tarball to a temporary path, and confirm that the manifest does not specify
+ a tool shed or changeset revision.
+ '''
+ global capsule_filepath
+ self.verify_capsule_contents( capsule_filepath, owner=common.test_user_1_name )
+
+ def test_0020_import_repository_capsule( self ):
+ '''Import the exported repository capsule.'''
+ '''
+ This is step 4 - Import the capsule again. Check that the repository to be imported is marked as preexisting.
+ The string ' Exists' should be displayed, as should '<b>Exists</b>'.
+ '''
+ global capsule_filepath
+ self.import_capsule( capsule_filepath,
+ strings_displayed=[ emboss_repository_name, datatypes_repository_name, ' Exists', self.url ],
+ strings_not_displayed_after_submit=[ 'Repository <b>emboss_5_0500</b> has been created.',
+ 'Repository <b>emboss_datatypes_0500</b> has been created.' ],
+ strings_displayed_after_submit=[ 'Import not necessary', ' Exists' ] )
diff -r 398287552d91093721ea0de635ed6b0d7c7cd147 -r d37d6e711d671763d725ffb4931f0acc379964dc test/tool_shed/functional/test_0510_export_import_repository_complex_dependencies.py
--- /dev/null
+++ b/test/tool_shed/functional/test_0510_export_import_repository_complex_dependencies.py
@@ -0,0 +1,99 @@
+from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
+import tool_shed.base.test_db_util as test_db_util
+
+import logging
+log = logging.getLogger( __name__ )
+
+category_name = 'Test 0510 Import Export Complex Dependencies'
+category_description = 'Test script 0510 for importing and exporting repositories with complex repository dependencies.'
+
+'''
+Import a repository capsule with a complex repository dependency with trans_proteomic_pipeline and required repositories.
+Check that the repository to be imported is not marked as preexisting. The word ' Exists' should not be displayed, but '<b>Exists</b>' will.
+Click the import button.
+Verify the resulting page is correct.
+Verify the dependency structure that has been created.
+Export the trans_proteomic_pipeline repository with dependencies.
+Check the capsule's contents, verify that changeset revision and tool shed are not set.
+'''
+
+class TestExportImportRepository( ShedTwillTestCase ):
+ '''Test exporting and importing repositories with complex dependencies.'''
+
+ def test_0000_initiate_users( self ):
+ """Create necessary user accounts and login as an admin user."""
+ self.logout()
+ self.login( email=common.test_user_1_email, username=common.test_user_1_name )
+ test_user_1 = test_db_util.get_user( common.test_user_1_email )
+ assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
+ test_user_1_private_role = test_db_util.get_private_role( test_user_1 )
+ self.logout()
+ self.login( email=common.admin_email, username=common.admin_username )
+ admin_user = test_db_util.get_user( common.admin_email )
+ assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
+ admin_user_private_role = test_db_util.get_private_role( admin_user )
+
+ def test_0005_create_category_and_repositories( self ):
+ """Create categories for this test suite"""
+ self.logout()
+ self.login( email=common.admin_email, username=common.admin_username )
+ self.create_category( name=category_name, description=category_description )
+ self.logout()
+ self.login( email=common.test_user_1_email, username=common.test_user_1_name )
+ self.import_capsule( self.get_filename( 'repository_capsules/0510_trans_proteomic_pipeline.tar.gz' ),
+ strings_displayed=[ 'package_trans_proteomic_pipeline_4_6_3',
+ 'package_perl_5_18',
+ 'package_libpng_1_2',
+ 'package_libgd_2_1',
+ 'package_expat_2_1',
+ '<b>Exists' ],
+ strings_not_displayed=[ ' Exists' ],
+ strings_displayed_after_submit=[ 'Repository <b>package_trans_proteomic_pipeline_4_6_3</b> has been created.',
+ 'Repository <b>package_perl_5_18</b> has been created.',
+ 'Repository <b>package_libpng_1_2</b> has been created.',
+ 'Repository <b>package_libgd_2_1</b> has been created.',
+ 'Repository <b>package_expat_2_1</b> has been created.' ],
+ strings_not_displayed_after_submit=[ 'Import not necessary' ] )
+
+ def test_0010_export_repository_capsule( self ):
+ '''Export the repository that was imported in the previous step.'''
+ '''
+ This is step 2 - Export that repository.
+ Export the repository to a temporary location.
+ '''
+ global capsule_filepath
+ repository = test_db_util.get_repository_by_name_and_owner( 'package_trans_proteomic_pipeline_4_6_3', common.test_user_1_name )
+ capsule_filepath = self.export_capsule( repository )
+ log.debug( os.path.exists( capsule_filepath ) )
+
+ def test_0015_verify_exported_capsule( self ):
+ '''Verify the exported capsule contents.'''
+ '''
+ This is step 3 - Check the capsule's contents, verify that changeset revision and tool shed are not set.
+ Extract the exported capsule tarball to a temporary path, and confirm that the manifest does not specify
+ a tool shed or changeset revision.
+ '''
+ global capsule_filepath
+ self.verify_capsule_contents( capsule_filepath, owner=common.test_user_1_name )
+
+ def test_0020_import_repository_capsule( self ):
+ '''Import the exported repository capsule.'''
+ '''
+ This is step 4 - Import the capsule again. Check that the repository to be imported is marked as preexisting.
+ The string ' Exists' should be displayed, as should '<b>Exists</b>'.
+ '''
+ global capsule_filepath
+ self.import_capsule( capsule_filepath,
+ strings_displayed=[ 'package_trans_proteomic_pipeline_4_6_3',
+ 'package_perl_5_18',
+ 'package_libpng_1_2',
+ 'package_libgd_2_1',
+ 'package_expat_2_1',
+ ' Exists',
+ self.url ],
+ strings_not_displayed_after_submit=[ 'Repository <b>package_trans_proteomic_pipeline_4_6_3</b> has been created.',
+ 'Repository <b>package_perl_5_18</b> has been created.',
+ 'Repository <b>package_libpng_1_2</b> has been created.',
+ 'Repository <b>package_libgd_2_1</b> has been created.',
+ 'Repository <b>package_expat_2_1</b> has been created.' ],
+ strings_displayed_after_submit=[ 'Import not necessary', ' Exists' ] )
diff -r 398287552d91093721ea0de635ed6b0d7c7cd147 -r d37d6e711d671763d725ffb4931f0acc379964dc test/tool_shed/functional_tests.py
--- a/test/tool_shed/functional_tests.py
+++ b/test/tool_shed/functional_tests.py
@@ -92,8 +92,9 @@
tool_sheds_conf_xml_template = '''<?xml version="1.0"?><tool_sheds>
+ <tool_shed name="Galaxy main tool shed" url="http://toolshed.g2.bx.psu.edu/"/>
+ <tool_shed name="Galaxy test tool shed" url="http://testtoolshed.g2.bx.psu.edu/"/><tool_shed name="Embedded tool shed for functional tests" url="http://${shed_url}:${shed_port}/"/>
- <tool_shed name="Galaxy main tool shed" url="http://toolshed.g2.bx.psu.edu/"/></tool_sheds>
'''
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/8ae8e19ea076/
Changeset: 8ae8e19ea076
User: Dave Bouvier
Date: 2013-11-25 18:36:34
Summary: Handle cases where installed_repository_dict has a converter_path or display_path set to None.
Affected #: 2 files
diff -r 6997fa059319b288877cf0728ac20fbb57e80dee -r 8ae8e19ea0762af833558f75b84fe19840c6e783 lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
--- a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
+++ b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
@@ -231,10 +231,13 @@
if tool_shed_repository.includes_datatypes:
# Deactivate proprietary datatypes.
installed_repository_dict = datatype_util.load_installed_datatypes( trans.app, tool_shed_repository, repository_install_dir, deactivate=True )
- if installed_repository_dict and 'converter_path' in installed_repository_dict:
- datatype_util.load_installed_datatype_converters( trans.app, installed_repository_dict, deactivate=True )
- if installed_repository_dict and 'display_path' in installed_repository_dict:
- datatype_util.load_installed_display_applications( trans.app, installed_repository_dict, deactivate=True )
+ if installed_repository_dict:
+ converter_path = installed_repository_dict.get( 'converter_path' )
+ if converter_path is not None:
+ datatype_util.load_installed_datatype_converters( trans.app, installed_repository_dict, deactivate=True )
+ display_path = installed_repository_dict.get( 'display_path' )
+ if display_path is not None:
+ datatype_util.load_installed_display_applications( trans.app, installed_repository_dict, deactivate=True )
if remove_from_disk_checked:
try:
# Remove the repository from disk.
diff -r 6997fa059319b288877cf0728ac20fbb57e80dee -r 8ae8e19ea0762af833558f75b84fe19840c6e783 lib/tool_shed/util/common_install_util.py
--- a/lib/tool_shed/util/common_install_util.py
+++ b/lib/tool_shed/util/common_install_util.py
@@ -61,10 +61,13 @@
repository_install_dir = os.path.abspath( relative_install_dir )
# Activate proprietary datatypes.
installed_repository_dict = datatype_util.load_installed_datatypes( trans.app, repository, repository_install_dir, deactivate=False )
- if installed_repository_dict and 'converter_path' in installed_repository_dict:
- datatype_util.load_installed_datatype_converters( trans.app, installed_repository_dict, deactivate=False )
- if installed_repository_dict and 'display_path' in installed_repository_dict:
- datatype_util.load_installed_display_applications( trans.app, installed_repository_dict, deactivate=False )
+ if installed_repository_dict:
+ converter_path = installed_repository_dict.get( 'converter_path' )
+ if converter_path is not None:
+ datatype_util.load_installed_datatype_converters( trans.app, installed_repository_dict, deactivate=False )
+ display_path = installed_repository_dict.get( 'display_path' )
+ if display_path is not None:
+ datatype_util.load_installed_display_applications( trans.app, installed_repository_dict, deactivate=False )
def get_dependencies_for_repository( trans, tool_shed_url, repo_info_dict, includes_tool_dependencies ):
"""
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.