1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/f50a5a4391ee/
Changeset: f50a5a4391ee
User: greg
Date: 2014-02-12 16:17:38
Summary: Fixes for repositories in the tool shed whose repository tip changeset has no files (because they were all deleted) but that have valid tools and other utilities in previous changeset revisions.
Affected #: 2 files
diff -r b8d2ef92592257a76abeba73630e71b44a708fa8 -r f50a5a4391ee589b55516c6d0fc94a9805840900 lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -2312,6 +2312,7 @@
revision_label = suc.get_revision_label( trans, repository, previous_changeset_revision, include_date=False )
metadata = repository_metadata.metadata
is_malicious = repository_metadata.malicious
+ changeset_revision = previous_changeset_revision
if repository_metadata:
skip_tool_test = repository_metadata.skip_tool_tests
if skip_tool_test:
@@ -2719,10 +2720,15 @@
status = "error"
repository_type_select_field = rt_util.build_repository_type_select_field( trans, repository=repository )
changeset_revision = repository.tip( trans.app )
+ metadata = metadata_util.get_repository_metadata_by_repository_id_changeset_revision( trans,
+ id,
+ changeset_revision,
+ metadata_only=True )
return trans.fill_template( '/webapps/tool_shed/repository/browse_repository.mako',
repo=repo,
repository=repository,
changeset_revision=changeset_revision,
+ metadata=metadata,
commit_message=commit_message,
repository_type_select_field=repository_type_select_field,
message=message,
diff -r b8d2ef92592257a76abeba73630e71b44a708fa8 -r f50a5a4391ee589b55516c6d0fc94a9805840900 templates/webapps/tool_shed/common/repository_actions_menu.mako
--- a/templates/webapps/tool_shed/common/repository_actions_menu.mako
+++ b/templates/webapps/tool_shed/common/repository_actions_menu.mako
@@ -74,7 +74,7 @@
else:
can_rate = False
- if changeset_revision is not None:
+ if metadata is not None and changeset_revision is not None:
if has_metadata and not is_deprecated and trans.app.security_agent.user_can_review_repositories( trans.user ):
can_review_repository = True
else:
@@ -177,7 +177,7 @@
<a class="action-button" target="galaxy_main" href="${h.url_for( controller='repository', action='manage_repository_admins', id=trans.security.encode_id( repository.id ) )}">Manage repository administrators</a>
%endif
%if can_download:
- %if changeset_revision is not None:
+ %if metadata is not None and changeset_revision is not None:
<a class="action-button" href="${h.url_for( controller='repository', action='export', repository_id=trans.app.security.encode_id( repository.id ), changeset_revision=changeset_revision )}">Export this revision</a>
%endif
<a class="action-button" href="${h.url_for( controller='repository', action='download', repository_id=trans.app.security.encode_id( repository.id ), changeset_revision=repository.tip( trans.app ), file_type='gz' )}">Download as a .tar.gz file</a>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this email because you have the commit notification service enabled
for this repository.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e3a4d4d813fd/
Changeset: e3a4d4d813fd
Branch: stable
User: greg
Date: 2014-02-11 22:41:03
Summary: Fixes for the tool shed's install and test framework.
Affected #: 5 files
diff -r bb744d8d3d6a74872a7b42b511d087c16b1ee8b3 -r e3a4d4d813fdb8a34cfd6d596e0f4bbdb2d9e211 install_and_test_tool_shed_repositories.sh
--- a/install_and_test_tool_shed_repositories.sh
+++ b/install_and_test_tool_shed_repositories.sh
@@ -56,6 +56,10 @@
mkdir -p $GALAXY_INSTALL_TEST_SHED_TOOL_PATH
fi
+if [ ! -d $GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR ] ; then
+ mkdir -p $GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR
+fi
+
test_tool_dependency_definitions () {
# Test installation of repositories of type tool_dependency_definition.
if [ -f $GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR/stage_1_complete ] ; then
@@ -102,6 +106,7 @@
;;
# Use "-w repositories_with_tools" parameter when you want to test repositories that contain tools.
repositories_with_tools)
+ touch $GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR/stage_1_complete
test_repositories_with_tools
;;
# No received parameters or any received parameter not in [ tool_dependency_definitions, repositories_with_tools ]
diff -r bb744d8d3d6a74872a7b42b511d087c16b1ee8b3 -r e3a4d4d813fdb8a34cfd6d596e0f4bbdb2d9e211 test/install_and_test_tool_shed_repositories/base/twilltestcase.py
--- a/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
+++ b/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
@@ -156,3 +156,22 @@
( timeout_counter, repository.status ) )
break
time.sleep( 1 )
+ # Set all metadata on each installed repository.
+ for repository_id in repository_ids:
+ galaxy_repository = test_db_util.get_repository( self.security.decode_id( repository_id ) )
+ if not galaxy_repository.metadata:
+ log.debug( 'Setting metadata on repository %s' % str( galaxy_repository.name ) )
+ timeout_counter = 0
+ url = '/admin_toolshed/reset_repository_metadata?id=%s' % repository_id
+ self.visit_url( url )
+ while not galaxy_repository.metadata:
+ test_db_util.refresh( galaxy_repository )
+ timeout_counter = timeout_counter + 1
+ if timeout_counter % 10 == 0:
+ log.debug( 'Waited %d seconds for repository %s.' % ( timeout_counter, str( galaxy_repository.name ) ) )
+ # This timeout currently defaults to 10 minutes.
+ if timeout_counter > repository_installation_timeout:
+ raise AssertionError( 'Repository installation timed out after %d seconds, repository state is %s.' % \
+ ( timeout_counter, repository.status ) )
+ break
+ time.sleep( 1 )
diff -r bb744d8d3d6a74872a7b42b511d087c16b1ee8b3 -r e3a4d4d813fdb8a34cfd6d596e0f4bbdb2d9e211 test/install_and_test_tool_shed_repositories/base/util.py
--- a/test/install_and_test_tool_shed_repositories/base/util.py
+++ b/test/install_and_test_tool_shed_repositories/base/util.py
@@ -18,6 +18,7 @@
import install_and_test_tool_shed_repositories.functional.test_install_repositories as test_install_repositories
import nose
import platform
+import string
import time
import tool_shed.repository_types.util as rt_util
import tool_shed.util.shed_util_common as suc
@@ -35,7 +36,7 @@
from galaxy.util import asbool
from galaxy.util import listify
from galaxy.util import unicodify
-from galaxy.util.json import from_json_string
+from galaxy.util.json import to_json_string
import galaxy.webapps.tool_shed.model.mapping
from nose.plugins import Plugin
@@ -614,7 +615,7 @@
repository = get_repository( name, owner, changeset_revision )
if repository is None:
error_message = 'Error getting revision %s of repository %s owned by %s: %s' % ( changeset_revision, name, owner, str( e ) )
- log.exception( error_message )
+ log.exception( error_message )
return repository, error_message
def is_excluded( exclude_list_dicts, name, owner, changeset_revision, encoded_repository_metadata_id ):
@@ -981,6 +982,22 @@
print 'tool_shed: %s name: %s owner: %s changeset_revision: %s' % \
( cleaned_tool_shed_url, name, owner, changeset_revision )
+def populate_shed_conf_file( shed_conf_file, tool_path, xml_elems=None ):
+ """Populate the file defined by shed_conf_file with xml_elems or initialize it with a template string."""
+ if xml_elems is None:
+ tool_conf_template_parser = string.Template( shed_tool_conf_xml_template )
+ xml_elems = tool_conf_template_parser.safe_substitute( shed_tool_path=tool_path )
+ file( shed_conf_file, 'w' ).write( xml_elems )
+
+def populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file, shed_tools_dict=None ):
+ """
+ Populate the file defined by galaxy_shed_tools_dict_file with the contents of the shed_tools_dict
+ dictionary.
+ """
+ if shed_tools_dict is None:
+ shed_tools_dict = {}
+ file( galaxy_shed_tools_dict_file, 'w' ).write( to_json_string( shed_tools_dict ) )
+
def print_install_and_test_results( install_stage_type, install_and_test_statistics_dict, error_message ):
"Print statistics for the current test run."
if error_message:
diff -r bb744d8d3d6a74872a7b42b511d087c16b1ee8b3 -r e3a4d4d813fdb8a34cfd6d596e0f4bbdb2d9e211 test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -33,7 +33,6 @@
import re
import shutil
import socket
-import string
import tempfile
import time
import threading
@@ -44,7 +43,6 @@
from galaxy.app import UniverseApplication
from galaxy.util.json import from_json_string
-from galaxy.util.json import to_json_string
from galaxy.util import unicodify
from galaxy.web import buildapp
from functional_tests import generate_config_file
@@ -134,7 +132,7 @@
tool_id = parts[ -2 ]
return tool_id, tool_version
-def install_and_test_repositories( app, galaxy_shed_tools_dict, galaxy_shed_tool_conf_file ):
+def install_and_test_repositories( app, galaxy_shed_tools_dict_file, galaxy_shed_tool_conf_file, galaxy_shed_tool_path ):
# We must make sure that functional.test_toolbox is always imported after database_contexts.galaxy_content
# is set (which occurs in the main method before this method is called). If functional.test_toolbox is
# imported before database_contexts.galaxy_content is set, sa_session will be None in all methods that use it.
@@ -166,6 +164,8 @@
# The traceback and captured output of the tool that was run will be recored for test failures. After all tests have
# completed, the repository is uninstalled, so test cases don't interfere with the next repository's functional tests.
for repository_dict in repositories_to_install:
+ # Re-initialize the received galaxy_shed_tool_conf_file to be an empty shed_tool_conf.xml.
+ install_and_test_base_util.populate_shed_conf_file( galaxy_shed_tool_conf_file, galaxy_shed_tool_path, xml_elems=None )
encoded_repository_metadata_id = repository_dict.get( 'id', None )
# Add the URL for the tool shed we're installing from, so the automated installation methods go to the right place.
repository_dict[ 'tool_shed_url' ] = install_and_test_base_util.galaxy_tool_shed_url
@@ -210,6 +210,7 @@
repository, error_message = install_and_test_base_util.install_repository( app, repository_dict )
install_and_test_statistics_dict[ 'total_repositories_processed' ] += 1
if error_message:
+ remove_tests( app, repository )
# The repository installation failed.
print 'Installation failed for revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
processed_repositories_with_installation_error = \
@@ -247,6 +248,12 @@
install_and_test_statistics_dict,
tool_test_results_dict )
if params.get( 'test_install_error', False ):
+ # We cannot run functional tests for contained tools due to dependency installation errors.
+ remove_tests( app, repository )
+ can_run_functional_tests = False
+ print 'Cannot execute tests for tools in revision %s of repository %s owned by %s ' % \
+ ( changeset_revision, name, owner )
+ print 'because one or more dependencies has installation errors.'
# The repository was successfully installed, but one or more dependencies had installation errors,
# so we'll populate the test result containers since we cannot execute any tests.
install_and_test_base_util.save_test_results_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
@@ -255,12 +262,6 @@
repository_dict,
params,
can_update_tool_shed )
- # We cannot run functional tests for contained tools due to dependency installation errors.
- print 'Cannot execute tests for tools in revision %s of repository %s owned by %s ' % \
- ( changeset_revision, name, owner )
- print 'because one or more dependencies has installation errors.'
- can_run_functional_tests = False
- remove_tests( app )
# Populate the installation containers (success or error) for the repository's immediate repository
# dependencies whose containers are not yet populated.
install_and_test_base_util.populate_install_containers_for_repository_dependencies( app,
@@ -280,32 +281,30 @@
install_and_test_base_util.get_missing_tool_dependencies( repository,
all_missing_tool_dependencies=None )
print 'Missing tool dependencies:\n%s' % str( missing_tool_dependencies )
- if missing_repository_dependencies or missing_tool_dependencies:
+ if missing_repository_dependencies or missing_tool_dependencies:
+ # The repository was installed successfully, but one or more dependencies had installation errors. Since
+ # we cannot test the tools due to these errors, we'll remove tests and tools were created during the repository
+ # installation process so nose will not discover them and attempt to execute them.
+ remove_tests( app, repository )
print 'Cannot execute tests for tools in revision %s of repository %s owned by %s ' % \
( changeset_revision, name, owner )
print 'because one or more dependencies has installation errors.'
- # The repository was installed successfully, but one or more dependencies had installation errors. Since
- # we cannot test the tools due to these errors, we'll remove tests and tools were created during the repository
- # installation process so nose will not discover them and attempt to execute them.
- remove_tests( app )
else:
print 'Revision %s of repository %s owned by %s installed successfully, so running tool tests.' % \
( changeset_revision, name, owner )
# Generate the shed_tools_dict that specifies the location of test data contained within this repository.
# and configure and run functional tests for this repository. This is equivalent to
# sh run_functional_tests.sh -installed
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
+ install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file,
+ shed_tools_dict=None )
# Find the path to the test-data directory within the installed repository.
has_test_data, shed_tools_dict = \
parse_tool_panel_config( galaxy_shed_tool_conf_file,
- from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
+ from_json_string( file( galaxy_shed_tools_dict_file, 'r' ).read() ) )
# If the repository has a test-data directory we write the generated shed_tools_dict to a temporary
# file so the functional test framework can find it.
- # TODO: Eliminate the need for this shed_tools_dict since it grows large over the course of each test run.
- # If it cannot be eliminated altogether, reinitialize it with each new repository install so at this point
- # it contains only entries for the current repository dependency hierarchy being tested.
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
- print 'Saved generated shed_tools_dict to %s\nContents: %s' % ( galaxy_shed_tools_dict, shed_tools_dict )
+ install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file,
+ shed_tools_dict=shed_tools_dict )
try:
install_and_test_statistics_dict = test_repository_tools( app,
repository,
@@ -314,6 +313,7 @@
tool_test_results_dict,
install_and_test_statistics_dict )
except Exception, e:
+ remove_tests( app, repository )
exception_message = 'Error executing tests for repository %s: %s' % ( name, str( e ) )
log.exception( exception_message )
tool_test_results_dict[ 'failed_tests' ].append( exception_message )
@@ -330,6 +330,7 @@
params,
can_update_tool_shed )
else:
+ remove_tests( app, repository )
print 'Skipped attempt to install revision %s of repository %s owned by %s because ' % \
( changeset_revision, name, owner )
print 'it was previously installed and currently has status %s' % str( repository.status )
@@ -379,12 +380,12 @@
os.path.join( galaxy_test_tmp_dir, 'test_migrated_tool_conf.xml' ) )
galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) )
- galaxy_shed_tools_dict = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE',
- os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
+ galaxy_shed_tools_dict_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE',
+ os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
+ install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file, shed_tools_dict=None )
# Set the GALAXY_TOOL_SHED_TEST_FILE environment variable to the path of the shed_tools_dict file so that
# test.base.twilltestcase.setUp will find and parse it properly.
- os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict
+ os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict_file
if 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' in os.environ:
tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' )
else:
@@ -430,13 +431,10 @@
if 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF' not in os.environ:
file( galaxy_tool_sheds_conf_file, 'w' ).write( install_and_test_base_util.tool_sheds_conf_xml )
# Generate the shed_tool_conf.xml file.
- tool_conf_template_parser = string.Template( install_and_test_base_util.shed_tool_conf_xml_template )
- shed_tool_conf_xml = tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_shed_tool_path )
- file( galaxy_shed_tool_conf_file, 'w' ).write( shed_tool_conf_xml )
+ install_and_test_base_util.populate_shed_conf_file( galaxy_shed_tool_conf_file, galaxy_shed_tool_path, xml_elems=None )
os.environ[ 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF' ] = galaxy_shed_tool_conf_file
# Generate the migrated_tool_conf.xml file.
- migrated_tool_conf_xml = tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_migrated_tool_path )
- file( galaxy_migrated_tool_conf_file, 'w' ).write( migrated_tool_conf_xml )
+ install_and_test_base_util.populate_shed_conf_file( galaxy_migrated_tool_conf_file, galaxy_migrated_tool_path, xml_elems=None )
# Write the embedded web application's specific configuration to a temporary file. This is necessary in order for
# the external metadata script to find the right datasets.
kwargs = dict( admin_users = 'test(a)bx.psu.edu',
@@ -548,8 +546,9 @@
print "# This run will not update the Tool Shed database."
print "####################################################################################"
install_and_test_statistics_dict, error_message = install_and_test_repositories( app,
- galaxy_shed_tools_dict,
- galaxy_shed_tool_conf_file )
+ galaxy_shed_tools_dict_file,
+ galaxy_shed_tool_conf_file,
+ galaxy_shed_tool_path )
try:
install_and_test_base_util.print_install_and_test_results( 'repositories with tools',
install_and_test_statistics_dict,
@@ -583,12 +582,17 @@
# Return a "successful" response to buildbot.
return 0
-def remove_tests( app ):
+def remove_tests( app, repository ):
"""
- Delete any configured tool functional tests from the test_toolbox.__dict__, otherwise nose will find them
- and try to re-run the tests after uninstalling the repository, which will cause false failure reports,
- since the test data has been deleted from disk by now.
+ Delete any configured tool functional tests from the test_toolbox.__dict__.
"""
+ print '\n-------------------------------------------------------------\n'
+ if repository:
+ print 'Removing tests and tool ids for revision %s of repository %s owned by %s' % \
+ ( str( repository.changeset_revision ), str( repository.name ), str( repository.owner ) )
+ else:
+ print 'Removing tests and tool ids when received repository is None...'
+ print 'app.toolbox.tools_by_id.keys():\n%s\n' % str( app.toolbox.tools_by_id.keys() )
tests_to_delete = []
tools_to_delete_by_id = []
for key in test_toolbox.__dict__:
@@ -608,15 +612,31 @@
reset_spaces_tool_id = tool_id.replace( '_', ' ' )
if reset_spaces_app_tool_id == reset_spaces_tool_id:
print 'Setting tool id %s for deletion from app.toolbox[ tools_by_id ].' % str( app_tool_id )
- tools_to_delete_by_id.append( app_tool_id )
+ tools_to_delete_by_id.append( app_tool_id )
+ if repository:
+ metadata = repository.metadata
+ print 'metadata:\n%s\n' % str( metadata )
+ if metadata:
+ tools = metadata.get( 'tools', [] )
+ print 'tools:\n%s\n' % str( tools )
+ for tool_dict in tools:
+ print 'tool_dict:\n%s\n' % str( tool_dict )
+ guid = tool_dict.get( 'guid', None )
+ print 'guid:\n%s\n' % str( guid )
+ if guid:
+ if guid in app.toolbox.tools_by_id:
+ print 'Setting tool id %s for deletion from app.toolbox[ tools_by_id ].' % str( guid )
+ tools_to_delete_by_id.append( guid )
# Delete the discovered twill-generated tests.
for key in tests_to_delete:
if key in test_toolbox.__dict__:
print 'Deleting test %s from test_toolbox.' % str( key )
del test_toolbox.__dict__[ key ]
for tool_id in tools_to_delete_by_id:
- print 'Deleting tool id %s from app.toolbox[ tools_by_id ].' % str( tool_id )
- del app.toolbox.tools_by_id[ tool_id ]
+ if tool_id in app.toolbox.tools_by_id:
+ print 'Deleting tool id %s from app.toolbox[ tools_by_id ].' % str( tool_id )
+ del app.toolbox.tools_by_id[ tool_id ]
+ print '\n-------------------------------------------------------------\n'
def test_repository_tools( app, repository, repository_dict, tool_test_results_dicts, tool_test_results_dict,
install_and_test_statistics_dict ):
@@ -700,7 +720,7 @@
can_update_tool_shed )
# Remove the just-executed tests so twill will not find and re-test them along with the tools
# contained in the next repository.
- remove_tests( app )
+ remove_tests( app, repository )
return install_and_test_statistics_dict
if __name__ == "__main__":
diff -r bb744d8d3d6a74872a7b42b511d087c16b1ee8b3 -r e3a4d4d813fdb8a34cfd6d596e0f4bbdb2d9e211 test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
@@ -33,7 +33,6 @@
import re
import shutil
import socket
-import string
import tempfile
import time
import threading
@@ -43,8 +42,6 @@
from base.tool_shed_util import parse_tool_panel_config
from galaxy.app import UniverseApplication
-from galaxy.util.json import from_json_string
-from galaxy.util.json import to_json_string
from galaxy.util import unicodify
from galaxy.web import buildapp
from functional_tests import generate_config_file
@@ -75,7 +72,7 @@
test_framework = install_and_test_base_util.TOOL_DEPENDENCY_DEFINITIONS
-def install_and_test_repositories( app, galaxy_shed_tools_dict, galaxy_shed_tool_conf_file ):
+def install_and_test_repositories( app, galaxy_shed_tools_dict_file, galaxy_shed_tool_conf_file, galaxy_shed_tool_path ):
# Initialize a dictionary for the summary that will be printed to stdout.
install_and_test_statistics_dict = install_and_test_base_util.initialize_install_and_test_statistics_dict()
error_message = ''
@@ -234,12 +231,13 @@
os.path.join( galaxy_test_tmp_dir, 'test_migrated_tool_conf.xml' ) )
galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) )
- galaxy_shed_tools_dict = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE',
- os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
+ galaxy_shed_tools_dict_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE',
+ os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
+ install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file,
+ shed_tools_dict=None )
# Set the GALAXY_TOOL_SHED_TEST_FILE environment variable to the path of the shed_tools_dict file so that
# test.base.twilltestcase.setUp will find and parse it properly.
- os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict
+ os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict_file
if 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' in os.environ:
tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' )
else:
@@ -285,13 +283,10 @@
if 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF' not in os.environ:
file( galaxy_tool_sheds_conf_file, 'w' ).write( install_and_test_base_util.tool_sheds_conf_xml )
# Generate the shed_tool_conf.xml file.
- tool_conf_template_parser = string.Template( install_and_test_base_util.shed_tool_conf_xml_template )
- shed_tool_conf_xml = tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_shed_tool_path )
- file( galaxy_shed_tool_conf_file, 'w' ).write( shed_tool_conf_xml )
+ install_and_test_base_util.populate_shed_conf_file( galaxy_shed_tool_conf_file, galaxy_shed_tool_path, xml_elems=None )
os.environ[ 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF' ] = galaxy_shed_tool_conf_file
# Generate the migrated_tool_conf.xml file.
- migrated_tool_conf_xml = tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_migrated_tool_path )
- file( galaxy_migrated_tool_conf_file, 'w' ).write( migrated_tool_conf_xml )
+ install_and_test_base_util.populate_shed_conf_file( galaxy_migrated_tool_conf_file, galaxy_migrated_tool_path, xml_elems=None )
# Write the embedded web application's specific configuration to a temporary file. This is necessary in order for
# the external metadata script to find the right datasets.
kwargs = dict( admin_users = 'test(a)bx.psu.edu',
@@ -402,8 +397,9 @@
print "# This run will not update the Tool Shed database."
print "####################################################################################"
install_and_test_statistics_dict, error_message = install_and_test_repositories( app,
- galaxy_shed_tools_dict,
- galaxy_shed_tool_conf_file )
+ galaxy_shed_tools_dict_file,
+ galaxy_shed_tool_conf_file,
+ galaxy_shed_tool_path )
try:
install_and_test_base_util.print_install_and_test_results( 'tool dependency definitions',
install_and_test_statistics_dict,
https://bitbucket.org/galaxy/galaxy-central/commits/b8d2ef925922/
Changeset: b8d2ef925922
User: inithello
Date: 2014-02-11 22:42:24
Summary: Merge with stable.
Affected #: 0 files
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this email because you have the commit notification service enabled
for this repository.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e9878f33a333/
Changeset: e9878f33a333
User: greg
Date: 2014-02-11 22:41:03
Summary: Fixes for the tool shed's install and test framework.
Affected #: 5 files
diff -r 694411e94d9aa26c9b9a2c13567b2b5e07f74580 -r e9878f33a33333f211a16d2d399f528c859a6f77 install_and_test_tool_shed_repositories.sh
--- a/install_and_test_tool_shed_repositories.sh
+++ b/install_and_test_tool_shed_repositories.sh
@@ -56,6 +56,10 @@
mkdir -p $GALAXY_INSTALL_TEST_SHED_TOOL_PATH
fi
+if [ ! -d $GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR ] ; then
+ mkdir -p $GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR
+fi
+
test_tool_dependency_definitions () {
# Test installation of repositories of type tool_dependency_definition.
if [ -f $GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR/stage_1_complete ] ; then
@@ -102,6 +106,7 @@
;;
# Use "-w repositories_with_tools" parameter when you want to test repositories that contain tools.
repositories_with_tools)
+ touch $GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR/stage_1_complete
test_repositories_with_tools
;;
# No received parameters or any received parameter not in [ tool_dependency_definitions, repositories_with_tools ]
diff -r 694411e94d9aa26c9b9a2c13567b2b5e07f74580 -r e9878f33a33333f211a16d2d399f528c859a6f77 test/install_and_test_tool_shed_repositories/base/twilltestcase.py
--- a/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
+++ b/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
@@ -156,3 +156,22 @@
( timeout_counter, repository.status ) )
break
time.sleep( 1 )
+ # Set all metadata on each installed repository.
+ for repository_id in repository_ids:
+ galaxy_repository = test_db_util.get_repository( self.security.decode_id( repository_id ) )
+ if not galaxy_repository.metadata:
+ log.debug( 'Setting metadata on repository %s' % str( galaxy_repository.name ) )
+ timeout_counter = 0
+ url = '/admin_toolshed/reset_repository_metadata?id=%s' % repository_id
+ self.visit_url( url )
+ while not galaxy_repository.metadata:
+ test_db_util.refresh( galaxy_repository )
+ timeout_counter = timeout_counter + 1
+ if timeout_counter % 10 == 0:
+ log.debug( 'Waited %d seconds for repository %s.' % ( timeout_counter, str( galaxy_repository.name ) ) )
+ # This timeout currently defaults to 10 minutes.
+ if timeout_counter > repository_installation_timeout:
+ raise AssertionError( 'Repository installation timed out after %d seconds, repository state is %s.' % \
+ ( timeout_counter, repository.status ) )
+ break
+ time.sleep( 1 )
diff -r 694411e94d9aa26c9b9a2c13567b2b5e07f74580 -r e9878f33a33333f211a16d2d399f528c859a6f77 test/install_and_test_tool_shed_repositories/base/util.py
--- a/test/install_and_test_tool_shed_repositories/base/util.py
+++ b/test/install_and_test_tool_shed_repositories/base/util.py
@@ -18,6 +18,7 @@
import install_and_test_tool_shed_repositories.functional.test_install_repositories as test_install_repositories
import nose
import platform
+import string
import time
import tool_shed.repository_types.util as rt_util
import tool_shed.util.shed_util_common as suc
@@ -35,7 +36,7 @@
from galaxy.util import asbool
from galaxy.util import listify
from galaxy.util import unicodify
-from galaxy.util.json import from_json_string
+from galaxy.util.json import to_json_string
import galaxy.webapps.tool_shed.model.mapping
from nose.plugins import Plugin
@@ -614,7 +615,7 @@
repository = get_repository( name, owner, changeset_revision )
if repository is None:
error_message = 'Error getting revision %s of repository %s owned by %s: %s' % ( changeset_revision, name, owner, str( e ) )
- log.exception( error_message )
+ log.exception( error_message )
return repository, error_message
def is_excluded( exclude_list_dicts, name, owner, changeset_revision, encoded_repository_metadata_id ):
@@ -981,6 +982,22 @@
print 'tool_shed: %s name: %s owner: %s changeset_revision: %s' % \
( cleaned_tool_shed_url, name, owner, changeset_revision )
+def populate_shed_conf_file( shed_conf_file, tool_path, xml_elems=None ):
+ """Populate the file defined by shed_conf_file with xml_elems or initialize it with a template string."""
+ if xml_elems is None:
+ tool_conf_template_parser = string.Template( shed_tool_conf_xml_template )
+ xml_elems = tool_conf_template_parser.safe_substitute( shed_tool_path=tool_path )
+ file( shed_conf_file, 'w' ).write( xml_elems )
+
+def populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file, shed_tools_dict=None ):
+ """
+ Populate the file defined by galaxy_shed_tools_dict_file with the contents of the shed_tools_dict
+ dictionary.
+ """
+ if shed_tools_dict is None:
+ shed_tools_dict = {}
+ file( galaxy_shed_tools_dict_file, 'w' ).write( to_json_string( shed_tools_dict ) )
+
def print_install_and_test_results( install_stage_type, install_and_test_statistics_dict, error_message ):
"Print statistics for the current test run."
if error_message:
diff -r 694411e94d9aa26c9b9a2c13567b2b5e07f74580 -r e9878f33a33333f211a16d2d399f528c859a6f77 test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -33,7 +33,6 @@
import re
import shutil
import socket
-import string
import tempfile
import time
import threading
@@ -44,7 +43,6 @@
from galaxy.app import UniverseApplication
from galaxy.util.json import from_json_string
-from galaxy.util.json import to_json_string
from galaxy.util import unicodify
from galaxy.web import buildapp
from functional_tests import generate_config_file
@@ -134,7 +132,7 @@
tool_id = parts[ -2 ]
return tool_id, tool_version
-def install_and_test_repositories( app, galaxy_shed_tools_dict, galaxy_shed_tool_conf_file ):
+def install_and_test_repositories( app, galaxy_shed_tools_dict_file, galaxy_shed_tool_conf_file, galaxy_shed_tool_path ):
# We must make sure that functional.test_toolbox is always imported after database_contexts.galaxy_content
# is set (which occurs in the main method before this method is called). If functional.test_toolbox is
# imported before database_contexts.galaxy_content is set, sa_session will be None in all methods that use it.
@@ -166,6 +164,8 @@
     # The traceback and captured output of the tool that was run will be recorded for test failures.  After all tests have
# completed, the repository is uninstalled, so test cases don't interfere with the next repository's functional tests.
for repository_dict in repositories_to_install:
+ # Re-initialize the received galaxy_shed_tool_conf_file to be an empty shed_tool_conf.xml.
+ install_and_test_base_util.populate_shed_conf_file( galaxy_shed_tool_conf_file, galaxy_shed_tool_path, xml_elems=None )
encoded_repository_metadata_id = repository_dict.get( 'id', None )
# Add the URL for the tool shed we're installing from, so the automated installation methods go to the right place.
repository_dict[ 'tool_shed_url' ] = install_and_test_base_util.galaxy_tool_shed_url
@@ -210,6 +210,7 @@
repository, error_message = install_and_test_base_util.install_repository( app, repository_dict )
install_and_test_statistics_dict[ 'total_repositories_processed' ] += 1
if error_message:
+ remove_tests( app, repository )
# The repository installation failed.
print 'Installation failed for revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
processed_repositories_with_installation_error = \
@@ -247,6 +248,12 @@
install_and_test_statistics_dict,
tool_test_results_dict )
if params.get( 'test_install_error', False ):
+ # We cannot run functional tests for contained tools due to dependency installation errors.
+ remove_tests( app, repository )
+ can_run_functional_tests = False
+ print 'Cannot execute tests for tools in revision %s of repository %s owned by %s ' % \
+ ( changeset_revision, name, owner )
+ print 'because one or more dependencies has installation errors.'
# The repository was successfully installed, but one or more dependencies had installation errors,
# so we'll populate the test result containers since we cannot execute any tests.
install_and_test_base_util.save_test_results_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
@@ -255,12 +262,6 @@
repository_dict,
params,
can_update_tool_shed )
- # We cannot run functional tests for contained tools due to dependency installation errors.
- print 'Cannot execute tests for tools in revision %s of repository %s owned by %s ' % \
- ( changeset_revision, name, owner )
- print 'because one or more dependencies has installation errors.'
- can_run_functional_tests = False
- remove_tests( app )
# Populate the installation containers (success or error) for the repository's immediate repository
# dependencies whose containers are not yet populated.
install_and_test_base_util.populate_install_containers_for_repository_dependencies( app,
@@ -280,32 +281,30 @@
install_and_test_base_util.get_missing_tool_dependencies( repository,
all_missing_tool_dependencies=None )
print 'Missing tool dependencies:\n%s' % str( missing_tool_dependencies )
- if missing_repository_dependencies or missing_tool_dependencies:
+ if missing_repository_dependencies or missing_tool_dependencies:
+ # The repository was installed successfully, but one or more dependencies had installation errors. Since
+ # we cannot test the tools due to these errors, we'll remove tests and tools that were created during the repository
+ # installation process so nose will not discover them and attempt to execute them.
+ remove_tests( app, repository )
print 'Cannot execute tests for tools in revision %s of repository %s owned by %s ' % \
( changeset_revision, name, owner )
print 'because one or more dependencies has installation errors.'
- # The repository was installed successfully, but one or more dependencies had installation errors. Since
- # we cannot test the tools due to these errors, we'll remove tests and tools were created during the repository
- # installation process so nose will not discover them and attempt to execute them.
- remove_tests( app )
else:
print 'Revision %s of repository %s owned by %s installed successfully, so running tool tests.' % \
( changeset_revision, name, owner )
# Generate the shed_tools_dict that specifies the location of test data contained within this repository.
# and configure and run functional tests for this repository. This is equivalent to
# sh run_functional_tests.sh -installed
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
+ install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file,
+ shed_tools_dict=None )
# Find the path to the test-data directory within the installed repository.
has_test_data, shed_tools_dict = \
parse_tool_panel_config( galaxy_shed_tool_conf_file,
- from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
+ from_json_string( file( galaxy_shed_tools_dict_file, 'r' ).read() ) )
# If the repository has a test-data directory we write the generated shed_tools_dict to a temporary
# file so the functional test framework can find it.
- # TODO: Eliminate the need for this shed_tools_dict since it grows large over the course of each test run.
- # If it cannot be eliminated altogether, reinitialize it with each new repository install so at this point
- # it contains only entries for the current repository dependency hierarchy being tested.
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
- print 'Saved generated shed_tools_dict to %s\nContents: %s' % ( galaxy_shed_tools_dict, shed_tools_dict )
+ install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file,
+ shed_tools_dict=shed_tools_dict )
try:
install_and_test_statistics_dict = test_repository_tools( app,
repository,
@@ -314,6 +313,7 @@
tool_test_results_dict,
install_and_test_statistics_dict )
except Exception, e:
+ remove_tests( app, repository )
exception_message = 'Error executing tests for repository %s: %s' % ( name, str( e ) )
log.exception( exception_message )
tool_test_results_dict[ 'failed_tests' ].append( exception_message )
@@ -330,6 +330,7 @@
params,
can_update_tool_shed )
else:
+ remove_tests( app, repository )
print 'Skipped attempt to install revision %s of repository %s owned by %s because ' % \
( changeset_revision, name, owner )
print 'it was previously installed and currently has status %s' % str( repository.status )
@@ -379,12 +380,12 @@
os.path.join( galaxy_test_tmp_dir, 'test_migrated_tool_conf.xml' ) )
galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) )
- galaxy_shed_tools_dict = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE',
- os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
+ galaxy_shed_tools_dict_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE',
+ os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
+ install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file, shed_tools_dict=None )
# Set the GALAXY_TOOL_SHED_TEST_FILE environment variable to the path of the shed_tools_dict file so that
# test.base.twilltestcase.setUp will find and parse it properly.
- os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict
+ os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict_file
if 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' in os.environ:
tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' )
else:
@@ -430,13 +431,10 @@
if 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF' not in os.environ:
file( galaxy_tool_sheds_conf_file, 'w' ).write( install_and_test_base_util.tool_sheds_conf_xml )
# Generate the shed_tool_conf.xml file.
- tool_conf_template_parser = string.Template( install_and_test_base_util.shed_tool_conf_xml_template )
- shed_tool_conf_xml = tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_shed_tool_path )
- file( galaxy_shed_tool_conf_file, 'w' ).write( shed_tool_conf_xml )
+ install_and_test_base_util.populate_shed_conf_file( galaxy_shed_tool_conf_file, galaxy_shed_tool_path, xml_elems=None )
os.environ[ 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF' ] = galaxy_shed_tool_conf_file
# Generate the migrated_tool_conf.xml file.
- migrated_tool_conf_xml = tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_migrated_tool_path )
- file( galaxy_migrated_tool_conf_file, 'w' ).write( migrated_tool_conf_xml )
+ install_and_test_base_util.populate_shed_conf_file( galaxy_migrated_tool_conf_file, galaxy_migrated_tool_path, xml_elems=None )
# Write the embedded web application's specific configuration to a temporary file. This is necessary in order for
# the external metadata script to find the right datasets.
kwargs = dict( admin_users = 'test(a)bx.psu.edu',
@@ -548,8 +546,9 @@
print "# This run will not update the Tool Shed database."
print "####################################################################################"
install_and_test_statistics_dict, error_message = install_and_test_repositories( app,
- galaxy_shed_tools_dict,
- galaxy_shed_tool_conf_file )
+ galaxy_shed_tools_dict_file,
+ galaxy_shed_tool_conf_file,
+ galaxy_shed_tool_path )
try:
install_and_test_base_util.print_install_and_test_results( 'repositories with tools',
install_and_test_statistics_dict,
@@ -583,12 +582,17 @@
# Return a "successful" response to buildbot.
return 0
-def remove_tests( app ):
+def remove_tests( app, repository ):
"""
- Delete any configured tool functional tests from the test_toolbox.__dict__, otherwise nose will find them
- and try to re-run the tests after uninstalling the repository, which will cause false failure reports,
- since the test data has been deleted from disk by now.
+ Delete any configured tool functional tests from the test_toolbox.__dict__.
"""
+ print '\n-------------------------------------------------------------\n'
+ if repository:
+ print 'Removing tests and tool ids for revision %s of repository %s owned by %s' % \
+ ( str( repository.changeset_revision ), str( repository.name ), str( repository.owner ) )
+ else:
+ print 'Removing tests and tool ids when received repository is None...'
+ print 'app.toolbox.tools_by_id.keys():\n%s\n' % str( app.toolbox.tools_by_id.keys() )
tests_to_delete = []
tools_to_delete_by_id = []
for key in test_toolbox.__dict__:
@@ -608,15 +612,31 @@
reset_spaces_tool_id = tool_id.replace( '_', ' ' )
if reset_spaces_app_tool_id == reset_spaces_tool_id:
print 'Setting tool id %s for deletion from app.toolbox[ tools_by_id ].' % str( app_tool_id )
- tools_to_delete_by_id.append( app_tool_id )
+ tools_to_delete_by_id.append( app_tool_id )
+ if repository:
+ metadata = repository.metadata
+ print 'metadata:\n%s\n' % str( metadata )
+ if metadata:
+ tools = metadata.get( 'tools', [] )
+ print 'tools:\n%s\n' % str( tools )
+ for tool_dict in tools:
+ print 'tool_dict:\n%s\n' % str( tool_dict )
+ guid = tool_dict.get( 'guid', None )
+ print 'guid:\n%s\n' % str( guid )
+ if guid:
+ if guid in app.toolbox.tools_by_id:
+ print 'Setting tool id %s for deletion from app.toolbox[ tools_by_id ].' % str( guid )
+ tools_to_delete_by_id.append( guid )
# Delete the discovered twill-generated tests.
for key in tests_to_delete:
if key in test_toolbox.__dict__:
print 'Deleting test %s from test_toolbox.' % str( key )
del test_toolbox.__dict__[ key ]
for tool_id in tools_to_delete_by_id:
- print 'Deleting tool id %s from app.toolbox[ tools_by_id ].' % str( tool_id )
- del app.toolbox.tools_by_id[ tool_id ]
+ if tool_id in app.toolbox.tools_by_id:
+ print 'Deleting tool id %s from app.toolbox[ tools_by_id ].' % str( tool_id )
+ del app.toolbox.tools_by_id[ tool_id ]
+ print '\n-------------------------------------------------------------\n'
def test_repository_tools( app, repository, repository_dict, tool_test_results_dicts, tool_test_results_dict,
install_and_test_statistics_dict ):
@@ -700,7 +720,7 @@
can_update_tool_shed )
# Remove the just-executed tests so twill will not find and re-test them along with the tools
# contained in the next repository.
- remove_tests( app )
+ remove_tests( app, repository )
return install_and_test_statistics_dict
if __name__ == "__main__":
diff -r 694411e94d9aa26c9b9a2c13567b2b5e07f74580 -r e9878f33a33333f211a16d2d399f528c859a6f77 test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
@@ -33,7 +33,6 @@
import re
import shutil
import socket
-import string
import tempfile
import time
import threading
@@ -43,8 +42,6 @@
from base.tool_shed_util import parse_tool_panel_config
from galaxy.app import UniverseApplication
-from galaxy.util.json import from_json_string
-from galaxy.util.json import to_json_string
from galaxy.util import unicodify
from galaxy.web import buildapp
from functional_tests import generate_config_file
@@ -75,7 +72,7 @@
test_framework = install_and_test_base_util.TOOL_DEPENDENCY_DEFINITIONS
-def install_and_test_repositories( app, galaxy_shed_tools_dict, galaxy_shed_tool_conf_file ):
+def install_and_test_repositories( app, galaxy_shed_tools_dict_file, galaxy_shed_tool_conf_file, galaxy_shed_tool_path ):
# Initialize a dictionary for the summary that will be printed to stdout.
install_and_test_statistics_dict = install_and_test_base_util.initialize_install_and_test_statistics_dict()
error_message = ''
@@ -234,12 +231,13 @@
os.path.join( galaxy_test_tmp_dir, 'test_migrated_tool_conf.xml' ) )
galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) )
- galaxy_shed_tools_dict = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE',
- os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
+ galaxy_shed_tools_dict_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE',
+ os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
+ install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file,
+ shed_tools_dict=None )
# Set the GALAXY_TOOL_SHED_TEST_FILE environment variable to the path of the shed_tools_dict file so that
# test.base.twilltestcase.setUp will find and parse it properly.
- os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict
+ os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict_file
if 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' in os.environ:
tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' )
else:
@@ -285,13 +283,10 @@
if 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF' not in os.environ:
file( galaxy_tool_sheds_conf_file, 'w' ).write( install_and_test_base_util.tool_sheds_conf_xml )
# Generate the shed_tool_conf.xml file.
- tool_conf_template_parser = string.Template( install_and_test_base_util.shed_tool_conf_xml_template )
- shed_tool_conf_xml = tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_shed_tool_path )
- file( galaxy_shed_tool_conf_file, 'w' ).write( shed_tool_conf_xml )
+ install_and_test_base_util.populate_shed_conf_file( galaxy_shed_tool_conf_file, galaxy_shed_tool_path, xml_elems=None )
os.environ[ 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF' ] = galaxy_shed_tool_conf_file
# Generate the migrated_tool_conf.xml file.
- migrated_tool_conf_xml = tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_migrated_tool_path )
- file( galaxy_migrated_tool_conf_file, 'w' ).write( migrated_tool_conf_xml )
+ install_and_test_base_util.populate_shed_conf_file( galaxy_migrated_tool_conf_file, galaxy_migrated_tool_path, xml_elems=None )
# Write the embedded web application's specific configuration to a temporary file. This is necessary in order for
# the external metadata script to find the right datasets.
kwargs = dict( admin_users = 'test(a)bx.psu.edu',
@@ -402,8 +397,9 @@
print "# This run will not update the Tool Shed database."
print "####################################################################################"
install_and_test_statistics_dict, error_message = install_and_test_repositories( app,
- galaxy_shed_tools_dict,
- galaxy_shed_tool_conf_file )
+ galaxy_shed_tools_dict_file,
+ galaxy_shed_tool_conf_file,
+ galaxy_shed_tool_path )
try:
install_and_test_base_util.print_install_and_test_results( 'tool dependency definitions',
install_and_test_statistics_dict,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/15fc8675064e/
Changeset: 15fc8675064e
User: inithello
Date: 2014-02-11 16:38:50
Summary: Make GALAXY_TEST_NO_CLEANUP apply to uploaded and tool output datasets.
Affected #: 1 file
diff -r f3dc213a5773e356c23915b985803058aeccf9a1 -r 15fc8675064ea46b7e081d9643393be354f07d65 test/base/twilltestcase.py
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -95,7 +95,10 @@
diff = list( difflib.unified_diff( local_file, history_data, "local_file", "history_data" ) )
diff_lines = get_lines_diff( diff )
if diff_lines > allowed_diff_count:
- diff_slice = diff[0:40]
+ if len(diff) < 60:
+ diff_slice = diff[0:40]
+ else:
+ diff_slice = diff[:25] + ["********\n", "*SNIP *\n", "********\n"] + diff[-25:]
#FIXME: This pdf stuff is rather special cased and has not been updated to consider lines_diff
#due to unknown desired behavior when used in conjunction with a non-zero lines_diff
#PDF forgiveness can probably be handled better by not special casing by __extension__ here
@@ -897,7 +900,8 @@
errmsg += str( err )
raise AssertionError( errmsg )
finally:
- os.remove( temp_name )
+ if 'GALAXY_TEST_NO_CLEANUP' not in os.environ:
+ os.remove( temp_name )
def __default_dataset_fetcher( self ):
def fetcher( hda_id, filename=None ):
@@ -971,7 +975,8 @@
errmsg += str( err )
raise AssertionError( errmsg )
finally:
- os.remove( temp_name )
+ if 'GALAXY_TEST_NO_CLEANUP' not in os.environ:
+ os.remove( temp_name )
def is_zipped( self, filename ):
if not zipfile.is_zipfile( filename ):
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/578b9185b556/
Changeset: 578b9185b556
User: carlfeberhard
Date: 2014-02-10 21:00:22
Summary: History API: allow copying another accessible history
Affected #: 2 files
diff -r bb0f56f7c6361cf6021da56ce70ca07e0aa6818c -r 578b9185b556ce59c170d3fa0b422bd7773d0693 lib/galaxy/webapps/galaxy/api/histories.py
--- a/lib/galaxy/webapps/galaxy/api/histories.py
+++ b/lib/galaxy/webapps/galaxy/api/histories.py
@@ -170,35 +170,64 @@
:type payload: dict
:param payload: (optional) dictionary structure containing:
- * name: the new history's name
- * current: if passed, set the new history to be the user's 'current'
- history
+ * name: the new history's name
+ * current: if passed, set the new history to be the user's
+ 'current' history
+ * history_id: the id of the history to copy
+ * archive_source: the url that will generate the archive to import
+ * archive_type: 'url' (default)
:rtype: dict
:returns: element view of new history
"""
- if self.__create_via_import( payload ):
- return self.__import_archive( trans, payload )
+ def __create_via_import( self, payload ):
+ return
+
+ def __import_archive( self, trans, archive_type, archive_source ):
hist_name = None
if payload.get( 'name', None ):
hist_name = restore_text( payload['name'] )
- new_history = trans.app.model.History( user=trans.user, name=hist_name )
+ #TODO: possibly default to True here - but favor explicit for now (and backwards compat)
+ set_as_current = string_as_bool( payload[ 'current' ] ) if 'current' in payload else False
+ copy_this_history_id = payload.get( 'history_id', None )
+
+ if "archive_source" in payload:
+ archive_source = payload[ "archive_source" ]
+ archive_type = payload.get( "archive_type", "url" )
+ self.queue_history_import( trans, archive_type=archive_type, archive_source=archive_source )
+ trans.response.status = 201
+ return {}
+
+ new_history = None
+ # if a history id was passed, copy that history
+ if copy_this_history_id:
+ try:
+ original_history = self.get_history( trans, copy_this_history_id,
+ check_ownership=False, check_accessible=True )
+ except HTTPBadRequest, bad_request:
+ trans.response.status = 403
+ #TODO: it's either that or parse each possible detail to it's own status code
+ return { 'error': bad_request.detail or 'Bad request' }
+
+ hist_name = hist_name or ( "Copy of '%s'" % original_history.name )
+ new_history = original_history.copy( name=hist_name, target_user=trans.user )
+
+ # otherwise, create a new empty history
+ else:
+ new_history = trans.app.model.History( user=trans.user, name=hist_name )
+
+ item = {}
trans.sa_session.add( new_history )
trans.sa_session.flush()
- #item = new_history.to_dict(view='element', value_mapper={'id':trans.security.encode_id})
+
item = self.get_history_dict( trans, new_history )
item['url'] = url_for( 'history', id=item['id'] )
- #TODO: possibly default to True here - but favor explicit for now (and backwards compat)
- current = string_as_bool( payload[ 'current' ] ) if 'current' in payload else False
- if current:
+ if set_as_current:
trans.history = new_history
- #TODO: copy own history
- #TODO: import an importable history
- #TODO: import from archive
return item
@web.expose_api
@@ -398,14 +427,6 @@
return self.serve_ready_history_export( trans, jeha )
- def __create_via_import( self, payload ):
- return "archive_source" in payload
-
- def __import_archive( self, trans, payload ):
- archive_type = payload.get( "archive_type", "url" )
- archive_source = payload[ "archive_source" ]
- self.queue_history_import( trans, archive_type=archive_type, archive_source=archive_source )
-
def _validate_and_parse_update_payload( self, payload ):
"""
 Validate and parse incoming data payload for a history.
diff -r bb0f56f7c6361cf6021da56ce70ca07e0aa6818c -r 578b9185b556ce59c170d3fa0b422bd7773d0693 test/functional/api/test_histories.py
--- a/test/functional/api/test_histories.py
+++ b/test/functional/api/test_histories.py
@@ -68,3 +68,5 @@
if import_name in history_names():
found = True
assert found, "%s not in history names %s" % ( import_name, history_names() )
+
+ #TODO: (CE) test_create_from_copy
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.