commit/galaxy-central: 4 new changesets
4 new commits in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/commits/fea46e621742/ Changeset: fea46e621742 User: jmchilton Date: 2015-01-15 02:03:12+00:00 Summary: Remove unused references to test-data stuff in i+t framework. Affected #: 2 files diff -r e2440583c44e4b0042d6e1c0b7b4d1bd31efec0a -r fea46e621742a7e5af02ea57f71a464822a43815 test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py --- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py +++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py @@ -63,7 +63,6 @@ # File containing information about problematic repositories to exclude from test runs. exclude_list_file = os.path.abspath( os.path.join( test_home_directory, 'exclude.xml' ) ) default_galaxy_locales = 'en' -default_galaxy_test_file_dir = "test-data" os.environ[ 'GALAXY_INSTALL_TEST_TMP_DIR' ] = galaxy_test_tmp_dir # Use separate databases for Galaxy and tool shed install info by default, @@ -371,9 +370,6 @@ tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_PATH', 'tools' ) if 'HTTP_ACCEPT_LANGUAGE' not in os.environ: os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales - galaxy_test_file_dir = os.environ.get( 'GALAXY_INSTALL_TEST_FILE_DIR', default_galaxy_test_file_dir ) - if not os.path.isabs( galaxy_test_file_dir ): - galaxy_test_file_dir = os.path.abspath( galaxy_test_file_dir ) use_distributed_object_store = os.environ.get( 'GALAXY_INSTALL_TEST_USE_DISTRIBUTED_OBJECT_STORE', False ) if not os.path.isdir( galaxy_test_tmp_dir ): os.mkdir( galaxy_test_tmp_dir ) diff -r e2440583c44e4b0042d6e1c0b7b4d1bd31efec0a -r fea46e621742a7e5af02ea57f71a464822a43815 test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py --- a/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py +++ b/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py @@ -62,7 +62,6 @@ # File containing information about problematic repositories to exclude from test runs. exclude_list_file = os.path.abspath( os.path.join( test_home_directory, 'exclude.xml' ) ) default_galaxy_locales = 'en' -default_galaxy_test_file_dir = "test-data" os.environ[ 'GALAXY_INSTALL_TEST_TMP_DIR' ] = galaxy_test_tmp_dir # Use separate databases for Galaxy and tool shed install info by default, @@ -222,9 +221,6 @@ tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_PATH', 'tools' ) if 'HTTP_ACCEPT_LANGUAGE' not in os.environ: os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales - galaxy_test_file_dir = os.environ.get( 'GALAXY_INSTALL_TEST_FILE_DIR', default_galaxy_test_file_dir ) - if not os.path.isabs( galaxy_test_file_dir ): - galaxy_test_file_dir = os.path.abspath( galaxy_test_file_dir ) use_distributed_object_store = os.environ.get( 'GALAXY_INSTALL_TEST_USE_DISTRIBUTED_OBJECT_STORE', False ) if not os.path.isdir( galaxy_test_tmp_dir ): os.mkdir( galaxy_test_tmp_dir ) https://bitbucket.org/galaxy/galaxy-central/commits/50ec79ddfe60/ Changeset: 50ec79ddfe60 User: jmchilton Date: 2015-01-15 02:03:12+00:00 Summary: PEP-8 fixes for install_and_test scripts. 
Affected #: 5 files diff -r fea46e621742a7e5af02ea57f71a464822a43815 -r 50ec79ddfe60ef912425b88447eba2fc08e9e6ac lib/tool_shed/scripts/check_repositories_for_functional_tests.py --- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py +++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py @@ -25,8 +25,6 @@ from install_and_test_tool_shed_repositories.base.util import get_repository_current_revision from install_and_test_tool_shed_repositories.base.util import RepositoryMetadataApplication from galaxy.model.orm import and_ -from galaxy.model.orm import not_ -from galaxy.model.orm import select from galaxy.util import listify from mercurial import __version__ from optparse import OptionParser @@ -35,10 +33,12 @@ log = logging.getLogger( 'check_repositories_for_functional_tests' ) assert sys.version_info[ :2 ] >= ( 2, 6 ) + def check_and_update_repository_metadata( app, info_only=False, verbosity=1 ): """ - This method will iterate through all records in the repository_metadata table, checking each one for tool metadata, - then checking the tool metadata for tests. Each tool's metadata should look something like: + This method will iterate through all records in the repository_metadata + table, checking each one for tool metadata, then checking the tool + metadata for tests. Each tool's metadata should look something like: { "add_to_tool_panel": true, "description": "", @@ -63,7 +63,6 @@ tool_count = 0 has_tests = 0 no_tests = 0 - no_tools = 0 valid_revisions = 0 invalid_revisions = 0 records_checked = 0 @@ -98,9 +97,9 @@ if repository.id not in checked_repository_ids: checked_repository_ids.append( repository.id ) print '# -------------------------------------------------------------------------------------------' - print '# Checking revision %s of %s owned by %s.' % ( changeset_revision, name, owner ) + print '# Checking revision %s of %s owned by %s.' % ( changeset_revision, name, owner ) if repository_metadata.id in skip_metadata_ids: - print'# Skipping revision %s of %s owned by %s because the skip_tool_test setting has been set.' % ( changeset_revision, name, owner ) + print'# Skipping revision %s of %s owned by %s because the skip_tool_test setting has been set.' % ( changeset_revision, name, owner ) continue # If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning # only repositories that contain tools. @@ -108,7 +107,7 @@ if tool_dicts is not None: # Clone the repository up to the changeset revision we're checking. repo_dir = repository.repo_path( app ) - repo = hg_util.get_repo_for_repository( app, repository=None, repo_path=repo_dir, create=False ) + hg_util.get_repo_for_repository( app, repository=None, repo_path=repo_dir, create=False ) work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-cafr" ) cloned_ok, error_message = hg_util.clone_repository( repo_dir, work_dir, changeset_revision ) if cloned_ok: @@ -127,10 +126,13 @@ else: print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \ - ( changeset_revision, name, owner ) - # Inspect each tool_dict for defined functional tests. If there are no tests, this tool should not be tested, since the - # tool functional tests only report failure if the test itself fails, not if it's missing or undefined. 
Filtering out those - # repositories at this step will reduce the number of "false negatives" the automated functional test framework produces. + ( changeset_revision, name, owner ) + # Inspect each tool_dict for defined functional tests. If there + # are no tests, this tool should not be tested, since the tool + # functional tests only report failure if the test itself fails, + # not if it's missing or undefined. Filtering out those + # repositories at this step will reduce the number of "false + # negatives" the automated functional test framework produces. for tool_dict in tool_dicts: failure_reason = '' problem_found = False @@ -147,16 +149,16 @@ if defined_test_dicts is not None: # We need to inspect the <test> tags because the following tags... # <tests> - # </tests> + # </tests> # ...will produce the following metadata: # "tests": [] # And the following tags... # <tests> # <test> # </test> - # </tests> + # </tests> # ...will produce the following metadata: - # "tests": + # "tests": # [{"inputs": [], "name": "Test-1", "outputs": [], "required_files": []}] for defined_test_dict in defined_test_dicts: inputs = defined_test_dict.get( 'inputs', [] ) @@ -194,7 +196,7 @@ if test_errors not in missing_test_components: missing_test_components.append( test_errors ) if tool_has_defined_tests and tool_has_test_files: - print '# Revision %s of %s owned by %s is a testable revision.' % ( changeset_revision, name, owner ) + print '# Revision %s of %s owned by %s is a testable revision.' % ( changeset_revision, name, owner ) testable_revision = True # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons. if os.path.exists( work_dir ): @@ -226,8 +228,8 @@ # a test_environment entry. If we use it we need to temporarily eliminate it from the list of tool_test_results_dicts # since it will be re-inserted later. tool_test_results_dict = tool_test_results_dicts.pop( 0 ) - elif len( tool_test_results_dict ) == 2 and \ - 'test_environment' in tool_test_results_dict and 'missing_test_components' in tool_test_results_dict: + elif (len( tool_test_results_dict ) == 2 and + 'test_environment' in tool_test_results_dict and 'missing_test_components' in tool_test_results_dict): # We can re-use tool_test_results_dict if its only entries are "test_environment" and "missing_test_components". # In this case, some tools are missing tests components while others are not. tool_test_results_dict = tool_test_results_dicts.pop( 0 ) @@ -260,19 +262,22 @@ # In the install and test script, this behavior is slightly different, since we do want to always run functional # tests on the most recent downloadable changeset revision. if should_set_do_not_test_flag( app, repository, changeset_revision, testable_revision ): - print "# Setting do_not_test to True on revision %s of %s owned by %s because it is missing test components" % \ - ( changeset_revision, name, owner ) + print "# Setting do_not_test to True on revision %s of %s owned by %s because it is missing test components" % ( + changeset_revision, name, owner + ) print "# and it is not the latest downloadable revision." repository_metadata.do_not_test = True if not testable_revision: # Even though some tools may be missing test components, it may be possible to test other tools. Since the # install and test framework filters out repositories marked as missing test components, we'll set it only if # no tools can be tested. 
- print '# Setting missing_test_components to True for revision %s of %s owned by %s because all tools are missing test components.' % \ - ( changeset_revision, name, owner ) + print '# Setting missing_test_components to True for revision %s of %s owned by %s because all tools are missing test components.' % ( + changeset_revision, name, owner + ) repository_metadata.missing_test_components = True - print "# Setting tools_functionally_correct to False on revision %s of %s owned by %s because it is missing test components" % \ - ( changeset_revision, name, owner ) + print "# Setting tools_functionally_correct to False on revision %s of %s owned by %s because it is missing test components" % ( + changeset_revision, name, owner + ) repository_metadata.tools_functionally_correct = False tool_test_results_dict[ 'missing_test_components' ] = missing_test_components # Store only the configured number of test runs. @@ -299,6 +304,7 @@ print "# Elapsed time: ", stop - start print "#############################################################################" + def check_for_missing_test_files( test_definition, test_data_path ): '''Process the tool's functional test definitions and check for each file specified as an input or output.''' missing_test_files = [] @@ -314,6 +320,7 @@ missing_test_files.append( required_file ) return missing_test_files + def main(): '''Script that checks repositories to see if the tools contained within them have functional tests defined.''' parser = OptionParser() @@ -329,7 +336,7 @@ except IndexError: print "Usage: python %s <tool shed .ini file> [options]" % sys.argv[ 0 ] exit( 127 ) - config_parser = ConfigParser.ConfigParser( {'here':os.getcwd() } ) + config_parser = ConfigParser.ConfigParser( { 'here': os.getcwd() } ) config_parser.read( ini_file ) config_dict = {} for key, value in config_parser.items( "app:main" ): @@ -349,6 +356,7 @@ print "# Displaying extra information ( --verbosity = %d )" % options.verbosity check_and_update_repository_metadata( app, info_only=options.info_only, verbosity=options.verbosity ) + def should_set_do_not_test_flag( app, repository, changeset_revision, testable_revision ): """ The received testable_revision is True if the tool has defined tests and test files are in the repository diff -r fea46e621742a7e5af02ea57f71a464822a43815 -r 50ec79ddfe60ef912425b88447eba2fc08e9e6ac test/base/tool_shed_util.py --- a/test/base/tool_shed_util.py +++ b/test/base/tool_shed_util.py @@ -18,10 +18,12 @@ # Set a 10 minute timeout for repository installation. repository_installation_timeout = 600 + def get_installed_repository_info( elem, last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision, tool_path ): """ - Return the GALAXY_TEST_FILE_DIR, the containing repository name and the change set revision for the tool elem. - This only happens when testing tools installed from the tool shed. + Return the GALAXY_TEST_FILE_DIR, the containing repository name and the + change set revision for the tool elem. This only happens when testing + tools installed from the tool shed. """ tool_config_path = elem.get( 'file' ) installed_tool_path_items = tool_config_path.split( '/repos/' ) @@ -41,14 +43,15 @@ return None, repository_name, changeset_revision return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision + def log_reason_repository_cannot_be_uninstalled( app, repository ): # This method should be altered if / when the app.install_model.ToolShedRepository.can_uninstall() # method is altered. 
Any block returning a False value from that method should be handled here. name = str( repository.name ) owner = str( repository.owner ) installed_changeset_revision = str( repository.installed_changeset_revision ) - log.debug( "Revision %s of repository %s owned by %s cannot be uninstalled because:" % \ - ( installed_changeset_revision, name, owner ) ) + log.debug( "Revision %s of repository %s owned by %s cannot be uninstalled because:" % + ( installed_changeset_revision, name, owner ) ) if repository.status == app.install_model.ToolShedRepository.installation_status.UNINSTALLED: log.debug( 'it is already uninstalled.' ) else: @@ -60,8 +63,8 @@ if installed_dependent_repository_tups: for installed_dependent_repository_tup in installed_dependent_repository_tups: idr_tool_shed, idr_name, idr_owner, idr_installed_changeset_revision = installed_dependent_repository_tup - log.debug( "it is required by revision %s of repository %s owned by %s" % \ - ( idr_installed_changeset_revision, idr_name, idr_owner ) ) + log.debug( "it is required by revision %s of repository %s owned by %s" % + ( idr_installed_changeset_revision, idr_name, idr_owner ) ) else: # Find installed tool dependencies that require this repository's installed tool dependencies. installed_dependent_td_tups = None @@ -79,8 +82,9 @@ dr_name = str( dependent_repository.name ) dr_owner = str( dependent_repository.owner ) dr_installed_changeset_revison = str( dependent_repository.installed_changeset_revision ) - log.debug( "- version %s of %s %s contained in revision %s of repository %s owned by %s" % \ - ( td_version, td_type, td_name, dr_installed_changeset_revison, dr_name, dr_owner ) ) + log.debug( "- version %s of %s %s contained in revision %s of repository %s owned by %s" % + ( td_version, td_type, td_name, dr_installed_changeset_revison, dr_name, dr_owner ) ) + def parse_tool_panel_config( config, shed_tools_dict ): """ @@ -97,12 +101,12 @@ for elem in root: if elem.tag == 'tool': galaxy_test_file_dir, \ - last_tested_repository_name, \ - last_tested_changeset_revision = get_installed_repository_info( elem, - last_galaxy_test_file_dir, - last_tested_repository_name, - last_tested_changeset_revision, - tool_path ) + last_tested_repository_name, \ + last_tested_changeset_revision = get_installed_repository_info( elem, + last_galaxy_test_file_dir, + last_tested_repository_name, + last_tested_changeset_revision, + tool_path ) if galaxy_test_file_dir: if not has_test_data: has_test_data = True @@ -116,12 +120,12 @@ for section_elem in elem: if section_elem.tag == 'tool': galaxy_test_file_dir, \ - last_tested_repository_name, \ - last_tested_changeset_revision = get_installed_repository_info( section_elem, - last_galaxy_test_file_dir, - last_tested_repository_name, - last_tested_changeset_revision, - tool_path ) + last_tested_repository_name, \ + last_tested_changeset_revision = get_installed_repository_info( section_elem, + last_galaxy_test_file_dir, + last_tested_repository_name, + last_tested_changeset_revision, + tool_path ) if galaxy_test_file_dir: if not has_test_data: has_test_data = True diff -r fea46e621742a7e5af02ea57f71a464822a43815 -r 50ec79ddfe60ef912425b88447eba2fc08e9e6ac test/install_and_test_tool_shed_repositories/base/util.py --- a/test/install_and_test_tool_shed_repositories/base/util.py +++ b/test/install_and_test_tool_shed_repositories/base/util.py @@ -160,6 +160,7 @@ REPOSITORIES_WITH_TOOLS = 'repositories_with_tools' TOOL_DEPENDENCY_DEFINITIONS = 'tool_dependency_definitions' + class ReportResults( Plugin ): 
'''Simple Nose plugin to record the IDs of all tests run, regardless of success.''' name = "reportresults" @@ -221,6 +222,7 @@ def shutdown( self ): pass + def display_repositories_by_owner( repository_tups ): """Group summary display by repository owner.""" repository_tups_by_owner = {} @@ -240,6 +242,7 @@ name, owner, changeset_revision = repository_tup print "# Revision %s of repository %s owned by %s" % ( changeset_revision, name, owner ) + def display_tool_dependencies_by_name( tool_dependency_tups ): """Group summary display by repository owner.""" tool_dependency_tups_by_name = {} @@ -259,11 +262,15 @@ name, type, version = tool_dependency_tup print "# %s %s version %s" % ( type, name, version ) + def get_database_version( app ): ''' - This method returns the value of the version column from the migrate_version table, using the provided app's SQLAlchemy session to determine - which table to get that from. This way, it's provided with an instance of a Galaxy UniverseApplication, it will return the Galaxy instance's - database migration version. If a tool shed UniverseApplication is provided, it returns the tool shed's database migration version. + This method returns the value of the version column from the + migrate_version table, using the provided app's SQLAlchemy session to + determine which table to get that from. This way, it's provided with an + instance of a Galaxy UniverseApplication, it will return the Galaxy + instance's database migration version. If a tool shed UniverseApplication + is provided, it returns the tool shed's database migration version. ''' sa_session = app.model.context.current result = sa_session.execute( 'SELECT version FROM migrate_version LIMIT 1' ) @@ -276,6 +283,7 @@ break return version + def get_missing_repository_dependencies( repository, all_missing_repository_dependencies=None ): """ Return the entire list of missing repository dependencies for the received repository. The entire diff -r fea46e621742a7e5af02ea57f71a464822a43815 -r 50ec79ddfe60ef912425b88447eba2fc08e9e6ac test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py --- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py +++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py @@ -44,10 +44,8 @@ from galaxy.app import UniverseApplication from galaxy.util.json import loads from galaxy.util import asbool -from galaxy.util import unicodify from galaxy.web import buildapp from functional_tests import generate_config_file -from nose.plugins import Plugin from paste import httpserver from functional import database_contexts @@ -77,7 +75,8 @@ can_update_tool_shed = True test_framework = install_and_test_base_util.REPOSITORIES_WITH_TOOLS - + + def get_failed_test_dicts( test_result, from_tool_test=True ): """Extract any useful data from the test_result.failures and test_result.errors attributes.""" failed_test_dicts = [] @@ -97,7 +96,6 @@ log_output = re.sub( r'\n+', r'\n', log_output ) appending_to = 'output' tmp_output = {} - output = {} # Iterate through the functional test output and extract only the important data. Captured # logging and stdout are not recorded. 
for line in log_output.split( '\n' ): @@ -126,6 +124,7 @@ failed_test_dicts.append( test_status_dict ) return failed_test_dicts + def get_tool_info_from_test_id( test_id ): """ Test IDs come in the form test_tool_number @@ -137,6 +136,7 @@ tool_id = parts[ -2 ] return tool_id, tool_version + def install_and_test_repositories( app, galaxy_shed_tools_dict_file, galaxy_shed_tool_conf_file, galaxy_shed_tool_path ): # We must make sure that functional.test_toolbox is always imported after database_contexts.galaxy_content # is set (which occurs in the main method before this method is called). If functional.test_toolbox is @@ -218,7 +218,7 @@ if error_message: remove_tests( app, repository ) # The repository installation failed. - print 'Installation failed for revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) + print 'Installation failed for revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) processed_repositories_with_installation_error = \ install_and_test_statistics_dict.get( 'repositories_with_installation_error', [] ) if repository_identifier_tup not in processed_repositories_with_installation_error: @@ -241,7 +241,7 @@ # The repository was successfully installed. print 'Installation succeeded for revision %s of repository %s owned by %s.' % \ ( changeset_revision, name, owner ) - # Add an empty 'missing_test_results' entry if it is missing from the tool_test_results_dict. The + # Add an empty 'missing_test_results' entry if it is missing from the tool_test_results_dict. The # ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered information in the # 'missing_test_components' entry of the tool_test_results_dict dictionary for repositories that are # missing test components. @@ -349,6 +349,7 @@ print 'it was previously installed and currently has status %s' % str( repository.status ) return install_and_test_statistics_dict, error_message + def main(): if install_and_test_base_util.tool_shed_api_key is None: # If the tool shed URL specified in any dict is not present in the tool_sheds_conf.xml, the installation will fail. @@ -370,7 +371,6 @@ tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_PATH', 'tools' ) if 'HTTP_ACCEPT_LANGUAGE' not in os.environ: os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales - use_distributed_object_store = os.environ.get( 'GALAXY_INSTALL_TEST_USE_DISTRIBUTED_OBJECT_STORE', False ) if not os.path.isdir( galaxy_test_tmp_dir ): os.mkdir( galaxy_test_tmp_dir ) # Set up the configuration files for the Galaxy instance. @@ -391,7 +391,7 @@ galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF', os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) ) galaxy_shed_tools_dict_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE', - os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) ) + os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) ) install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file, shed_tools_dict=None ) # Set the GALAXY_TOOL_SHED_TEST_FILE environment variable to the path of the shed_tools_dict file so that # test.base.twilltestcase.setUp will find and parse it properly. @@ -409,7 +409,6 @@ galaxy_db_path = os.path.join( tempdir, 'database' ) # Configure the paths Galaxy needs to install and test tools. 
galaxy_file_path = os.path.join( galaxy_db_path, 'files' ) - new_repos_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir ) galaxy_tempfiles = tempfile.mkdtemp( dir=galaxy_test_tmp_dir ) galaxy_migrated_tool_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir ) # Set up the tool dependency path for the Galaxy instance. @@ -457,34 +456,34 @@ install_and_test_base_util.populate_shed_conf_file( galaxy_migrated_tool_conf_file, galaxy_migrated_tool_path, xml_elems=None ) # Write the embedded web application's specific configuration to a temporary file. This is necessary in order for # the external metadata script to find the right datasets. - kwargs = dict( admin_users = 'test@bx.psu.edu', - master_api_key = install_and_test_base_util.default_galaxy_master_api_key, - allow_user_creation = True, - allow_user_deletion = True, - allow_library_path_paste = True, - database_connection = database_connection, - datatype_converters_config_file = "datatype_converters_conf.xml.sample", - file_path = galaxy_file_path, - id_secret = install_and_test_base_util.galaxy_encode_secret, - install_database_connection = install_database_connection, - job_config_file = galaxy_job_conf_file, - job_queue_workers = 5, - log_destination = "stdout", - migrated_tools_config = galaxy_migrated_tool_conf_file, - new_file_path = galaxy_tempfiles, - running_functional_tests = True, - shed_tool_data_table_config = shed_tool_data_table_conf_file, - shed_tool_path = galaxy_shed_tool_path, - template_path = "templates", - tool_config_file = ','.join( [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ] ), - tool_data_path = tool_data_path, - tool_data_table_config_path = galaxy_tool_data_table_conf_file, - tool_dependency_dir = tool_dependency_dir, - tool_path = tool_path, - tool_parse_help = False, - tool_sheds_config_file = galaxy_tool_sheds_conf_file, - update_integrated_tool_panel = False, - use_heartbeat = False ) + kwargs = dict( admin_users='test@bx.psu.edu', + master_api_key=install_and_test_base_util.default_galaxy_master_api_key, + allow_user_creation=True, + allow_user_deletion=True, + allow_library_path_paste=True, + database_connection=database_connection, + datatype_converters_config_file="datatype_converters_conf.xml.sample", + file_path=galaxy_file_path, + id_secret=install_and_test_base_util.galaxy_encode_secret, + install_database_connection=install_database_connection, + job_config_file=galaxy_job_conf_file, + job_queue_workers=5, + log_destination="stdout", + migrated_tools_config=galaxy_migrated_tool_conf_file, + new_file_path=galaxy_tempfiles, + running_functional_tests=True, + shed_tool_data_table_config=shed_tool_data_table_conf_file, + shed_tool_path=galaxy_shed_tool_path, + template_path="templates", + tool_config_file=','.join( [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ] ), + tool_data_path=tool_data_path, + tool_data_table_config_path=galaxy_tool_data_table_conf_file, + tool_dependency_dir=tool_dependency_dir, + tool_path=tool_path, + tool_parse_help=False, + tool_sheds_config_file=galaxy_tool_sheds_conf_file, + update_integrated_tool_panel=False, + use_heartbeat=False ) galaxy_config_file = os.environ.get( 'GALAXY_INSTALL_TEST_INI_FILE', None ) # If the user has passed in a path for the .ini file, do not overwrite it. 
if not galaxy_config_file: @@ -535,8 +534,10 @@ continue raise else: - raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % \ - ( install_and_test_base_util.default_galaxy_test_port_min, install_and_test_base_util.default_galaxy_test_port_max ) ) + message = "Unable to open a port between %s and %s to start Galaxy server" % ( + install_and_test_base_util.default_galaxy_test_port_min, install_and_test_base_util.default_galaxy_test_port_max + ) + raise Exception( message ) os.environ[ 'GALAXY_INSTALL_TEST_PORT' ] = galaxy_test_port # Start the server. t = threading.Thread( target=server.serve_forever ) @@ -575,8 +576,11 @@ install_and_test_statistics_dict, error_message ) except Exception, e: - log.exception( 'Attempting to print the following dictionary...\n\n%s\n\n...threw the following exception...\n\n%s\n\n' % \ - ( str( install_and_test_statistics_dict ), str( e ) ) ) + message = 'Attempting to print the following dictionary...\n\n%s\n\n...threw the following exception...\n\n%s\n\n' % ( + str( install_and_test_statistics_dict ), + str( e ) + ) + log.exception( message ) log.debug( "Shutting down..." ) # Gracefully shut down the embedded web server and UniverseApplication. if server: @@ -603,6 +607,7 @@ # Return a "successful" response to buildbot. return 0 + def remove_tests( app, repository ): """ Delete any configured tool functional tests from the test_toolbox.__dict__. @@ -659,6 +664,7 @@ del app.toolbox.tools_by_id[ tool_id ] print '\n-------------------------------------------------------------\n' + def test_repository_tools( app, repository, repository_dict, tool_test_results_dicts, tool_test_results_dict, install_and_test_statistics_dict ): """Test tools contained in the received repository.""" diff -r fea46e621742a7e5af02ea57f71a464822a43815 -r 50ec79ddfe60ef912425b88447eba2fc08e9e6ac test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py --- a/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py +++ b/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py @@ -30,7 +30,6 @@ import logging import nose import random -import re import shutil import socket import tempfile @@ -39,14 +38,10 @@ import install_and_test_tool_shed_repositories.base.util as install_and_test_base_util -from base.tool_shed_util import parse_tool_panel_config - from galaxy.app import UniverseApplication from galaxy.util import asbool -from galaxy.util import unicodify from galaxy.web import buildapp from functional_tests import generate_config_file -from nose.plugins import Plugin from paste import httpserver from functional import database_contexts @@ -77,6 +72,7 @@ test_framework = install_and_test_base_util.TOOL_DEPENDENCY_DEFINITIONS + def install_and_test_repositories( app, galaxy_shed_tools_dict_file, galaxy_shed_tool_conf_file, galaxy_shed_tool_path ): # Initialize a dictionary for the summary that will be printed to stdout. install_and_test_statistics_dict = install_and_test_base_util.initialize_install_and_test_statistics_dict() @@ -200,6 +196,7 @@ print 'it was previously installed and currently has status %s' % repository.status return install_and_test_statistics_dict, error_message + def main(): if install_and_test_base_util.tool_shed_api_key is None: # If the tool shed URL specified in any dict is not present in the tool_sheds_conf.xml, the installation will fail. 
@@ -221,7 +218,6 @@ tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_PATH', 'tools' ) if 'HTTP_ACCEPT_LANGUAGE' not in os.environ: os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales - use_distributed_object_store = os.environ.get( 'GALAXY_INSTALL_TEST_USE_DISTRIBUTED_OBJECT_STORE', False ) if not os.path.isdir( galaxy_test_tmp_dir ): os.mkdir( galaxy_test_tmp_dir ) # Set up the configuration files for the Galaxy instance. @@ -242,7 +238,7 @@ galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF', os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) ) galaxy_shed_tools_dict_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE', - os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) ) + os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) ) install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file, shed_tools_dict=None ) # Set the GALAXY_TOOL_SHED_TEST_FILE environment variable to the path of the shed_tools_dict file so that @@ -261,7 +257,6 @@ galaxy_db_path = os.path.join( tempdir, 'database' ) # Configure the paths Galaxy needs to install and test tools. galaxy_file_path = os.path.join( galaxy_db_path, 'files' ) - new_repos_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir ) galaxy_tempfiles = tempfile.mkdtemp( dir=galaxy_test_tmp_dir ) galaxy_migrated_tool_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir ) # Set up the tool dependency path for the Galaxy instance. @@ -307,33 +302,33 @@ install_and_test_base_util.populate_shed_conf_file( galaxy_migrated_tool_conf_file, galaxy_migrated_tool_path, xml_elems=None ) # Write the embedded web application's specific configuration to a temporary file. This is necessary in order for # the external metadata script to find the right datasets. 
- kwargs = dict( admin_users = 'test@bx.psu.edu', - master_api_key = install_and_test_base_util.default_galaxy_master_api_key, - allow_user_creation = True, - allow_user_deletion = True, - allow_library_path_paste = True, - database_connection = database_connection, - datatype_converters_config_file = "datatype_converters_conf.xml.sample", - file_path = galaxy_file_path, - id_secret = install_and_test_base_util.galaxy_encode_secret, - install_database_connection = install_database_connection, - job_config_file = galaxy_job_conf_file, - job_queue_workers = 5, - log_destination = "stdout", - migrated_tools_config = galaxy_migrated_tool_conf_file, - new_file_path = galaxy_tempfiles, - running_functional_tests = True, - shed_tool_data_table_config = shed_tool_data_table_conf_file, - shed_tool_path = galaxy_shed_tool_path, - template_path = "templates", - tool_config_file = ','.join( [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ] ), - tool_data_path = tool_data_path, - tool_dependency_dir = tool_dependency_dir, - tool_path = tool_path, - tool_parse_help = False, - tool_sheds_config_file = galaxy_tool_sheds_conf_file, - update_integrated_tool_panel = False, - use_heartbeat = False ) + kwargs = dict( admin_users='test@bx.psu.edu', + master_api_key=install_and_test_base_util.default_galaxy_master_api_key, + allow_user_creation=True, + allow_user_deletion=True, + allow_library_path_paste=True, + database_connection=database_connection, + datatype_converters_config_file="datatype_converters_conf.xml.sample", + file_path=galaxy_file_path, + id_secret=install_and_test_base_util.galaxy_encode_secret, + install_database_connection=install_database_connection, + job_config_file=galaxy_job_conf_file, + job_queue_workers=5, + log_destination="stdout", + migrated_tools_config=galaxy_migrated_tool_conf_file, + new_file_path=galaxy_tempfiles, + running_functional_tests=True, + shed_tool_data_table_config=shed_tool_data_table_conf_file, + shed_tool_path=galaxy_shed_tool_path, + template_path="templates", + tool_config_file=','.join( [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ] ), + tool_data_path=tool_data_path, + tool_dependency_dir=tool_dependency_dir, + tool_path=tool_path, + tool_parse_help=False, + tool_sheds_config_file=galaxy_tool_sheds_conf_file, + update_integrated_tool_panel=False, + use_heartbeat=False ) if os.path.exists( galaxy_tool_data_table_conf_file ): kwargs[ 'tool_data_table_config_path' ] = galaxy_tool_data_table_conf_file galaxy_config_file = os.environ.get( 'GALAXY_INSTALL_TEST_INI_FILE', None ) @@ -385,8 +380,10 @@ continue raise else: - raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % \ - ( install_and_test_base_util.default_galaxy_test_port_min, install_and_test_base_util.default_galaxy_test_port_max ) ) + message = "Unable to open a port between %s and %s to start Galaxy server" % ( + install_and_test_base_util.default_galaxy_test_port_min, install_and_test_base_util.default_galaxy_test_port_max + ) + raise Exception( message ) os.environ[ 'GALAXY_INSTALL_TEST_PORT' ] = galaxy_test_port # Start the server. 
t = threading.Thread( target=server.serve_forever ) @@ -425,8 +422,11 @@ install_and_test_statistics_dict, error_message ) except Exception, e: - log.exception( 'Attempting to print the following dictionary...\n\n%s\n\n...threw the following exception...\n\n%s\n\n' % \ - ( str( install_and_test_statistics_dict ), str( e ) ) ) + message = 'Attempting to print the following dictionary...\n\n%s\n\n...threw the following exception...\n\n%s\n\n' % ( + str( install_and_test_statistics_dict ), + str( e ) + ) + log.exception( message ) log.debug( "Shutting down..." ) # Gracefully shut down the embedded web server and UniverseApplication. if server: https://bitbucket.org/galaxy/galaxy-central/commits/7e813c1cf0d6/ Changeset: 7e813c1cf0d6 User: jmchilton Date: 2015-01-15 02:03:12+00:00 Summary: Remove unused method tool_shed_util... ... it has a PEP-8 detected bug (undefined reference) so it would fail even if it was used. Affected #: 1 file diff -r 50ec79ddfe60ef912425b88447eba2fc08e9e6ac -r 7e813c1cf0d661ab7e2d717dd2816e791a7395af test/base/tool_shed_util.py --- a/test/base/tool_shed_util.py +++ b/test/base/tool_shed_util.py @@ -44,48 +44,6 @@ return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision -def log_reason_repository_cannot_be_uninstalled( app, repository ): - # This method should be altered if / when the app.install_model.ToolShedRepository.can_uninstall() - # method is altered. Any block returning a False value from that method should be handled here. - name = str( repository.name ) - owner = str( repository.owner ) - installed_changeset_revision = str( repository.installed_changeset_revision ) - log.debug( "Revision %s of repository %s owned by %s cannot be uninstalled because:" % - ( installed_changeset_revision, name, owner ) ) - if repository.status == app.install_model.ToolShedRepository.installation_status.UNINSTALLED: - log.debug( 'it is already uninstalled.' ) - else: - irm = app.installed_repository_manager - repository_tup = ( str( repository.tool_shed ), name, owner, installed_changeset_revision ) - # Find other installed repositories that require this repository. - installed_dependent_repository_tups = \ - irm.installed_dependent_repositories_of_installed_repositories.get( repository_tup, [] ) - if installed_dependent_repository_tups: - for installed_dependent_repository_tup in installed_dependent_repository_tups: - idr_tool_shed, idr_name, idr_owner, idr_installed_changeset_revision = installed_dependent_repository_tup - log.debug( "it is required by revision %s of repository %s owned by %s" % - ( idr_installed_changeset_revision, idr_name, idr_owner ) ) - else: - # Find installed tool dependencies that require this repository's installed tool dependencies. - installed_dependent_td_tups = None - installed_tool_dependency_tups = irm.installed_tool_dependencies_of_installed_repositories.get( repository_tup, [] ) - for td_tup in installed_tool_dependency_tups: - installed_dependent_td_tups = \ - irm.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies.get( td_tup, [] ) - if installed_dependent_td_tups is not None: - # This repository cannot be uninstalled because it contains installed tool dependencies that - # are required at run time by other installed tool dependencies. 
- log.debug( "it contains installed tool dependencies that are required at run time by these installed tool dependencies:" ) - for installed_dependent_td_tup in installed_dependent_td_tups: - repository_id, td_name, td_version, td_type = installed_dependent_td_tup - dependent_repository = test_db_util.get_repository( repository_id ) - dr_name = str( dependent_repository.name ) - dr_owner = str( dependent_repository.owner ) - dr_installed_changeset_revison = str( dependent_repository.installed_changeset_revision ) - log.debug( "- version %s of %s %s contained in revision %s of repository %s owned by %s" % - ( td_version, td_type, td_name, dr_installed_changeset_revison, dr_name, dr_owner ) ) - - def parse_tool_panel_config( config, shed_tools_dict ): """ Parse a shed-related tool panel config to generate the shed_tools_dict. This only happens when testing tools installed from the tool shed. https://bitbucket.org/galaxy/galaxy-central/commits/9a4256ccf468/ Changeset: 9a4256ccf468 User: jmchilton Date: 2015-01-15 02:03:12+00:00 Summary: Remove unused load_cookies function in TwillTestCase. Affected #: 1 file diff -r 7e813c1cf0d661ab7e2d717dd2816e791a7395af -r 9a4256ccf468f9e61adfdf89aa8d91ba8a8e720a test/base/twilltestcase.py --- a/test/base/twilltestcase.py +++ b/test/base/twilltestcase.py @@ -1506,10 +1506,6 @@ break self.assertNotEqual(count, maxiter) - def load_cookies( self, file, shed_tool_id=None ): - filename = self.get_filename( file, shed_tool_id=shed_tool_id ) - tc.load_cookies(filename) - def login( self, email='test@bx.psu.edu', password='testuser', username='admin-user', redirect='' ): # test@bx.psu.edu is configured as an admin user previously_created, username_taken, invalid_username = \ Repository URL: https://bitbucket.org/galaxy/galaxy-central/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.
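
One comment expanded in check_repositories_for_functional_tests.py (part of the second changeset) explains why the script inspects each <test> entry rather than just checking that the "tests" key is non-empty: an empty <tests> block yields "tests": [], while a <tests> block containing an empty <test> yields a one-element list whose dict has empty inputs, outputs and required_files, and neither case is actually testable. A simplified, hypothetical version of that check is sketched below; the real script also tracks missing test files and builds a missing_test_components report.

    # The two metadata shapes quoted in the comment, as Python literals.
    metadata_empty_tests = { "tests": [] }
    metadata_empty_test_tag = {
        "tests": [ { "inputs": [], "name": "Test-1", "outputs": [], "required_files": [] } ]
    }


    def has_usable_tests( tool_dict ):
        """Hypothetical reduction of the check: a test counts only if it
        defines both inputs and outputs."""
        for test_dict in tool_dict.get( "tests", [] ):
            if test_dict.get( "inputs" ) and test_dict.get( "outputs" ):
                return True
        return False


    assert not has_usable_tests( metadata_empty_tests )
    assert not has_usable_tests( metadata_empty_test_tag )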