commit/galaxy-central: 8 new changesets
8 new commits in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/commits/e8400d6f1dc3/ Changeset: e8400d6f1dc3 User: jmchilton Date: 2014-12-08 05:21:47+00:00 Summary: Drop psu_production stuff from scripts/functional_tests.py. These tests need to be runnable against running servers but should be more general and updated. Affected #: 1 file diff -r 690ea2dc6cab18cdacfcd59ba0e5c2178df2bff4 -r e8400d6f1dc31cf8f63d00922231b66dbc83f300 scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -256,78 +256,33 @@ database_auto_migrate = False galaxy_test_proxy_port = None - psu_production = False if start_server: - if 'GALAXY_TEST_PSU_PRODUCTION' in os.environ: - if not galaxy_test_port: - raise Exception( 'Please set GALAXY_TEST_PORT to the port to which the proxy server will proxy' ) - galaxy_test_proxy_port = os.environ.get( 'GALAXY_TEST_PROXY_PORT', None ) - if not galaxy_test_proxy_port: - raise Exception( 'Please set GALAXY_TEST_PROXY_PORT to the port on which the proxy server is listening' ) - base_file_path = os.environ.get( 'GALAXY_TEST_BASE_FILE_PATH', None ) - if not base_file_path: - raise Exception( 'Please set GALAXY_TEST_BASE_FILE_PATH to the directory which will contain the dataset files directory' ) - base_new_file_path = os.environ.get( 'GALAXY_TEST_BASE_NEW_FILE_PATH', None ) - if not base_new_file_path: - raise Exception( 'Please set GALAXY_TEST_BASE_NEW_FILE_PATH to the directory which will contain the temporary directory' ) - database_connection = os.environ.get( 'GALAXY_TEST_DBURI', None ) - if not database_connection: - raise Exception( 'Please set GALAXY_TEST_DBURI to the URI of the database to be used for tests' ) - nginx_upload_store = os.environ.get( 'GALAXY_TEST_NGINX_UPLOAD_STORE', None ) - if not nginx_upload_store: - raise Exception( 'Please set GALAXY_TEST_NGINX_UPLOAD_STORE to the path where the nginx upload module places uploaded files' ) - tool_config_file = 'tool_conf.xml.main' - 
default_cluster_job_runner = os.environ.get( 'GALAXY_TEST_DEFAULT_CLUSTER_JOB_RUNNER', 'pbs:///' ) - file_path = tempfile.mkdtemp( dir=base_file_path ) - new_file_path = tempfile.mkdtemp( dir=base_new_file_path ) - cluster_files_directory = os.path.join( new_file_path, 'pbs' ) - job_working_directory = os.path.join( new_file_path, 'job_working_directory' ) - os.mkdir( cluster_files_directory ) - os.mkdir( job_working_directory ) - kwargs = dict( database_engine_option_pool_size='10', - database_engine_option_max_overflow='20', - database_engine_option_strategy='threadlocal', - nginx_x_accel_redirect_base='/_x_accel_redirect', - nginx_upload_store=nginx_upload_store, - nginx_upload_path='/_upload', - allow_library_path_paste='True', - cluster_files_directory=cluster_files_directory, - job_working_directory=job_working_directory, - outputs_to_working_directory='True', - static_enabled='False', - debug='False', - track_jobs_in_database='True', - job_scheduler_policy='FIFO', - start_job_runners='pbs', - default_cluster_job_runner=default_cluster_job_runner ) - psu_production = True + tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir ) + # Configure the database path. + if 'GALAXY_TEST_DBPATH' in os.environ: + galaxy_db_path = os.environ[ 'GALAXY_TEST_DBPATH' ] else: - tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir ) - # Configure the database path. - if 'GALAXY_TEST_DBPATH' in os.environ: - galaxy_db_path = os.environ[ 'GALAXY_TEST_DBPATH' ] - else: - galaxy_db_path = os.path.join( tempdir, 'database' ) - # Configure the paths Galaxy needs to test tools. 
- file_path = os.path.join( galaxy_db_path, 'files' ) - new_file_path = tempfile.mkdtemp( prefix='new_files_path_', dir=tempdir ) - job_working_directory = tempfile.mkdtemp( prefix='job_working_directory_', dir=tempdir ) - install_database_connection = os.environ.get( 'GALAXY_TEST_INSTALL_DBURI', None ) - if 'GALAXY_TEST_DBURI' in os.environ: - database_connection = os.environ['GALAXY_TEST_DBURI'] - else: - db_path = os.path.join( galaxy_db_path, 'universe.sqlite' ) - if 'GALAXY_TEST_DB_TEMPLATE' in os.environ: - # Middle ground between recreating a completely new - # database and pointing at existing database with - # GALAXY_TEST_DBURI. The former requires a lot of setup - # time, the latter results in test failures in certain - # cases (namely tool shed tests expecting clean database). - log.debug( "Copying database template from %s.", os.environ['GALAXY_TEST_DB_TEMPLATE'] ) - __copy_database_template(os.environ['GALAXY_TEST_DB_TEMPLATE'], db_path) - database_auto_migrate = True - database_connection = 'sqlite:///%s' % db_path - kwargs = {} + galaxy_db_path = os.path.join( tempdir, 'database' ) + # Configure the paths Galaxy needs to test tools. + file_path = os.path.join( galaxy_db_path, 'files' ) + new_file_path = tempfile.mkdtemp( prefix='new_files_path_', dir=tempdir ) + job_working_directory = tempfile.mkdtemp( prefix='job_working_directory_', dir=tempdir ) + install_database_connection = os.environ.get( 'GALAXY_TEST_INSTALL_DBURI', None ) + if 'GALAXY_TEST_DBURI' in os.environ: + database_connection = os.environ['GALAXY_TEST_DBURI'] + else: + db_path = os.path.join( galaxy_db_path, 'universe.sqlite' ) + if 'GALAXY_TEST_DB_TEMPLATE' in os.environ: + # Middle ground between recreating a completely new + # database and pointing at existing database with + # GALAXY_TEST_DBURI. The former requires a lot of setup + # time, the latter results in test failures in certain + # cases (namely tool shed tests expecting clean database). 
+ log.debug( "Copying database template from %s.", os.environ['GALAXY_TEST_DB_TEMPLATE'] ) + __copy_database_template(os.environ['GALAXY_TEST_DB_TEMPLATE'], db_path) + database_auto_migrate = True + database_connection = 'sqlite:///%s' % db_path + kwargs = {} for dir in file_path, new_file_path: try: if not os.path.exists( dir ): @@ -376,8 +331,6 @@ ) if install_database_connection is not None: kwargs[ 'install_database_connection' ] = install_database_connection - if psu_production: - kwargs[ 'global_conf' ] = None if not database_connection.startswith( 'sqlite://' ): kwargs[ 'database_engine_option_max_overflow' ] = '20' kwargs[ 'database_engine_option_pool_size' ] = '10' @@ -448,12 +401,6 @@ time.sleep( 0.1 ) else: raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" ) - # Test if the proxy server is up - if psu_production: - conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_proxy_port ) # directly test the app, not the proxy - conn.request( "GET", "/" ) - if not conn.getresponse().status == 200: - raise Exception( "Test HTTP proxy server did not return '200 OK'" ) log.info( "Embedded web server started" ) # ---- Load toolbox for generated tests ----------------------------------- # We don't add the tests to the path until everything is up and running @@ -571,14 +518,6 @@ log.info( "GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir ) except: pass - if psu_production and 'GALAXY_TEST_NO_CLEANUP' not in os.environ: - for dir in ( file_path, new_file_path ): - try: - if os.path.exists( dir ): - log.info( 'Cleaning up temporary files in %s' % dir ) - shutil.rmtree( dir ) - except: - pass if success: return 0 else: https://bitbucket.org/galaxy/galaxy-central/commits/ad012fc04856/ Changeset: ad012fc04856 User: jmchilton Date: 2014-12-08 05:21:47+00:00 Summary: Remove redundant lines in scripts/functional_tests.py. 
This stuff is already imported at the top of the file - my guess is the need to import it later is no longer needed. Affected #: 1 file diff -r e8400d6f1dc31cf8f63d00922231b66dbc83f300 -r ad012fc04856b1423378f2309a9e02bdfbcdf360 scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -402,11 +402,6 @@ else: raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" ) log.info( "Embedded web server started" ) - # ---- Load toolbox for generated tests ----------------------------------- - # We don't add the tests to the path until everything is up and running - new_path = [ os.path.join( cwd, "test" ) ] - new_path.extend( sys.path[1:] ) - sys.path = new_path # ---- Find tests --------------------------------------------------------- if galaxy_test_proxy_port: https://bitbucket.org/galaxy/galaxy-central/commits/bcae5db5956a/ Changeset: bcae5db5956a User: jmchilton Date: 2014-12-08 05:21:47+00:00 Summary: Reduce some duplication related to nose running. 
Affected #: 4 files diff -r ad012fc04856b1423378f2309a9e02bdfbcdf360 -r bcae5db5956ac836c90ee0e2f14f8b901606c86b scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -54,6 +54,7 @@ from functional import database_contexts from base.api_util import get_master_api_key from base.api_util import get_user_api_key +from base.nose_util import run import nose.core import nose.config @@ -160,18 +161,7 @@ def run_tests( test_config ): - loader = nose.loader.TestLoader( config=test_config ) - plug_loader = test_config.plugins.prepareTestLoader( loader ) - if plug_loader is not None: - loader = plug_loader - tests = loader.loadTestsFromNames( test_config.testNames ) - test_runner = nose.core.TextTestRunner( stream=test_config.stream, - verbosity=test_config.verbosity, - config=test_config ) - plug_runner = test_config.plugins.prepareTestRunner( test_runner ) - if plug_runner is not None: - test_runner = plug_runner - return test_runner.run( tests ) + return run( test_config ) def __copy_database_template( source, db_path ): diff -r ad012fc04856b1423378f2309a9e02bdfbcdf360 -r bcae5db5956ac836c90ee0e2f14f8b901606c86b test/base/nose_util.py --- /dev/null +++ b/test/base/nose_util.py @@ -0,0 +1,32 @@ +""" Utilities for dealing with nose. + +There was some duplication between Galaxy, Tool Shed, and Install/Test, +trying to reduce that here. 
+""" + +try: + from galaxy import eggs + eggs.require( "nose" ) +except ImportError: + pass +import nose + + +def run( test_config, plugins=[] ): + loader = nose.loader.TestLoader( config=test_config ) + for plugin in plugins: + test_config.plugins.addPlugin( plugin ) + plug_loader = test_config.plugins.prepareTestLoader( loader ) + if plug_loader is not None: + loader = plug_loader + tests = loader.loadTestsFromNames( test_config.testNames ) + test_runner = nose.core.TextTestRunner( + stream=test_config.stream, + verbosity=test_config.verbosity, + config=test_config + ) + plug_runner = test_config.plugins.prepareTestRunner( test_runner ) + if plug_runner is not None: + test_runner = plug_runner + result = test_runner.run( tests ) + return result diff -r ad012fc04856b1423378f2309a9e02bdfbcdf360 -r bcae5db5956ac836c90ee0e2f14f8b901606c86b test/install_and_test_tool_shed_repositories/base/util.py --- a/test/install_and_test_tool_shed_repositories/base/util.py +++ b/test/install_and_test_tool_shed_repositories/base/util.py @@ -1058,6 +1058,10 @@ return base_url def run_tests( test_config ): + ## TODO: replace whole method with... 
+ # from base import nose_util + # result = nose_util.run( test_config, plugins=[ new ReportResults() ] ) + # return result, test_config.plugins._plugins loader = nose.loader.TestLoader( config=test_config ) test_config.plugins.addPlugin( ReportResults() ) plug_loader = test_config.plugins.prepareTestLoader( loader ) diff -r ad012fc04856b1423378f2309a9e02bdfbcdf360 -r bcae5db5956ac836c90ee0e2f14f8b901606c86b test/tool_shed/functional_tests.py --- a/test/tool_shed/functional_tests.py +++ b/test/tool_shed/functional_tests.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import absolute_import import os import sys @@ -66,6 +67,7 @@ from functional import database_contexts +from base import nose_util log = logging.getLogger( "tool_shed_functional_tests.py" ) @@ -147,19 +149,10 @@ </data_managers> ''' + def run_tests( test_config ): - loader = nose.loader.TestLoader( config=test_config ) - plug_loader = test_config.plugins.prepareTestLoader( loader ) - if plug_loader is not None: - loader = plug_loader - tests = loader.loadTestsFromNames( test_config.testNames ) - test_runner = nose.core.TextTestRunner( stream=test_config.stream, - verbosity=test_config.verbosity, - config=test_config ) - plug_runner = test_config.plugins.prepareTestRunner( test_runner ) - if plug_runner is not None: - test_runner = plug_runner - return test_runner.run( tests ) + return nose_util.run( test_config ) + def main(): # ---- Configuration ------------------------------------------------------ https://bitbucket.org/galaxy/galaxy-central/commits/c33f0de648fc/ Changeset: c33f0de648fc User: jmchilton Date: 2014-12-08 05:21:47+00:00 Summary: Remove unused method now that twill interactor is gone. 
Affected #: 1 file diff -r bcae5db5956ac836c90ee0e2f14f8b901606c86b -r c33f0de648fc6cbbeedcc75afe1dc36f74ace89f test/base/twilltestcase.py --- a/test/base/twilltestcase.py +++ b/test/base/twilltestcase.py @@ -1771,20 +1771,6 @@ tc.submit( "reset_user_password_button" ) self.check_page_for_string( "Passwords reset for 1 user." ) - def run_tool( self, tool_id, repeat_name=None, **kwd ): - """Runs the tool 'tool_id' and passes it the key/values from the *kwd""" - params = dict( tool_id=tool_id ) - self.visit_url( "/tool_runner/index", params ) - # Must click somewhere in tool_form, to disambiguate what form - # is being targetted. - tc.browser.clicked( tc.browser.get_form( 'tool_form' ), None ) - if repeat_name is not None: - repeat_button = '%s_add' % repeat_name - # Submit the "repeat" form button to add an input) - tc.submit( repeat_button ) - tc.find( 'runtool_btn' ) - self.submit_form( **kwd ) - def run_ucsc_main( self, track_params, output_params ): """Gets Data From UCSC""" tool_id = "ucsc_table_direct1" https://bitbucket.org/galaxy/galaxy-central/commits/bc3432c21cc7/ Changeset: bc3432c21cc7 User: jmchilton Date: 2014-12-08 05:21:47+00:00 Summary: Adjust test/functional/test_data_managers.py for run_tool ret val change in 21e3d69. 
Affected #: 1 file diff -r c33f0de648fc6cbbeedcc75afe1dc36f74ace89f -r bc3432c21cc75ff1ac5aa8ac8445ab80b95b7f94 test/functional/test_data_managers.py --- a/test/functional/test_data_managers.py +++ b/test/functional/test_data_managers.py @@ -27,7 +27,7 @@ stage_data_in_history( galaxy_interactor, testdef.test_data(), test_history, shed_tool_id ) - data_list = galaxy_interactor.run_tool( testdef, test_history ) #test_history will have inputs only, outputs are placed in the specialized data manager history + galaxy_interactor.run_tool( testdef, test_history ) #test_history will have inputs only, outputs are placed in the specialized data manager history #FIXME: Move history determination and switching into the interactor data_manager_history = None https://bitbucket.org/galaxy/galaxy-central/commits/ccd35e79ac1f/ Changeset: ccd35e79ac1f User: jmchilton Date: 2014-12-08 05:21:47+00:00 Summary: Slightly improved error message for tool tests if job submission fails. Affected #: 1 file diff -r bc3432c21cc75ff1ac5aa8ac8445ab80b95b7f94 -r ccd35e79ac1fbe89acd55bf3c95978bc30c5c2e9 test/base/interactor.py --- a/test/base/interactor.py +++ b/test/base/interactor.py @@ -212,7 +212,8 @@ try: return self.__dictify_outputs( submit_response_object ), submit_response_object[ 'jobs' ] except KeyError: - raise Exception( submit_response_object[ 'message' ] ) + message = "Error creating a job for these tool inputs - %s" % submit_response_object[ 'message' ] + raise Exception( message ) def _create_collection( self, history_id, collection_def ): create_payload = dict( https://bitbucket.org/galaxy/galaxy-central/commits/38b2fa3d3e49/ Changeset: 38b2fa3d3e49 User: jmchilton Date: 2014-12-08 05:21:47+00:00 Summary: Give admins more data via the jobs API. More raw information about job metrics and the command-line (visible to them via the UI). 
Affected #: 1 file diff -r ccd35e79ac1fbe89acd55bf3c95978bc30c5c2e9 -r 38b2fa3d3e4973e40998215fae6a9b14d1fecf71 lib/galaxy/webapps/galaxy/api/jobs.py --- a/lib/galaxy/webapps/galaxy/api/jobs.py +++ b/lib/galaxy/webapps/galaxy/api/jobs.py @@ -103,9 +103,20 @@ if full_output: job_dict.update( dict( stderr=job.stderr, stdout=job.stdout ) ) if trans.user_is_admin(): + job_dict['command_line'] = job.command_line def metric_to_dict(metric): - return dict(zip(['title', 'value'], trans.app.job_metrics.format(metric.plugin, metric.metric_name, metric.metric_value))) + metric_name = metric.metric_name + metric_value = metric.metric_value + metric_plugin = metric.plugin + title, value = trans.app.job_metrics.format(metric_plugin, metric_name, metric_value) + return dict( + title=title, + value=value, + plugin=metric_plugin, + name=metric_name, + raw_value=str(metric_value), + ) job_dict['job_metrics'] = [metric_to_dict(metric) for metric in job.metrics] return job_dict https://bitbucket.org/galaxy/galaxy-central/commits/896c18ca6b2f/ Changeset: 896c18ca6b2f User: jmchilton Date: 2014-12-08 05:21:47+00:00 Summary: Implement structured tool test data. Rather than relying solely on exceptions back to nose/test framework - add option (--structured_data_report_file) to run_tests.sh that causes a bunch of detailed data to be dumped to the specified file in a very structured way. Includes full to_dict of the job from the API which in turn includes job metrics, command-line, job's standard error and outputs (instead of the test frameworks), as well as the tool inputs, and exceptions broken out for tool execution versus output checking. It's all indexed in the file by the test id (without the actual test toolbox depending on knowing the test id) - so one could pair this information with the XUnit output to produce much more detailed breakdowns of the tests. 
Affected #: 5 files diff -r 38b2fa3d3e4973e40998215fae6a9b14d1fecf71 -r 896c18ca6b2f1922f73fc74ba9f2bb6229fb54a1 run_tests.sh --- a/run_tests.sh +++ b/run_tests.sh @@ -47,6 +47,7 @@ test_script="./scripts/functional_tests.py" report_file="run_functional_tests.html" xunit_report_file="" +structured_data_report_file="" with_framework_test_tools_arg="" driver="python" @@ -156,6 +157,15 @@ exit 1 fi ;; + --structured_data_report_file) + if [ $# -gt 1 ]; then + structured_data_report_file=$2 + shift 2 + else + echo "--structured_data_report_file requires an argument" 1>&2 + exit 1 + fi + ;; -c|--coverage) # Must have coverage installed (try `which coverage`) - only valid with --unit # for now. Would be great to get this to work with functional tests though. @@ -249,7 +259,12 @@ else xunit_args="" fi - python $test_script $coverage_arg -v --with-nosehtml --html-report-file $report_file $xunit_args $with_framework_test_tools_arg $extra_args + if [ -n "$structured_data_report_file" ]; then + structured_data_args="--with-structureddata --structured-data-file $structured_data_report_file" + else + structured_data_args="" + fi + python $test_script $coverage_arg -v --with-nosehtml --html-report-file $report_file $xunit_args $structured_data_args $with_framework_test_tools_arg $extra_args else ensure_grunt if [ -n "$watch" ]; then diff -r 38b2fa3d3e4973e40998215fae6a9b14d1fecf71 -r 896c18ca6b2f1922f73fc74ba9f2bb6229fb54a1 scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -55,6 +55,7 @@ from base.api_util import get_master_api_key from base.api_util import get_user_api_key from base.nose_util import run +from base.instrument import StructuredTestDataPlugin import nose.core import nose.config @@ -439,6 +440,7 @@ user_api_key=get_user_api_key(), ) test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() ) + test_config.plugins.addPlugin( 
StructuredTestDataPlugin() ) test_config.configure( sys.argv ) result = run_tests( test_config ) success = result.wasSuccessful() diff -r 38b2fa3d3e4973e40998215fae6a9b14d1fecf71 -r 896c18ca6b2f1922f73fc74ba9f2bb6229fb54a1 test/base/instrument.py --- /dev/null +++ b/test/base/instrument.py @@ -0,0 +1,89 @@ +""" Utilities to help instrument tool tests. + +Including structed data nose plugin that allows storing arbitrary structured +data on a per test case basis - used by tool test to store inputs, +output problems, job tests, etc... but could easily by used by other test +types in a different way. +""" + +import json +import threading + +try: + from galaxy import eggs + eggs.require( "nose" ) +except ImportError: + pass + + +from nose.plugins import Plugin + +NO_JOB_DATA = object() +JOB_DATA = threading.local() +JOB_DATA.new = True +JOB_DATA.data = NO_JOB_DATA + + +def register_job_data(data): + if not JOB_DATA.new: + return + JOB_DATA.data = data + JOB_DATA.new = False + + +def fetch_job_data(): + try: + if JOB_DATA.new: + return NO_JOB_DATA + else: + return JOB_DATA.data + finally: + JOB_DATA.new = True + + +class StructuredTestDataPlugin( Plugin ): + name = 'structureddata' + + def options(self, parser, env): + super(StructuredTestDataPlugin, self).options(parser, env=env) + parser.add_option( + '--structured-data-file', action='store', + dest='structured_data_file', metavar="FILE", + default=env.get('NOSE_STRUCTURED_DATA', 'structured_test_data.json'), + help=("Path to JSON file to store the Galaxy structured data report in." 
+ "Default is structured_test_data.json in the working directory " + "[NOSE_STRUCTURED_DATA]")) + + def configure(self, options, conf): + super(StructuredTestDataPlugin, self).configure(options, conf) + self.conf = conf + if not self.enabled: + return + self.tests = [] + self.structured_data_report_file = open(options.structured_data_file, 'w') + + def finalize(self, result): + pass + + def _handle_result(self, test, *args, **kwds): + job_data = fetch_job_data() + id = test.id() + has_data = job_data is not NO_JOB_DATA + entry = { + 'id': id, + 'has_data': has_data, + 'data': job_data if has_data else None, + } + self.tests.append(entry) + + addError = _handle_result + addFailure = _handle_result + addSuccess = _handle_result + + def report(self, stream): + report_obj = { + 'version': '0.1', + 'tests': self.tests, + } + json.dump(report_obj, self.structured_data_report_file) + self.structured_data_report_file.close() diff -r 38b2fa3d3e4973e40998215fae6a9b14d1fecf71 -r 896c18ca6b2f1922f73fc74ba9f2bb6229fb54a1 test/base/interactor.py --- a/test/base/interactor.py +++ b/test/base/interactor.py @@ -6,6 +6,7 @@ eggs.require( "requests" ) from galaxy import util from galaxy.util.odict import odict +from galaxy.util.bunch import Bunch from requests import get from requests import post from json import dumps @@ -210,10 +211,14 @@ submit_response = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=inputs_tree ) submit_response_object = submit_response.json() try: - return self.__dictify_outputs( submit_response_object ), submit_response_object[ 'jobs' ] + return Bunch( + inputs=inputs_tree, + outputs=self.__dictify_outputs( submit_response_object ), + jobs=submit_response_object[ 'jobs' ], + ) except KeyError: message = "Error creating a job for these tool inputs - %s" % submit_response_object[ 'message' ] - raise Exception( message ) + raise RunToolException( message, inputs_tree ) def _create_collection( self, history_id, collection_def ): create_payload 
= dict( @@ -404,6 +409,13 @@ return get( url, params=data ) +class RunToolException(Exception): + + def __init__(self, message, inputs=None): + super(RunToolException, self).__init__(message) + self.inputs = inputs + + GALAXY_INTERACTORS = { 'api': GalaxyInteractorApi, } diff -r 38b2fa3d3e4973e40998215fae6a9b14d1fecf71 -r 896c18ca6b2f1922f73fc74ba9f2bb6229fb54a1 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -1,7 +1,8 @@ import new import sys from base.twilltestcase import TwillTestCase -from base.interactor import build_interactor, stage_data_in_history +from base.interactor import build_interactor, stage_data_in_history, RunToolException +from base.instrument import register_job_data from galaxy.tools import DataManagerTool from galaxy.util import bunch import logging @@ -34,10 +35,48 @@ stage_data_in_history( galaxy_interactor, testdef.test_data(), test_history, shed_tool_id ) - data_list, jobs = galaxy_interactor.run_tool( testdef, test_history ) - self.assertTrue( data_list ) + # Once data is ready, run the tool and check the outputs - record API + # input, job info, tool run exception, as well as exceptions related to + # job output checking and register they with the test plugin so it can + # record structured information. 
+ tool_inputs = None + job_stdio = None + job_output_exceptions = None + tool_execution_exception = None + try: + try: + tool_response = galaxy_interactor.run_tool( testdef, test_history ) + data_list, jobs, tool_inputs = tool_response.outputs, tool_response.jobs, tool_response.inputs + except RunToolException as e: + tool_inputs = e.inputs + tool_execution_exception = e + raise e + except Exception as e: + tool_execution_exception = e + raise e - self._verify_outputs( testdef, test_history, jobs, shed_tool_id, data_list, galaxy_interactor ) + self.assertTrue( data_list ) + + try: + job_stdio = self._verify_outputs( testdef, test_history, jobs, shed_tool_id, data_list, galaxy_interactor ) + except JobOutputsError as e: + job_stdio = e.job_stdio + job_output_exceptions = e.output_exceptions + raise e + except Exception as e: + job_output_exceptions = [e] + raise e + finally: + job_data = {} + if tool_inputs is not None: + job_data["inputs"] = tool_inputs + if job_stdio is not None: + job_data["job"] = job_stdio + if job_output_exceptions: + job_data["output_problems"] = map(str, job_output_exceptions) + if tool_execution_exception: + job_data["execution_problem"] = str(tool_execution_exception) + register_job_data(job_data) galaxy_interactor.delete_history( test_history ) @@ -63,6 +102,7 @@ raise Exception( message ) found_exceptions = [] + job_stdio = None for output_index, output_tuple in enumerate(testdef.outputs): # Get the correct hid name, outfile, attributes = output_tuple @@ -89,9 +129,22 @@ if stream in job_stdio: print >>sys.stderr, self._format_stream( job_stdio[ stream ], stream=stream, format=True ) found_exceptions.append(e) + if job_stdio is None: + job_stdio = galaxy_interactor.get_job_stdio( jobs[0][ 'id' ] ) + if found_exceptions: - big_message = "\n".join(map(str, found_exceptions)) - raise AssertionError(big_message) + raise JobOutputsError(found_exceptions, job_stdio) + else: + return job_stdio + + +class JobOutputsError(AssertionError): + + def 
__init__(self, output_exceptions, job_stdio): + big_message = "\n".join(map(str, output_exceptions)) + super(JobOutputsError, self).__init__(big_message) + self.job_stdio = job_stdio + self.output_exceptions = output_exceptions @nottest Repository URL: https://bitbucket.org/galaxy/galaxy-central/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.
participants (1)
-
commits-noreply@bitbucket.org