5 new commits in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/commits/b716eb3501ce/ Changeset: b716eb3501ce User: jmchilton Date: 2013-10-30 06:01:15 Summary: PEP-8 fixes and style touch-ups for test/functional/test_toolbox.py. Affected #: 1 file diff -r 77d58fdd1c2e0dfde374276209279c5e94b1e4c0 -r b716eb3501cebab20ee2ff2222310aff915750bb test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -1,6 +1,6 @@ -import sys, new +import sys +import new from galaxy.tools.parameters import grouping -from galaxy.tools.parameters import basic from base.twilltestcase import TwillTestCase import galaxy.model from galaxy.model.orm import * @@ -8,8 +8,10 @@ toolbox = None + class ToolTestCase( TwillTestCase ): """Abstract test case that runs tests based on a `galaxy.tools.test.ToolTest`""" + def do_it( self, testdef, shed_tool_id=None ): # If the test generation had an error, raise if testdef.error: @@ -17,22 +19,23 @@ raise testdef.exception else: raise Exception( "Test parse failure" ) + # Start with a new history self.logout() self.login( email='test@bx.psu.edu' ) - admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email=='test@bx.psu.edu' ).one() + admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email == 'test@bx.psu.edu' ).one() self.new_history() latest_history = sa_session.query( galaxy.model.History ) \ - .filter( and_( galaxy.model.History.table.c.deleted==False, - galaxy.model.History.table.c.user_id==admin_user.id ) ) \ + .filter( and_( galaxy.model.History.table.c.deleted == False, + galaxy.model.History.table.c.user_id == admin_user.id ) ) \ .order_by( desc( galaxy.model.History.table.c.create_time ) ) \ .first() assert latest_history is not None, "Problem retrieving latest_history from database" if len( self.get_history_as_data_list() ) > 0: raise AssertionError("ToolTestCase.do_it failed") + # Upload any needed files for fname, extra in testdef.required_files: - children = extra.get( 'children', [] ) metadata = extra.get( 'metadata', [] ) composite_data = extra.get( 'composite_data', [] ) self.upload_file( fname, @@ -51,29 +54,34 @@ assert new_name, 'You must supply the new dataset name as the value tag of the edit_attributes tag' hda_id = self.get_history_as_data_list()[-1].get( 'id' ) try: - self.edit_hda_attribute_info( hda_id = str(hda_id), new_name = new_name ) + self.edit_hda_attribute_info( hda_id=str(hda_id), new_name=new_name ) except: - print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id,new_name) + print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, new_name) else: raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) ) + # We need to handle the case where we've uploaded a valid compressed file since the upload # tool will have uncompressed it on the fly. 
all_inputs = {} for name, value, _ in testdef.inputs: all_inputs[ name ] = value + # See if we have a grouping.Repeat element repeat_name = None for input_name, input_value in testdef.tool.inputs_by_page[0].items(): - if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: #default behavior is to test 1 repeat, for backwards compatibility + if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: # default behavior is to test 1 repeat, for backwards compatibility repeat_name = input_name break + #check if we need to verify number of outputs created dynamically by tool if testdef.tool.force_history_refresh: job_finish_by_output_count = len( self.get_history_as_data_list() ) else: job_finish_by_output_count = False + # Do the first page - page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[0], all_inputs) + page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[0], all_inputs) + # Run the tool self.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs ) print "page_inputs (0)", page_inputs @@ -82,6 +90,7 @@ page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[i], all_inputs) self.submit_form( **page_inputs ) print "page_inputs (%i)" % i, page_inputs + # Check the results ( handles single or multiple tool outputs ). Make sure to pass the correct hid. # The output datasets from the tool should be in the same order as the testdef.outputs. data_list = None @@ -100,7 +109,7 @@ elem_index += 1 try: self.verify_dataset_correctness( outfile, hid=elem_hid, maxseconds=testdef.maxseconds, attributes=attributes, shed_tool_id=shed_tool_id ) - except Exception, e: + except Exception: print >>sys.stderr, self.get_job_stdout( elem.get( 'id' ), format=True ) print >>sys.stderr, self.get_job_stderr( elem.get( 'id' ), format=True ) raise @@ -121,14 +130,14 @@ else: expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value for input_name, input_value in case.inputs.items(): - expanded_inputs.update( self.__expand_grouping( { input_name:input_value }, declared_inputs, prefix = new_prefix ) ) + expanded_inputs.update( self.__expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) ) elif isinstance( value, grouping.Repeat ): - for repeat_index in xrange( 0, 1 ): #need to allow for and figure out how many repeats we have + for repeat_index in xrange( 0, 1 ): # need to allow for and figure out how many repeats we have for r_name, r_value in value.inputs.iteritems(): - new_prefix = "%s_%d" % ( value.name, repeat_index ) - if prefix: - new_prefix = "%s|%s" % ( prefix, new_prefix ) - expanded_inputs.update( self.__expand_grouping( { new_prefix : r_value }, declared_inputs, prefix = new_prefix ) ) + new_prefix = "%s_%d" % ( value.name, repeat_index ) + if prefix: + new_prefix = "%s|%s" % ( prefix, new_prefix ) + expanded_inputs.update( self.__expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) ) elif value.name not in declared_inputs: print "%s not declared in tool test, will not change default value." 
% value.name elif isinstance(declared_inputs[value.name], str): @@ -143,6 +152,7 @@ expanded_inputs[value.name] = declared_inputs[value.name] return expanded_inputs + def build_tests( testing_shed_tools=False ): """ If the module level variable `toolbox` is set, generate `ToolTestCase` @@ -151,12 +161,15 @@ """ if toolbox is None: return + # Push all the toolbox tests to module level G = globals() + # Eliminate all previous tests from G. for key, val in G.items(): if key.startswith( 'TestForTool_' ): del G[ key ] + for i, tool_id in enumerate( toolbox.tools_by_id ): tool = toolbox.get_tool( tool_id ) if tool.tests: https://bitbucket.org/galaxy/galaxy-central/commits/4ef6fa54e0b9/ Changeset: 4ef6fa54e0b9 User: jmchilton Date: 2013-10-30 06:01:15 Summary: PEP-8 fixes for scripts/functional_tests.py. Still contains some unused imports. Perhaps these are reimported elsewhere? Affected #: 1 file diff -r b716eb3501cebab20ee2ff2222310aff915750bb -r 4ef6fa54e0b98e24979742c775c287cb9fbf3d27 scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -1,6 +1,10 @@ #!/usr/bin/env python -import os, sys, shutil, tempfile, re +import os +import sys +import shutil +import tempfile +import re from ConfigParser import SafeConfigParser # Assume we are run from the galaxy root directory, add lib to the python path @@ -25,10 +29,17 @@ # http://code.google.com/p/python-nose/issues/detail?id=284 eggs.require( "pysqlite" ) -import atexit, logging, os, os.path, sys, tempfile -import twill, unittest, time -import subprocess, sys, threading, random -import httplib, socket +import atexit +import logging +import os.path +import twill +import unittest +import time +import subprocess +import threading +import random +import httplib +import socket from paste import httpserver import galaxy.app from galaxy.app import UniverseApplication @@ -56,6 +67,7 @@ # should this serve static resources (scripts, images, styles, etc.) STATIC_ENABLED = True + def get_static_settings(): """Returns dictionary of the settings necessary for a galaxy App to be wrapped in the static middleware. @@ -68,16 +80,17 @@ #TODO: these should be copied from universe_wsgi.ini return dict( #TODO: static_enabled needed here? - static_enabled = True, - static_cache_time = 360, - static_dir = static_dir, - static_images_dir = os.path.join( static_dir, 'images', '' ), - static_favicon_dir = os.path.join( static_dir, 'favicon.ico' ), - static_scripts_dir = os.path.join( static_dir, 'scripts', '' ), - static_style_dir = os.path.join( static_dir, 'june_2007_style', 'blue' ), - static_robots_txt = os.path.join( static_dir, 'robots.txt' ), + static_enabled=True, + static_cache_time=360, + static_dir=static_dir, + static_images_dir=os.path.join( static_dir, 'images', '' ), + static_favicon_dir=os.path.join( static_dir, 'favicon.ico' ), + static_scripts_dir=os.path.join( static_dir, 'scripts', '' ), + static_style_dir=os.path.join( static_dir, 'june_2007_style', 'blue' ), + static_robots_txt=os.path.join( static_dir, 'robots.txt' ), ) + def get_webapp_global_conf(): """Get the global_conf dictionary sent as the first argument to app_factory. """ @@ -87,12 +100,13 @@ global_conf.update( get_static_settings() ) return global_conf + def generate_config_file( input_filename, output_filename, config_items ): ''' Generate a config file with the configuration that has been defined for the embedded web application. 
This is mostly relevant when setting metadata externally, since the script for doing that does not have access to app.config. - ''' + ''' cp = SafeConfigParser() cp.read( input_filename ) config_items_by_section = [] @@ -110,9 +124,9 @@ config_tuple = 'app:main', label, value config_items_by_section.append( config_tuple ) print( config_items_by_section ) + # Replace the default values with the provided configuration. for section, label, value in config_items_by_section: - if cp.has_option( section, label ): cp.remove_option( section, label ) cp.set( section, label, str( value ) ) @@ -120,6 +134,7 @@ cp.write( fh ) fh.close() + def run_tests( test_config ): loader = nose.loader.TestLoader( config=test_config ) plug_loader = test_config.plugins.prepareTestLoader( loader ) @@ -134,7 +149,8 @@ test_runner = plug_runner return test_runner.run( tests ) -def main(): + +def main(): # ---- Configuration ------------------------------------------------------ galaxy_test_host = os.environ.get( 'GALAXY_TEST_HOST', default_galaxy_test_host ) galaxy_test_port = os.environ.get( 'GALAXY_TEST_PORT', None ) @@ -174,7 +190,7 @@ start_server = 'GALAXY_TEST_EXTERNAL' not in os.environ if os.path.exists( 'tool_data_table_conf.test.xml' ): tool_data_table_config_path = 'tool_data_table_conf.test.xml' - else: + else: tool_data_table_config_path = 'tool_data_table_conf.xml' shed_tool_data_table_config = 'shed_tool_data_table_conf.xml' tool_dependency_dir = os.environ.get( 'GALAXY_TOOL_DEPENDENCY_DIR', None ) @@ -182,7 +198,7 @@ galaxy_test_tmp_dir = os.environ.get( 'GALAXY_TEST_TMP_DIR', None ) if galaxy_test_tmp_dir is None: galaxy_test_tmp_dir = tempfile.mkdtemp() - + if start_server: psu_production = False galaxy_test_proxy_port = None @@ -212,29 +228,29 @@ job_working_directory = os.path.join( new_file_path, 'job_working_directory' ) os.mkdir( cluster_files_directory ) os.mkdir( job_working_directory ) - kwargs = dict( database_engine_option_pool_size = '10', - database_engine_option_max_overflow = '20', - database_engine_option_strategy = 'threadlocal', - nginx_x_accel_redirect_base = '/_x_accel_redirect', - nginx_upload_store = nginx_upload_store, - nginx_upload_path = '/_upload', - allow_library_path_paste = 'True', - cluster_files_directory = cluster_files_directory, - job_working_directory = job_working_directory, - outputs_to_working_directory = 'True', - static_enabled = 'False', - debug = 'False', - track_jobs_in_database = 'True', - job_scheduler_policy = 'FIFO', - start_job_runners = 'pbs', - default_cluster_job_runner = default_cluster_job_runner ) + kwargs = dict( database_engine_option_pool_size='10', + database_engine_option_max_overflow='20', + database_engine_option_strategy='threadlocal', + nginx_x_accel_redirect_base='/_x_accel_redirect', + nginx_upload_store=nginx_upload_store, + nginx_upload_path='/_upload', + allow_library_path_paste='True', + cluster_files_directory=cluster_files_directory, + job_working_directory=job_working_directory, + outputs_to_working_directory='True', + static_enabled='False', + debug='False', + track_jobs_in_database='True', + job_scheduler_policy='FIFO', + start_job_runners='pbs', + default_cluster_job_runner=default_cluster_job_runner ) psu_production = True else: tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir ) # Configure the database path. if 'GALAXY_TEST_DBPATH' in os.environ: galaxy_db_path = os.environ[ 'GALAXY_TEST_DBPATH' ] - else: + else: galaxy_db_path = os.path.join( tempdir, 'database' ) # Configure the paths Galaxy needs to test tools. 
file_path = os.path.join( galaxy_db_path, 'files' ) @@ -252,33 +268,33 @@ except OSError: pass - # ---- Build Application -------------------------------------------------- - app = None + # ---- Build Application -------------------------------------------------- + app = None if start_server: - kwargs = dict( admin_users = 'test@bx.psu.edu', - allow_library_path_paste = True, - allow_user_creation = True, - allow_user_deletion = True, - database_connection = database_connection, - datatype_converters_config_file = "datatype_converters_conf.xml.sample", - file_path = file_path, - id_secret = 'changethisinproductiontoo', - job_queue_workers = 5, - job_working_directory = job_working_directory, - library_import_dir = library_import_dir, - log_destination = "stdout", - new_file_path = new_file_path, - running_functional_tests = True, - shed_tool_data_table_config = shed_tool_data_table_config, - template_path = "templates", - test_conf = "test.conf", - tool_config_file = tool_config_file, - tool_data_table_config_path = tool_data_table_config_path, - tool_path = tool_path, - tool_parse_help = False, - update_integrated_tool_panel = False, - use_heartbeat = False, - user_library_import_dir = user_library_import_dir ) + kwargs = dict( admin_users='test@bx.psu.edu', + allow_library_path_paste=True, + allow_user_creation=True, + allow_user_deletion=True, + database_connection=database_connection, + datatype_converters_config_file="datatype_converters_conf.xml.sample", + file_path=file_path, + id_secret='changethisinproductiontoo', + job_queue_workers=5, + job_working_directory=job_working_directory, + library_import_dir=library_import_dir, + log_destination="stdout", + new_file_path=new_file_path, + running_functional_tests=True, + shed_tool_data_table_config=shed_tool_data_table_config, + template_path="templates", + test_conf="test.conf", + tool_config_file=tool_config_file, + tool_data_table_config_path=tool_data_table_config_path, + tool_path=tool_path, + tool_parse_help=False, + update_integrated_tool_panel=False, + use_heartbeat=False, + user_library_import_dir=user_library_import_dir ) if psu_production: kwargs[ 'global_conf' ] = None if not database_connection.startswith( 'sqlite://' ): @@ -309,7 +325,7 @@ # ---- Run webserver ------------------------------------------------------ server = None - + if start_server: webapp = buildapp.app_factory( kwargs[ 'global_conf' ], app=app, use_translogger=False, static_enabled=STATIC_ENABLED ) @@ -338,7 +354,7 @@ t.start() # Test if the server is up for i in range( 10 ): - conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port ) # directly test the app, not the proxy + conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port ) # directly test the app, not the proxy conn.request( "GET", "/" ) if conn.getresponse().status == 200: break @@ -347,7 +363,7 @@ raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" ) # Test if the proxy server is up if psu_production: - conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_proxy_port ) # directly test the app, not the proxy + conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_proxy_port ) # directly test the app, not the proxy conn.request( "GET", "/" ) if not conn.getresponse().status == 200: raise Exception( "Test HTTP proxy server did not return '200 OK'" ) @@ -366,10 +382,10 @@ success = False try: tool_configs = app.config.tool_configs - # What requires these? Handy for (eg) functional tests to save outputs? + # What requires these? 
Handy for (eg) functional tests to save outputs? if galaxy_test_save: os.environ[ 'GALAXY_TEST_SAVE' ] = galaxy_test_save - # Pass in through script setenv, will leave a copy of ALL test validate files + # Pass in through script setenv, will leave a copy of ALL test validate files os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host if testing_migrated_tools or testing_installed_tools: shed_tools_dict = {} @@ -398,7 +414,7 @@ functional.test_toolbox.build_tests( testing_shed_tools=True ) test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() ) test_config.configure( sys.argv ) - result = run_tests( test_config ) + result = run_tests( test_config ) success = result.wasSuccessful() try: os.unlink( tmp_tool_panel_conf ) @@ -415,11 +431,11 @@ os.environ[ 'GALAXY_TEST_FILE_DIR' ] = galaxy_test_file_dir test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() ) test_config.configure( sys.argv ) - result = run_tests( test_config ) + result = run_tests( test_config ) success = result.wasSuccessful() except: log.exception( "Failure running tests" ) - + log.info( "Shutting down" ) # ---- Tear down ----------------------------------------------------------- if server: https://bitbucket.org/galaxy/galaxy-central/commits/2dfc7b0390d0/ Changeset: 2dfc7b0390d0 User: jmchilton Date: 2013-10-30 06:01:15 Summary: Refactor test_toolbox.py into smaller methods. Enhances readability; hopefully this will aid in plugging in an alternative API-driven backend. Affected #: 1 file diff -r 4ef6fa54e0b98e24979742c775c287cb9fbf3d27 -r 2dfc7b0390d056d30142010f830fe82257f1307c test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -3,7 +3,7 @@ from galaxy.tools.parameters import grouping from base.twilltestcase import TwillTestCase import galaxy.model -from galaxy.model.orm import * +from galaxy.model.orm import and_, desc from galaxy.model.mapping import context as sa_session toolbox = None @@ -13,6 +13,23 @@ """Abstract test case that runs tests based on a `galaxy.tools.test.ToolTest`""" def do_it( self, testdef, shed_tool_id=None ): + """ + Run through a tool test case.
+ """ + self.__handle_test_def_errors( testdef ) + + latest_history = self.__setup_test_history() + + self.__setup_test_data( testdef, shed_tool_id ) + + data_list = self.__run_tool( testdef ) + self.assertTrue( data_list ) + + self.__verify_outputs( testdef, shed_tool_id, data_list ) + + self.__delete_history( latest_history ) + + def __handle_test_def_errors(self, testdef): # If the test generation had an error, raise if testdef.error: if testdef.exception: @@ -20,6 +37,7 @@ else: raise Exception( "Test parse failure" ) + def __setup_test_history( self ): # Start with a new history self.logout() self.login( email='test@bx.psu.edu' ) @@ -33,7 +51,9 @@ assert latest_history is not None, "Problem retrieving latest_history from database" if len( self.get_history_as_data_list() ) > 0: raise AssertionError("ToolTestCase.do_it failed") + return latest_history + def __setup_test_data( self, testdef, shed_tool_id ): # Upload any needed files for fname, extra in testdef.required_files: metadata = extra.get( 'metadata', [] ) @@ -44,9 +64,11 @@ metadata=metadata, composite_data=composite_data, shed_tool_id=shed_tool_id ) + print "Uploaded file: ", fname, ", ftype: ", extra.get( 'ftype', 'auto' ), ", extra: ", extra #Post upload attribute editing edit_attributes = extra.get( 'edit_attributes', [] ) + #currently only renaming is supported for edit_att in edit_attributes: if edit_att.get( 'type', None ) == 'name': @@ -60,6 +82,7 @@ else: raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) ) + def __run_tool( self, testdef ): # We need to handle the case where we've uploaded a valid compressed file since the upload # tool will have uncompressed it on the fly. all_inputs = {} @@ -98,21 +121,30 @@ data_list = self.get_history_as_data_list() if job_finish_by_output_count and len( testdef.outputs ) > ( len( data_list ) - job_finish_by_output_count ): data_list = None - self.assertTrue( data_list ) + return data_list + + def __verify_outputs( self, testdef, shed_tool_id, data_list ): + maxseconds = testdef.maxseconds + elem_index = 0 - len( testdef.outputs ) for output_tuple in testdef.outputs: - name, outfile, attributes = output_tuple # Get the correct hid elem = data_list[ elem_index ] self.assertTrue( elem is not None ) + self.__verify_output( output_tuple, shed_tool_id, elem, maxseconds=maxseconds ) + elem_index += 1 + + def __verify_output( self, output_tuple, shed_tool_id, elem, maxseconds ): + name, outfile, attributes = output_tuple elem_hid = elem.get( 'hid' ) - elem_index += 1 try: - self.verify_dataset_correctness( outfile, hid=elem_hid, maxseconds=testdef.maxseconds, attributes=attributes, shed_tool_id=shed_tool_id ) + self.verify_dataset_correctness( outfile, hid=elem_hid, attributes=attributes, shed_tool_id=shed_tool_id ) except Exception: print >>sys.stderr, self.get_job_stdout( elem.get( 'id' ), format=True ) print >>sys.stderr, self.get_job_stderr( elem.get( 'id' ), format=True ) raise + + def __delete_history( self, latest_history ): self.delete_history( id=self.security.encode_id( latest_history.id ) ) def __expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): https://bitbucket.org/galaxy/galaxy-central/commits/83a95ea606d9/ Changeset: 83a95ea606d9 User: jmchilton Date: 2013-10-30 06:01:15 Summary: Implement functional test env. variable GALAXY_TEST_DB_TEMPLATE. Middle ground between recreating a completely new database and pointing at existing database with GALAXY_TEST_DBURI. 
The former requires a lot of setup time, while the latter results in test failures in certain cases (namely tool shed tests expecting a clean database). GALAXY_TEST_DB_TEMPLATE can be either an absolute file path or a URL. To facilitate this, a new Galaxy config option (database_auto_migrate) has been added. If this option is enabled and Galaxy starts up pointing at an existing database that is not at the newest version, the database will be automatically migrated. This option defaults to False, but is enabled in testing if GALAXY_TEST_DB_TEMPLATE is set. I think we should go a step further and make this (database_auto_migrate) default to True if database_connection references an sqlite database - unless we believe there are Galaxy instances out there based on sqlite that are REALLY old or that are production enough to warrant requiring admins to do that database migration in a separate step (presumably encouraging them to make a backup pre-migration). Thoughts? Affected #: 3 files diff -r 2dfc7b0390d056d30142010f830fe82257f1307c -r 83a95ea606d9fcd15f7554b71804625e203eb1ed lib/galaxy/config.py --- a/lib/galaxy/config.py +++ b/lib/galaxy/config.py @@ -35,10 +35,13 @@ self.gid = os.getgid() # if running under newgrp(1) we'll need to fix the group of data created on the cluster # Database related configuration self.database = resolve_path( kwargs.get( "database_file", "database/universe.sqlite" ), self.root ) - self.database_connection = kwargs.get( "database_connection", False ) + self.database_connection = kwargs.get( "database_connection", False ) self.database_engine_options = get_database_engine_options( kwargs ) self.database_create_tables = string_as_bool( kwargs.get( "database_create_tables", "True" ) ) self.database_query_profiling_proxy = string_as_bool( kwargs.get( "database_query_profiling_proxy", "False" ) ) + # Don't set this to true for production databases, but probably should + # default to True for sqlite databases.
+ self.database_auto_migrate = string_as_bool( kwargs.get( "database_auto_migrate", "False" ) ) # Where dataset files are stored self.file_path = resolve_path( kwargs.get( "file_path", "database/files" ), self.root ) self.new_file_path = resolve_path( kwargs.get( "new_file_path", "database/tmp" ), self.root ) diff -r 2dfc7b0390d056d30142010f830fe82257f1307c -r 83a95ea606d9fcd15f7554b71804625e203eb1ed lib/galaxy/model/migrate/check.py --- a/lib/galaxy/model/migrate/check.py +++ b/lib/galaxy/model/migrate/check.py @@ -45,7 +45,22 @@ log.error( "database_connection contains an unknown SQLAlchemy database dialect: %s" % dialect ) # Create engine and metadata engine = create_engine( url, **engine_options ) + + def migrate(): + try: + # Declare the database to be under a repository's version control + db_schema = schema.ControlledSchema.create( engine, migrate_repository ) + except: + # The database is already under version control + db_schema = schema.ControlledSchema( engine, migrate_repository ) + # Apply all scripts to get to current version + migrate_to_current_version( engine, db_schema ) + meta = MetaData( bind=engine ) + if getattr( app.config, 'database_auto_migrate', False ): + migrate() + return + # Try to load dataset table try: dataset_table = Table( "dataset", meta, autoload=True ) @@ -55,15 +70,7 @@ if app: app.new_installation = True log.info( "No database, initializing" ) - # Database might or might not be versioned - try: - # Declare the database to be under a repository's version control - db_schema = schema.ControlledSchema.create( engine, migrate_repository ) - except: - # The database is already under version control - db_schema = schema.ControlledSchema( engine, migrate_repository ) - # Apply all scripts to get to current version - migrate_to_current_version( engine, db_schema ) + migrate() return try: hda_table = Table( "history_dataset_association", meta, autoload=True ) diff -r 2dfc7b0390d056d30142010f830fe82257f1307c -r 83a95ea606d9fcd15f7554b71804625e203eb1ed scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -40,6 +40,7 @@ import random import httplib import socket +import urllib from paste import httpserver import galaxy.app from galaxy.app import UniverseApplication @@ -150,6 +151,21 @@ return test_runner.run( tests ) +def __copy_database_template( source, db_path ): + """ + Copy a 'clean' sqlite template database (from file or URL) to specified + database path. 
+ """ + os.makedirs( os.path.dirname( db_path ) ) + if os.path.exists( source ): + shutil.copy( source, db_path ) + assert os.path.exists( db_path ) + elif source.startswith("http"): + urllib.urlretrieve( source, db_path ) + else: + raise Exception( "Failed to copy database template from source %s" % source ) + + def main(): # ---- Configuration ------------------------------------------------------ galaxy_test_host = os.environ.get( 'GALAXY_TEST_HOST', default_galaxy_test_host ) @@ -199,6 +215,8 @@ if galaxy_test_tmp_dir is None: galaxy_test_tmp_dir = tempfile.mkdtemp() + database_auto_migrate = False + if start_server: psu_production = False galaxy_test_proxy_port = None @@ -259,7 +277,16 @@ if 'GALAXY_TEST_DBURI' in os.environ: database_connection = os.environ['GALAXY_TEST_DBURI'] else: - database_connection = 'sqlite:///' + os.path.join( galaxy_db_path, 'universe.sqlite' ) + db_path = os.path.join( galaxy_db_path, 'universe.sqlite' ) + if 'GALAXY_TEST_DB_TEMPLATE' in os.environ: + # Middle ground between recreating a completely new + # database and pointing at existing database with + # GALAXY_TEST_DBURI. The former requires a lot of setup + # time, the latter results in test failures in certain + # cases (namely tool shed tests expecting clean database). + __copy_database_template(os.environ['GALAXY_TEST_DB_TEMPLATE'], db_path) + database_auto_migrate = True + database_connection = 'sqlite:///%s' % db_path kwargs = {} for dir in file_path, new_file_path: try: @@ -276,6 +303,7 @@ allow_user_creation=True, allow_user_deletion=True, database_connection=database_connection, + database_auto_migrate=database_auto_migrate, datatype_converters_config_file="datatype_converters_conf.xml.sample", file_path=file_path, id_secret='changethisinproductiontoo', https://bitbucket.org/galaxy/galaxy-central/commits/4beddecc0ebc/ Changeset: 4beddecc0ebc User: jmchilton Date: 2013-11-01 16:14:24 Summary: Merged in jmchilton/galaxy-central-fork-1 (pull request #246) Small functional test framework enhancements Affected #: 4 files diff -r 0dccf0970fefa7a54b036d20a7d512f3d27a54de -r 4beddecc0ebc6bf13b5d00dc48cbabe65c401e10 lib/galaxy/config.py --- a/lib/galaxy/config.py +++ b/lib/galaxy/config.py @@ -35,10 +35,13 @@ self.gid = os.getgid() # if running under newgrp(1) we'll need to fix the group of data created on the cluster # Database related configuration self.database = resolve_path( kwargs.get( "database_file", "database/universe.sqlite" ), self.root ) - self.database_connection = kwargs.get( "database_connection", False ) + self.database_connection = kwargs.get( "database_connection", False ) self.database_engine_options = get_database_engine_options( kwargs ) self.database_create_tables = string_as_bool( kwargs.get( "database_create_tables", "True" ) ) self.database_query_profiling_proxy = string_as_bool( kwargs.get( "database_query_profiling_proxy", "False" ) ) + # Don't set this to true for production databases, but probably should + # default to True for sqlite databases. 
+ self.database_auto_migrate = string_as_bool( kwargs.get( "database_auto_migrate", "False" ) ) # Where dataset files are stored self.file_path = resolve_path( kwargs.get( "file_path", "database/files" ), self.root ) self.new_file_path = resolve_path( kwargs.get( "new_file_path", "database/tmp" ), self.root ) diff -r 0dccf0970fefa7a54b036d20a7d512f3d27a54de -r 4beddecc0ebc6bf13b5d00dc48cbabe65c401e10 lib/galaxy/model/migrate/check.py --- a/lib/galaxy/model/migrate/check.py +++ b/lib/galaxy/model/migrate/check.py @@ -45,7 +45,22 @@ log.error( "database_connection contains an unknown SQLAlchemy database dialect: %s" % dialect ) # Create engine and metadata engine = create_engine( url, **engine_options ) + + def migrate(): + try: + # Declare the database to be under a repository's version control + db_schema = schema.ControlledSchema.create( engine, migrate_repository ) + except: + # The database is already under version control + db_schema = schema.ControlledSchema( engine, migrate_repository ) + # Apply all scripts to get to current version + migrate_to_current_version( engine, db_schema ) + meta = MetaData( bind=engine ) + if getattr( app.config, 'database_auto_migrate', False ): + migrate() + return + # Try to load dataset table try: dataset_table = Table( "dataset", meta, autoload=True ) @@ -55,15 +70,7 @@ if app: app.new_installation = True log.info( "No database, initializing" ) - # Database might or might not be versioned - try: - # Declare the database to be under a repository's version control - db_schema = schema.ControlledSchema.create( engine, migrate_repository ) - except: - # The database is already under version control - db_schema = schema.ControlledSchema( engine, migrate_repository ) - # Apply all scripts to get to current version - migrate_to_current_version( engine, db_schema ) + migrate() return try: hda_table = Table( "history_dataset_association", meta, autoload=True ) diff -r 0dccf0970fefa7a54b036d20a7d512f3d27a54de -r 4beddecc0ebc6bf13b5d00dc48cbabe65c401e10 scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -1,6 +1,10 @@ #!/usr/bin/env python -import os, sys, shutil, tempfile, re +import os +import sys +import shutil +import tempfile +import re from ConfigParser import SafeConfigParser # Assume we are run from the galaxy root directory, add lib to the python path @@ -25,10 +29,18 @@ # http://code.google.com/p/python-nose/issues/detail?id=284 eggs.require( "pysqlite" ) -import atexit, logging, os, os.path, sys, tempfile -import twill, unittest, time -import subprocess, sys, threading, random -import httplib, socket +import atexit +import logging +import os.path +import twill +import unittest +import time +import subprocess +import threading +import random +import httplib +import socket +import urllib from paste import httpserver import galaxy.app from galaxy.app import UniverseApplication @@ -56,6 +68,7 @@ # should this serve static resources (scripts, images, styles, etc.) STATIC_ENABLED = True + def get_static_settings(): """Returns dictionary of the settings necessary for a galaxy App to be wrapped in the static middleware. @@ -68,16 +81,17 @@ #TODO: these should be copied from universe_wsgi.ini return dict( #TODO: static_enabled needed here? 
- static_enabled = True, - static_cache_time = 360, - static_dir = static_dir, - static_images_dir = os.path.join( static_dir, 'images', '' ), - static_favicon_dir = os.path.join( static_dir, 'favicon.ico' ), - static_scripts_dir = os.path.join( static_dir, 'scripts', '' ), - static_style_dir = os.path.join( static_dir, 'june_2007_style', 'blue' ), - static_robots_txt = os.path.join( static_dir, 'robots.txt' ), + static_enabled=True, + static_cache_time=360, + static_dir=static_dir, + static_images_dir=os.path.join( static_dir, 'images', '' ), + static_favicon_dir=os.path.join( static_dir, 'favicon.ico' ), + static_scripts_dir=os.path.join( static_dir, 'scripts', '' ), + static_style_dir=os.path.join( static_dir, 'june_2007_style', 'blue' ), + static_robots_txt=os.path.join( static_dir, 'robots.txt' ), ) + def get_webapp_global_conf(): """Get the global_conf dictionary sent as the first argument to app_factory. """ @@ -87,12 +101,13 @@ global_conf.update( get_static_settings() ) return global_conf + def generate_config_file( input_filename, output_filename, config_items ): ''' Generate a config file with the configuration that has been defined for the embedded web application. This is mostly relevant when setting metadata externally, since the script for doing that does not have access to app.config. - ''' + ''' cp = SafeConfigParser() cp.read( input_filename ) config_items_by_section = [] @@ -110,9 +125,9 @@ config_tuple = 'app:main', label, value config_items_by_section.append( config_tuple ) print( config_items_by_section ) + # Replace the default values with the provided configuration. for section, label, value in config_items_by_section: - if cp.has_option( section, label ): cp.remove_option( section, label ) cp.set( section, label, str( value ) ) @@ -120,6 +135,7 @@ cp.write( fh ) fh.close() + def run_tests( test_config ): loader = nose.loader.TestLoader( config=test_config ) plug_loader = test_config.plugins.prepareTestLoader( loader ) @@ -134,7 +150,23 @@ test_runner = plug_runner return test_runner.run( tests ) -def main(): + +def __copy_database_template( source, db_path ): + """ + Copy a 'clean' sqlite template database (from file or URL) to specified + database path. 
+ """ + os.makedirs( os.path.dirname( db_path ) ) + if os.path.exists( source ): + shutil.copy( source, db_path ) + assert os.path.exists( db_path ) + elif source.startswith("http"): + urllib.urlretrieve( source, db_path ) + else: + raise Exception( "Failed to copy database template from source %s" % source ) + + +def main(): # ---- Configuration ------------------------------------------------------ galaxy_test_host = os.environ.get( 'GALAXY_TEST_HOST', default_galaxy_test_host ) galaxy_test_port = os.environ.get( 'GALAXY_TEST_PORT', None ) @@ -174,7 +206,7 @@ start_server = 'GALAXY_TEST_EXTERNAL' not in os.environ if os.path.exists( 'tool_data_table_conf.test.xml' ): tool_data_table_config_path = 'tool_data_table_conf.test.xml' - else: + else: tool_data_table_config_path = 'tool_data_table_conf.xml' shed_tool_data_table_config = 'shed_tool_data_table_conf.xml' tool_dependency_dir = os.environ.get( 'GALAXY_TOOL_DEPENDENCY_DIR', None ) @@ -182,7 +214,9 @@ galaxy_test_tmp_dir = os.environ.get( 'GALAXY_TEST_TMP_DIR', None ) if galaxy_test_tmp_dir is None: galaxy_test_tmp_dir = tempfile.mkdtemp() - + + database_auto_migrate = False + if start_server: psu_production = False galaxy_test_proxy_port = None @@ -212,29 +246,29 @@ job_working_directory = os.path.join( new_file_path, 'job_working_directory' ) os.mkdir( cluster_files_directory ) os.mkdir( job_working_directory ) - kwargs = dict( database_engine_option_pool_size = '10', - database_engine_option_max_overflow = '20', - database_engine_option_strategy = 'threadlocal', - nginx_x_accel_redirect_base = '/_x_accel_redirect', - nginx_upload_store = nginx_upload_store, - nginx_upload_path = '/_upload', - allow_library_path_paste = 'True', - cluster_files_directory = cluster_files_directory, - job_working_directory = job_working_directory, - outputs_to_working_directory = 'True', - static_enabled = 'False', - debug = 'False', - track_jobs_in_database = 'True', - job_scheduler_policy = 'FIFO', - start_job_runners = 'pbs', - default_cluster_job_runner = default_cluster_job_runner ) + kwargs = dict( database_engine_option_pool_size='10', + database_engine_option_max_overflow='20', + database_engine_option_strategy='threadlocal', + nginx_x_accel_redirect_base='/_x_accel_redirect', + nginx_upload_store=nginx_upload_store, + nginx_upload_path='/_upload', + allow_library_path_paste='True', + cluster_files_directory=cluster_files_directory, + job_working_directory=job_working_directory, + outputs_to_working_directory='True', + static_enabled='False', + debug='False', + track_jobs_in_database='True', + job_scheduler_policy='FIFO', + start_job_runners='pbs', + default_cluster_job_runner=default_cluster_job_runner ) psu_production = True else: tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir ) # Configure the database path. if 'GALAXY_TEST_DBPATH' in os.environ: galaxy_db_path = os.environ[ 'GALAXY_TEST_DBPATH' ] - else: + else: galaxy_db_path = os.path.join( tempdir, 'database' ) # Configure the paths Galaxy needs to test tools. file_path = os.path.join( galaxy_db_path, 'files' ) @@ -243,7 +277,16 @@ if 'GALAXY_TEST_DBURI' in os.environ: database_connection = os.environ['GALAXY_TEST_DBURI'] else: - database_connection = 'sqlite:///' + os.path.join( galaxy_db_path, 'universe.sqlite' ) + db_path = os.path.join( galaxy_db_path, 'universe.sqlite' ) + if 'GALAXY_TEST_DB_TEMPLATE' in os.environ: + # Middle ground between recreating a completely new + # database and pointing at existing database with + # GALAXY_TEST_DBURI. 
The former requires a lot of setup + # time, the latter results in test failures in certain + # cases (namely tool shed tests expecting clean database). + __copy_database_template(os.environ['GALAXY_TEST_DB_TEMPLATE'], db_path) + database_auto_migrate = True + database_connection = 'sqlite:///%s' % db_path kwargs = {} for dir in file_path, new_file_path: try: @@ -252,33 +295,34 @@ except OSError: pass - # ---- Build Application -------------------------------------------------- - app = None + # ---- Build Application -------------------------------------------------- + app = None if start_server: - kwargs = dict( admin_users = 'test@bx.psu.edu', - allow_library_path_paste = True, - allow_user_creation = True, - allow_user_deletion = True, - database_connection = database_connection, - datatype_converters_config_file = "datatype_converters_conf.xml.sample", - file_path = file_path, - id_secret = 'changethisinproductiontoo', - job_queue_workers = 5, - job_working_directory = job_working_directory, - library_import_dir = library_import_dir, - log_destination = "stdout", - new_file_path = new_file_path, - running_functional_tests = True, - shed_tool_data_table_config = shed_tool_data_table_config, - template_path = "templates", - test_conf = "test.conf", - tool_config_file = tool_config_file, - tool_data_table_config_path = tool_data_table_config_path, - tool_path = tool_path, - tool_parse_help = False, - update_integrated_tool_panel = False, - use_heartbeat = False, - user_library_import_dir = user_library_import_dir ) + kwargs = dict( admin_users='test@bx.psu.edu', + allow_library_path_paste=True, + allow_user_creation=True, + allow_user_deletion=True, + database_connection=database_connection, + database_auto_migrate=database_auto_migrate, + datatype_converters_config_file="datatype_converters_conf.xml.sample", + file_path=file_path, + id_secret='changethisinproductiontoo', + job_queue_workers=5, + job_working_directory=job_working_directory, + library_import_dir=library_import_dir, + log_destination="stdout", + new_file_path=new_file_path, + running_functional_tests=True, + shed_tool_data_table_config=shed_tool_data_table_config, + template_path="templates", + test_conf="test.conf", + tool_config_file=tool_config_file, + tool_data_table_config_path=tool_data_table_config_path, + tool_path=tool_path, + tool_parse_help=False, + update_integrated_tool_panel=False, + use_heartbeat=False, + user_library_import_dir=user_library_import_dir ) if psu_production: kwargs[ 'global_conf' ] = None if not database_connection.startswith( 'sqlite://' ): @@ -309,7 +353,7 @@ # ---- Run webserver ------------------------------------------------------ server = None - + if start_server: webapp = buildapp.app_factory( kwargs[ 'global_conf' ], app=app, use_translogger=False, static_enabled=STATIC_ENABLED ) @@ -338,7 +382,7 @@ t.start() # Test if the server is up for i in range( 10 ): - conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port ) # directly test the app, not the proxy + conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port ) # directly test the app, not the proxy conn.request( "GET", "/" ) if conn.getresponse().status == 200: break @@ -347,7 +391,7 @@ raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" ) # Test if the proxy server is up if psu_production: - conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_proxy_port ) # directly test the app, not the proxy + conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_proxy_port ) # directly 
test the app, not the proxy conn.request( "GET", "/" ) if not conn.getresponse().status == 200: raise Exception( "Test HTTP proxy server did not return '200 OK'" ) @@ -366,10 +410,10 @@ success = False try: tool_configs = app.config.tool_configs - # What requires these? Handy for (eg) functional tests to save outputs? + # What requires these? Handy for (eg) functional tests to save outputs? if galaxy_test_save: os.environ[ 'GALAXY_TEST_SAVE' ] = galaxy_test_save - # Pass in through script setenv, will leave a copy of ALL test validate files + # Pass in through script setenv, will leave a copy of ALL test validate files os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host if testing_migrated_tools or testing_installed_tools: shed_tools_dict = {} @@ -398,7 +442,7 @@ functional.test_toolbox.build_tests( testing_shed_tools=True ) test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() ) test_config.configure( sys.argv ) - result = run_tests( test_config ) + result = run_tests( test_config ) success = result.wasSuccessful() try: os.unlink( tmp_tool_panel_conf ) @@ -415,11 +459,11 @@ os.environ[ 'GALAXY_TEST_FILE_DIR' ] = galaxy_test_file_dir test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() ) test_config.configure( sys.argv ) - result = run_tests( test_config ) + result = run_tests( test_config ) success = result.wasSuccessful() except: log.exception( "Failure running tests" ) - + log.info( "Shutting down" ) # ---- Tear down ----------------------------------------------------------- if server: diff -r 0dccf0970fefa7a54b036d20a7d512f3d27a54de -r 4beddecc0ebc6bf13b5d00dc48cbabe65c401e10 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -1,38 +1,61 @@ -import sys, new +import sys +import new from galaxy.tools.parameters import grouping -from galaxy.tools.parameters import basic from base.twilltestcase import TwillTestCase import galaxy.model -from galaxy.model.orm import * +from galaxy.model.orm import and_, desc from galaxy.model.mapping import context as sa_session toolbox = None + class ToolTestCase( TwillTestCase ): """Abstract test case that runs tests based on a `galaxy.tools.test.ToolTest`""" + def do_it( self, testdef, shed_tool_id=None ): + """ + Run through a tool test case. 
+ """ + self.__handle_test_def_errors( testdef ) + + latest_history = self.__setup_test_history() + + self.__setup_test_data( testdef, shed_tool_id ) + + data_list = self.__run_tool( testdef ) + self.assertTrue( data_list ) + + self.__verify_outputs( testdef, shed_tool_id, data_list ) + + self.__delete_history( latest_history ) + + def __handle_test_def_errors(self, testdef): # If the test generation had an error, raise if testdef.error: if testdef.exception: raise testdef.exception else: raise Exception( "Test parse failure" ) + + def __setup_test_history( self ): # Start with a new history self.logout() self.login( email='test@bx.psu.edu' ) - admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email=='test@bx.psu.edu' ).one() + admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email == 'test@bx.psu.edu' ).one() self.new_history() latest_history = sa_session.query( galaxy.model.History ) \ - .filter( and_( galaxy.model.History.table.c.deleted==False, - galaxy.model.History.table.c.user_id==admin_user.id ) ) \ + .filter( and_( galaxy.model.History.table.c.deleted == False, + galaxy.model.History.table.c.user_id == admin_user.id ) ) \ .order_by( desc( galaxy.model.History.table.c.create_time ) ) \ .first() assert latest_history is not None, "Problem retrieving latest_history from database" if len( self.get_history_as_data_list() ) > 0: raise AssertionError("ToolTestCase.do_it failed") + return latest_history + + def __setup_test_data( self, testdef, shed_tool_id ): # Upload any needed files for fname, extra in testdef.required_files: - children = extra.get( 'children', [] ) metadata = extra.get( 'metadata', [] ) composite_data = extra.get( 'composite_data', [] ) self.upload_file( fname, @@ -41,9 +64,11 @@ metadata=metadata, composite_data=composite_data, shed_tool_id=shed_tool_id ) + print "Uploaded file: ", fname, ", ftype: ", extra.get( 'ftype', 'auto' ), ", extra: ", extra #Post upload attribute editing edit_attributes = extra.get( 'edit_attributes', [] ) + #currently only renaming is supported for edit_att in edit_attributes: if edit_att.get( 'type', None ) == 'name': @@ -51,29 +76,35 @@ assert new_name, 'You must supply the new dataset name as the value tag of the edit_attributes tag' hda_id = self.get_history_as_data_list()[-1].get( 'id' ) try: - self.edit_hda_attribute_info( hda_id = str(hda_id), new_name = new_name ) + self.edit_hda_attribute_info( hda_id=str(hda_id), new_name=new_name ) except: - print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id,new_name) + print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, new_name) else: raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) ) + + def __run_tool( self, testdef ): # We need to handle the case where we've uploaded a valid compressed file since the upload # tool will have uncompressed it on the fly. 
all_inputs = {} for name, value, _ in testdef.inputs: all_inputs[ name ] = value + # See if we have a grouping.Repeat element repeat_name = None for input_name, input_value in testdef.tool.inputs_by_page[0].items(): - if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: #default behavior is to test 1 repeat, for backwards compatibility + if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: # default behavior is to test 1 repeat, for backwards compatibility repeat_name = input_name break + #check if we need to verify number of outputs created dynamically by tool if testdef.tool.force_history_refresh: job_finish_by_output_count = len( self.get_history_as_data_list() ) else: job_finish_by_output_count = False + # Do the first page - page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[0], all_inputs) + page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[0], all_inputs) + # Run the tool self.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs ) print "page_inputs (0)", page_inputs @@ -82,6 +113,7 @@ page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[i], all_inputs) self.submit_form( **page_inputs ) print "page_inputs (%i)" % i, page_inputs + # Check the results ( handles single or multiple tool outputs ). Make sure to pass the correct hid. # The output datasets from the tool should be in the same order as the testdef.outputs. data_list = None @@ -89,21 +121,30 @@ data_list = self.get_history_as_data_list() if job_finish_by_output_count and len( testdef.outputs ) > ( len( data_list ) - job_finish_by_output_count ): data_list = None - self.assertTrue( data_list ) + return data_list + + def __verify_outputs( self, testdef, shed_tool_id, data_list ): + maxseconds = testdef.maxseconds + elem_index = 0 - len( testdef.outputs ) for output_tuple in testdef.outputs: - name, outfile, attributes = output_tuple # Get the correct hid elem = data_list[ elem_index ] self.assertTrue( elem is not None ) + self.__verify_output( output_tuple, shed_tool_id, elem, maxseconds=maxseconds ) + elem_index += 1 + + def __verify_output( self, output_tuple, shed_tool_id, elem, maxseconds ): + name, outfile, attributes = output_tuple elem_hid = elem.get( 'hid' ) - elem_index += 1 try: - self.verify_dataset_correctness( outfile, hid=elem_hid, maxseconds=testdef.maxseconds, attributes=attributes, shed_tool_id=shed_tool_id ) - except Exception, e: + self.verify_dataset_correctness( outfile, hid=elem_hid, attributes=attributes, shed_tool_id=shed_tool_id ) + except Exception: print >>sys.stderr, self.get_job_stdout( elem.get( 'id' ), format=True ) print >>sys.stderr, self.get_job_stderr( elem.get( 'id' ), format=True ) raise + + def __delete_history( self, latest_history ): self.delete_history( id=self.security.encode_id( latest_history.id ) ) def __expand_grouping( self, tool_inputs, declared_inputs, prefix='' ): @@ -121,14 +162,14 @@ else: expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value for input_name, input_value in case.inputs.items(): - expanded_inputs.update( self.__expand_grouping( { input_name:input_value }, declared_inputs, prefix = new_prefix ) ) + expanded_inputs.update( self.__expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) ) elif isinstance( value, grouping.Repeat ): - for repeat_index in xrange( 0, 1 ): #need to allow for and figure out how many repeats we have + for repeat_index in xrange( 0, 1 ): # 
need to allow for and figure out how many repeats we have for r_name, r_value in value.inputs.iteritems(): - new_prefix = "%s_%d" % ( value.name, repeat_index ) - if prefix: - new_prefix = "%s|%s" % ( prefix, new_prefix ) - expanded_inputs.update( self.__expand_grouping( { new_prefix : r_value }, declared_inputs, prefix = new_prefix ) ) + new_prefix = "%s_%d" % ( value.name, repeat_index ) + if prefix: + new_prefix = "%s|%s" % ( prefix, new_prefix ) + expanded_inputs.update( self.__expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) ) elif value.name not in declared_inputs: print "%s not declared in tool test, will not change default value." % value.name elif isinstance(declared_inputs[value.name], str): @@ -143,6 +184,7 @@ expanded_inputs[value.name] = declared_inputs[value.name] return expanded_inputs + def build_tests( testing_shed_tools=False ): """ If the module level variable `toolbox` is set, generate `ToolTestCase` @@ -151,12 +193,15 @@ """ if toolbox is None: return + # Push all the toolbox tests to module level G = globals() + # Eliminate all previous tests from G. for key, val in G.items(): if key.startswith( 'TestForTool_' ): del G[ key ] + for i, tool_id in enumerate( toolbox.tools_by_id ): tool = toolbox.get_tool( tool_id ) if tool.tests: Repository URL: https://bitbucket.org/galaxy/galaxy-central/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.
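As a minimal sketch of the GALAXY_TEST_DB_TEMPLATE workflow described in changeset 83a95ea606d9 above (not code taken from the commits themselves), a small wrapper along the following lines could seed the functional-test database from a pre-built sqlite template. The template path and the direct subprocess invocation of scripts/functional_tests.py are assumptions for illustration only.

import os
import subprocess

# Hypothetical driver for the GALAXY_TEST_DB_TEMPLATE workflow (sketch only, not
# part of the commits above).
env = dict(os.environ)
# Point the test framework at a pre-built sqlite database; per the diff,
# __copy_database_template in scripts/functional_tests.py accepts either an
# existing file path or an http URL.
env["GALAXY_TEST_DB_TEMPLATE"] = "/tmp/galaxy_test_template.sqlite"  # placeholder path
# functional_tests.py copies the template into the per-run database directory and
# passes database_auto_migrate=True to the application, so a template that lags
# behind the current schema is migrated forward on startup instead of failing.
subprocess.check_call(["python", "scripts/functional_tests.py"], env=env)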