commit/galaxy-central: 2 new changesets
2 new commits in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/commits/bfb7f5ec6b14/
Changeset:   bfb7f5ec6b14
Branch:      next-stable
User:        greg
Date:        2013-03-28 21:32:32
Summary:     Fix for displaying tool help images for tools contained in repositories either in the tool shed or in Galaxy.
Affected #:  9 files

diff -r 97bf49816c0ba82bb47b5846bcce0d067a0a8bf4 -r bfb7f5ec6b14a277b96c567c92678675394c226d lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -446,6 +446,7 @@
     def load_tool_tag_set( self, elem, panel_dict, integrated_panel_dict, tool_path, load_panel_dict, guid=None, index=None ):
         try:
             path = elem.get( "file" )
+            repository_id = None
             if guid is None:
                 tool_shed_repository = None
                 can_load_into_panel_dict = True
@@ -464,11 +465,12 @@
                 if tool_shed_repository:
                     # Only load tools if the repository is not deactivated or uninstalled.
                     can_load_into_panel_dict = not tool_shed_repository.deleted
+                    repository_id = self.app.security.encode_id( tool_shed_repository.id )
                 else:
                     # If there is not yet a tool_shed_repository record, we're in the process of installing
                     # a new repository, so any included tools can be loaded into the tool panel.
                     can_load_into_panel_dict = True
-            tool = self.load_tool( os.path.join( tool_path, path ), guid=guid )
+            tool = self.load_tool( os.path.join( tool_path, path ), guid=guid, repository_id=repository_id )
             key = 'tool_%s' % str( tool.id )
             if can_load_into_panel_dict:
                 if guid is not None:
@@ -574,7 +576,7 @@
             self.integrated_tool_panel[ key ] = integrated_section
         else:
             self.integrated_tool_panel.insert( index, key, integrated_section )
-    def load_tool( self, config_file, guid=None, **kwds ):
+    def load_tool( self, config_file, guid=None, repository_id=None, **kwds ):
        """Load a single tool from the file named by `config_file` and return an instance of `Tool`."""
        # Parse XML configuration file and get the root element
        tree = self._load_and_preprocess_tool_xml( config_file )
@@ -590,7 +592,7 @@
            ToolClass = tool_types.get( root.get( 'tool_type' ) )
        else:
            ToolClass = Tool
-        return ToolClass( config_file, root, self.app, guid=guid, **kwds )
+        return ToolClass( config_file, root, self.app, guid=guid, repository_id=repository_id, **kwds )
    def reload_tool_by_id( self, tool_id ):
        """
        Attempt to reload the tool identified by 'tool_id', if successful
@@ -1004,12 +1006,13 @@
    tool_type = 'default'
    default_tool_action = DefaultToolAction

-    def __init__( self, config_file, root, app, guid=None ):
+    def __init__( self, config_file, root, app, guid=None, repository_id=None ):
        """Load a tool from the config named by `config_file`"""
        # Determine the full path of the directory where the tool config is
        self.config_file = config_file
        self.tool_dir = os.path.dirname( config_file )
        self.app = app
+        self.repository_id = repository_id
        #setup initial attribute values
        self.inputs = odict()
        self.stdio_exit_codes = list()
@@ -1354,6 +1357,19 @@
        """
        # TODO: Allow raw HTML or an external link.
        self.help = root.find("help")
+        # Handle tool help image display for tools that are contained in repositories that are in the tool shed or installed into Galaxy.
+        # When tool config files use the special string $PATH_TO_IMAGES, the following code will replace that string with the path on disk.
+        if self.repository_id and self.help.text.find( '$PATH_TO_IMAGES' ) >= 0:
+            if self.app.name == 'galaxy':
+                repository = self.sa_session.query( self.app.model.ToolShedRepository ).get( self.app.security.decode_id( self.repository_id ) )
+                if repository:
+                    path_to_images = '/tool_runner/static/images/%s' % self.repository_id
+                    self.help.text = self.help.text.replace( '$PATH_TO_IMAGES', path_to_images )
+            elif self.app.name == 'tool_shed':
+                repository = self.sa_session.query( self.app.model.Repository ).get( self.app.security.decode_id( self.repository_id ) )
+                if repository:
+                    path_to_images = '/repository/static/images/%s' % self.repository_id
+                    self.help.text = self.help.text.replace( '$PATH_TO_IMAGES', path_to_images )
        self.help_by_page = list()
        help_header = ""
        help_footer = ""

diff -r 97bf49816c0ba82bb47b5846bcce0d067a0a8bf4 -r bfb7f5ec6b14a277b96c567c92678675394c226d lib/galaxy/webapps/galaxy/buildapp.py
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -44,6 +44,8 @@
    atexit.register( app.shutdown )
    # Create the universe WSGI application
    webapp = GalaxyWebApplication( app, session_cookie='galaxysession', name='galaxy' )
+    # The following route will handle displaying tool help images for tools contained in repositories installed from the tool shed.
+    webapp.add_route( '/tool_runner/static/images/:repository_id/:image_file', controller='tool_runner', action='display_tool_help_image_in_repository', repository_id=None, image_file=None )
    webapp.add_ui_controllers( 'galaxy.webapps.galaxy.controllers', app )
    # Force /history to go to /root/history -- needed since the tests assume this
    webapp.add_route( '/history', controller='root', action='history' )

diff -r 97bf49816c0ba82bb47b5846bcce0d067a0a8bf4 -r bfb7f5ec6b14a277b96c567c92678675394c226d lib/galaxy/webapps/galaxy/controllers/tool_runner.py
--- a/lib/galaxy/webapps/galaxy/controllers/tool_runner.py
+++ b/lib/galaxy/webapps/galaxy/controllers/tool_runner.py
@@ -1,19 +1,22 @@
 """
 Upload class
 """
-
+import os
 import logging
 import galaxy.util
-
 from galaxy import web
 from galaxy.tools import DefaultToolState
 from galaxy.tools.actions import upload_common
-from galaxy.tools.parameters import params_to_incoming, visit_input_values
-from galaxy.tools.parameters.basic import DataToolParameter, UnvalidatedValue
+from galaxy.tools.parameters import params_to_incoming
+from galaxy.tools.parameters import visit_input_values
+from galaxy.tools.parameters.basic import DataToolParameter
+from galaxy.tools.parameters.basic import UnvalidatedValue
 from galaxy.util.bunch import Bunch
 from galaxy.util.hash_util import is_hashable
-from galaxy.web import error, url_for
+from galaxy.web import error
+from galaxy.web import url_for
 from galaxy.web.base.controller import BaseUIController
+import tool_shed.util.shed_util_common as suc

 log = logging.getLogger( __name__ )

@@ -92,7 +95,17 @@
                                        util=galaxy.util,
                                        add_frame=add_frame,
                                        **vars )
-
+
+    @web.expose
+    def display_tool_help_image_in_repository( self, trans, **kwd ):
+        repository_id = kwd.get( 'repository_id', None )
+        image_file = kwd.get( 'image_file', None )
+        if repository_id and image_file:
+            repository = suc.get_tool_shed_repository_by_id( trans, repository_id )
+            repo_files_dir = os.path.join( repository.repo_path( trans.app ), repository.name, repository.name )
+            return open( os.path.join( repo_files_dir, 'static', 'images', image_file ), 'r' )
+        return None
+
    @web.expose
    def rerun( self, trans, id=None, from_noframe=None, **kwd ):
        """
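Taken together, the two hunks above define the full display path: Tool.parse_help() rewrites the special string $PATH_TO_IMAGES into a URL under the new /tool_runner/static/images/ route, and the new controller action streams the image from the installed repository's static/images directory. A minimal, runnable sketch of the rewrite -- the encoded repository id and image file name below are hypothetical placeholders, not values from this commit:

    # Sketch of the $PATH_TO_IMAGES rewrite performed in Tool.parse_help() above.
    # 'f2db41e1fa331b3e' and 'workflow_overview.png' are hypothetical examples.
    help_text = """
    **What it does**

    .. image:: $PATH_TO_IMAGES/workflow_overview.png
    """
    repository_id = 'f2db41e1fa331b3e'   # encoded ToolShedRepository id
    app_name = 'galaxy'                  # the real code branches the same way for 'tool_shed'

    if app_name == 'galaxy':
        path_to_images = '/tool_runner/static/images/%s' % repository_id
    elif app_name == 'tool_shed':
        path_to_images = '/repository/static/images/%s' % repository_id

    print( help_text.replace( '$PATH_TO_IMAGES', path_to_images ) )
    # -> .. image:: /tool_runner/static/images/f2db41e1fa331b3e/workflow_overview.png

The rewritten URL matches the new route '/tool_runner/static/images/:repository_id/:image_file', so the browser's request for the image is dispatched to display_tool_help_image_in_repository(), which opens static/images/<image_file> from the repository clone on disk.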
diff -r 97bf49816c0ba82bb47b5846bcce0d067a0a8bf4 -r bfb7f5ec6b14a277b96c567c92678675394c226d lib/galaxy/webapps/tool_shed/buildapp.py
--- a/lib/galaxy/webapps/tool_shed/buildapp.py
+++ b/lib/galaxy/webapps/tool_shed/buildapp.py
@@ -69,6 +69,8 @@
    webapp.add_route( '/view/{owner}', controller='repository', action='sharable_owner' )
    webapp.add_route( '/view/{owner}/{name}', controller='repository', action='sharable_repository' )
    webapp.add_route( '/view/{owner}/{name}/{changeset_revision}', controller='repository', action='sharable_repository_revision' )
+    # The following route will handle displaying tool help images for tools contained in repositories.
+    webapp.add_route( '/repository/static/images/:repository_id/:image_file', controller='repository', action='display_tool_help_image_in_repository', repository_id=None, image_file=None )
    webapp.add_route( '/:controller/:action', action='index' )
    webapp.add_route( '/:action', controller='repository', action='index' )
    webapp.add_route( '/repos/*path_info', controller='hg', action='handle_request', path_info='/' )

diff -r 97bf49816c0ba82bb47b5846bcce0d067a0a8bf4 -r bfb7f5ec6b14a277b96c567c92678675394c226d lib/galaxy/webapps/tool_shed/config.py
--- a/lib/galaxy/webapps/tool_shed/config.py
+++ b/lib/galaxy/webapps/tool_shed/config.py
@@ -36,7 +36,7 @@
        self.database_engine_options = get_database_engine_options( kwargs )
        self.database_create_tables = string_as_bool( kwargs.get( "database_create_tables", "True" ) )
        # Where dataset files are stored
-        self.file_path = resolve_path( kwargs.get( "file_path", "database/files" ), self.root )
+        self.file_path = resolve_path( kwargs.get( "file_path", "database/community_files" ), self.root )
        self.new_file_path = resolve_path( kwargs.get( "new_file_path", "database/tmp" ), self.root )
        self.cookie_path = kwargs.get( "cookie_path", "/" )
        self.enable_quotas = string_as_bool( kwargs.get( 'enable_quotas', False ) )

diff -r 97bf49816c0ba82bb47b5846bcce0d067a0a8bf4 -r bfb7f5ec6b14a277b96c567c92678675394c226d lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -367,7 +367,6 @@
            return trans.response.send_redirect( web.url_for( controller='repository',
                                                              action='browse_valid_repositories',
                                                              **kwd ) )
-        log.debug("CCC In browse_valid_categories, just before returning valid_category_grid, kwd: %s" % str( kwd ))
        return self.valid_category_grid( trans, **kwd )

    @web.expose
@@ -746,6 +745,16 @@
                                                              status='error' ) )

    @web.expose
+    def display_tool_help_image_in_repository( self, trans, **kwd ):
+        repository_id = kwd.get( 'repository_id', None )
+        image_file = kwd.get( 'image_file', None )
+        if repository_id and image_file:
+            repository = suc.get_repository_in_tool_shed( trans, repository_id )
+            repo_files_dir = os.path.join( repository.repo_path( trans.app ), repository.name )
+            return open( os.path.join( repo_files_dir, 'static', 'images', image_file ), 'r' )
+        return None
+
+    @web.expose
    def download( self, trans, repository_id, changeset_revision, file_type, **kwd ):
        # Download an archive of the repository files compressed as zip, gz or bz2.
        params = util.Params( kwd )
@@ -2568,14 +2577,16 @@
            if can_use_disk_file:
                trans.app.config.tool_data_path = work_dir
                tool, valid, message, sample_files = tool_util.handle_sample_files_and_load_tool_from_disk( trans,
-                                                                                                            repo_files_dir,
-                                                                                                            full_path_to_tool_config,
-                                                                                                            work_dir )
+                                                                                                            repo_files_dir,
+                                                                                                            repository_id,
+                                                                                                            full_path_to_tool_config,
+                                                                                                            work_dir )
                if message:
                    status = 'error'
            else:
                tool, message, sample_files = tool_util.handle_sample_files_and_load_tool_from_tmp_config( trans,
                                                                                                           repo,
+                                                                                                           repository_id,
                                                                                                           changeset_revision,
                                                                                                           tool_config_filename,
                                                                                                           work_dir )
diff -r 97bf49816c0ba82bb47b5846bcce0d067a0a8bf4 -r bfb7f5ec6b14a277b96c567c92678675394c226d lib/tool_shed/util/metadata_util.py
--- a/lib/tool_shed/util/metadata_util.py
+++ b/lib/tool_shed/util/metadata_util.py
@@ -377,7 +377,7 @@
            log.debug( 'Loaded Data Manager tool_files: %s' % ( tool_file ) )
    return metadata_dict

-def generate_datatypes_metadata( app, repository_clone_url, repository_files_dir, datatypes_config, metadata_dict ):
+def generate_datatypes_metadata( app, repository, repository_clone_url, repository_files_dir, datatypes_config, metadata_dict ):
    """Update the received metadata_dict with information from the parsed datatypes_config."""
    try:
        tree = ElementTree.parse( datatypes_config )
@@ -428,7 +428,7 @@
                    # Parse the tool_config to get the guid.
                    tool_config_path = suc.get_config_from_disk( tool_config, repository_files_dir )
                    full_path = os.path.abspath( tool_config_path )
-                    tool, valid, error_message = tool_util.load_tool_from_config( app, full_path )
+                    tool, valid, error_message = tool_util.load_tool_from_config( app, app.security.encode_id( repository.id ), full_path )
                    if tool is None:
                        guid = None
                    else:
@@ -534,7 +534,7 @@
    # Handle proprietary datatypes, if any.
    datatypes_config = suc.get_config_from_disk( 'datatypes_conf.xml', files_dir )
    if datatypes_config:
-        metadata_dict = generate_datatypes_metadata( app, repository_clone_url, files_dir, datatypes_config, metadata_dict )
+        metadata_dict = generate_datatypes_metadata( app, repository, repository_clone_url, files_dir, datatypes_config, metadata_dict )
    # Get the relative path to all sample files included in the repository for storage in the repository's metadata.
    sample_file_metadata_paths, sample_file_copy_paths = get_sample_files_from_disk( repository_files_dir=files_dir,
                                                                                     tool_path=shed_config_dict.get( 'tool_path' ),
@@ -589,7 +589,7 @@
                        log.debug( "Error parsing %s, exception: %s" % ( full_path, str( e ) ) )
                        is_tool = False
                if is_tool:
-                    tool, valid, error_message = tool_util.load_tool_from_config( app, full_path )
+                    tool, valid, error_message = tool_util.load_tool_from_config( app, app.security.encode_id( repository.id ), full_path )
                    if tool is None:
                        if not valid:
                            invalid_tool_configs.append( name )
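Note that metadata generation now hands load_tool_from_config() the encoded repository id, app.security.encode_id( repository.id ), rather than the integer primary key: the id ends up embedded in help-image URLs and is decoded again by Tool.parse_help(). A toy round trip of that encode/decode contract, with a hex codec standing in for Galaxy's app.security helper (the real helper encrypts ids, so this is illustration only):

    import binascii

    class ToySecurityHelper( object ):
        # Illustrative stand-in for app.security; not Galaxy's implementation.
        def encode_id( self, obj_id ):
            return binascii.hexlify( str( obj_id ).encode( 'ascii' ) ).decode( 'ascii' )
        def decode_id( self, encoded_id ):
            return int( binascii.unhexlify( encoded_id ).decode( 'ascii' ) )

    security = ToySecurityHelper()
    encoded = security.encode_id( 42 )           # safe to expose in a help-image URL
    assert security.decode_id( encoded ) == 42   # recovered when the help text is parsed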
diff -r 97bf49816c0ba82bb47b5846bcce0d067a0a8bf4 -r bfb7f5ec6b14a277b96c567c92678675394c226d lib/tool_shed/util/tool_util.py
--- a/lib/tool_shed/util/tool_util.py
+++ b/lib/tool_shed/util/tool_util.py
@@ -555,7 +555,7 @@
            repository_tools_tups[ index ] = ( tup_path, guid, repository_tool )
    return repository_tools_tups, sample_files_copied

-def handle_sample_files_and_load_tool_from_disk( trans, repo_files_dir, tool_config_filepath, work_dir ):
+def handle_sample_files_and_load_tool_from_disk( trans, repo_files_dir, repository_id, tool_config_filepath, work_dir ):
    # Copy all sample files from disk to a temporary directory since the sample files may be in multiple directories.
    message = ''
    sample_files = copy_disk_sample_files_to_dir( trans, repo_files_dir, work_dir )
@@ -564,11 +564,11 @@
        # Load entries into the tool_data_tables if the tool requires them.
        tool_data_table_config = os.path.join( work_dir, 'tool_data_table_conf.xml' )
        error, message = handle_sample_tool_data_table_conf_file( trans.app, tool_data_table_config )
-    tool, valid, message2 = load_tool_from_config( trans.app, tool_config_filepath )
+    tool, valid, message2 = load_tool_from_config( trans.app, repository_id, tool_config_filepath )
    message = concat_messages( message, message2 )
    return tool, valid, message, sample_files

-def handle_sample_files_and_load_tool_from_tmp_config( trans, repo, changeset_revision, tool_config_filename, work_dir ):
+def handle_sample_files_and_load_tool_from_tmp_config( trans, repo, repository_id, changeset_revision, tool_config_filename, work_dir ):
    tool = None
    message = ''
    ctx = suc.get_changectx_for_changeset( repo, changeset_revision )
@@ -586,7 +586,7 @@
            log.debug( message )
    manifest_ctx, ctx_file = suc.get_ctx_file_path_from_manifest( tool_config_filename, repo, changeset_revision )
    if manifest_ctx and ctx_file:
-        tool, message2 = load_tool_from_tmp_config( trans, repo, manifest_ctx, ctx_file, work_dir )
+        tool, message2 = load_tool_from_tmp_config( trans, repo, repository_id, manifest_ctx, ctx_file, work_dir )
        message = concat_messages( message, message2 )
    return tool, message, sample_files

@@ -755,7 +755,7 @@
    can_use_disk_file = can_use_tool_config_disk_file( trans, repository, repo, tool_config_filepath, changeset_revision )
    if can_use_disk_file:
        trans.app.config.tool_data_path = work_dir
-        tool, valid, message, sample_files = handle_sample_files_and_load_tool_from_disk( trans, repo_files_dir, tool_config_filepath, work_dir )
+        tool, valid, message, sample_files = handle_sample_files_and_load_tool_from_disk( trans, repo_files_dir, repository_id, tool_config_filepath, work_dir )
        if tool is not None:
            invalid_files_and_errors_tups = check_tool_input_params( trans.app,
                                                                     repo_files_dir,
@@ -771,16 +771,16 @@
                                                                     displaying_invalid_tool=True )
            message = concat_messages( message, message2 )
    else:
-        tool, message, sample_files = handle_sample_files_and_load_tool_from_tmp_config( trans, repo, changeset_revision, tool_config_filename, work_dir )
+        tool, message, sample_files = handle_sample_files_and_load_tool_from_tmp_config( trans, repo, repository_id, changeset_revision, tool_config_filename, work_dir )
    suc.remove_dir( work_dir )
    trans.app.config.tool_data_path = original_tool_data_path
    # Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
    reset_tool_data_tables( trans.app )
    return repository, tool, message

-def load_tool_from_config( app, full_path ):
+def load_tool_from_config( app, repository_id, full_path ):
    try:
-        tool = app.toolbox.load_tool( full_path )
+        tool = app.toolbox.load_tool( full_path, repository_id=repository_id )
        valid = True
        error_message = None
    except KeyError, e:
@@ -795,7 +795,7 @@
        error_message = str( e )
    return tool, valid, error_message

-def load_tool_from_tmp_config( trans, repo, ctx, ctx_file, work_dir ):
+def load_tool_from_tmp_config( trans, repo, repository_id, ctx, ctx_file, work_dir ):
    tool = None
    message = ''
    tmp_tool_config = suc.get_named_tmpfile_from_ctx( ctx, ctx_file, work_dir )
@@ -809,7 +809,7 @@
            tmp_code_file_name = suc.copy_file_from_manifest( repo, ctx, code_file_name, work_dir )
            if tmp_code_file_name:
                tmp_code_files.append( tmp_code_file_name )
-    tool, valid, message = load_tool_from_config( trans.app, tmp_tool_config )
+    tool, valid, message = load_tool_from_config( trans.app, repository_id, tmp_tool_config )
    for tmp_code_file in tmp_code_files:
        try:
            os.unlink( tmp_code_file )
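Because repository_id was inserted as the second positional parameter of load_tool_from_config(), every caller in the chain had to be updated in the same commit; the tool_util.py and metadata_util.py hunks above do exactly that. A reduced, self-contained sketch of the wiring (both functions are simplified stand-ins for the ones in the diff, and all values are hypothetical):

    # Toy reduction of the updated loader chain: the encoded repository id is
    # threaded from the outermost helper down to load_tool_from_config().
    def load_tool_from_config( app, repository_id, full_path ):
        # In Galaxy this parses the tool XML at full_path; here we echo the wiring.
        return { 'app': app, 'repository_id': repository_id, 'config': full_path }

    def handle_sample_files_and_load_tool_from_disk( app, repo_files_dir, repository_id, tool_config_filepath ):
        # Sample-file handling elided; the point is the extra repository_id argument.
        return load_tool_from_config( app, repository_id, tool_config_filepath )

    tool = handle_sample_files_and_load_tool_from_disk( 'app', '/repos', 'f2db41e1fa331b3e', 'tool.xml' )
    assert tool[ 'repository_id' ] == 'f2db41e1fa331b3e'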
diff -r 97bf49816c0ba82bb47b5846bcce0d067a0a8bf4 -r bfb7f5ec6b14a277b96c567c92678675394c226d tool_shed_wsgi.ini.sample
--- a/tool_shed_wsgi.ini.sample
+++ b/tool_shed_wsgi.ini.sample
@@ -29,7 +29,7 @@
 # Where the hgweb.config file is stored. The default is the Galaxy installation directory.
 #hgweb_config_dir = None

-# Where dataset files are saved
+# Where tool shed repositories are stored.
 file_path = database/community_files

 # Temporary storage for additional datasets, this should be shared through the cluster
 new_file_path = database/tmp

https://bitbucket.org/galaxy/galaxy-central/commits/9722c83fab8b/
Changeset:   9722c83fab8b
Branch:      next-stable
User:        greg
Date:        2013-03-28 21:32:59
Summary:     merging from next-stable
Affected #:  6 files

diff -r bfb7f5ec6b14a277b96c567c92678675394c226d -r 9722c83fab8bc5c67da5d60fe8507e2496ee69c2 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -2,24 +2,22 @@
 Support for running a tool in Galaxy via an internal job management system
 """
+import copy
+import datetime
+import logging
 import os
+import pwd
+import random
+import re
+import shutil
+import subprocess
 import sys
-import pwd
-import time
-import copy
-import random
-import logging
-import datetime
 import threading
 import traceback
-import subprocess

 import galaxy
 from galaxy import util, model
 from galaxy.util.bunch import Bunch
-from galaxy.datatypes.tabular import *
-from galaxy.datatypes.interval import *
-# tabular/interval imports appear to be unused.  Clean up?
 from galaxy.datatypes import metadata
 from galaxy.util.json import from_json_string
 from galaxy.util.expressions import ExpressionContext
@@ -86,7 +84,7 @@
 class JobConfiguration( object ):
     """A parser and interface to advanced job management features.
-
+
     These features are configured in the job configuration, by default, ``job_conf.xml``
     """
     DEFAULT_NWORKERS = 4
@@ -611,7 +609,7 @@
        Calling this method for the first time causes the dynamic runner to do
        its calculation, if any.
-
+
        :returns: ``JobDestination``
        """
        return self.job_runner_mapper.get_job_destination(self.params)
@@ -675,7 +673,7 @@
        special = self.sa_session.query( model.GenomeIndexToolData ).filter_by( job=job ).first()
        if special:
            out_data[ "output_file" ] = FakeDatasetAssociation( dataset=special.dataset )
-
+
        # These can be passed on the command line if wanted as $__user_*__
        if job.history and job.history.user:
            user_id = '%d' % job.history.user.id
@@ -779,11 +777,11 @@
        if ( len( stdout ) > 32768 ):
            stdout = stdout[:32768]
            log.info( "stdout for job %d is greater than 32K, only first part will be logged to database" % job.id )
-        job.stdout = stdout
+        job.stdout = stdout
        if ( len( stderr ) > 32768 ):
            stderr = stderr[:32768]
            log.info( "stderr for job %d is greater than 32K, only first part will be logged to database" % job.id )
-        job.stderr = stderr
+        job.stderr = stderr
        # Let the exit code be Null if one is not provided:
        if ( exit_code != None ):
            job.exit_code = exit_code
@@ -865,7 +863,7 @@
        self.sa_session.expunge_all()
        job = self.get_job()

-        # TODO: After failing here, consider returning from the function.
+        # TODO: After failing here, consider returning from the function.
        try:
            self.reclaim_ownership()
        except:
@@ -882,13 +880,16 @@
            return self.fail( job.info, stderr=stderr, stdout=stdout, exit_code=tool_exit_code )

        # Check the tool's stdout, stderr, and exit code for errors, but only
-        # if the job has not already been marked as having an error.
+        # if the job has not already been marked as having an error.
        # The job's stdout and stderr will be set accordingly.
+
+        # We set final_job_state to use for dataset management, but *don't* set
+        # job.state until after dataset collection to prevent history issues
        if job.states.ERROR != job.state:
            if ( self.check_tool_output( stdout, stderr, tool_exit_code, job )):
-                job.state = job.states.OK
+                final_job_state = job.states.OK
            else:
-                job.state = job.states.ERROR
+                final_job_state = job.states.ERROR

        if self.version_string_cmd:
            version_filename = self.get_version_string_path()
@@ -908,9 +909,11 @@
                    if os.path.exists( dataset_path.real_path ) and os.stat( dataset_path.real_path ).st_size > 0:
                        log.warning( "finish(): %s not found, but %s is not empty, so it will be used instead" % ( dataset_path.false_path, dataset_path.real_path ) )
                    else:
+                        # Prior to fail we need to set job.state
+                        job.state = final_job_state
                        return self.fail( "Job %s's output dataset(s) could not be read" % job.id )
+
        job_context = ExpressionContext( dict( stdout = job.stdout, stderr = job.stderr ) )
-        job_tool = self.app.toolbox.tools_by_id.get( job.tool_id, None )
        for dataset_assoc in job.output_datasets + job.output_library_datasets:
            context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
@@ -926,10 +929,7 @@
                # Update (non-library) job output datasets through the object store
                if dataset not in job.output_library_datasets:
                    self.app.object_store.update_from_file(dataset.dataset, create=True)
-                # TODO: The context['stderr'] holds stderr's contents. An error
-                # only really occurs if the job also has an error. So check the
-                # job's state:
-                if job.states.ERROR == job.state:
+                if job.states.ERROR == final_job_state:
                    dataset.blurb = "error"
                    dataset.mark_unhidden()
                elif dataset.has_data():
@@ -945,13 +945,7 @@
                            ( not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) \
                              and self.app.config.retry_metadata_internally ):
                            dataset.datatype.set_meta( dataset, overwrite = False )  #call datatype.set_meta directly for the initial set_meta call during dataset creation
-                        # TODO: The context['stderr'] used to indicate that there
-                        # was an error. Now we must rely on the job's state instead;
-                        # that indicates whether the tool relied on stderr to indicate
-                        # the state or whether the tool used exit codes and regular
-                        # expressions to do so. So we use
-                        # job.state == job.states.ERROR to replace this same test.
-                        elif not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and job.states.ERROR != job.state:
+                        elif not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and job.states.ERROR != final_job_state:
                            dataset._state = model.Dataset.states.FAILED_METADATA
                        else:
                            #load metadata from file
@@ -981,10 +975,7 @@
                    if dataset.ext == 'auto':
                        dataset.extension = 'txt'
                    self.sa_session.add( dataset )
-                    # TODO: job.states.ERROR == job.state now replaces checking
-                    # stderr for a problem:
-                    #if context['stderr']:
-                    if job.states.ERROR == job.state:
+                    if job.states.ERROR == final_job_state:
                        log.debug( "setting dataset state to ERROR" )
                        # TODO: This is where the state is being set to error. Change it!
                        dataset_assoc.dataset.dataset.state = model.Dataset.states.ERROR
@@ -1015,7 +1006,7 @@
            job.stderr = job.stderr[:32768]
        # The exit code will be null if there is no exit code to be set.
        # This is so that we don't assign an exit code, such as 0, that
-        # is either incorrect or has the wrong semantics.
+        # is either incorrect or has the wrong semantics.
        if None != tool_exit_code:
            job.exit_code = tool_exit_code
        # custom post process setup
@@ -1054,7 +1045,12 @@
        # fix permissions
        for path in [ dp.real_path for dp in self.get_mutable_output_fnames() ]:
            util.umask_fix_perms( path, self.app.config.umask, 0666, self.app.config.gid )
+
+        # Finally set the job state.  This should only happen *after* all
+        # dataset creation, and will allow us to eliminate force_history_refresh.
+        job.state = final_job_state
        self.sa_session.flush()
+        log.debug( 'job %d ended' % self.job_id )
        if self.app.config.cleanup_job == 'always' or ( not stderr and self.app.config.cleanup_job == 'onsuccess' ):
            self.cleanup()
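The heart of the jobs change is visible in the @@ -882 and @@ -1054 hunks above: the computed state is kept in a local final_job_state while output datasets are collected, and job.state is assigned only at the very end. Distilled into a runnable toy (the Job class and its states here are stand-ins, not Galaxy's model classes):

    # Deferred-state pattern from JobWrapper.finish() above: compute the final
    # state first, use it for per-dataset bookkeeping, assign job.state last.
    class Job( object ):
        OK, ERROR, RUNNING = 'ok', 'error', 'running'
        def __init__( self ):
            self.state = Job.RUNNING
            self.output_datasets = [ {}, {} ]

    def finish( job, tool_succeeded ):
        final_job_state = Job.OK if tool_succeeded else Job.ERROR
        for dataset in job.output_datasets:
            # Bookkeeping consults final_job_state, not job.state, so a client
            # polling the history never sees a finished job with unfinished datasets.
            dataset[ 'blurb' ] = 'error' if final_job_state == Job.ERROR else 'done'
        job.state = final_job_state   # only after all dataset creation

    job = Job()
    finish( job, tool_succeeded=True )
    assert job.state == Job.OK and job.output_datasets[0][ 'blurb' ] == 'done'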
@@ -1062,26 +1058,26 @@
    def check_tool_output( self, stdout, stderr, tool_exit_code, job ):
        """
        Check the output of a tool - given the stdout, stderr, and the tool's
-        exit code, return True if the tool exited successfully and False
+        exit code, return True if the tool exited successfully and False
        otherwise. No exceptions should be thrown. If this code encounters
        an exception, it returns True so that the workflow can continue;
-        otherwise, a bug in this code could halt workflow progress.
+        otherwise, a bug in this code could halt workflow progress.
        Note that, if the tool did not define any exit code handling or
        any stdio/stderr handling, then it reverts back to previous behavior:
        if stderr contains anything, then False is returned.
        Note that the job id is just for messages.
        """
-        # By default, the tool succeeded. This covers the case where the code
+        # By default, the tool succeeded. This covers the case where the code
        # has a bug but the tool was ok, and it lets a workflow continue.
-        success = True
+        success = True

        try:
-            # Check exit codes and match regular expressions against stdout and
+            # Check exit codes and match regular expressions against stdout and
            # stderr if this tool was configured to do so.
            # If there is a regular expression for scanning stdout/stderr,
-            # then we assume that the tool writer overwrote the default
+            # then we assume that the tool writer overwrote the default
            # behavior of just setting an error if there is *anything* on
-            # stderr.
+            # stderr.
            if ( len( self.tool.stdio_regexes ) > 0 or
                 len( self.tool.stdio_exit_codes ) > 0 ):
                # Check the exit code ranges in the order in which
@@ -1092,9 +1088,9 @@
                max_error_level = galaxy.tools.StdioErrorLevel.NO_ERROR
                if tool_exit_code != None:
                    for stdio_exit_code in self.tool.stdio_exit_codes:
-                        if ( tool_exit_code >= stdio_exit_code.range_start and
+                        if ( tool_exit_code >= stdio_exit_code.range_start and
                             tool_exit_code <= stdio_exit_code.range_end ):
-                            # Tack on a generic description of the code
+                            # Tack on a generic description of the code
                            # plus a specific code description. For example,
                            # this might prepend "Job 42: Warning (Out of Memory)\n".
                            code_desc = stdio_exit_code.desc
@@ -1106,21 +1102,21 @@
                                           code_desc ) )
                            log.info( "Job %s: %s" % (job.get_id_tag(), tool_msg) )
                            stderr = tool_msg + "\n" + stderr
-                            max_error_level = max( max_error_level,
+                            max_error_level = max( max_error_level,
                                                   stdio_exit_code.error_level )
-                            if ( max_error_level >=
+                            if ( max_error_level >=
                                 galaxy.tools.StdioErrorLevel.FATAL ):
                                break
-
+
                if max_error_level < galaxy.tools.StdioErrorLevel.FATAL:
                    # We'll examine every regex. Each regex specifies whether
-                    # it is to be run on stdout, stderr, or both. (It is
+                    # it is to be run on stdout, stderr, or both. (It is
                    # possible for neither stdout nor stderr to be scanned,
                    # but those regexes won't be used.) We record the highest
                    # error level, which are currently "warning" and "fatal".
                    # If fatal, then we set the job's state to ERROR.
                    # If warning, then we still set the job's state to OK
-                    # but include a message. We'll do this if we haven't seen
+                    # but include a message. We'll do this if we haven't seen
                    # a fatal error yet
                    for regex in self.tool.stdio_regexes:
                        # If ( this regex should be matched against stdout )
@@ -1130,16 +1126,16 @@
                        # Repeat the stdout stuff for stderr.
                        # TODO: Collapse this into a single function.
                        if ( regex.stdout_match ):
-                            regex_match = re.search( regex.match, stdout,
+                            regex_match = re.search( regex.match, stdout,
                                                     re.IGNORECASE )
                            if ( regex_match ):
                                rexmsg = self.regex_err_msg( regex_match, regex)
-                                log.info( "Job %s: %s"
+                                log.info( "Job %s: %s"
                                          % ( job.get_id_tag(), rexmsg ) )
                                stdout = rexmsg + "\n" + stdout
-                                max_error_level = max( max_error_level,
+                                max_error_level = max( max_error_level,
                                                       regex.error_level )
-                                if ( max_error_level >=
+                                if ( max_error_level >=
                                     galaxy.tools.StdioErrorLevel.FATAL ):
                                    break

@@ -1148,33 +1144,33 @@
                                                     re.IGNORECASE )
                            if ( regex_match ):
                                rexmsg = self.regex_err_msg( regex_match, regex)
-                                log.info( "Job %s: %s"
+                                log.info( "Job %s: %s"
                                          % ( job.get_id_tag(), rexmsg ) )
                                stderr = rexmsg + "\n" + stderr
-                                max_error_level = max( max_error_level,
+                                max_error_level = max( max_error_level,
                                                       regex.error_level )
-                                if ( max_error_level >=
+                                if ( max_error_level >=
                                     galaxy.tools.StdioErrorLevel.FATAL ):
                                    break
-
+
                # If we encountered a fatal error, then we'll need to set the
                # job state accordingly. Otherwise the job is ok:
                if max_error_level >= galaxy.tools.StdioErrorLevel.FATAL:
-                    success = False
+                    success = False
                else:
-                    success = True
-
+                    success = True
+
            # When there are no regular expressions and no exit codes to check,
            # default to the previous behavior: when there's anything on stderr
-            # the job has an error, and the job is ok otherwise.
+            # the job has an error, and the job is ok otherwise.
            else:
-                # TODO: Add in the tool and job id:
+                # TODO: Add in the tool and job id:
                log.debug( "Tool did not define exit code or stdio handling; "
                         + "checking stderr for success" )
                if stderr:
-                    success = False
+                    success = False
                else:
-                    success = True
+                    success = True

        # On any exception, return True.
        except:
@@ -1182,7 +1178,7 @@
            log.warning( "Tool check encountered unexpected exception; "
                       + "assuming tool was successful: " + tb )
            success = True
-
+
        # Store the modified stdout and stderr in the job:
        if None != job:
            job.stdout = stdout
@@ -1196,7 +1192,7 @@
        ToolStdioRegex regex object. The regex_match is a MatchObject
        that will contain the string matched on.
        """
-        # Get the description for the error level:
+        # Get the description for the error level:
        err_msg = galaxy.tools.StdioErrorLevel.desc( regex.error_level ) + ": "
        # If there's a description for the regular expression, then use it.
        # Otherwise, we'll take the first 256 characters of the match.
@@ -1210,7 +1206,7 @@
        if mend - mstart > 256:
            err_msg += match.string[ mstart : mstart+256 ] + "..."
        else:
-            err_msg += match.string[ mstart: mend ]
+            err_msg += match.string[ mstart: mend ]
        return err_msg

    def cleanup( self ):
@@ -1489,7 +1485,7 @@
        self.status = task.states.NEW

    def can_split( self ):
-        # Should the job handler split this job up? TaskWrapper should
+        # Should the job handler split this job up? TaskWrapper should
        # always return False as the job has already been split.
        return False

@@ -1631,8 +1627,8 @@
        the contents of the output files.
        """
        # This may have ended too soon
-        log.debug( 'task %s for job %d ended; exit code: %d'
-                   % (self.task_id, self.job_id,
+        log.debug( 'task %s for job %d ended; exit code: %d'
+                   % (self.task_id, self.job_id,
                      tool_exit_code if tool_exit_code != None else -256 ) )
        # default post job setup_external_metadata
        self.sa_session.expunge_all()
@@ -1647,12 +1643,12 @@
            self.fail( task.info )
            return

-        # Check what the tool returned. If the stdout or stderr matched
+        # Check what the tool returned. If the stdout or stderr matched
        # regular expressions that indicate errors, then set an error.
        # The same goes if the tool's exit code was in a given range.
        if ( self.check_tool_output( stdout, stderr, tool_exit_code, task ) ):
            task.state = task.states.OK
-        else:
+        else:
            task.state = task.states.ERROR

        # Save stdout and stderr
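check_tool_output(), reformatted above, decides success in a fixed order: configured exit-code ranges first, then stdout/stderr regexes, and only when a tool defines neither does it fall back to the old rule that any stderr output is an error. A condensed, self-contained sketch of that decision order (the error levels and rule tuples are simplified stand-ins for Galaxy's StdioErrorLevel and stdio rule objects, and the real code matches stdout and stderr separately):

    import re

    NO_ERROR, WARNING, FATAL = 0, 2, 3   # stand-ins for galaxy.tools.StdioErrorLevel

    def check_tool_output( stdout, stderr, exit_code, exit_code_rules, regex_rules ):
        if exit_code_rules or regex_rules:
            max_error_level = NO_ERROR
            # 1. Exit-code ranges, in configured order.
            if exit_code is not None:
                for range_start, range_end, error_level in exit_code_rules:
                    if range_start <= exit_code <= range_end:
                        max_error_level = max( max_error_level, error_level )
                        if max_error_level >= FATAL:
                            break
            # 2. Regexes against the captured streams, unless already fatal.
            if max_error_level < FATAL:
                for pattern, error_level in regex_rules:
                    if re.search( pattern, stdout + stderr, re.IGNORECASE ):
                        max_error_level = max( max_error_level, error_level )
                        if max_error_level >= FATAL:
                            break
            return max_error_level < FATAL
        # 3. Legacy fallback: anything on stderr means failure.
        return not stderr

    assert not check_tool_output( '', '', 1, [ ( 1, 255, FATAL ) ], [] )   # fatal exit code
    assert check_tool_output( 'ok', '', 0, [], [] )                        # no rules, empty stderr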
diff -r bfb7f5ec6b14a277b96c567c92678675394c226d -r 9722c83fab8bc5c67da5d60fe8507e2496ee69c2 lib/galaxy/webapps/galaxy/controllers/biostar.py
--- a/lib/galaxy/webapps/galaxy/controllers/biostar.py
+++ b/lib/galaxy/webapps/galaxy/controllers/biostar.py
@@ -29,7 +29,7 @@
 # Biostar requires all keys to be present, so we start with a template
 DEFAULT_PAYLOAD = {
     'email': "",
-    'title': "Question about Galaxy",
+    'title': "",
     'tags': 'galaxy',
     'tool_name': '',
     'tool_version': '',
@@ -72,14 +72,16 @@
    payload = dict( DEFAULT_PAYLOAD, **payload )
    # Do the best we can of providing user information for the payload
    if trans.user:
+        payload['username'] = "user-" + trans.security.encode_id( trans.user.id )
        payload['email'] = trans.user.email
        if trans.user.username:
-            payload['username'] = trans.user.username
            payload['display_name'] = trans.user.username
        else:
-            payload['display_name'] = "Galaxy User"
+            payload['display_name'] = trans.user.email.split( "@" )[0]
    else:
-        payload['username'] = payload['display_name'] = "Anonymous Galaxy User %d" % trans.galaxy_session.id
+        encoded = trans.security.encode_id( trans.galaxy_session.id )
+        payload['username'] = "anon-" + encoded
+        payload['display_name'] = "Anonymous Galaxy User %d" % encoded[0:8]
    data, digest = encode_data( trans.app.config.biostar_key, payload )
    return trans.response.send_redirect( url_for( trans.app.config.biostar_url, data=data, digest=digest, name=trans.app.config.biostar_key_name, action=biostar_action ) )
@@ -107,8 +109,7 @@
        if not tool:
            return error( "No tool found matching '%s'" % tool_id )
        # Tool specific information for payload
-        payload = { 'title': "Question about Galaxy tool '%s'" % tool.name,
-                    'tool_name': tool.name,
+        payload = { 'tool_name': tool.name,
                    'tool_version': tool.version,
                    'tool_id': tool.id,
                    'tags': 'galaxy ' + tag_for_tool( tool ) }

diff -r bfb7f5ec6b14a277b96c567c92678675394c226d -r 9722c83fab8bc5c67da5d60fe8507e2496ee69c2 static/scripts/galaxy.base.js
--- a/static/scripts/galaxy.base.js
+++ b/static/scripts/galaxy.base.js
@@ -239,6 +239,14 @@
     return 0;
 }

+$.fn.refresh_select2 = function() {
+    var select_elt = $(this);
+    var options = { width: "resolve",
+                    closeOnSelect: !select_elt.is("[MULTIPLE]"),
+                  };
+    return select_elt.select2( options );
+}
+
 // Replace select box with a text input box + autocomplete.
 function replace_big_select_inputs(min_length, max_length, select_elts) {
     // To do replace, the select2 plugin must be loaded.
@@ -276,9 +284,7 @@
         *
         * - should we still sort dbkey fields here?
         */
-
-        select_elt.select2( { width: "resolve" } );
-
+        select_elt.refresh_select2();
    });
 }

diff -r bfb7f5ec6b14a277b96c567c92678675394c226d -r 9722c83fab8bc5c67da5d60fe8507e2496ee69c2 templates/webapps/galaxy/tool_form.mako
--- a/templates/webapps/galaxy/tool_form.mako
+++ b/templates/webapps/galaxy/tool_form.mako
@@ -73,7 +73,7 @@
            });
            $(this).append(select_link).append(" ").append(unselect_link);
        });
-
+
        $(".add-librarydataset").live("click", function() {
            var link = $(this);
            $.ajax({
@@ -311,7 +311,7 @@
        %if trans.app.config.biostar_url:
            <!-- BioStar links -->
-            <span class="pull-right"><a href="${h.url_for( controller='biostar', action='biostar_tool_question_redirect', tool_id=tool.id )}" target="galaxy_main" class="fa-icon-question-sign tooltip" data-original-title="Ask a question about this tool"></a></span>
+            <span class="pull-right"><a href="${h.url_for( controller='biostar', action='biostar_tool_question_redirect', tool_id=tool.id )}" target="_blank" class="fa-icon-question-sign tooltip" data-original-title="Ask a question about this tool"></a></span>
            <!-- End of BioStar links -->
        %endif
    </div>

diff -r bfb7f5ec6b14a277b96c567c92678675394c226d -r 9722c83fab8bc5c67da5d60fe8507e2496ee69c2 templates/webapps/galaxy/workflow/run.mako
--- a/templates/webapps/galaxy/workflow/run.mako
+++ b/templates/webapps/galaxy/workflow/run.mako
@@ -8,7 +8,7 @@
 <%def name="javascripts()">
    ${parent.javascripts()}
-    ${h.js( "libs/jquery/jquery.autocomplete" )}
+    ${h.js( "libs/jquery/select2" )}
    <script type="text/javascript">
    $( function() {
        function show_tool_body(title){
@@ -40,12 +40,12 @@
                select.val($('option:last', select).val());
            }
            select.closest('.form-row').children('label').children('span.mode-icon').hide();
-            select.removeAttr('multiple').removeAttr('size');
+            select.removeAttr('multiple').refresh_select2().removeAttr('size');
            placeholder = 'type to filter';
        } else {
            $('.multiinput', select.closest('.form-row')).removeClass('disabled');
            select.closest('.form-row').children('label').children('span.mode-icon').show();
-            select.attr('multiple', 'multiple').attr('size', 8);
+            select.attr('multiple', 'multiple').refresh_select2().attr('size', 8);
            placeholder = 'type to filter, [enter] to select all';
        }
        $('input.multiinput-filter', select.parent()).attr(
@@ -79,7 +79,7 @@
        $("#new_history_cbx").click(function(){
            $("#new_history_input").toggle(this.checked);
        });
-        $('span.multiinput_wrap select[name*="|input"]').removeAttr('multiple').each(function(i, s) {
+        $('span.multiinput_wrap select[name*="|input"]').removeAttr('multiple').refresh_select2().each(function(i, s) {
            var select = $(s);
            var new_width = Math.max(200, select.width()) + 20;
            // Find the label for this element.
diff -r bfb7f5ec6b14a277b96c567c92678675394c226d -r 9722c83fab8bc5c67da5d60fe8507e2496ee69c2 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -281,8 +281,7 @@
 def register_test_result( url, metadata_id, test_results_dict, tests_passed=False ):
     '''
-    Set do_not_test = True if the repository fails functional tests. Set do_not_test = False
-    if the repository passes functional tests, so that the repository will always be re-tested
+    This script should never set do_not_test = True, because the repositories should always be re-tested
     against the most recent code.
     '''
     params = {}
@@ -291,7 +290,7 @@
        params[ 'do_not_test' ] = 'false'
    else:
        params[ 'tools_functionally_correct' ] = 'false'
-        params[ 'do_not_test' ] = 'true'
+        params[ 'do_not_test' ] = 'false'
    params[ 'tool_test_errors' ] = test_results_dict
    return update( tool_shed_api_key, '%s' % ( url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', metadata_id ) ), params, return_formatted=False )

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.