1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/540ff06d44b4/
changeset: 540ff06d44b4
user: greg
date: 2011-10-10 21:51:43
summary: Miscellaneous enhancements for the tool shed:
1. Treat the config settings named "tool_config_file" and "tool_config_files" identically, since this setting can now be a comma-separated list of file names.
2. Display an error message if a user attempts to install a Galaxy tool shed repository but no shed tool config file is named in the "tool_config_file" setting in universe_wsgi.ini.
3. If tool search using whoosh is enabled, re-index the tool box when new tools are installed from a tool shed.
affected #: 3 files (-1 bytes)
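For context, the config.py hunk below boils down to the following fallback
logic; this is a minimal standalone sketch, not the committed code, with
os.path.join standing in for resolve_path and an assumed listify that splits
a comma-separated string:

    import os

    def listify(value):
        # Assumed behavior of galaxy.util.listify: split a comma-separated
        # string (or pass a list through) into a list of names.
        if isinstance(value, list):
            return value
        return [v.strip() for v in str(value).split(',') if v.strip()]

    def resolve_tool_configs(kwargs, root):
        # "tool_config_file" and "tool_config_files" now behave identically;
        # the default remains the single tool_conf.xml.
        if 'tool_config_file' in kwargs:
            tcf = kwargs['tool_config_file']
        elif 'tool_config_files' in kwargs:
            tcf = kwargs['tool_config_files']
        else:
            tcf = 'tool_conf.xml'
        return [os.path.join(root, p) for p in listify(tcf)]

For example, resolve_tool_configs({'tool_config_file':
'tool_conf.xml,shed_tool_conf.xml'}, '/galaxy') yields
['/galaxy/tool_conf.xml', '/galaxy/shed_tool_conf.xml'].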
--- a/lib/galaxy/config.py Mon Oct 10 15:45:31 2011 -0400
+++ b/lib/galaxy/config.py Mon Oct 10 15:51:43 2011 -0400
@@ -52,7 +52,13 @@
self.tool_data_path = resolve_path( kwargs.get( "tool_data_path", "tool-data" ), os.getcwd() )
self.len_file_path = kwargs.get( "len_file_path", resolve_path(os.path.join(self.tool_data_path, 'shared','ucsc','chrom'), self.root) )
self.test_conf = resolve_path( kwargs.get( "test_conf", "" ), self.root )
- self.tool_configs = [ resolve_path( p, self.root ) for p in listify( kwargs.get( 'tool_config_file', 'tool_conf.xml' ) ) ]
+ if 'tool_config_file' in kwargs:
+ tcf = kwargs[ 'tool_config_file' ]
+ elif 'tool_config_files' in kwargs:
+ tcf = kwargs[ 'tool_config_files' ]
+ else:
+ tcf = 'tool_conf.xml'
+ self.tool_configs = [ resolve_path( p, self.root ) for p in listify( tcf ) ]
self.tool_data_table_config_path = resolve_path( kwargs.get( 'tool_data_table_config_path', 'tool_data_table_conf.xml' ), self.root )
self.tool_secret = kwargs.get( "tool_secret", "" )
self.id_secret = kwargs.get( "id_secret", "USING THE DEFAULT IS NOT SECURE!" )
--- a/lib/galaxy/web/controllers/admin.py Mon Oct 10 15:45:31 2011 -0400
+++ b/lib/galaxy/web/controllers/admin.py Mon Oct 10 15:51:43 2011 -0400
@@ -2,6 +2,7 @@
from galaxy import model
from galaxy.model.orm import *
from galaxy.web.framework.helpers import time_ago, iff, grids
+from galaxy.tools.search import ToolBoxSearch
import logging
log = logging.getLogger( __name__ )
@@ -693,6 +694,15 @@
@web.expose
@web.require_admin
def install_tool_shed_repository( self, trans, **kwd ):
+ if not trans.app.toolbox.shed_tool_confs:
+ message = 'The <b>tool_config_file</b> setting in <b>universe_wsgi.ini</b> must include at least one shed tool configuration file name with a '
+ message += '<b><toolbox></b> tag that includes a <b>tool_path</b> attribute value which is a directory relative to the Galaxy installation '
+ message += 'directory in order to automatically install tools from a Galaxy tool shed (e.g., the file name <b>shed_tool_conf.xml</b> whose '
+ message += '<b><toolbox></b> tag is <b><toolbox tool_path="../shed_tools"></b>).<p/>See the '
+ message += '<a href="http://wiki.g2.bx.psu.edu/Tool%20Shed#Automatic_installation_of_Galaxy_tool…" '
+ message += 'target=_blank">Automatic installation of Galaxy tool shed repository tools into a local Galaxy instance</a> section of the '
+ message += '<a href="http://wiki.g2.bx.psu.edu/Tool%20Shed" target="_blank">Galaxy tool shed wiki</a> for all of the details.'
+ return trans.show_error_message( message )
params = util.Params( kwd )
message = util.restore_text( params.get( 'message', '' ) )
status = params.get( 'status', 'done' )
@@ -782,6 +792,9 @@
pass
# Append the new section to the shed_tool_config file.
self.__add_shed_tool_conf_entry( trans, shed_tool_conf, new_tool_section )
+ if trans.app.toolbox_search.enabled:
+ # If search support for tools is enabled, index the new installed tools.
+ trans.app.toolbox_search = ToolBoxSearch( trans.app.toolbox )
message = 'Revision <b>%s</b> of repository <b>%s</b> has been installed in tool panel section <b>%s</b>.' % \
( changeset_revision, name, tool_section.name )
return trans.show_ok_message( message )
--- a/universe_wsgi.ini.sample Mon Oct 10 15:45:31 2011 -0400
+++ b/universe_wsgi.ini.sample Mon Oct 10 15:51:43 2011 -0400
@@ -124,8 +124,8 @@
#new_file_path = database/tmp
# Tool config files, defines what tools are available in Galaxy.
-# Tools can be locally developed or installed from tool sheds.
-#tool_config_file = tool_conf.xml
+# Tools can be locally developed or installed from Galaxy tool sheds.
+#tool_config_file = tool_conf.xml,shed_tool_conf.xml
# Default path to the directory containing the tools defined in tool_conf.xml.
# Other tool config files must include the tool_path as an attribute in the <toolbox> tag.
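The error message added in admin.py above describes the shed tool config file
that must be named in the "tool_config_file" setting. A minimal sketch of such
a file, using exactly the example name and tool_path from that message
(shed_tool_conf.xml, with a path relative to the Galaxy installation
directory):

    <?xml version="1.0"?>
    <toolbox tool_path="../shed_tools">
    </toolbox>

which is then listed alongside the default config, as in the
universe_wsgi.ini.sample hunk above:

    tool_config_file = tool_conf.xml,shed_tool_conf.xml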
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
3 new changesets in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/feee8fdcdaeb/
changeset: feee8fdcdaeb
user: John Duddy
date: 2011-08-18 23:16:03
summary: Add two new optional parameters to the workflow/run controller method:
history_id: an encoded history id to use. Will not permanently switch the
user's current history.
hide_fixed_params: Initially hides all workflow parameters that are not
"Set at Runtime" and all workflow steps that only contain them.
Intended to reduce clutter when launching "canned" workflows.
Also added a configurable feature that governs how initial values are selected
from the history for workflow runtime inputs. When enabled, this feature causes
Galaxy to use each compatible dataset only once until all of them have been
used. This is for the paired-end scenario, to attempt to match inputs correctly by default.
affected #: 5 files (-1 bytes)
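With these parameters a "canned" workflow can be launched directly against a
chosen history, e.g. (placeholder values, not real encoded ids):

    /workflow/run?id=<encoded workflow id>&history_id=<encoded history id>&hide_fixed_params=True

The unique-defaults selection added to basic.py below reduces to the following
standalone sketch: candidates are gathered oldest-first from the history,
reversed so the newest is preferred, and anything recorded in the already_used
memento list is skipped:

    def pick_default(candidates, already_used):
        # candidates: compatible datasets, oldest-first as collected from
        # the history; prefer the newest one not yet handed out.
        candidates = list(reversed(candidates))
        if already_used is not None:
            for val in candidates:
                if val is not None and val not in already_used:
                    already_used.append(val)  # remember it so later inputs skip it
                    return val
        return candidates[0] if candidates else ''

    used = []
    pick_default(['d1', 'd2'], used)  # -> 'd2' (the newest)
    pick_default(['d1', 'd2'], used)  # -> 'd1' (d2 was already used)

So with two paired-end inputs and two compatible datasets in the history, the
defaults come out as d2 and d1 rather than d2 twice.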
--- a/lib/galaxy/config.py Thu Aug 18 14:20:48 2011 -0400
+++ b/lib/galaxy/config.py Thu Aug 18 14:16:03 2011 -0700
@@ -46,6 +46,7 @@
self.enable_api = string_as_bool( kwargs.get( 'enable_api', False ) )
self.enable_openid = string_as_bool( kwargs.get( 'enable_openid', False ) )
self.enable_quotas = string_as_bool( kwargs.get( 'enable_quotas', False ) )
+ self.enable_unique_workflow_defaults = string_as_bool ( kwargs.get ('enable_unique_workflow_defaults', False ) )
self.tool_path = resolve_path( kwargs.get( "tool_path", "tools" ), self.root )
self.tool_data_path = resolve_path( kwargs.get( "tool_data_path", "tool-data" ), os.getcwd() )
self.len_file_path = kwargs.get( "len_file_path", resolve_path(os.path.join(self.tool_data_path, 'shared','ucsc','chrom'), self.root) )
--- a/lib/galaxy/tools/parameters/basic.py Thu Aug 18 14:20:48 2011 -0400
+++ b/lib/galaxy/tools/parameters/basic.py Thu Aug 18 14:16:03 2011 -0700
@@ -75,6 +75,16 @@
"""
return None
+ def get_initial_value_from_history_prevent_repeats( self, trans, context, already_used ):
+ """
+ Get the starting value for the parameter, but if fetching from the history, try
+ to find a value that has not yet been used. already_used is a list of objects that
+ tools must manipulate (by adding to it) to store a memento that they can use to detect
+ if a value has already been chosen from the history. This is to support the capability to
+ choose each dataset once
+ """
+ return self.get_initial_value(trans, context);
+
def get_required_enctype( self ):
"""
If this parameter needs the form to have a specific encoding
@@ -1385,6 +1395,9 @@
return field
def get_initial_value( self, trans, context ):
+ return self.get_initial_value_from_history_prevent_repeats(trans, context, None);
+
+ def get_initial_value_from_history_prevent_repeats( self, trans, context, already_used ):
"""
NOTE: This is wasteful since dynamic options and dataset collection
happens twice (here and when generating HTML).
@@ -1397,7 +1410,7 @@
assert history is not None, "DataToolParameter requires a history"
if self.optional:
return None
- most_recent_dataset = [None]
+ most_recent_dataset = []
filter_value = None
if self.options:
try:
@@ -1423,15 +1436,19 @@
data = converted_dataset
if not is_valid or ( self.options and self._options_filter_attribute( data ) != filter_value ):
continue
- most_recent_dataset[0] = data
+ most_recent_dataset.append(data)
# Also collect children via association object
dataset_collector( data.children )
dataset_collector( history.datasets )
- most_recent_dataset = most_recent_dataset.pop()
- if most_recent_dataset is not None:
- return most_recent_dataset
- else:
- return ''
+ most_recent_dataset.reverse()
+ if already_used is not None:
+ for val in most_recent_dataset:
+ if val is not None and val not in already_used:
+ already_used.append(val)
+ return val
+ if len(most_recent_dataset) > 0:
+ return most_recent_dataset[0]
+ return ''
def from_html( self, value, trans, other_values={} ):
# Can't look at history in workflow mode, skip validation and such,
--- a/lib/galaxy/web/controllers/workflow.py Thu Aug 18 14:20:48 2011 -0400
+++ b/lib/galaxy/web/controllers/workflow.py Thu Aug 18 14:16:03 2011 -0700
@@ -1258,7 +1258,7 @@
## % ( workflow_name, web.url_for( action='editor', id=trans.security.encode_id(stored.id) ) ) )
@web.expose
- def run( self, trans, id, **kwargs ):
+ def run( self, trans, id, history_id=None, hide_fixed_params=False, **kwargs ):
stored = self.get_stored_workflow( trans, id, check_ownership=False )
user = trans.get_user()
if stored.user != user:
@@ -1279,163 +1279,185 @@
errors = {}
has_upgrade_messages = False
has_errors = False
- if kwargs:
- # If kwargs were provided, the states for each step should have
- # been POSTed
- # Get the kwarg keys for data inputs
- input_keys = filter(lambda a: a.endswith('|input'), kwargs)
- # Example: prefixed='2|input'
- # Check if one of them is a list
- multiple_input_key = None
- multiple_inputs = [None]
- for input_key in input_keys:
- if isinstance(kwargs[input_key], list):
- multiple_input_key = input_key
- multiple_inputs = kwargs[input_key]
- # List to gather values for the template
- invocations=[]
- for input_number, single_input in enumerate(multiple_inputs):
- # Example: single_input='1', single_input='2', etc...
- # 'Fix' the kwargs, to have only the input for this iteration
- if multiple_input_key:
- kwargs[multiple_input_key] = single_input
+ saved_history = None
+ if history_id is not None:
+ saved_history = trans.get_history();
+ try:
+ decoded_history_id = trans.security.decode_id( history_id )
+ history = trans.sa_session.query(trans.app.model.History).get(decoded_history_id)
+ if history.user != trans.user and not trans.user_is_admin():
+ if trans.sa_session.query(trans.app.model.HistoryUserShareAssociation).filter_by(user=trans.user, history=history).count() == 0:
+ error("History is not owned by or shared with current user")
+ trans.set_history(history)
+ except TypeError:
+ error("Malformed history id ( %s ) specified, unable to decode." % str( history_id ))
+ except:
+ error("That history does not exist.")
+ try: # use a try/finally block to restore the user's current history
+ if kwargs:
+ # If kwargs were provided, the states for each step should have
+ # been POSTed
+ # Get the kwarg keys for data inputs
+ input_keys = filter(lambda a: a.endswith('|input'), kwargs)
+ # Example: prefixed='2|input'
+ # Check if one of them is a list
+ multiple_input_key = None
+ multiple_inputs = [None]
+ for input_key in input_keys:
+ if isinstance(kwargs[input_key], list):
+ multiple_input_key = input_key
+ multiple_inputs = kwargs[input_key]
+ # List to gather values for the template
+ invocations=[]
+ for input_number, single_input in enumerate(multiple_inputs):
+ # Example: single_input='1', single_input='2', etc...
+ # 'Fix' the kwargs, to have only the input for this iteration
+ if multiple_input_key:
+ kwargs[multiple_input_key] = single_input
+ for step in workflow.steps:
+ step.upgrade_messages = {}
+ # Connections by input name
+ step.input_connections_by_name = \
+ dict( ( conn.input_name, conn ) for conn in step.input_connections )
+ # Extract just the arguments for this step by prefix
+ p = "%s|" % step.id
+ l = len(p)
+ step_args = dict( ( k[l:], v ) for ( k, v ) in kwargs.iteritems() if k.startswith( p ) )
+ step_errors = None
+ if step.type == 'tool' or step.type is None:
+ module = module_factory.from_workflow_step( trans, step )
+ # Fix any missing parameters
+ step.upgrade_messages = module.check_and_update_state()
+ if step.upgrade_messages:
+ has_upgrade_messages = True
+ # Any connected input needs to have value DummyDataset (these
+ # are not persisted so we need to do it every time)
+ module.add_dummy_datasets( connections=step.input_connections )
+ # Get the tool
+ tool = module.tool
+ # Get the state
+ step.state = state = module.state
+ # Get old errors
+ old_errors = state.inputs.pop( "__errors__", {} )
+ # Update the state
+ step_errors = tool.update_state( trans, tool.inputs, step.state.inputs, step_args,
+ update_only=True, old_errors=old_errors )
+ else:
+ # Fix this for multiple inputs
+ module = step.module = module_factory.from_workflow_step( trans, step )
+ state = step.state = module.decode_runtime_state( trans, step_args.pop( "tool_state" ) )
+ step_errors = module.update_runtime_state( trans, state, step_args )
+ if step_errors:
+ errors[step.id] = state.inputs["__errors__"] = step_errors
+ if 'run_workflow' in kwargs and not errors:
+ new_history = None
+ if 'new_history' in kwargs:
+ if 'new_history_name' in kwargs and kwargs['new_history_name'] != '':
+ nh_name = kwargs['new_history_name']
+ else:
+ nh_name = "History from %s workflow" % workflow.name
+ if multiple_input_key:
+ nh_name = '%s %d' % (nh_name, input_number + 1)
+ new_history = trans.app.model.History( user=trans.user, name=nh_name )
+ trans.sa_session.add( new_history )
+ # Run each step, connecting outputs to inputs
+ workflow_invocation = model.WorkflowInvocation()
+ workflow_invocation.workflow = workflow
+ outputs = odict()
+ for i, step in enumerate( workflow.steps ):
+ # Execute module
+ job = None
+ if step.type == 'tool' or step.type is None:
+ tool = trans.app.toolbox.tools_by_id[ step.tool_id ]
+ input_values = step.state.inputs
+ # Connect up
+ def callback( input, value, prefixed_name, prefixed_label ):
+ if isinstance( input, DataToolParameter ):
+ if prefixed_name in step.input_connections_by_name:
+ conn = step.input_connections_by_name[ prefixed_name ]
+ return outputs[ conn.output_step.id ][ conn.output_name ]
+ visit_input_values( tool.inputs, step.state.inputs, callback )
+ # Execute it
+ job, out_data = tool.execute( trans, step.state.inputs, history=new_history)
+ outputs[ step.id ] = out_data
+ # Create new PJA associations with the created job, to be run on completion.
+ # PJA Parameter Replacement (only applies to immediate actions-- rename specifically, for now)
+ # Pass along replacement dict with the execution of the PJA so we don't have to modify the object.
+ replacement_dict = {}
+ for k, v in kwargs.iteritems():
+ if k.startswith('wf_parm|'):
+ replacement_dict[k[8:]] = v
+ for pja in step.post_job_actions:
+ if pja.action_type in ActionBox.immediate_actions:
+ ActionBox.execute(trans.app, trans.sa_session, pja, job, replacement_dict)
+ else:
+ job.add_post_job_action(pja)
+ else:
+ job, out_data = step.module.execute( trans, step.state )
+ outputs[ step.id ] = out_data
+ # Record invocation
+ workflow_invocation_step = model.WorkflowInvocationStep()
+ workflow_invocation_step.workflow_invocation = workflow_invocation
+ workflow_invocation_step.workflow_step = step
+ workflow_invocation_step.job = job
+ # All jobs ran sucessfully, so we can save now
+ trans.sa_session.add( workflow_invocation )
+ invocations.append({'outputs': outputs,
+ 'new_history': new_history})
+ trans.sa_session.flush()
+ return trans.fill_template( "workflow/run_complete.mako",
+ workflow=stored,
+ invocations=invocations )
+ else:
+ # Prepare each step
+ missing_tools = []
for step in workflow.steps:
step.upgrade_messages = {}
- # Connections by input name
- step.input_connections_by_name = \
- dict( ( conn.input_name, conn ) for conn in step.input_connections )
- # Extract just the arguments for this step by prefix
- p = "%s|" % step.id
- l = len(p)
- step_args = dict( ( k[l:], v ) for ( k, v ) in kwargs.iteritems() if k.startswith( p ) )
- step_errors = None
+ # Contruct modules
if step.type == 'tool' or step.type is None:
- module = module_factory.from_workflow_step( trans, step )
- # Fix any missing parameters
- step.upgrade_messages = module.check_and_update_state()
+ # Restore the tool state for the step
+ step.module = module_factory.from_workflow_step( trans, step )
+ if not step.module:
+ if step.tool_id not in missing_tools:
+ missing_tools.append(step.tool_id)
+ continue
+ step.upgrade_messages = step.module.check_and_update_state()
if step.upgrade_messages:
has_upgrade_messages = True
# Any connected input needs to have value DummyDataset (these
# are not persisted so we need to do it every time)
- module.add_dummy_datasets( connections=step.input_connections )
- # Get the tool
- tool = module.tool
- # Get the state
- step.state = state = module.state
- # Get old errors
- old_errors = state.inputs.pop( "__errors__", {} )
- # Update the state
- step_errors = tool.update_state( trans, tool.inputs, step.state.inputs, step_args,
- update_only=True, old_errors=old_errors )
+ step.module.add_dummy_datasets( connections=step.input_connections )
+ # Store state with the step
+ step.state = step.module.state
+ # Error dict
+ if step.tool_errors:
+ has_errors = True
+ errors[step.id] = step.tool_errors
else:
- # Fix this for multiple inputs
- module = step.module = module_factory.from_workflow_step( trans, step )
- state = step.state = module.decode_runtime_state( trans, step_args.pop( "tool_state" ) )
- step_errors = module.update_runtime_state( trans, state, step_args )
- if step_errors:
- errors[step.id] = state.inputs["__errors__"] = step_errors
- if 'run_workflow' in kwargs and not errors:
- new_history = None
- if 'new_history' in kwargs:
- if 'new_history_name' in kwargs and kwargs['new_history_name'] != '':
- nh_name = kwargs['new_history_name']
- else:
- nh_name = "History from %s workflow" % workflow.name
- if multiple_input_key:
- nh_name = '%s %d' % (nh_name, input_number + 1)
- new_history = trans.app.model.History( user=trans.user, name=nh_name )
- trans.sa_session.add( new_history )
- # Run each step, connecting outputs to inputs
- workflow_invocation = model.WorkflowInvocation()
- workflow_invocation.workflow = workflow
- outputs = odict()
- for i, step in enumerate( workflow.steps ):
- # Execute module
- job = None
- if step.type == 'tool' or step.type is None:
- tool = trans.app.toolbox.tools_by_id[ step.tool_id ]
- input_values = step.state.inputs
- # Connect up
- def callback( input, value, prefixed_name, prefixed_label ):
- if isinstance( input, DataToolParameter ):
- if prefixed_name in step.input_connections_by_name:
- conn = step.input_connections_by_name[ prefixed_name ]
- return outputs[ conn.output_step.id ][ conn.output_name ]
- visit_input_values( tool.inputs, step.state.inputs, callback )
- # Execute it
- job, out_data = tool.execute( trans, step.state.inputs, history=new_history)
- outputs[ step.id ] = out_data
- # Create new PJA associations with the created job, to be run on completion.
- # PJA Parameter Replacement (only applies to immediate actions-- rename specifically, for now)
- # Pass along replacement dict with the execution of the PJA so we don't have to modify the object.
- replacement_dict = {}
- for k, v in kwargs.iteritems():
- if k.startswith('wf_parm|'):
- replacement_dict[k[8:]] = v
- for pja in step.post_job_actions:
- if pja.action_type in ActionBox.immediate_actions:
- ActionBox.execute(trans.app, trans.sa_session, pja, job, replacement_dict)
- else:
- job.add_post_job_action(pja)
- else:
- job, out_data = step.module.execute( trans, step.state )
- outputs[ step.id ] = out_data
- # Record invocation
- workflow_invocation_step = model.WorkflowInvocationStep()
- workflow_invocation_step.workflow_invocation = workflow_invocation
- workflow_invocation_step.workflow_step = step
- workflow_invocation_step.job = job
- # All jobs ran sucessfully, so we can save now
- trans.sa_session.add( workflow_invocation )
- invocations.append({'outputs': outputs,
- 'new_history': new_history})
- trans.sa_session.flush()
- return trans.fill_template( "workflow/run_complete.mako",
- workflow=stored,
- invocations=invocations )
- else:
- # Prepare each step
- missing_tools = []
- for step in workflow.steps:
- step.upgrade_messages = {}
- # Contruct modules
- if step.type == 'tool' or step.type is None:
- # Restore the tool state for the step
- step.module = module_factory.from_workflow_step( trans, step )
- if not step.module:
- if step.tool_id not in missing_tools:
- missing_tools.append(step.tool_id)
- continue
- step.upgrade_messages = step.module.check_and_update_state()
- if step.upgrade_messages:
- has_upgrade_messages = True
- # Any connected input needs to have value DummyDataset (these
- # are not persisted so we need to do it every time)
- step.module.add_dummy_datasets( connections=step.input_connections )
- # Store state with the step
- step.state = step.module.state
- # Error dict
- if step.tool_errors:
- has_errors = True
- errors[step.id] = step.tool_errors
- else:
- ## Non-tool specific stuff?
- step.module = module_factory.from_workflow_step( trans, step )
- step.state = step.module.get_runtime_state()
- # Connections by input name
- step.input_connections_by_name = dict( ( conn.input_name, conn ) for conn in step.input_connections )
- if missing_tools:
- stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
- return trans.fill_template("workflow/run.mako", steps=[], workflow=stored, missing_tools = missing_tools)
- # Render the form
- stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
- return trans.fill_template(
- "workflow/run.mako",
- steps=workflow.steps,
- workflow=stored,
- has_upgrade_messages=has_upgrade_messages,
- errors=errors,
- incoming=kwargs )
+ ## Non-tool specific stuff?
+ step.module = module_factory.from_workflow_step( trans, step )
+ step.state = step.module.get_runtime_state()
+ # Connections by input name
+ step.input_connections_by_name = dict( ( conn.input_name, conn ) for conn in step.input_connections )
+ if missing_tools:
+ stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
+ return trans.fill_template("workflow/run.mako", steps=[], workflow=stored, missing_tools = missing_tools)
+ # Render the form
+ stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
+ return trans.fill_template(
+ "workflow/run.mako",
+ steps=workflow.steps,
+ workflow=stored,
+ has_upgrade_messages=has_upgrade_messages,
+ errors=errors,
+ incoming=kwargs,
+ history_id=history_id,
+ hide_fixed_params=hide_fixed_params,
+ enable_unique_defaults=trans.app.config.enable_unique_workflow_defaults)
+ finally:
+ # restore the active history
+ if saved_history is not None:
+ trans.set_history(saved_history)
def get_item( self, trans, id ):
return self.get_stored_workflow( trans, id )
--- a/templates/workflow/run.mako Thu Aug 18 14:20:48 2011 -0400
+++ b/templates/workflow/run.mako Thu Aug 18 14:16:03 2011 -0700
@@ -6,8 +6,12 @@
<script type="text/javascript">
$( function() {
function show_tool_body(title){
- title.parent().css('border-bottom-width', '1px');
+ title.parent().show().css('border-bottom-width', '1px');
title.next().show('fast');
+ if ('${hide_fixed_params}'.toLowerCase() == 'true') {
+ // show previously hidden parameters
+ title.next().children(".form-row").show();
+ }
}
function hide_tool_body(title){
title.parent().css('border-bottom-width', '0px');
@@ -46,8 +50,15 @@
$("div.toolFormTitle").click(function(){
toggle_tool_body($(this));
});
- // Collapse non-interactive run-workflow panels by default.
- $("div.toolFormBody:not(:has(select, textarea, input[type!=hidden], .wfpspan))").hide().parent().css('border-bottom-width', '0px');
+ if ('${hide_fixed_params}'.toLowerCase() == 'true') {
+ // hide parameters that are not runtime inputs
+ $("div.form-row:not(:has(select, textarea, input[type!=hidden], .wfpspan))").hide();
+ $("div.toolForm:not(:has(select, textarea, input[type!=hidden], .wfpspan))").hide();
+ }
+ else {
+ // Collapse non-interactive run-workflow panels by default.
+ $("div.toolFormBody:not(:has(select, textarea, input[type!=hidden], .wfpspan))").hide().parent().css('border-bottom-width', '0px');
+ }
$("#show_all_tool_body").click(function(){
$("div.toolFormTitle").each(function(){
show_tool_body($(this));
@@ -163,6 +174,8 @@
import colorsys
import random
+used_accumulator = []
+
wf_parms = {}
for step in steps:
for v in [ActionBox.get_short_str(pja) for pja in step.post_job_actions] + step.state.inputs.values():
@@ -178,7 +191,7 @@
hue += hue_offset
%>
-<%def name="do_inputs( inputs, values, errors, prefix, step, other_values = None )">
+<%def name="do_inputs( inputs, values, errors, prefix, step, other_values = None, already_used = None )">
%if other_values is None:
<% other_values = values %>
%endif
@@ -196,7 +209,7 @@
<div class="repeat-group-item"><% index = repeat_values[i]['__index__'] %><div class="form-title-row"><b>${input.title} ${i + 1}</b></div>
- ${do_inputs( input.inputs, repeat_values[ i ], rep_errors, prefix + input.name + "_" + str(index) + "|", step, other_values )}
+ ${do_inputs( input.inputs, repeat_values[ i ], rep_errors, prefix + input.name + "_" + str(index) + "|", step, other_values, already_used )}
## <div class="form-row"><input type="submit" name="${step.id}|${prefix}${input.name}_${i}_remove" value="Remove ${input.title} ${i+1}" /></div></div>
%endfor
@@ -207,15 +220,15 @@
<% current_case = group_values['__current_case__'] %><% new_prefix = prefix + input.name + "|" %><% group_errors = errors.get( input.name, {} ) %>
- ${row_for_param( input.test_param, group_values[ input.test_param.name ], other_values, group_errors, prefix, step )}
- ${do_inputs( input.cases[ current_case ].inputs, group_values, group_errors, new_prefix, step, other_values )}
+ ${row_for_param( input.test_param, group_values[ input.test_param.name ], other_values, group_errors, prefix, step, already_used )}
+ ${do_inputs( input.cases[ current_case ].inputs, group_values, group_errors, new_prefix, step, other_values, already_used )}
%else:
- ${row_for_param( input, values[ input.name ], other_values, errors, prefix, step )}
+ ${row_for_param( input, values[ input.name ], other_values, errors, prefix, step, already_used )}
%endif
%endfor
</%def>
-<%def name="row_for_param( param, value, other_values, error_dict, prefix, step )">
+<%def name="row_for_param( param, value, other_values, error_dict, prefix, step, already_used )">
## -- ${param.name} -- ${step.state.inputs} --
%if error_dict.has_key( param.name ):
<% cls = "form-row form-row-error" %>
@@ -235,7 +248,9 @@
## FIXME: Initialize in the controller
<%
if value is None:
- value = other_values[ param.name ] = param.get_initial_value( t, other_values )
+ value = other_values[ param.name ] = param.get_initial_value_from_history_prevent_repeats( t, other_values, already_used )
+ if not enable_unique_defaults:
+ del already_used[:]
%>
${param.get_html_field( t, value, other_values ).get_html( str(step.id) + "|" + prefix )}
<input type="hidden" name="${step.id}|__force_update__${prefix}${param.name}" value="true" />
@@ -250,7 +265,11 @@
## controller should go through the inputs on the first
## load, fill in initial values where needed, and mark
## all that are runtime modifiable in some way.
- <% value = other_values[ param.name ] = param.get_initial_value( t, other_values ) %>
+ <%
+ value = other_values[ param.name ] = param.get_initial_value_from_history_prevent_repeats( t, other_values, already_used )
+ if not enable_unique_defaults:
+ del already_used[:]
+ %>
${param.get_html_field( t, value, other_values ).get_html( str(step.id) + "|" + prefix )}
<input type="hidden" name="${step.id}|__runtime__${prefix}${param.name}" value="true" />
%else:
@@ -342,7 +361,6 @@
});
</script>
%endif
-
%for i, step in enumerate( steps ):
%if step.type == 'tool' or step.type is None:
<% tool = app.toolbox.tools_by_id[step.tool_id] %>
@@ -355,36 +373,36 @@
% endif
</div><div class="toolFormBody">
- ${do_inputs( tool.inputs, step.state.inputs, errors.get( step.id, dict() ), "", step )}
- % if step.post_job_actions:
- <hr/>
- <div class='form-row'>
- % if len(step.post_job_actions) > 1:
- <label>Actions:</label>
- % else:
- <label>Action:</label>
+ ${do_inputs( tool.inputs, step.state.inputs, errors.get( step.id, dict() ), "", step, None, used_accumulator )}
+ % if step.post_job_actions:
+ <hr/>
+ <div class='form-row'>
+ % if len(step.post_job_actions) > 1:
+ <label>Actions:</label>
+ % else:
+ <label>Action:</label>
+ % endif
+ <%
+ pja_ss_all = []
+ for pja_ss in [ActionBox.get_short_str(pja) for pja in step.post_job_actions]:
+ for rematch in re.findall('\$\{.+?\}', pja_ss):
+ pja_ss = pja_ss.replace(rematch, '<span style="background-color:%s" class="wfpspan wf_parm__%s pja_wfp">%s</span>' % (wf_parms[rematch[2:-1]], rematch[2:-1], rematch[2:-1]))
+ pja_ss_all.append(pja_ss)
+ %>
+ ${'<br/>'.join(pja_ss_all)}
+ </div>
% endif
- <%
- pja_ss_all = []
- for pja_ss in [ActionBox.get_short_str(pja) for pja in step.post_job_actions]:
- for rematch in re.findall('\$\{.+?\}', pja_ss):
- pja_ss = pja_ss.replace(rematch, '<span style="background-color:%s" class="wfpspan wf_parm__%s pja_wfp">%s</span>' % (wf_parms[rematch[2:-1]], rematch[2:-1], rematch[2:-1]))
- pja_ss_all.append(pja_ss)
- %>
- ${'<br/>'.join(pja_ss_all)}
- </div>
- % endif
+ </div></div>
- </div>
- %else:
- <% module = step.module %>
- <input type="hidden" name="${step.id}|tool_state" value="${module.encode_runtime_state( t, step.state )}">
- <div class="toolForm">
- <div class="toolFormTitle">
- <span class='title_ul_text'>Step ${int(step.order_index)+1}: ${module.name}</span>
- % if step.annotations:
- <div class="step-annotation">${step.annotations[0].annotation}</div>
- % endif
+ %else:
+ <% module = step.module %>
+ <input type="hidden" name="${step.id}|tool_state" value="${module.encode_runtime_state( t, step.state )}">
+ <div class="toolForm">
+ <div class="toolFormTitle">
+ <span class='title_ul_text'>Step ${int(step.order_index)+1}: ${module.name}</span>
+ % if step.annotations:
+ <div class="step-annotation">${step.annotations[0].annotation}</div>
+ % endif
</div><div class="toolFormBody"><%
@@ -397,7 +415,7 @@
if not type_filter:
type_filter = ['data']
%>
- ${do_inputs( module.get_runtime_inputs(type_filter), step.state.inputs, errors.get( step.id, dict() ), "", step )}
+ ${do_inputs( module.get_runtime_inputs(type_filter), step.state.inputs, errors.get( step.id, dict() ), "", step, None, used_accumulator )}
</div></div>
%endif
@@ -411,10 +429,12 @@
%endfor
</ul>
%else:
+ %if history_id is None:
<p id='new_history_p'><input type="checkbox" name='new_history' value="true" id='new_history_cbx'/><label for='new_history_cbx'>Send results to a new history </label><span id="new_history_input">named: <input type='text' name='new_history_name' value='${h.to_unicode( workflow.name )}'/></span></p>
+ %endif
<input type="submit" name="run_workflow" value="Run workflow" /></form>
%endif
--- a/universe_wsgi.ini.sample Thu Aug 18 14:20:48 2011 -0400
+++ b/universe_wsgi.ini.sample Thu Aug 18 14:16:03 2011 -0700
@@ -444,6 +444,14 @@
# large servers.
#enable_tool_tags = False
+# Enable a feature when running workflows. When enabled, default datasets
+# are selected for "Set at Runtime" inputs from the history such that the
+# same input will not be selected twice, unless there are more inputs than
+# compatible datasets in the history.
+# When False, the most recently added compatible item in the history will
+# be used for each "Set at Runtime" input, independent of others in the Workflow
+#enable_unique_workflow_defaults = False
+
# Enable Galaxy's "Upload via FTP" interface. You'll need to install and
# configure an FTP server (we've used ProFTPd since it can use Galaxy's
# database for authentication) and set the following two options.
@@ -459,6 +467,15 @@
# Enable enforcement of quotas. Quotas can be set from the Admin interface.
#enable_quotas = False
+# Enable a feature when running workflows. When enabled, default datasets
+# are selected for "Set at Runtime" inputs from the history such that the
+# same input will not be selected twice, unless there are more inputs than
+# compatible datasets in the history.
+# When False, the most recently added compatible item in the history will
+# be used for each "Set at Runtime" input, independent of others in the Workflow
+#enable_unique_workflow_defaults = False
+
+
# -- Job Execution
# If running multiple Galaxy processes, one can be designated as the job
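In run.mako above, one used_accumulator list is shared by every input on the
run form and threaded through do_inputs/row_for_param into
get_initial_value_from_history_prevent_repeats; when enable_unique_defaults is
off, the template empties the list after each lookup so every input again
defaults to the newest dataset. A condensed, self-contained demo of that
control flow (FakeParam and fill_defaults are illustrative stand-ins, not
Galaxy classes):

    class FakeParam(object):
        def __init__(self, history):
            self.history = history  # newest-first dataset names
        def get_initial_value_from_history_prevent_repeats(self, trans, context, already_used):
            for val in self.history:
                if already_used is not None and val not in already_used:
                    already_used.append(val)
                    return val
            return self.history[0] if self.history else ''

    def fill_defaults(params, enable_unique_defaults):
        used_accumulator = []  # one list for the whole run form
        values = []
        for param in params:
            values.append(param.get_initial_value_from_history_prevent_repeats(
                None, {}, used_accumulator))
            if not enable_unique_defaults:
                del used_accumulator[:]  # reset: the next input may reuse the dataset
        return values

    history = ['d2', 'd1']  # d2 is the newest
    params = [FakeParam(history), FakeParam(history)]
    fill_defaults(params, True)   # -> ['d2', 'd1']
    fill_defaults(params, False)  # -> ['d2', 'd2']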
http://bitbucket.org/galaxy/galaxy-central/changeset/b58453168699/
changeset: b58453168699
user: John Duddy
date: 2011-08-18 23:38:26
summary: Removed duplicate section (merge error)
affected #: 1 file (-1 bytes)
--- a/universe_wsgi.ini.sample Thu Aug 18 14:16:03 2011 -0700
+++ b/universe_wsgi.ini.sample Thu Aug 18 14:38:26 2011 -0700
@@ -444,14 +444,6 @@
# large servers.
#enable_tool_tags = False
-# Enable a feature when running workflows. When enabled, default datasets
-# are selected for "Set at Runtime" inputs from the history such that the
-# same input will not be selected twice, unless there are more inputs than
-# compatible datasets in the history.
-# When False, the most recently added compatible item in the history will
-# be used for each "Set at Runtime" input, independent of others in the Workflow
-#enable_unique_workflow_defaults = False
-
# Enable Galaxy's "Upload via FTP" interface. You'll need to install and
# configure an FTP server (we've used ProFTPd since it can use Galaxy's
# database for authentication) and set the following two options.
http://bitbucket.org/galaxy/galaxy-central/changeset/553cdfbd3ad7/
changeset: 553cdfbd3ad7
user: dannon
date: 2011-10-10 15:39:20
summary: Workflow unique default inputs: Merge in from John Duddy.
affected #: 5 files (-1 bytes)
--- a/lib/galaxy/config.py Fri Oct 07 15:24:27 2011 -0400
+++ b/lib/galaxy/config.py Mon Oct 10 09:39:20 2011 -0400
@@ -47,6 +47,7 @@
self.enable_openid = string_as_bool( kwargs.get( 'enable_openid', False ) )
self.enable_quotas = string_as_bool( kwargs.get( 'enable_quotas', False ) )
self.tool_sheds_config = kwargs.get( 'tool_sheds_config_file', 'tool_sheds_conf.xml' )
+ self.enable_unique_workflow_defaults = string_as_bool ( kwargs.get ('enable_unique_workflow_defaults', False ) )
self.tool_path = resolve_path( kwargs.get( "tool_path", "tools" ), self.root )
self.tool_data_path = resolve_path( kwargs.get( "tool_data_path", "tool-data" ), os.getcwd() )
self.len_file_path = kwargs.get( "len_file_path", resolve_path(os.path.join(self.tool_data_path, 'shared','ucsc','chrom'), self.root) )
--- a/lib/galaxy/tools/parameters/basic.py Fri Oct 07 15:24:27 2011 -0400
+++ b/lib/galaxy/tools/parameters/basic.py Mon Oct 10 09:39:20 2011 -0400
@@ -75,6 +75,16 @@
"""
return None
+ def get_initial_value_from_history_prevent_repeats( self, trans, context, already_used ):
+ """
+ Get the starting value for the parameter, but if fetching from the history, try
+ to find a value that has not yet been used. already_used is a list of objects that
+ tools must manipulate (by adding to it) to store a memento that they can use to detect
+ if a value has already been chosen from the history. This is to support the capability to
+ choose each dataset once
+ """
+ return self.get_initial_value(trans, context);
+
def get_required_enctype( self ):
"""
If this parameter needs the form to have a specific encoding
@@ -1389,6 +1399,9 @@
return field
def get_initial_value( self, trans, context ):
+ return self.get_initial_value_from_history_prevent_repeats(trans, context, None);
+
+ def get_initial_value_from_history_prevent_repeats( self, trans, context, already_used ):
"""
NOTE: This is wasteful since dynamic options and dataset collection
happens twice (here and when generating HTML).
@@ -1401,7 +1414,7 @@
assert history is not None, "DataToolParameter requires a history"
if self.optional:
return None
- most_recent_dataset = [None]
+ most_recent_dataset = []
filter_value = None
if self.options:
try:
@@ -1427,15 +1440,19 @@
data = converted_dataset
if not is_valid or ( self.options and self._options_filter_attribute( data ) != filter_value ):
continue
- most_recent_dataset[0] = data
+ most_recent_dataset.append(data)
# Also collect children via association object
dataset_collector( data.children )
dataset_collector( history.datasets )
- most_recent_dataset = most_recent_dataset.pop()
- if most_recent_dataset is not None:
- return most_recent_dataset
- else:
- return ''
+ most_recent_dataset.reverse()
+ if already_used is not None:
+ for val in most_recent_dataset:
+ if val is not None and val not in already_used:
+ already_used.append(val)
+ return val
+ if len(most_recent_dataset) > 0:
+ return most_recent_dataset[0]
+ return ''
def from_html( self, value, trans, other_values={} ):
# Can't look at history in workflow mode, skip validation and such,
--- a/lib/galaxy/web/controllers/workflow.py Fri Oct 07 15:24:27 2011 -0400
+++ b/lib/galaxy/web/controllers/workflow.py Mon Oct 10 09:39:20 2011 -0400
@@ -1258,7 +1258,7 @@
## % ( workflow_name, web.url_for( action='editor', id=trans.security.encode_id(stored.id) ) ) )
@web.expose
- def run( self, trans, id, **kwargs ):
+ def run( self, trans, id, history_id=None, hide_fixed_params=False, **kwargs ):
stored = self.get_stored_workflow( trans, id, check_ownership=False )
user = trans.get_user()
if stored.user != user:
@@ -1279,163 +1279,185 @@
errors = {}
has_upgrade_messages = False
has_errors = False
- if kwargs:
- # If kwargs were provided, the states for each step should have
- # been POSTed
- # Get the kwarg keys for data inputs
- input_keys = filter(lambda a: a.endswith('|input'), kwargs)
- # Example: prefixed='2|input'
- # Check if one of them is a list
- multiple_input_key = None
- multiple_inputs = [None]
- for input_key in input_keys:
- if isinstance(kwargs[input_key], list):
- multiple_input_key = input_key
- multiple_inputs = kwargs[input_key]
- # List to gather values for the template
- invocations=[]
- for input_number, single_input in enumerate(multiple_inputs):
- # Example: single_input='1', single_input='2', etc...
- # 'Fix' the kwargs, to have only the input for this iteration
- if multiple_input_key:
- kwargs[multiple_input_key] = single_input
+ saved_history = None
+ if history_id is not None:
+ saved_history = trans.get_history();
+ try:
+ decoded_history_id = trans.security.decode_id( history_id )
+ history = trans.sa_session.query(trans.app.model.History).get(decoded_history_id)
+ if history.user != trans.user and not trans.user_is_admin():
+ if trans.sa_session.query(trans.app.model.HistoryUserShareAssociation).filter_by(user=trans.user, history=history).count() == 0:
+ error("History is not owned by or shared with current user")
+ trans.set_history(history)
+ except TypeError:
+ error("Malformed history id ( %s ) specified, unable to decode." % str( history_id ))
+ except:
+ error("That history does not exist.")
+ try: # use a try/finally block to restore the user's current history
+ if kwargs:
+ # If kwargs were provided, the states for each step should have
+ # been POSTed
+ # Get the kwarg keys for data inputs
+ input_keys = filter(lambda a: a.endswith('|input'), kwargs)
+ # Example: prefixed='2|input'
+ # Check if one of them is a list
+ multiple_input_key = None
+ multiple_inputs = [None]
+ for input_key in input_keys:
+ if isinstance(kwargs[input_key], list):
+ multiple_input_key = input_key
+ multiple_inputs = kwargs[input_key]
+ # List to gather values for the template
+ invocations=[]
+ for input_number, single_input in enumerate(multiple_inputs):
+ # Example: single_input='1', single_input='2', etc...
+ # 'Fix' the kwargs, to have only the input for this iteration
+ if multiple_input_key:
+ kwargs[multiple_input_key] = single_input
+ for step in workflow.steps:
+ step.upgrade_messages = {}
+ # Connections by input name
+ step.input_connections_by_name = \
+ dict( ( conn.input_name, conn ) for conn in step.input_connections )
+ # Extract just the arguments for this step by prefix
+ p = "%s|" % step.id
+ l = len(p)
+ step_args = dict( ( k[l:], v ) for ( k, v ) in kwargs.iteritems() if k.startswith( p ) )
+ step_errors = None
+ if step.type == 'tool' or step.type is None:
+ module = module_factory.from_workflow_step( trans, step )
+ # Fix any missing parameters
+ step.upgrade_messages = module.check_and_update_state()
+ if step.upgrade_messages:
+ has_upgrade_messages = True
+ # Any connected input needs to have value DummyDataset (these
+ # are not persisted so we need to do it every time)
+ module.add_dummy_datasets( connections=step.input_connections )
+ # Get the tool
+ tool = module.tool
+ # Get the state
+ step.state = state = module.state
+ # Get old errors
+ old_errors = state.inputs.pop( "__errors__", {} )
+ # Update the state
+ step_errors = tool.update_state( trans, tool.inputs, step.state.inputs, step_args,
+ update_only=True, old_errors=old_errors )
+ else:
+ # Fix this for multiple inputs
+ module = step.module = module_factory.from_workflow_step( trans, step )
+ state = step.state = module.decode_runtime_state( trans, step_args.pop( "tool_state" ) )
+ step_errors = module.update_runtime_state( trans, state, step_args )
+ if step_errors:
+ errors[step.id] = state.inputs["__errors__"] = step_errors
+ if 'run_workflow' in kwargs and not errors:
+ new_history = None
+ if 'new_history' in kwargs:
+ if 'new_history_name' in kwargs and kwargs['new_history_name'] != '':
+ nh_name = kwargs['new_history_name']
+ else:
+ nh_name = "History from %s workflow" % workflow.name
+ if multiple_input_key:
+ nh_name = '%s %d' % (nh_name, input_number + 1)
+ new_history = trans.app.model.History( user=trans.user, name=nh_name )
+ trans.sa_session.add( new_history )
+ # Run each step, connecting outputs to inputs
+ workflow_invocation = model.WorkflowInvocation()
+ workflow_invocation.workflow = workflow
+ outputs = odict()
+ for i, step in enumerate( workflow.steps ):
+ # Execute module
+ job = None
+ if step.type == 'tool' or step.type is None:
+ tool = trans.app.toolbox.tools_by_id[ step.tool_id ]
+ input_values = step.state.inputs
+ # Connect up
+ def callback( input, value, prefixed_name, prefixed_label ):
+ if isinstance( input, DataToolParameter ):
+ if prefixed_name in step.input_connections_by_name:
+ conn = step.input_connections_by_name[ prefixed_name ]
+ return outputs[ conn.output_step.id ][ conn.output_name ]
+ visit_input_values( tool.inputs, step.state.inputs, callback )
+ # Execute it
+ job, out_data = tool.execute( trans, step.state.inputs, history=new_history)
+ outputs[ step.id ] = out_data
+ # Create new PJA associations with the created job, to be run on completion.
+ # PJA Parameter Replacement (only applies to immediate actions-- rename specifically, for now)
+ # Pass along replacement dict with the execution of the PJA so we don't have to modify the object.
+ replacement_dict = {}
+ for k, v in kwargs.iteritems():
+ if k.startswith('wf_parm|'):
+ replacement_dict[k[8:]] = v
+ for pja in step.post_job_actions:
+ if pja.action_type in ActionBox.immediate_actions:
+ ActionBox.execute(trans.app, trans.sa_session, pja, job, replacement_dict)
+ else:
+ job.add_post_job_action(pja)
+ else:
+ job, out_data = step.module.execute( trans, step.state )
+ outputs[ step.id ] = out_data
+ # Record invocation
+ workflow_invocation_step = model.WorkflowInvocationStep()
+ workflow_invocation_step.workflow_invocation = workflow_invocation
+ workflow_invocation_step.workflow_step = step
+ workflow_invocation_step.job = job
+ # All jobs ran sucessfully, so we can save now
+ trans.sa_session.add( workflow_invocation )
+ invocations.append({'outputs': outputs,
+ 'new_history': new_history})
+ trans.sa_session.flush()
+ return trans.fill_template( "workflow/run_complete.mako",
+ workflow=stored,
+ invocations=invocations )
+ else:
+ # Prepare each step
+ missing_tools = []
for step in workflow.steps:
step.upgrade_messages = {}
- # Connections by input name
- step.input_connections_by_name = \
- dict( ( conn.input_name, conn ) for conn in step.input_connections )
- # Extract just the arguments for this step by prefix
- p = "%s|" % step.id
- l = len(p)
- step_args = dict( ( k[l:], v ) for ( k, v ) in kwargs.iteritems() if k.startswith( p ) )
- step_errors = None
+ # Contruct modules
if step.type == 'tool' or step.type is None:
- module = module_factory.from_workflow_step( trans, step )
- # Fix any missing parameters
- step.upgrade_messages = module.check_and_update_state()
+ # Restore the tool state for the step
+ step.module = module_factory.from_workflow_step( trans, step )
+ if not step.module:
+ if step.tool_id not in missing_tools:
+ missing_tools.append(step.tool_id)
+ continue
+ step.upgrade_messages = step.module.check_and_update_state()
if step.upgrade_messages:
has_upgrade_messages = True
# Any connected input needs to have value DummyDataset (these
# are not persisted so we need to do it every time)
- module.add_dummy_datasets( connections=step.input_connections )
- # Get the tool
- tool = module.tool
- # Get the state
- step.state = state = module.state
- # Get old errors
- old_errors = state.inputs.pop( "__errors__", {} )
- # Update the state
- step_errors = tool.update_state( trans, tool.inputs, step.state.inputs, step_args,
- update_only=True, old_errors=old_errors )
+ step.module.add_dummy_datasets( connections=step.input_connections )
+ # Store state with the step
+ step.state = step.module.state
+ # Error dict
+ if step.tool_errors:
+ has_errors = True
+ errors[step.id] = step.tool_errors
else:
- # Fix this for multiple inputs
- module = step.module = module_factory.from_workflow_step( trans, step )
- state = step.state = module.decode_runtime_state( trans, step_args.pop( "tool_state" ) )
- step_errors = module.update_runtime_state( trans, state, step_args )
- if step_errors:
- errors[step.id] = state.inputs["__errors__"] = step_errors
- if 'run_workflow' in kwargs and not errors:
- new_history = None
- if 'new_history' in kwargs:
- if 'new_history_name' in kwargs and kwargs['new_history_name'] != '':
- nh_name = kwargs['new_history_name']
- else:
- nh_name = "History from %s workflow" % workflow.name
- if multiple_input_key:
- nh_name = '%s %d' % (nh_name, input_number + 1)
- new_history = trans.app.model.History( user=trans.user, name=nh_name )
- trans.sa_session.add( new_history )
- # Run each step, connecting outputs to inputs
- workflow_invocation = model.WorkflowInvocation()
- workflow_invocation.workflow = workflow
- outputs = odict()
- for i, step in enumerate( workflow.steps ):
- # Execute module
- job = None
- if step.type == 'tool' or step.type is None:
- tool = trans.app.toolbox.tools_by_id[ step.tool_id ]
- input_values = step.state.inputs
- # Connect up
- def callback( input, value, prefixed_name, prefixed_label ):
- if isinstance( input, DataToolParameter ):
- if prefixed_name in step.input_connections_by_name:
- conn = step.input_connections_by_name[ prefixed_name ]
- return outputs[ conn.output_step.id ][ conn.output_name ]
- visit_input_values( tool.inputs, step.state.inputs, callback )
- # Execute it
- job, out_data = tool.execute( trans, step.state.inputs, history=new_history)
- outputs[ step.id ] = out_data
- # Create new PJA associations with the created job, to be run on completion.
- # PJA Parameter Replacement (only applies to immediate actions-- rename specifically, for now)
- # Pass along replacement dict with the execution of the PJA so we don't have to modify the object.
- replacement_dict = {}
- for k, v in kwargs.iteritems():
- if k.startswith('wf_parm|'):
- replacement_dict[k[8:]] = v
- for pja in step.post_job_actions:
- if pja.action_type in ActionBox.immediate_actions:
- ActionBox.execute(trans.app, trans.sa_session, pja, job, replacement_dict)
- else:
- job.add_post_job_action(pja)
- else:
- job, out_data = step.module.execute( trans, step.state )
- outputs[ step.id ] = out_data
- # Record invocation
- workflow_invocation_step = model.WorkflowInvocationStep()
- workflow_invocation_step.workflow_invocation = workflow_invocation
- workflow_invocation_step.workflow_step = step
- workflow_invocation_step.job = job
- # All jobs ran sucessfully, so we can save now
- trans.sa_session.add( workflow_invocation )
- invocations.append({'outputs': outputs,
- 'new_history': new_history})
- trans.sa_session.flush()
- return trans.fill_template( "workflow/run_complete.mako",
- workflow=stored,
- invocations=invocations )
- else:
- # Prepare each step
- missing_tools = []
- for step in workflow.steps:
- step.upgrade_messages = {}
- # Contruct modules
- if step.type == 'tool' or step.type is None:
- # Restore the tool state for the step
- step.module = module_factory.from_workflow_step( trans, step )
- if not step.module:
- if step.tool_id not in missing_tools:
- missing_tools.append(step.tool_id)
- continue
- step.upgrade_messages = step.module.check_and_update_state()
- if step.upgrade_messages:
- has_upgrade_messages = True
- # Any connected input needs to have value DummyDataset (these
- # are not persisted so we need to do it every time)
- step.module.add_dummy_datasets( connections=step.input_connections )
- # Store state with the step
- step.state = step.module.state
- # Error dict
- if step.tool_errors:
- has_errors = True
- errors[step.id] = step.tool_errors
- else:
- ## Non-tool specific stuff?
- step.module = module_factory.from_workflow_step( trans, step )
- step.state = step.module.get_runtime_state()
- # Connections by input name
- step.input_connections_by_name = dict( ( conn.input_name, conn ) for conn in step.input_connections )
- if missing_tools:
- stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
- return trans.fill_template("workflow/run.mako", steps=[], workflow=stored, missing_tools = missing_tools)
- # Render the form
- stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
- return trans.fill_template(
- "workflow/run.mako",
- steps=workflow.steps,
- workflow=stored,
- has_upgrade_messages=has_upgrade_messages,
- errors=errors,
- incoming=kwargs )
+ ## Non-tool specific stuff?
+ step.module = module_factory.from_workflow_step( trans, step )
+ step.state = step.module.get_runtime_state()
+ # Connections by input name
+ step.input_connections_by_name = dict( ( conn.input_name, conn ) for conn in step.input_connections )
+ if missing_tools:
+ stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
+ return trans.fill_template("workflow/run.mako", steps=[], workflow=stored, missing_tools = missing_tools)
+ # Render the form
+ stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
+ return trans.fill_template(
+ "workflow/run.mako",
+ steps=workflow.steps,
+ workflow=stored,
+ has_upgrade_messages=has_upgrade_messages,
+ errors=errors,
+ incoming=kwargs,
+ history_id=history_id,
+ hide_fixed_params=hide_fixed_params,
+ enable_unique_defaults=trans.app.config.enable_unique_workflow_defaults)
+ finally:
+ # restore the active history
+ if saved_history is not None:
+ trans.set_history(saved_history)
def get_item( self, trans, id ):
return self.get_stored_workflow( trans, id )
--- a/templates/workflow/run.mako Fri Oct 07 15:24:27 2011 -0400
+++ b/templates/workflow/run.mako Mon Oct 10 09:39:20 2011 -0400
@@ -6,8 +6,12 @@
<script type="text/javascript">
$( function() {
function show_tool_body(title){
- title.parent().css('border-bottom-width', '1px');
+ title.parent().show().css('border-bottom-width', '1px');
title.next().show('fast');
+ if ('${hide_fixed_params}'.toLowerCase() == 'true') {
+ // show previously hidden parameters
+ title.next().children(".form-row").show();
+ }
}
function hide_tool_body(title){
title.parent().css('border-bottom-width', '0px');
@@ -46,8 +50,15 @@
$("div.toolFormTitle").click(function(){
toggle_tool_body($(this));
});
- // Collapse non-interactive run-workflow panels by default.
- $("div.toolFormBody:not(:has(select, textarea, input[type!=hidden], .wfpspan))").hide().parent().css('border-bottom-width', '0px');
+ if ('${hide_fixed_params}'.toLowerCase() == 'true') {
+ // hide parameters that are not runtime inputs
+ $("div.form-row:not(:has(select, textarea, input[type!=hidden], .wfpspan))").hide();
+ $("div.toolForm:not(:has(select, textarea, input[type!=hidden], .wfpspan))").hide();
+ }
+ else {
+ // Collapse non-interactive run-workflow panels by default.
+ $("div.toolFormBody:not(:has(select, textarea, input[type!=hidden], .wfpspan))").hide().parent().css('border-bottom-width', '0px');
+ }
$("#show_all_tool_body").click(function(){
$("div.toolFormTitle").each(function(){
show_tool_body($(this));
@@ -163,6 +174,8 @@
import colorsys
import random
+used_accumulator = []
+
wf_parms = {}
for step in steps:
for v in [ActionBox.get_short_str(pja) for pja in step.post_job_actions] + step.state.inputs.values():
@@ -178,7 +191,7 @@
hue += hue_offset
%>
-<%def name="do_inputs( inputs, values, errors, prefix, step, other_values = None )">
+<%def name="do_inputs( inputs, values, errors, prefix, step, other_values = None, already_used = None )">
%if other_values is None:
<% other_values = values %>
%endif
@@ -196,7 +209,7 @@
<div class="repeat-group-item"><% index = repeat_values[i]['__index__'] %><div class="form-title-row"><b>${input.title} ${i + 1}</b></div>
- ${do_inputs( input.inputs, repeat_values[ i ], rep_errors, prefix + input.name + "_" + str(index) + "|", step, other_values )}
+ ${do_inputs( input.inputs, repeat_values[ i ], rep_errors, prefix + input.name + "_" + str(index) + "|", step, other_values, already_used )}
## <div class="form-row"><input type="submit" name="${step.id}|${prefix}${input.name}_${i}_remove" value="Remove ${input.title} ${i+1}" /></div></div>
%endfor
@@ -207,15 +220,15 @@
<% current_case = group_values['__current_case__'] %><% new_prefix = prefix + input.name + "|" %><% group_errors = errors.get( input.name, {} ) %>
- ${row_for_param( input.test_param, group_values[ input.test_param.name ], other_values, group_errors, prefix, step )}
- ${do_inputs( input.cases[ current_case ].inputs, group_values, group_errors, new_prefix, step, other_values )}
+ ${row_for_param( input.test_param, group_values[ input.test_param.name ], other_values, group_errors, prefix, step, already_used )}
+ ${do_inputs( input.cases[ current_case ].inputs, group_values, group_errors, new_prefix, step, other_values, already_used )}
%else:
- ${row_for_param( input, values[ input.name ], other_values, errors, prefix, step )}
+ ${row_for_param( input, values[ input.name ], other_values, errors, prefix, step, already_used )}
%endif
%endfor
</%def>
-<%def name="row_for_param( param, value, other_values, error_dict, prefix, step )">
+<%def name="row_for_param( param, value, other_values, error_dict, prefix, step, already_used )">
## -- ${param.name} -- ${step.state.inputs} --
%if error_dict.has_key( param.name ):
<% cls = "form-row form-row-error" %>
@@ -235,7 +248,9 @@
## FIXME: Initialize in the controller
<%
if value is None:
- value = other_values[ param.name ] = param.get_initial_value( t, other_values )
+ value = other_values[ param.name ] = param.get_initial_value_from_history_prevent_repeats( t, other_values, already_used )
+ if not enable_unique_defaults:
+ del already_used[:]
%>
${param.get_html_field( t, value, other_values ).get_html( str(step.id) + "|" + prefix )}
<input type="hidden" name="${step.id}|__force_update__${prefix}${param.name}" value="true" />
@@ -250,7 +265,11 @@
## controller should go through the inputs on the first
## load, fill in initial values where needed, and mark
## all that are runtime modifiable in some way.
- <% value = other_values[ param.name ] = param.get_initial_value( t, other_values ) %>
+ <%
+ value = other_values[ param.name ] = param.get_initial_value_from_history_prevent_repeats( t, other_values, already_used )
+ if not enable_unique_defaults:
+ del already_used[:]
+ %>
${param.get_html_field( t, value, other_values ).get_html( str(step.id) + "|" + prefix )}
<input type="hidden" name="${step.id}|__runtime__${prefix}${param.name}" value="true" />
%else:
@@ -342,7 +361,6 @@
});
</script>
%endif
-
%for i, step in enumerate( steps ):
%if step.type == 'tool' or step.type is None:
<% tool = app.toolbox.tools_by_id[step.tool_id] %>
@@ -355,36 +373,36 @@
% endif
</div><div class="toolFormBody">
- ${do_inputs( tool.inputs, step.state.inputs, errors.get( step.id, dict() ), "", step )}
- % if step.post_job_actions:
- <hr/>
- <div class='form-row'>
- % if len(step.post_job_actions) > 1:
- <label>Actions:</label>
- % else:
- <label>Action:</label>
+ ${do_inputs( tool.inputs, step.state.inputs, errors.get( step.id, dict() ), "", step, None, used_accumulator )}
+ % if step.post_job_actions:
+ <hr/>
+ <div class='form-row'>
+ % if len(step.post_job_actions) > 1:
+ <label>Actions:</label>
+ % else:
+ <label>Action:</label>
+ % endif
+ <%
+ pja_ss_all = []
+ for pja_ss in [ActionBox.get_short_str(pja) for pja in step.post_job_actions]:
+ for rematch in re.findall('\$\{.+?\}', pja_ss):
+ pja_ss = pja_ss.replace(rematch, '<span style="background-color:%s" class="wfpspan wf_parm__%s pja_wfp">%s</span>' % (wf_parms[rematch[2:-1]], rematch[2:-1], rematch[2:-1]))
+ pja_ss_all.append(pja_ss)
+ %>
+ ${'<br/>'.join(pja_ss_all)}
+ </div>
% endif
- <%
- pja_ss_all = []
- for pja_ss in [ActionBox.get_short_str(pja) for pja in step.post_job_actions]:
- for rematch in re.findall('\$\{.+?\}', pja_ss):
- pja_ss = pja_ss.replace(rematch, '<span style="background-color:%s" class="wfpspan wf_parm__%s pja_wfp">%s</span>' % (wf_parms[rematch[2:-1]], rematch[2:-1], rematch[2:-1]))
- pja_ss_all.append(pja_ss)
- %>
- ${'<br/>'.join(pja_ss_all)}
- </div>
- % endif
+ </div></div>
- </div>
- %else:
- <% module = step.module %>
- <input type="hidden" name="${step.id}|tool_state" value="${module.encode_runtime_state( t, step.state )}">
- <div class="toolForm">
- <div class="toolFormTitle">
- <span class='title_ul_text'>Step ${int(step.order_index)+1}: ${module.name}</span>
- % if step.annotations:
- <div class="step-annotation">${step.annotations[0].annotation}</div>
- % endif
+ %else:
+ <% module = step.module %>
+ <input type="hidden" name="${step.id}|tool_state" value="${module.encode_runtime_state( t, step.state )}">
+ <div class="toolForm">
+ <div class="toolFormTitle">
+ <span class='title_ul_text'>Step ${int(step.order_index)+1}: ${module.name}</span>
+ % if step.annotations:
+ <div class="step-annotation">${step.annotations[0].annotation}</div>
+ % endif
</div><div class="toolFormBody"><%
@@ -397,7 +415,7 @@
if not type_filter:
type_filter = ['data']
%>
- ${do_inputs( module.get_runtime_inputs(type_filter), step.state.inputs, errors.get( step.id, dict() ), "", step )}
+ ${do_inputs( module.get_runtime_inputs(type_filter), step.state.inputs, errors.get( step.id, dict() ), "", step, None, used_accumulator )}
</div></div>
%endif
@@ -411,10 +429,12 @@
%endfor
</ul>
%else:
+ %if history_id is None:
<p id='new_history_p'><input type="checkbox" name='new_history' value="true" id='new_history_cbx'/><label for='new_history_cbx'>Send results to a new history </label><span id="new_history_input">named: <input type='text' name='new_history_name' value='${h.to_unicode( workflow.name )}'/></span></p>
+ %endif
<input type="submit" name="run_workflow" value="Run workflow" /></form>
%endif
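Why the template clears the accumulator with "del already_used[:]" instead of rebinding it: the same list object is threaded through every nested do_inputs()/row_for_param() call, so it must be emptied in place for all of those frames to see the reset. A minimal plain-Python illustration (hypothetical names, not Galaxy code):

    def clear_in_place( acc ):
        del acc[:]    # mutates the shared list; every holder of a reference sees it emptied

    def rebind( acc ):
        acc = []      # only rebinds the local name; the caller's list is untouched

    shared = [ 'hda_1', 'hda_2' ]
    rebind( shared )
    print( shared )           # ['hda_1', 'hda_2'] -- still full
    clear_in_place( shared )
    print( shared )           # [] -- cleared for every reference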
--- a/universe_wsgi.ini.sample Fri Oct 07 15:24:27 2011 -0400
+++ b/universe_wsgi.ini.sample Mon Oct 10 09:39:20 2011 -0400
@@ -461,6 +461,15 @@
# Enable enforcement of quotas. Quotas can be set from the Admin interface.
#enable_quotas = False
+# Enable unique default dataset selection when running workflows. When
+# enabled, default datasets for "Set at Runtime" inputs are selected from
+# the history such that the same dataset will not be selected twice, unless
+# there are more inputs than compatible datasets in the history.
+# When False, the most recently added compatible item in the history will
+# be used for each "Set at Runtime" input, independent of the others in the
+# workflow.
+#enable_unique_workflow_defaults = False
+
+
# -- Job Execution
# If running multiple Galaxy processes, one can be designated as the job
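To make the new setting concrete: when enable_unique_workflow_defaults is True, each "Set at Runtime" input consumes a default dataset from a shared already_used list; when it is False, the template clears that list after every pick, so every input defaults to the newest compatible dataset as before. A rough sketch of the selection logic, assuming a simple newest-first scan (an illustrative approximation, not Galaxy's actual get_initial_value_from_history_prevent_repeats() implementation):

    def get_initial_value_from_history_prevent_repeats( compatible_datasets, already_used ):
        # Walk the history newest-first; pick the first compatible dataset
        # not already handed out as a default for another input.
        for dataset in reversed( compatible_datasets ):
            if dataset not in already_used:
                already_used.append( dataset )
                return dataset
        # More inputs than compatible datasets: a repeat is unavoidable.
        return compatible_datasets[-1] if compatible_datasets else None

    enable_unique_defaults = False    # mirrors enable_unique_workflow_defaults
    already_used = []
    history = [ 'hda_1', 'hda_2', 'hda_3' ]
    for i in range( 2 ):
        value = get_initial_value_from_history_prevent_repeats( history, already_used )
        if not enable_unique_defaults:
            del already_used[:]       # same idiom as in the template above
        print( value )                # hda_3 twice when disabled; hda_3 then hda_2 when enabled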
1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/95a1cc87043f/
changeset: 95a1cc87043f
user: dan
date: 2011-10-06 22:30:46
summary: Add get info/parameter button to empty datasets in history item view.
affected #: 1 file (-1 bytes)
--- a/templates/root/history_common.mako Thu Oct 06 14:22:59 2011 -0400
+++ b/templates/root/history_common.mako Thu Oct 06 16:30:46 2011 -0400
@@ -263,6 +263,7 @@
<br />
%endfor
%elif for_editing:
+ <a href="${h.url_for( controller='dataset', action='show_params', dataset_id=dataset_id )}" target="galaxy_main" title="View Details" class="icon-button information tooltip"></a><a href="${h.url_for( controller='tool_runner', action='rerun', id=data.id )}" target="galaxy_main" title="Run this job again" class="icon-button arrow-circle tooltip"></a>
%endif
1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/98908b114f38/
changeset: 98908b114f38
user: dan
date: 2011-10-06 19:55:25
summary: Add test/development GenomeSpace file importer datasource tool.
affected #: 3 files (-1 bytes)
--- a/tool_conf.xml.sample Thu Oct 06 13:41:27 2011 -0400
+++ b/tool_conf.xml.sample Thu Oct 06 13:55:25 2011 -0400
@@ -26,6 +26,7 @@
<tool file="data_source/epigraph_import.xml" /><tool file="data_source/epigraph_import_test.xml" /><tool file="data_source/hbvar.xml" />
+ <tool file="data_source/genomespace_file_browser_dev.xml" /><tool file="validation/fix_errors.xml" /></section><section name="Send Data" id="send">
1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/92ea25658bd1/
changeset: 92ea25658bd1
user: natefoo
date: 2011-10-06 19:41:27
summary: A much more efficient query for calculating a history's disk usage.
affected #: 1 file (-1 bytes)
--- a/lib/galaxy/model/__init__.py Thu Oct 06 12:27:48 2011 -0400
+++ b/lib/galaxy/model/__init__.py Thu Oct 06 13:41:27 2011 -0400
@@ -17,6 +17,7 @@
from galaxy.web.form_builder import *
from galaxy.model.item_attrs import UsesAnnotations, APIItem
from sqlalchemy.orm import object_session
+from sqlalchemy.sql.expression import func
import os.path, os, errno, codecs, operator, socket, pexpect, logging, time, shutil
if sys.version_info[:2] < ( 2, 5 ):
@@ -468,7 +469,12 @@
return self.get_disk_size( nice_size=False )
def get_disk_size( self, nice_size=False ):
# unique datasets only
- rval = sum( [ d.get_total_size() for d in list( set( [ hda.dataset for hda in self.datasets if not hda.purged ] ) ) if not d.purged ] )
+ db_session = object_session( self )
+ rval = db_session.query( func.sum( db_session.query( HistoryDatasetAssociation.dataset_id, Dataset.total_size ).join( Dataset )
+ .filter( HistoryDatasetAssociation.table.c.history_id == self.id )
+ .distinct().subquery().c.total_size ) ).first()[0]
+ if rval is None:
+ rval = 0
if nice_size:
rval = galaxy.datatypes.data.nice_size( rval )
return rval
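The speedup comes from moving the "unique datasets only" de-duplication into the database: rather than instantiating every HistoryDatasetAssociation and de-duplicating Dataset objects with set() in Python, a single SELECT sums total_size over the distinct (dataset_id, total_size) pairs for the history. A self-contained sketch of the same pattern, using modern SQLAlchemy Core syntax and hypothetical stand-in tables rather than the real Galaxy models:

    from sqlalchemy import ( create_engine, MetaData, Table, Column, Integer,
                             select, func )

    engine = create_engine( "sqlite://" )
    meta = MetaData()
    # Stand-ins for Galaxy's dataset / history_dataset_association tables.
    dataset = Table( "dataset", meta,
                     Column( "id", Integer, primary_key=True ),
                     Column( "total_size", Integer ) )
    hda = Table( "history_dataset_association", meta,
                 Column( "id", Integer, primary_key=True ),
                 Column( "history_id", Integer ),
                 Column( "dataset_id", Integer ) )
    meta.create_all( engine )

    with engine.begin() as conn:
        conn.execute( dataset.insert(), [ { "id": 1, "total_size": 100 },
                                          { "id": 2, "total_size": 250 } ] )
        # Two HDAs point at dataset 1; its size must be counted only once.
        conn.execute( hda.insert(), [ { "id": 1, "history_id": 7, "dataset_id": 1 },
                                      { "id": 2, "history_id": 7, "dataset_id": 1 },
                                      { "id": 3, "history_id": 7, "dataset_id": 2 } ] )
        subq = ( select( hda.c.dataset_id, dataset.c.total_size )
                 .join_from( hda, dataset, hda.c.dataset_id == dataset.c.id )
                 .where( hda.c.history_id == 7 )
                 .distinct()
                 .subquery() )
        size = conn.execute( select( func.sum( subq.c.total_size ) ) ).scalar() or 0
        print( size )    # 350, not 450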