# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Dannon Baker <dannonbaker@me.com>
# Date 1288647270 14400
# Node ID c4d8ffb3109e8cfb8143dc87c88dee3337bb7569
# Parent 9285b39ae2f3dfd5017808b4d9b657094fec802b
Increment js version to get around cached editors.
--- a/lib/galaxy/web/framework/helpers/__init__.py
+++ b/lib/galaxy/web/framework/helpers/__init__.py
@@ -44,7 +44,7 @@ def js( *args ):
     TODO: This has a hardcoded "?v=X" to defeat caching. This should be done
     in a better way.
     """
-    return "\n".join( [ javascript_include_tag( "/static/scripts/" + name + ".js?v=7" ) for name in args ] )
+    return "\n".join( [ javascript_include_tag( "/static/scripts/" + name + ".js?v=8" ) for name in args ] )
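
The TODO above notes that the hardcoded "?v=X" is a stopgap. A minimal, self-contained sketch of a less brittle approach, keeping the version string in one place (the STATIC_VERSION constant and the inline javascript_include_tag stub are assumptions for illustration, not Galaxy's actual API):

    # Sketch: take the cache-busting token from a single constant (or a
    # config value / build stamp) instead of editing the helper every time.
    STATIC_VERSION = "8"

    def javascript_include_tag( url ):
        # Stand-in for the tag builder the real helper uses.
        return '<script type="text/javascript" src="%s"></script>' % url

    def js( *args ):
        return "\n".join( javascript_include_tag( "/static/scripts/%s.js?v=%s" % ( name, STATIC_VERSION ) )
                          for name in args )

    print( js( "jquery", "galaxy.base" ) )
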
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Dannon Baker <dannonbaker@me.com>
# Date 1288377653 14400
# Node ID 9f74170b7781122577b8cf7ffe38f5c986617844
# Parent a2e4d36c4318bc4ce73af9df869ae4071af33d55
Fix for multiple hide dataset actions when using workflow outputs.
--- a/lib/galaxy/web/controllers/workflow.py
+++ b/lib/galaxy/web/controllers/workflow.py
@@ -1299,6 +1299,11 @@ class WorkflowController( BaseController
                     step_outputs = [s.output_name for s in step.workflow_outputs]
                     for output in tool.outputs.keys():
                         if output not in step_outputs:
+                            # Necessary, unfortunately, to clean up workflows that might have more than one HideDatasetAction at this point.
+                            for pja in step.post_job_actions:
+                                if pja.action_type == "HideDatasetAction" and pja.output_name == output:
+                                    step.post_job_actions.remove(pja)
+                                    trans.sa_session.delete(pja)
                             # Create a PJA for hiding this output.
                             n_pja = PostJobAction('HideDatasetAction', step, output, {})
                         else:
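
The cleanup loop above removes stray HideDatasetActions before creating a new one. A minimal sketch of the same idempotent idea, with plain dicts standing in for PostJobAction rows (note that it rebuilds the list rather than removing entries mid-iteration, which would skip elements):

    # Sketch: ensure exactly one HideDatasetAction per hidden output.
    def ensure_single_hide_action( actions, output_name ):
        # Keep everything except existing hide actions for this output...
        kept = [ a for a in actions
                 if not ( a[ "action_type" ] == "HideDatasetAction"
                          and a[ "output_name" ] == output_name ) ]
        # ...then add back exactly one.
        kept.append( { "action_type": "HideDatasetAction",
                       "output_name": output_name,
                       "action_arguments": {} } )
        return kept

    actions = [ { "action_type": "HideDatasetAction",
                  "output_name": "out1", "action_arguments": {} } ] * 2
    print( len( ensure_single_hide_action( actions, "out1" ) ) )  # 1
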
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Nate Coraor <nate@bx.psu.edu>
# Date 1288379862 14400
# Node ID 0518ce52d8d3ca15ec6070f2addb4bfe7608c23b
# Parent a22917c97fbe89aa2c0393d30f51d2a2183fd2f8
Allow jobs to be stopped when using track_jobs_in_database/enable_job_running (multiprocess config).
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -813,6 +813,8 @@ class JobStopQueue( object ):
         self.sa_session = app.model.context
         self.dispatcher = dispatcher
 
+        self.track_jobs_in_database = app.config.get_bool( 'track_jobs_in_database', False )
+
         # Keep track of the pid that started the job manager, only it
         # has valid threads
         self.parent_pid = os.getpid()
@@ -848,21 +850,29 @@ class JobStopQueue( object ):
         Called repeatedly by `monitor` to stop jobs.
         """
         # Pull all new jobs from the queue at once
-        jobs = []
-        try:
-            while 1:
-                ( job_id, error_msg ) = self.queue.get_nowait()
-                if job_id is self.STOP_SIGNAL:
-                    return
-                # Append to watch queue
-                jobs.append( ( job_id, error_msg ) )
-        except Empty:
-            pass
-
-        for job_id, error_msg in jobs:
-            job = self.sa_session.query( model.Job ).get( job_id )
-            self.sa_session.refresh( job )
-            # if desired, error the job so we can inform the user.
+        jobs_to_check = []
+        if self.track_jobs_in_database:
+            # Clear the session so we get fresh states for job and all datasets
+            self.sa_session.expunge_all()
+            # Fetch all new jobs
+            newly_deleted_jobs = self.sa_session.query( model.Job ) \
+                                     .options( lazyload( "external_output_metadata" ), lazyload( "parameters" ) ) \
+                                     .filter( model.Job.state == model.Job.states.DELETED_NEW ).all()
+            for job in newly_deleted_jobs:
+                jobs_to_check.append( ( job, None ) )
+        else:
+            try:
+                while 1:
+                    message = self.queue.get_nowait()
+                    if message is self.STOP_SIGNAL:
+                        return
+                    # Unpack the message
+                    job_id, error_msg = message
+                    # Get the job object and append to watch queue
+                    jobs_to_check.append( ( self.sa_session.query( model.Job ).get( job_id ), error_msg ) )
+            except Empty:
+                pass
+        for job, error_msg in jobs_to_check:
             if error_msg is not None:
                 job.state = job.states.ERROR
                 job.info = error_msg
@@ -870,9 +880,6 @@ class JobStopQueue( object ):
                 job.state = job.states.DELETED
             self.sa_session.add( job )
             self.sa_session.flush()
-            # if job is in JobQueue or FooJobRunner's put method,
-            # job_runner_name will be unset and the job will be dequeued due to
-            # state change above
             if job.job_runner_name is not None:
                 # tell the dispatcher to stop the job
                 self.dispatcher.stop( job )
@@ -888,7 +895,8 @@ class JobStopQueue( object ):
         else:
             log.info( "sending stop signal to worker thread" )
             self.running = False
-            self.queue.put( ( self.STOP_SIGNAL, None ) )
+            if not self.track_jobs_in_database:
+                self.queue.put( self.STOP_SIGNAL )
             self.sleeper.wake()
             log.info( "job stopper stopped" )
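
The hunk above gives the stop queue two ways to learn about deleted jobs. A simplified, runnable sketch of that control flow, with stand-in functions rather than Galaxy's classes (Python 2 era, matching the codebase):

    # Sketch: with track_jobs_in_database, the DELETED_NEW state in the
    # database is the message; otherwise messages arrive on an in-memory
    # queue, which a separate web process in a multiprocess config could
    # never reach.
    from Queue import Queue, Empty

    STOP_SIGNAL = object()

    def gather_jobs_to_stop( track_jobs_in_database, queue, query_deleted_new ):
        jobs_to_check = []
        if track_jobs_in_database:
            # Poll the database for jobs marked DELETED_NEW.
            for job in query_deleted_new():
                jobs_to_check.append( ( job, None ) )
        else:
            # Drain the in-memory queue.
            try:
                while 1:
                    message = queue.get_nowait()
                    if message is STOP_SIGNAL:
                        return None
                    jobs_to_check.append( message )
            except Empty:
                pass
        return jobs_to_check

    q = Queue()
    q.put( ( 42, None ) )
    print( gather_jobs_to_stop( False, q, lambda: [] ) )  # [(42, None)]
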
--- a/lib/galaxy/web/controllers/root.py
+++ b/lib/galaxy/web/controllers/root.py
@@ -432,7 +432,8 @@ class RootController( BaseController, Us
             if job.state in [ self.app.model.Job.states.QUEUED, self.app.model.Job.states.RUNNING, self.app.model.Job.states.NEW ]:
                 # Are *all* of the job's other output datasets deleted?
                 if job.check_if_output_datasets_deleted():
-                    job.mark_deleted()
+                    job.mark_deleted( self.app.config.get_bool( 'enable_job_running', True ),
+                                      self.app.config.get_bool( 'track_jobs_in_database', False ) )
                     self.app.job_manager.job_stop_queue.put( job.id )
                     trans.sa_session.flush()
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -97,7 +97,8 @@ class Job( object ):
                    RUNNING = 'running',
                    OK = 'ok',
                    ERROR = 'error',
-                   DELETED = 'deleted' )
+                   DELETED = 'deleted',
+                   DELETED_NEW = 'deleted_new' )
    def __init__( self ):
        self.session_id = None
        self.user_id = None
@@ -152,11 +153,17 @@ class Job( object ):
            if not dataset.deleted:
                return False
        return True
-    def mark_deleted( self ):
+    def mark_deleted( self, enable_job_running=True, track_jobs_in_database=False ):
        """
        Mark this job as deleted, and mark any output datasets as discarded.
        """
-        self.state = Job.states.DELETED
+        # This could be handled with *just* track_jobs_in_database, but I
+        # didn't want to make setting track_jobs_in_database required in
+        # non-runner configs.
+        if not enable_job_running or track_jobs_in_database:
+            self.state = Job.states.DELETED_NEW
+        else:
+            self.state = Job.states.DELETED
        self.info = "Job output deleted by user before job completed."
        for dataset_assoc in self.output_datasets:
            dataset = dataset_assoc.dataset
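
mark_deleted() now chooses between two terminal states based on configuration. A one-function sketch of that decision, with plain strings standing in for Job.states:

    # Sketch: DELETED_NEW defers the actual stop to the job stop queue's
    # monitor, which is needed when the web process isn't running jobs
    # itself (enable_job_running off) or when jobs are tracked in the DB.
    def pick_deleted_state( enable_job_running=True, track_jobs_in_database=False ):
        if not enable_job_running or track_jobs_in_database:
            return "deleted_new"
        return "deleted"

    print( pick_deleted_state( True, True ) )   # deleted_new
    print( pick_deleted_state( True, False ) )  # deleted
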
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Daniel Blankenberg <dan@bx.psu.edu>
# Date 1288640846 14400
# Node ID 1efe19f6f3d1a75bda9c198633dd8102d3063410
# Parent 2c934a168af968dc84300412220c2f2cb74bf676
Fixes for the rerun action to recurse through grouping options when checking unvalidated values and cloned HDAs. Better selection of corresponding HDAs from cloned histories when multiple copies exist.
--- a/lib/galaxy/web/controllers/tool_runner.py
+++ b/lib/galaxy/web/controllers/tool_runner.py
@@ -123,16 +123,6 @@ class ToolRunner( BaseController ):
                 params_objects = job.get_param_values( trans.app )
             except:
                 raise Exception( "Failed to get parameters for dataset id %d " % data.id )
-            # Unpack unvalidated values to strings, they'll be validated when the
-            # form is submitted (this happens when re-running a job that was
-            # initially run by a workflow)
-            validated_params = {}
-            for name, value in params_objects.items():
-                if isinstance( value, UnvalidatedValue ):
-                    validated_params [ str(name) ] = str(value)
-                else:
-                    validated_params [ str(name) ] = value
-            params_objects = validated_params
             # Need to remap dataset parameters. Job parameters point to original
             # dataset used; parameter should be the analogous dataset in the
             # current history.
@@ -141,12 +131,22 @@ class ToolRunner( BaseController ):
             for hda in history.datasets:
                 source_hda = hda.copied_from_history_dataset_association
                 while source_hda: #should this check library datasets as well?
-                    hda_source_dict[ source_hda ] = hda
+                    # FIXME: there can be multiple copies of an HDA in a single history; this does a better job of matching on cloned histories,
+                    # but is still less than perfect when, e.g., individual datasets are copied between histories.
+                    if source_hda not in hda_source_dict or source_hda.hid == hda.hid:
+                        hda_source_dict[ source_hda ] = hda
                     source_hda = source_hda.copied_from_history_dataset_association
-            for name, value in validated_params.items():
-                if isinstance( value, trans.app.model.HistoryDatasetAssociation ):
-                    if value not in history.datasets:
-                        validated_params[ name ] = hda_source_dict[ value ]
+            # Unpack unvalidated values to strings, they'll be validated when the
+            # form is submitted (this happens when re-running a job that was
+            # initially run by a workflow).
+            # This needs to be done recursively through grouping parameters.
+            def rerun_callback( input, value, prefixed_name, prefixed_label ):
+                if isinstance( value, UnvalidatedValue ):
+                    return str( value )
+                if isinstance( input, DataToolParameter ):
+                    if value not in history.datasets and value in hda_source_dict:
+                        return hda_source_dict[ value ]
+            visit_input_values( tool.inputs, params_objects, rerun_callback )
             # Create a fake tool_state for the tool, with the parameters values
             state = tool.new_state( trans )
             state.inputs = params_objects
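
The fix replaces a flat loop over params_objects with Galaxy's visit_input_values(), which walks the tool's input tree (including Repeat and Conditional groupings) and lets the callback substitute values. A minimal sketch of that visitor idea, with nested dicts and lists standing in for grouping parameters (not Galaxy's actual implementation):

    # Sketch: recurse through nested parameter structures, letting a callback
    # replace leaf values (e.g. unwrap UnvalidatedValue, remap HDAs).
    def visit_values( values, callback ):
        for name, value in values.items():
            if isinstance( value, dict ):
                visit_values( value, callback )      # a Conditional-like grouping
            elif isinstance( value, list ):
                for item in value:
                    visit_values( item, callback )   # a Repeat-like grouping's instances
            else:
                replacement = callback( name, value )
                if replacement is not None:
                    values[ name ] = replacement

    params = { "a": 1, "cond": { "b": 2, "rep": [ { "c": 3 } ] } }
    visit_values( params, lambda name, value: value * 10 )
    print( params )  # {'a': 10, 'cond': {'b': 20, 'rep': [{'c': 30}]}}
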
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Dannon Baker <dannonbaker@me.com>
# Date 1288645730 14400
# Node ID cf7ec71c561323940ce95c8148d2008508920092
# Parent 1efe19f6f3d1a75bda9c198633dd8102d3063410
Shift management of the interaction between workflow outputs and HideDatasetActions to the front-end editor.
This resolves the issue with multiple HideDatasetActions being created.
Workflows that already have multiple HideDatasetActions per step will still show them on the Run Workflow screen.
These extra HideDatasetActions are harmless, and a simple edit workflow -> save will remove them.
--- a/lib/galaxy/web/controllers/workflow.py
+++ b/lib/galaxy/web/controllers/workflow.py
@@ -1270,13 +1270,6 @@ class WorkflowController( BaseController
         workflow_invocation = model.WorkflowInvocation()
         workflow_invocation.workflow = workflow
         outputs = odict()
-        # Find out if there are any workflow outputs defined, as that influences our actions.
-        use_workflow_outputs = False
-        for step in workflow.steps:
-            if step.type == 'tool' or step.type is None:
-                if step.workflow_outputs:
-                    use_workflow_outputs = True
-                    break
         for i, step in enumerate( workflow.steps ):
             # Execute module
             job = None
@@ -1294,24 +1287,6 @@ class WorkflowController( BaseController
                 job, out_data = tool.execute( trans, step.state.inputs )
                 outputs[ step.id ] = out_data
                 # Create new PJA associations with the created job, to be run on completion.
-                if use_workflow_outputs:
-                    # We're using outputs. Check the step for outputs to be displayed. Create PJAs to hide the rest upon completion.
-                    step_outputs = [s.output_name for s in step.workflow_outputs]
-                    for output in tool.outputs.keys():
-                        if output not in step_outputs:
-                            # Necessary, unfortunately, to clean up workflows that might have more than one HideDatasetAction at this point.
-                            for pja in step.post_job_actions:
-                                if pja.action_type == "HideDatasetAction" and pja.output_name == output:
-                                    step.post_job_actions.remove(pja)
-                                    trans.sa_session.delete(pja)
-                            # Create a PJA for hiding this output.
-                            n_pja = PostJobAction('HideDatasetAction', step, output, {})
-                        else:
-                            # Remove any HideDatasetActions, step is flagged for output.
-                            for pja in step.post_job_actions:
-                                if pja.action_type == "HideDatasetAction" and pja.output_name == output:
-                                    step.post_job_actions.remove(pja)
-                                    trans.sa_session.delete(pja)
                 for pja in step.post_job_actions:
                     if pja.action_type in ActionBox.immediate_actions:
                         ActionBox.execute(trans.app, trans.sa_session, pja, job)
--- a/static/scripts/galaxy.workflow_editor.canvas.js
+++ b/static/scripts/galaxy.workflow_editor.canvas.js
@@ -475,6 +475,61 @@ function Workflow( canvas_container ) {
                 wf.remove_node( v );
             });
         },
+        rectify_workflow_outputs : function() {
+            // Find out if we're using workflow_outputs or not.
+            var using_workflow_outputs = false;
+            $.each( this.nodes, function ( k, node ) {
+                if (node.workflow_outputs && node.workflow_outputs.length > 0){
+                    using_workflow_outputs = true;
+                }
+            });
+            if (using_workflow_outputs == false){
+                // We're done, leave PJAs alone.
+                return true;
+            }
+            var wf = this;
+            $.each(this.nodes, function (k, node ){
+                if (node.type == 'tool'){
+                    var node_changed = false;
+                    if (node.post_job_actions == null){
+                        node.post_job_actions = {};
+                    }
+                    var pjas_to_rem = [];
+                    $.each(node.post_job_actions, function(pja_id, pja){
+                        if (pja.action_type == "HideDatasetAction"){
+                            pjas_to_rem.push(pja_id);
+                        }
+                    });
+                    if (pjas_to_rem.length > 0 && node == workflow.active_node){
+                        $.each(pjas_to_rem, function(i, pja_name){
+                            node_changed = true;
+                            delete node.post_job_actions[pja_name];
+                        });
+                    }
+                    $.each(node.output_terminals, function(ot_id, ot){
+                        var create_pja = true;
+                        $.each(node.workflow_outputs, function(i, wo_name){
+                            if (ot.name == wo_name){
+                                create_pja = false;
+                            }
+                        });
+                        if (create_pja == true){
+                            node_changed = true;
+                            var pja = {
+                                action_type : "HideDatasetAction",
+                                output_name : ot.name,
+                                action_arguments : {}
+                            };
+                            node.post_job_actions['HideDatasetAction'+ot.name] = pja;
+                        }
+                    });
+                    // Lastly, if this is the active node and we made changes, reload the display at right.
+                    if (wf.active_node == node && node_changed == true) {
+                        wf.reload_active_node();
+                    }
+                }
+            });
+        },
to_simple : function () {
var nodes = {};
$.each( this.nodes, function ( i, node ) {
@@ -491,7 +546,6 @@ function Workflow( canvas_container ) {
             if (node.post_job_actions){
                 $.each( node.post_job_actions, function ( i, act ) {
                     var pja = {
-                        job_id : act.id,
                         action_type : act.action_type,
                         output_name : act.output_name,
                         action_arguments : act.action_arguments
@@ -559,6 +613,12 @@ function Workflow( canvas_container ) {
                 this.active_form_has_changes = false;
             }
         },
+        reload_active_node : function() {
+            // clear_active_node() nulls out active_node, so save a reference first.
+            var node = this.active_node;
+            this.clear_active_node();
+            this.activate_node(node);
+        },
         clear_active_node : function() {
             if ( this.active_node ) {
                 this.active_node.make_inactive();
--- a/templates/workflow/editor.mako
+++ b/templates/workflow/editor.mako
@@ -627,6 +627,7 @@
                     }
                     return;
                 }
+                workflow.rectify_workflow_outputs();
                 var savefn = function(callback) {
                     $.ajax( {
                         url: "${h.url_for( action='save_workflow' )}",
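
rectify_workflow_outputs() enforces one invariant at save time: once any node declares a workflow output, every non-output terminal of every tool node carries exactly one HideDatasetAction, and declared outputs carry none. A simplified Python sketch of that invariant (plain dicts stand in for editor nodes; this sketch applies the cleanup to every node, while the JS above restricts removal to the active node):

    # Sketch: rebuild HideDatasetActions from the declared workflow outputs.
    def rectify( nodes ):
        if not any( n.get( "workflow_outputs" ) for n in nodes ):
            return  # no outputs declared anywhere; leave PJAs alone
        for node in nodes:
            if node.get( "type" ) != "tool":
                continue
            pjas = node.setdefault( "post_job_actions", {} )
            # Drop existing hide actions, then re-add one per non-output terminal.
            for key in [ k for k, v in pjas.items()
                         if v[ "action_type" ] == "HideDatasetAction" ]:
                del pjas[ key ]
            for terminal in node.get( "output_terminals", [] ):
                if terminal not in node.get( "workflow_outputs", [] ):
                    pjas[ "HideDatasetAction" + terminal ] = {
                        "action_type": "HideDatasetAction",
                        "output_name": terminal,
                        "action_arguments": {},
                    }

    node = { "type": "tool", "output_terminals": [ "out1", "out2" ],
             "workflow_outputs": [ "out1" ] }
    rectify( [ node ] )
    print( sorted( node[ "post_job_actions" ] ) )  # ['HideDatasetActionout2']
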
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Nate Coraor <nate@bx.psu.edu>
# Date 1288376593 14400
# Node ID cbb895e2b2729d324c1693882de7939360e683a4
# Parent 23844e86e6167c79737a7e0396c39e350e95d1c4
If a job's input fails to set metadata, fail the job rather than leaving it in the 'new' state indefinitely.
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -241,6 +241,9 @@ class JobQueue( object ):
             elif idata.state == idata.states.ERROR:
                 JobWrapper( job, self ).fail( "input data %d is in error state" % ( idata.hid ) )
                 return JOB_INPUT_ERROR
+            elif idata.state == idata.states.FAILED_METADATA:
+                JobWrapper( job, self ).fail( "input data %d failed to properly set metadata" % ( idata.hid ) )
+                return JOB_INPUT_ERROR
             elif idata.state != idata.states.OK and not ( idata.state == idata.states.SETTING_METADATA and job.tool_id is not None and job.tool_id == self.app.datatypes_registry.set_external_metadata_tool.id ):
                 # need to requeue
                 return JOB_WAIT
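
The new FAILED_METADATA branch sits between the existing ERROR check and the catch-all wait. A compressed sketch of the dispatch order (string constants stand in for the states Bunch, and the SETTING_METADATA special case is reduced to a flag):

    # Sketch: decide what the queue does with a job based on one input's state.
    JOB_WAIT, JOB_INPUT_ERROR, JOB_READY = 'wait', 'input_error', 'ready'

    def check_input( state, is_metadata_tool=False ):
        if state == 'error':
            return JOB_INPUT_ERROR   # input dataset itself errored
        elif state == 'failed_metadata':
            return JOB_INPUT_ERROR   # new: fail instead of waiting forever
        elif state != 'ok' and not ( state == 'setting_metadata' and is_metadata_tool ):
            return JOB_WAIT          # input still being produced; requeue
        return JOB_READY

    print( check_input( 'failed_metadata' ) )  # input_error
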