1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/ef89e4d62007/
changeset: ef89e4d62007
user: dan
date: 2011-09-23 16:38:27
summary: Fix for maf_utilities.build_maf_index_species_chromosomes() return values when a bad MAF is provided.
affected #: 1 file (-1 bytes)
--- a/lib/galaxy/tools/util/maf_utilities.py Fri Sep 23 10:07:19 2011 -0400
+++ b/lib/galaxy/tools/util/maf_utilities.py Fri Sep 23 10:38:27 2011 -0400
@@ -227,7 +227,7 @@
except Exception, e:
#most likely a bad MAF
log.debug( 'Building MAF index on %s failed: %s' % ( filename, e ) )
- return ( None, [], {} )
+ return ( None, [], {}, 0 )
return ( indexes, species, species_chromosomes, blocks )
#builds and returns ( index, index_filename ) for specified maf_file
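Note on the fix above: the success path returns the 4-tuple ( indexes, species, species_chromosomes, blocks ), so any caller unpacking four values would raise a ValueError when the old 3-item error value came back. A minimal sketch of the failure mode, with a hypothetical caller rather than the real Galaxy call site:

    def build_maf_index_species_chromosomes( filename ):
        # Simplified stand-in for the maf_utilities function: a bad MAF takes the error branch.
        return ( None, [], {} )    # old error value: only three items

    # A caller that unpacks the documented 4-tuple fails on the old error value:
    indexes, species, species_chromosomes, blocks = build_maf_index_species_chromosomes( 'bad.maf' )
    # ValueError: need more than 3 values to unpack -- returning ( None, [], {}, 0 ) fixes it.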
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/898f4f57223d/
changeset: 898f4f57223d
user: greg
date: 2011-09-23 16:01:18
summary: Add 2 new attributes to dynamically generated select lists: "missing_tool_data_table_name" will include a value (e.g., tmap_indexes) if the select list requires an entry that is missing from the tool_data_table_conf.xml file, and "missing_index_file" will include a value (e.g., tmap_indexes.loc) if the select list requires an index file that is missing. Tools that previously failed to load when their index files were missing will now load, although they will not execute properly. The remaining work is to check these new "missing" attributes and display a message on the tool form - not quite sure whether this should be a validator or not...
Define a list in the Tool class to keep track of all input parameters (tool.input_params). These differ from the inputs dictionary (tool.inputs) in that inputs can be page elements like conditionals, but input_params are basic parameters like SelectField objects. This enables us to more easily ensure that parameter dependencies like index files or tool_data_table_conf.xml entries exist.
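A hedged sketch of the remaining check described above, using the flat tool.input_params list together with the two new "missing" attributes. The helper name is hypothetical, and it assumes dynamic select parameters expose their DynamicOptions instance as .options:

    def collect_missing_dependency_messages( tool ):
        """Hypothetical helper: report missing index files / data table entries for a tool."""
        messages = []
        for param in tool.input_params:                  # flat list, unlike the nested tool.inputs
            options = getattr( param, 'options', None )  # DynamicOptions on dynamic select lists
            if options is None:
                continue
            if getattr( options, 'missing_tool_data_table_name', None ):
                messages.append( "Data table '%s' is not configured in tool_data_table_conf.xml"
                                 % options.missing_tool_data_table_name )
            if getattr( options, 'missing_index_file', None ):
                messages.append( "Index file '%s' is missing" % options.missing_index_file )
        return messages    # e.g. rendered as a warning on the tool form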
Add the ability to append new entries into the tool_data_table_conf.xml file in real time and add the same entries into the in-memory tool_data_tables dictionary.
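Based on the ToolDataTableManager changes in the diff below, appending entries at runtime would look roughly like this; the extra config file name is made up for illustration:

    from galaxy.tools.data import ToolDataTableManager

    # Hypothetical usage of the new add_new_entries_from_config_file() method.
    manager = ToolDataTableManager( 'tool_data_table_conf.xml' )
    new_elems = manager.add_new_entries_from_config_file( 'extra_tool_data_table_conf.xml' )
    for table_elem in new_elems:
        print "registered tool data table '%s'" % table_elem.get( 'name' )
    # Tables whose name already exists in manager.data_tables are left alone; new ones are
    # loaded, and missing_index_file records any .loc file that cannot be found.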
affected #: 4 files (-1 bytes)
--- a/lib/galaxy/tools/__init__.py Fri Sep 23 09:36:57 2011 -0400
+++ b/lib/galaxy/tools/__init__.py Fri Sep 23 10:01:18 2011 -0400
@@ -360,13 +360,18 @@
tool_type = 'default'
def __init__( self, config_file, root, app, guid=None ):
- """
- Load a tool from the config named by `config_file`
- """
+ """Load a tool from the config named by `config_file`"""
# Determine the full path of the directory where the tool config is
self.config_file = config_file
self.tool_dir = os.path.dirname( config_file )
self.app = app
+ # Define a place to keep track of all input parameters. These
+ # differ from the inputs dictionary in that inputs can be page
+ # elements like conditionals, but input_params are basic form
+ # parameters like SelectField objects. This enables us to more
+ # easily ensure that parameter dependencies like index files or
+ # tool_data_table_conf.xml entries exist.
+ self.input_params = []
# Parse XML element containing configuration
self.parse( root, guid=guid )
@@ -698,7 +703,6 @@
name = attrib.pop( 'name', None )
if name is None:
raise Exception( "Test output does not have a 'name'" )
-
assert_elem = output_elem.find("assert_contents")
assert_list = None
# Trying to keep testing patch as localized as
@@ -713,13 +717,10 @@
for child_elem in child_elems:
converted_children.append( convert_elem(child_elem) )
return {"tag" : tag, "attributes" : attributes, "children" : converted_children}
-
if assert_elem is not None:
assert_list = []
for assert_child in list(assert_elem):
assert_list.append(convert_elem(assert_child))
-
-
file = attrib.pop( 'file', None )
# File no longer required if an list of assertions was present.
if assert_list is None and file is None:
@@ -734,8 +735,6 @@
attributes['sort'] = util.string_as_bool( attrib.pop( 'sort', False ) )
attributes['extra_files'] = []
attributes['assert_list'] = assert_list
-
-
if 'ftype' in attrib:
attributes['ftype'] = attrib['ftype']
for extra in output_elem.findall( 'extra_files' ):
@@ -862,6 +861,7 @@
rval[param.name] = param
if hasattr( param, 'data_ref' ):
param.ref_input = context[ param.data_ref ]
+ self.input_params.append( param )
return rval
def parse_param_elem( self, input_elem, enctypes, context ):
--- a/lib/galaxy/tools/data/__init__.py Fri Sep 23 09:36:57 2011 -0400
+++ b/lib/galaxy/tools/data/__init__.py Fri Sep 23 10:01:18 2011 -0400
@@ -12,34 +12,64 @@
log = logging.getLogger( __name__ )
class ToolDataTableManager( object ):
- """
- Manages a collection of tool data tables
- """
-
+ """Manages a collection of tool data tables"""
def __init__( self, config_filename=None ):
self.data_tables = {}
if config_filename:
- self.add_from_config_file( config_filename )
-
+ self.load_from_config_file( config_filename )
def __getitem__( self, key ):
return self.data_tables.__getitem__( key )
-
def __contains__( self, key ):
return self.data_tables.__contains__( key )
-
- def add_from_config_file( self, config_filename ):
+ def load_from_config_file( self, config_filename ):
tree = util.parse_xml( config_filename )
root = tree.getroot()
+ table_elems = []
for table_elem in root.findall( 'table' ):
type = table_elem.get( 'type', 'tabular' )
assert type in tool_data_table_types, "Unknown data table type '%s'" % type
+ table_elems.append( table_elem )
table = tool_data_table_types[ type ]( table_elem )
- self.data_tables[ table.name ] = table
- log.debug( "Loaded tool data table '%s", table.name )
+ if table.name not in self.data_tables:
+ self.data_tables[ table.name ] = table
+ log.debug( "Loaded tool data table '%s", table.name )
+ return table_elems
+ def add_new_entries_from_config_file( self, config_filename ):
+ """
+ We have 2 cases to handle, files whose root tag is <tables>, for example:
+ <tables>
+ <!-- Location of Tmap files -->
+ <table name="tmap_indexes" comment_char="#">
+ <columns>value, dbkey, name, path</columns>
+ <file path="tool-data/tmap_index.loc" />
+ </table>
+ </tables>
+ and files whose root tag is <table>, for example:
+ <!-- Location of Tmap files -->
+ <table name="tmap_indexes" comment_char="#">
+ <columns>value, dbkey, name, path</columns>
+ <file path="tool-data/tmap_index.loc" />
+ </table>
+ """
+ tree = util.parse_xml( config_filename )
+ root = tree.getroot()
+ if root.tag == 'tables':
+ table_elems = self.load_from_config_file( config_filename )
+ else:
+ table_elems = []
+ type = root.get( 'type', 'tabular' )
+ assert type in tool_data_table_types, "Unknown data table type '%s'" % type
+ table_elems.append( root )
+ table = tool_data_table_types[ type ]( root )
+ if table.name not in self.data_tables:
+ self.data_tables[ table.name ] = table
+ log.debug( "Loaded tool data table '%s", table.name )
+ return table_elems
class ToolDataTable( object ):
def __init__( self, config_element ):
self.name = config_element.get( 'name' )
+ self.missing_index_file = None
class TabularToolDataTable( ToolDataTable ):
"""
@@ -58,7 +88,6 @@
def __init__( self, config_element ):
super( TabularToolDataTable, self ).__init__( config_element )
self.configure_and_load( config_element )
-
def configure_and_load( self, config_element ):
"""
Configure and load table from an XML element.
@@ -71,15 +100,14 @@
all_rows = []
for file_element in config_element.findall( 'file' ):
filename = file_element.get( 'path' )
- if not os.path.exists( filename ):
+ if os.path.exists( filename ):
+ all_rows.extend( self.parse_file_fields( open( filename ) ) )
+ else:
+ self.missing_index_file = filename
log.warn( "Cannot find index file '%s' for tool data table '%s'" % ( filename, self.name ) )
- else:
- all_rows.extend( self.parse_file_fields( open( filename ) ) )
self.data = all_rows
-
def get_fields( self ):
return self.data
-
def parse_column_spec( self, config_element ):
"""
Parse column definitions, which can either be a set of 'column' elements
@@ -109,7 +137,6 @@
assert 'value' in self.columns, "Required 'value' column missing from column def"
if 'name' not in self.columns:
self.columns['name'] = self.columns['value']
-
def parse_file_fields( self, reader ):
"""
Parse separated lines from file and return a list of tuples.
--- a/lib/galaxy/tools/parameters/dynamic_options.py Fri Sep 23 09:36:57 2011 -0400
+++ b/lib/galaxy/tools/parameters/dynamic_options.py Fri Sep 23 10:01:18 2011 -0400
@@ -399,23 +399,29 @@
self.separator = elem.get( 'separator', '\t' )
self.line_startswith = elem.get( 'startswith', None )
data_file = elem.get( 'from_file', None )
+ self.missing_index_file = None
dataset_file = elem.get( 'from_dataset', None )
from_parameter = elem.get( 'from_parameter', None )
tool_data_table_name = elem.get( 'from_data_table', None )
-
# Options are defined from a data table loaded by the app
self.tool_data_table = None
+ self.missing_tool_data_table_name = None
if tool_data_table_name:
app = tool_param.tool.app
- assert tool_data_table_name in app.tool_data_tables, \
- "Data table named '%s' is required by tool but not configured" % tool_data_table_name
- self.tool_data_table = app.tool_data_tables[ tool_data_table_name ]
- # Column definitions are optional, but if provided override those from the table
- if elem.find( "column" ) is not None:
- self.parse_column_definitions( elem )
+ if tool_data_table_name in app.tool_data_tables:
+ self.tool_data_table = app.tool_data_tables[ tool_data_table_name ]
+ # Set self.missing_index_file if the index file to
+ # which the tool_data_table refers does not exist.
+ if self.tool_data_table.missing_index_file:
+ self.missing_index_file = self.tool_data_table.missing_index_file
+ # Column definitions are optional, but if provided override those from the table
+ if elem.find( "column" ) is not None:
+ self.parse_column_definitions( elem )
+ else:
+ self.columns = self.tool_data_table.columns
else:
- self.columns = self.tool_data_table.columns
-
+ self.missing_tool_data_table_name = tool_data_table_name
+ log.warn( "Data table named '%s' is required by tool but not configured" % tool_data_table_name )
# Options are defined by parsing tabular text data from an data file
# on disk, a dataset, or the value of another parameter
elif data_file is not None or dataset_file is not None or from_parameter is not None:
@@ -423,8 +429,11 @@
if data_file is not None:
data_file = data_file.strip()
if not os.path.isabs( data_file ):
- data_file = os.path.join( self.tool_param.tool.app.config.tool_data_path, data_file )
- self.file_fields = self.parse_file_fields( open( data_file ) )
+ full_path = os.path.join( self.tool_param.tool.app.config.tool_data_path, data_file )
+ if os.path.exists( full_path ):
+ self.file_fields = self.parse_file_fields( open( full_path ) )
+ else:
+ self.missing_index_file = data_file
elif dataset_file is not None:
self.dataset_ref_name = dataset_file
self.has_dataset_dependencies = True
--- a/lib/galaxy/tools/parameters/output.py Fri Sep 23 09:36:57 2011 -0400
+++ b/lib/galaxy/tools/parameters/output.py Fri Sep 23 10:01:18 2011 -0400
@@ -206,13 +206,16 @@
super( FromDataTableOutputActionOption, self ).__init__( parent, elem )
self.name = elem.get( 'name', None )
assert self.name is not None, "Required 'name' attribute missing from FromDataTableOutputActionOption"
- assert self.name in self.tool.app.tool_data_tables, "Data table named '%s' is required by tool but not configured" % self.name
- self.options = self.tool.app.tool_data_tables[ self.name ].get_fields()
- self.column = elem.get( 'column', None )
- assert self.column is not None, "Required 'column' attribute missing from FromDataTableOutputActionOption"
- self.column = int( self.column )
- self.offset = elem.get( 'offset', -1 )
- self.offset = int( self.offset )
+ self.missing_tool_data_table_name = None
+ if self.name in self.tool.app.tool_data_tables:
+ self.options = self.tool.app.tool_data_tables[ self.name ].get_fields()
+ self.column = elem.get( 'column', None )
+ assert self.column is not None, "Required 'column' attribute missing from FromDataTableOutputActionOption"
+ self.column = int( self.column )
+ self.offset = elem.get( 'offset', -1 )
+ self.offset = int( self.offset )
+ else:
+ self.missing_tool_data_table_name = self.name
def get_value( self, other_values ):
options = self.options
for filter in self.filters:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/3f926d934d98/
changeset: 3f926d934d98
user: dannon
date: 2011-09-22 17:36:11
summary: Fix use_tasked_jobs so that tasks inherit the parent job's user.
affected #: 1 file (-1 bytes)
--- a/lib/galaxy/jobs/__init__.py Wed Sep 21 16:50:18 2011 -0400
+++ b/lib/galaxy/jobs/__init__.py Thu Sep 22 11:36:11 2011 -0400
@@ -32,9 +32,9 @@
class JobManager( object ):
"""
Highest level interface to job management.
-
+
TODO: Currently the app accesses "job_queue" and "job_stop_queue" directly.
- This should be decoupled.
+ This should be decoupled.
"""
def __init__( self, app ):
self.app = app
@@ -71,7 +71,7 @@
class JobQueue( object ):
"""
- Job manager, waits for jobs to be runnable and then dispatches to
+ Job manager, waits for jobs to be runnable and then dispatches to
a JobRunner.
"""
STOP_SIGNAL = object()
@@ -95,7 +95,7 @@
self.running = True
self.dispatcher = dispatcher
self.monitor_thread = threading.Thread( target=self.__monitor )
- self.monitor_thread.start()
+ self.monitor_thread.start()
log.info( "job manager started" )
if app.config.get_bool( 'enable_job_recovery', True ):
self.__check_jobs_at_startup()
@@ -132,7 +132,7 @@
def __monitor( self ):
"""
- Continually iterate the waiting jobs, checking is each is ready to
+ Continually iterate the waiting jobs, checking is each is ready to
run and dispatching if so.
"""
# HACK: Delay until after forking, we need a way to do post fork notification!!!
@@ -180,12 +180,12 @@
jobs_to_check.append( self.sa_session.query( model.Job ).get( job_id ) )
except Empty:
pass
- # Iterate over new and waiting jobs and look for any that are
+ # Iterate over new and waiting jobs and look for any that are
# ready to run
new_waiting_jobs = []
for job in jobs_to_check:
try:
- # Check the job's dependencies, requeue if they're not done
+ # Check the job's dependencies, requeue if they're not done
job_state = self.__check_if_ready_to_run( job )
if job_state == JOB_WAIT:
if not self.track_jobs_in_database:
@@ -216,7 +216,7 @@
self.waiting_jobs = new_waiting_jobs
# Done with the session
self.sa_session.remove()
-
+
def __check_if_ready_to_run( self, job ):
"""
Check if a job is ready to run by verifying that each of its input
@@ -281,13 +281,13 @@
if len( user_jobs ) >= self.app.config.user_job_limit:
return JOB_WAIT
return JOB_READY
-
+
def put( self, job_id, tool ):
"""Add a job to the queue (by job identifier)"""
if not self.track_jobs_in_database:
self.queue.put( ( job_id, tool.id ) )
self.sleeper.wake()
-
+
def shutdown( self ):
"""Attempts to gracefully shut down the worker thread"""
if self.parent_pid != os.getpid():
@@ -304,7 +304,7 @@
class JobWrapper( object ):
"""
- Wraps a 'model.Job' with convenience methods for running processes and
+ Wraps a 'model.Job' with convenience methods for running processes and
state management.
"""
def __init__( self, job, queue ):
@@ -329,15 +329,15 @@
self.output_paths = None
self.tool_provided_job_metadata = None
# Wrapper holding the info required to restore and clean up from files used for setting metadata externally
- self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job )
-
+ self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job )
+
def get_job( self ):
return self.sa_session.query( model.Job ).get( self.job_id )
-
+
def get_id_tag(self):
# For compatability with drmaa, which uses job_id right now, and TaskWrapper
return str(self.job_id)
-
+
def get_param_dict( self ):
"""
Restore the dictionary of parameters from the database.
@@ -346,10 +346,10 @@
param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )
param_dict = self.tool.params_from_strings( param_dict, self.app )
return param_dict
-
+
def get_version_string_path( self ):
return os.path.abspath(os.path.join(self.app.config.new_file_path, "GALAXY_VERSION_STRING_%s" % self.job_id))
-
+
def prepare( self ):
"""
Prepare the job to run by creating the working directory and the
@@ -371,9 +371,9 @@
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] )
out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
-
- # Set up output dataset association for export history jobs. Because job
- # uses a Dataset rather than an HDA or LDA, it's necessary to set up a
+
+ # Set up output dataset association for export history jobs. Because job
+ # uses a Dataset rather than an HDA or LDA, it's necessary to set up a
# fake dataset association that provides the needed attributes for
# preparing a job.
class FakeDatasetAssociation ( object ):
@@ -400,7 +400,7 @@
# ( this used to be performed in the "exec_before_job" hook, but hooks are deprecated ).
self.tool.exec_before_job( self.queue.app, inp_data, out_data, param_dict )
# Run the before queue ("exec_before_job") hook
- self.tool.call_hook( 'exec_before_job', self.queue.app, inp_data=inp_data,
+ self.tool.call_hook( 'exec_before_job', self.queue.app, inp_data=inp_data,
out_data=out_data, tool=self.tool, param_dict=incoming)
self.sa_session.flush()
# Build any required config files
@@ -433,7 +433,7 @@
def fail( self, message, exception=False ):
"""
- Indicate job failure by setting state and message on all output
+ Indicate job failure by setting state and message on all output
datasets.
"""
job = self.get_job()
@@ -479,7 +479,7 @@
if self.tool:
self.tool.job_failed( self, message, exception )
self.cleanup()
-
+
def change_state( self, state, info = False ):
job = self.get_job()
self.sa_session.refresh( job )
@@ -509,12 +509,12 @@
job.job_runner_external_id = external_id
self.sa_session.add( job )
self.sa_session.flush()
-
+
def finish( self, stdout, stderr ):
"""
- Called to indicate that the associated command has been run. Updates
+ Called to indicate that the associated command has been run. Updates
the output datasets based on stderr and stdout from the command, and
- the contents of the output files.
+ the contents of the output files.
"""
# default post job setup
self.sa_session.expunge_all()
@@ -536,7 +536,7 @@
if os.path.exists(version_filename):
self.version_string = open(version_filename).read()
os.unlink(version_filename)
-
+
if self.app.config.outputs_to_working_directory:
for dataset_path in self.get_output_fnames():
try:
@@ -584,7 +584,7 @@
else:
# Security violation.
log.exception( "from_work_dir specified a location not in the working directory: %s, %s" % ( source_file, self.working_directory ) )
-
+
dataset.blurb = 'done'
dataset.peek = 'no peek'
dataset.info = context['stdout'] + context['stderr']
@@ -599,7 +599,7 @@
dataset.init_meta( copy_from=dataset )
#if a dataset was copied, it won't appear in our dictionary:
#either use the metadata from originating output dataset, or call set_meta on the copies
- #it would be quicker to just copy the metadata from the originating output dataset,
+ #it would be quicker to just copy the metadata from the originating output dataset,
#but somewhat trickier (need to recurse up the copied_from tree), for now we'll call set_meta()
if not self.app.config.set_metadata_externally or \
( not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) \
@@ -611,7 +611,7 @@
#load metadata from file
#we need to no longer allow metadata to be edited while the job is still running,
#since if it is edited, the metadata changed on the running output will no longer match
- #the metadata that was stored to disk for use via the external process,
+ #the metadata that was stored to disk for use via the external process,
#and the changes made by the user will be lost, without warning or notice
dataset.metadata.from_JSON_dict( self.external_output_metadata.get_output_filenames_by_dataset( dataset, self.sa_session ).filename_out )
try:
@@ -652,13 +652,13 @@
# Flush all the dataset and job changes above. Dataset state changes
# will now be seen by the user.
self.sa_session.flush()
- # Save stdout and stderr
+ # Save stdout and stderr
if len( stdout ) > 32768:
log.error( "stdout for job %d is greater than 32K, only first part will be logged to database" % job.id )
job.stdout = stdout[:32768]
if len( stderr ) > 32768:
log.error( "stderr for job %d is greater than 32K, only first part will be logged to database" % job.id )
- job.stderr = stderr[:32768]
+ job.stderr = stderr[:32768]
# custom post process setup
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
@@ -675,8 +675,8 @@
# ( this used to be performed in the "exec_after_process" hook, but hooks are deprecated ).
self.tool.exec_after_process( self.queue.app, inp_data, out_data, param_dict, job = job )
# Call 'exec_after_process' hook
- self.tool.call_hook( 'exec_after_process', self.queue.app, inp_data=inp_data,
- out_data=out_data, param_dict=param_dict,
+ self.tool.call_hook( 'exec_after_process', self.queue.app, inp_data=inp_data,
+ out_data=out_data, param_dict=param_dict,
tool=self.tool, stdout=stdout, stderr=stderr )
job.command_line = self.command_line
@@ -695,7 +695,7 @@
self.sa_session.flush()
log.debug( 'job %d ended' % self.job_id )
self.cleanup()
-
+
def cleanup( self ):
# remove temporary files
try:
@@ -709,10 +709,10 @@
galaxy.tools.imp_exp.JobImportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session )
except:
log.exception( "Unable to cleanup job %d" % self.job_id )
-
+
def get_command_line( self ):
return self.command_line
-
+
def get_session_id( self ):
return self.session_id
@@ -865,7 +865,7 @@
Should be refactored into a generalized executable unit wrapper parent, then jobs and tasks.
"""
# Abstract this to be more useful for running tasks that *don't* necessarily compose a job.
-
+
def __init__(self, task, queue):
super(TaskWrapper, self).__init__(task.job, queue)
self.task_id = task.id
@@ -939,7 +939,7 @@
# ( this used to be performed in the "exec_before_job" hook, but hooks are deprecated ).
self.tool.exec_before_job( self.queue.app, inp_data, out_data, param_dict )
# Run the before queue ("exec_before_job") hook
- self.tool.call_hook( 'exec_before_job', self.queue.app, inp_data=inp_data,
+ self.tool.call_hook( 'exec_before_job', self.queue.app, inp_data=inp_data,
out_data=out_data, tool=self.tool, param_dict=incoming)
self.sa_session.flush()
# Build any required config files
@@ -986,12 +986,12 @@
task.state = state
self.sa_session.add( task )
self.sa_session.flush()
-
+
def get_state( self ):
task = self.get_task()
self.sa_session.refresh( task )
return task.state
-
+
def set_runner( self, runner_url, external_id ):
task = self.get_task()
self.sa_session.refresh( task )
@@ -1000,15 +1000,15 @@
# DBTODO Check task job_runner_stuff
self.sa_session.add( task )
self.sa_session.flush()
-
+
def finish( self, stdout, stderr ):
# DBTODO integrate previous finish logic.
# Simple finish for tasks. Just set the flag OK.
log.debug( 'task %s for job %d ended' % (self.task_id, self.job_id) )
"""
- Called to indicate that the associated command has been run. Updates
+ Called to indicate that the associated command has been run. Updates
the output datasets based on stderr and stdout from the command, and
- the contents of the output files.
+ the contents of the output files.
"""
# default post job setup_external_metadata
self.sa_session.expunge_all()
@@ -1025,7 +1025,7 @@
task.state = task.states.ERROR
else:
task.state = task.states.OK
- # Save stdout and stderr
+ # Save stdout and stderr
if len( stdout ) > 32768:
log.error( "stdout for task %d is greater than 32K, only first part will be logged to database" % task.id )
task.stdout = stdout[:32768]
@@ -1039,7 +1039,7 @@
def cleanup( self ):
# There is no task cleanup. The job cleans up for all tasks.
pass
-
+
def get_command_line( self ):
return self.command_line
@@ -1049,7 +1049,7 @@
def get_output_file_id( self, file ):
# There is no permanent output file for tasks.
return None
-
+
def get_tool_provided_job_metadata( self ):
# DBTODO Handle this as applicable for tasks.
return None
@@ -1071,10 +1071,6 @@
def setup_external_metadata( self, exec_dir = None, tmp_dir = None, dataset_files_path = None, config_root = None, datatypes_config = None, set_extension = True, **kwds ):
# There is no metadata setting for tasks. This is handled after the merge, at the job level.
return ""
-
- @property
- def user( self ):
- pass
class DefaultJobDispatcher( object ):
def __init__( self, app ):
@@ -1105,13 +1101,19 @@
runner = getattr( module, obj )
self.job_runners[name] = runner( self.app )
log.debug( 'Loaded job runner: %s' % display_name )
-
+
def put( self, job_wrapper ):
try:
- if self.app.config.use_tasked_jobs and job_wrapper.tool.parallelism is not None and not isinstance(job_wrapper, TaskWrapper):
- runner_name = "tasks"
- log.debug( "dispatching job %d to %s runner" %( job_wrapper.job_id, runner_name ) )
- self.job_runners[runner_name].put( job_wrapper )
+ if self.app.config.use_tasked_jobs and job_wrapper.tool.parallelism is not None:
+ if isinstance(job_wrapper, TaskWrapper):
+ #DBTODO Refactor
+ runner_name = ( job_wrapper.tool.job_runner.split(":", 1) )[0]
+ log.debug( "dispatching task %s, of job %d, to %s runner" %( job_wrapper.task_id, job_wrapper.job_id, runner_name ) )
+ self.job_runners[runner_name].put( job_wrapper )
+ else:
+ runner_name = "tasks"
+ log.debug( "dispatching job %d to %s runner" %( job_wrapper.job_id, runner_name ) )
+ self.job_runners[runner_name].put( job_wrapper )
else:
runner_name = ( job_wrapper.tool.job_runner.split(":", 1) )[0]
log.debug( "dispatching job %d to %s runner" %( job_wrapper.job_id, runner_name ) )
@@ -1167,7 +1169,7 @@
self.sleeper = Sleeper()
self.running = True
self.monitor_thread = threading.Thread( target=self.monitor )
- self.monitor_thread.start()
+ self.monitor_thread.start()
log.info( "job stopper started" )
def monitor( self ):
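The substance of the fix above is the removal of TaskWrapper's stub `user` property (which returned None and shadowed the inherited one), so attribute lookup now falls through to JobWrapper. A minimal sketch of the mechanism with simplified stand-in classes, not the real Galaxy wrappers:

    class JobWrapper( object ):
        def __init__( self, job ):
            self.job = job
        @property
        def user( self ):
            # simplified: the real property resolves the user from the job
            return self.job.user

    class TaskWrapper( JobWrapper ):
        # Before this changeset a stub `user` property here returned None, shadowing
        # JobWrapper.user; with it removed, tasks report the parent job's user.
        pass

    class FakeJob( object ):
        user = 'some_user'    # hypothetical value for illustration

    assert TaskWrapper( FakeJob() ).user == 'some_user'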
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/5c0412d06db9/
changeset: 5c0412d06db9
user: richard_burhans
date: 2011-09-21 22:50:18
summary: aaChanges tool: Added an option to keep the columns from the SNP dataset
affected #: 1 file (-1 bytes)
--- a/tools/evolution/codingSnps.xml Wed Sep 21 12:20:07 2011 -0400
+++ b/tools/evolution/codingSnps.xml Wed Sep 21 16:50:18 2011 -0400
@@ -2,7 +2,7 @@
<description>amino-acid changes caused by a set of SNPs</description><command interpreter="perl">
- codingSnps.pl $input1 $input2 Galaxy build=${input1.metadata.dbkey} loc=${GALAXY_DATA_INDEX_DIR}/codingSnps.loc chr=${input1.metadata.chromCol} start=${input1.metadata.startCol} end=${input1.metadata.endCol} snp=$col1 > $out_file1
+ codingSnps.pl $input1 $input2 Galaxy build=${input1.metadata.dbkey} loc=${GALAXY_DATA_INDEX_DIR}/codingSnps.loc chr=${input1.metadata.chromCol} start=${input1.metadata.startCol} end=${input1.metadata.endCol} snp=$col1 keepColumns=$keep > $out_file1
</command><inputs>
@@ -13,6 +13,10 @@
<param format="interval" name="input2" type="data" label="Gene dataset"><validator type="dataset_metadata_in_file" filename="codingSnps.loc" metadata_name="dbkey" metadata_column="0" message="Sequences are not currently available for the specified build." split="\t" /></param>
+ <param name="keep" type="select" label="Keep columns from SNP dataset">
+ <option value="0" selected="true">No</option>
+ <option value="1">Yes</option>
+ </param></inputs><outputs>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
1 new changeset in galaxy-central:
http://bitbucket.org/galaxy/galaxy-central/changeset/b2c68b980ffc/
changeset: b2c68b980ffc
user: jgoecks
date: 2011-09-21 18:20:07
summary: Read and write the summary tree in binary mode; this mitigates problems reading the summary tree file on some platforms.
affected #: 1 file (-1 bytes)
--- a/lib/galaxy/visualization/tracks/summary.py Wed Sep 21 11:36:58 2011 -0400
+++ b/lib/galaxy/visualization/tracks/summary.py Wed Sep 21 12:20:07 2011 -0400
@@ -89,5 +89,5 @@
cPickle.dump(self, open(filename, 'wb'), 2)
def summary_tree_from_file(filename):
- return cPickle.load(open(filename, "r"))
+ return cPickle.load(open(filename, "rb"))
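The reason for the change above: cPickle protocol 2 produces a binary stream, so the file has to be opened in binary mode; opening it in text mode can corrupt the data through newline translation on some platforms (notably Windows). A minimal round-trip, independent of the actual SummaryTree class:

    import cPickle

    data = { 'chr1': [ ( 0, 1000, 42 ) ] }                 # stand-in for a summary tree payload
    cPickle.dump( data, open( 'summary.st', 'wb' ), 2 )    # protocol 2 is a binary format
    assert cPickle.load( open( 'summary.st', 'rb' ) ) == data
    # Opening with 'r' instead of 'rb' can fail or return garbage wherever newline
    # translation alters the pickled bytes.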
Repository URL: https://bitbucket.org/galaxy/galaxy-central/