1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/50bf77aa34c3/
changeset: 50bf77aa34c3
user: dannon
date: 2012-02-02 16:51:13
summary: History export - now works for a guest user.
affected #: 1 file
diff -r 62c2a2f9142c11d1ae279ce3af395bd4fb0d4c52 -r 50bf77aa34c3689a122187d7ba9b55deb348df92 lib/galaxy/tools/actions/history_imp_exp.py
--- a/lib/galaxy/tools/actions/history_imp_exp.py
+++ b/lib/galaxy/tools/actions/history_imp_exp.py
@@ -9,7 +9,7 @@
class ImportHistoryToolAction( ToolAction ):
"""Tool action used for importing a history to an archive. """
- def execute( self, tool, trans, incoming = {}, set_output_hid = False, overwrite = True, history=None ):
+ def execute( self, tool, trans, incoming = {}, set_output_hid = False, overwrite = True, history=None ):
#
# Create job.
#
@@ -20,43 +20,43 @@
job.user_id = trans.user.id
start_job_state = job.state #should be job.states.NEW
job.state = job.states.WAITING #we need to set job state to something other than NEW, or else when tracking jobs in db it will be picked up before we have added input / output parameters
- trans.sa_session.add( job )
+ trans.sa_session.add( job )
trans.sa_session.flush() #ensure job.id are available
-
+
#
# Setup job and job wrapper.
#
-
+
# Add association for keeping track of job, history relationship.
-
- # Use abspath because mkdtemp() does not, contrary to the documentation,
+
+ # Use abspath because mkdtemp() does not, contrary to the documentation,
# always return an absolute path.
archive_dir = os.path.abspath( tempfile.mkdtemp() )
jiha = trans.app.model.JobImportHistoryArchive( job=job, archive_dir=archive_dir )
trans.sa_session.add( jiha )
job_wrapper = JobImportHistoryArchiveWrapper( job )
-
+
#
# Add parameters to job_parameter table.
#
-
+
# Set additional parameters.
incoming[ '__DEST_DIR__' ] = jiha.archive_dir
for name, value in tool.params_to_strings( incoming, trans.app ).iteritems():
job.add_parameter( name, value )
-
+
job.state = start_job_state #job inputs have been configured, restore initial job state
trans.sa_session.flush()
# Queue the job for execution
trans.app.job_queue.put( job.id, tool )
trans.log_event( "Added import history job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
-
+
return job, odict()
class ExportHistoryToolAction( ToolAction ):
"""Tool action used for exporting a history to an archive. """
-
+
def execute( self, tool, trans, incoming = {}, set_output_hid = False, overwrite = True, history=None ):
#
# Get history to export.
@@ -68,10 +68,10 @@
history = value
del incoming[ history_param_name ]
break
-
+
if not history:
raise Exception( 'There is no history to export.' )
-
+
#
# Create the job and output dataset objects
#
@@ -79,22 +79,24 @@
job.session_id = trans.get_galaxy_session().id
job.history_id = trans.history.id
job.tool_id = tool.id
- job.user_id = trans.user.id
+ if trans.user:
+ # If this is an actual user, run the job as that individual. Otherwise we're running as guest.
+ job.user_id = trans.user.id
start_job_state = job.state #should be job.states.NEW
job.state = job.states.WAITING #we need to set job state to something other than NEW, or else when tracking jobs in db it will be picked up before we have added input / output parameters
trans.sa_session.add( job )
-
+
# Create dataset that will serve as archive.
archive_dataset = trans.app.model.Dataset()
trans.sa_session.add( archive_dataset )
-
+
trans.sa_session.flush() #ensure job.id and archive_dataset.id are available
trans.app.object_store.create( archive_dataset ) # set the object store id, create dataset (if applicable)
-
+
#
# Setup job and job wrapper.
#
-
+
# Add association for keeping track of job, history, archive relationship.
jeha = trans.app.model.JobExportHistoryArchive( job=job, history=history, \
dataset=archive_dataset, \
@@ -104,23 +106,23 @@
job_wrapper = JobExportHistoryArchiveWrapper( job )
cmd_line = job_wrapper.setup_job( trans, jeha, include_hidden=incoming[ 'include_hidden' ], \
include_deleted=incoming[ 'include_deleted' ] )
-
+
#
# Add parameters to job_parameter table.
#
-
+
# Set additional parameters.
incoming[ '__HISTORY_TO_EXPORT__' ] = history.id
incoming[ '__EXPORT_HISTORY_COMMAND_INPUTS_OPTIONS__' ] = cmd_line
for name, value in tool.params_to_strings( incoming, trans.app ).iteritems():
job.add_parameter( name, value )
-
+
job.state = start_job_state #job inputs have been configured, restore initial job state
trans.sa_session.flush()
-
-
+
+
# Queue the job for execution
trans.app.job_queue.put( job.id, tool )
trans.log_event( "Added export history job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
-
+
return job, odict()
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/62c2a2f9142c/
changeset: 62c2a2f9142c
user: dannon
date: 2012-02-02 16:17:25
summary: History size wasn't filtering out 'purged' datasets in the summary size; it should now reflect an accurate total on disk.
affected #: 1 file
diff -r 3fdaa5c7d5270564ea55b4c5997dde8c1c00a6e8 -r 62c2a2f9142c11d1ae279ce3af395bd4fb0d4c52 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -475,6 +475,7 @@
db_session = object_session( self )
rval = db_session.query( func.sum( db_session.query( HistoryDatasetAssociation.dataset_id, Dataset.total_size ).join( Dataset )
.filter( HistoryDatasetAssociation.table.c.history_id == self.id )
+ .filter( Dataset.purged != True )
.distinct().subquery().c.total_size ) ).first()[0]
if rval is None:
rval = 0
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/93520b875b56/
changeset: 93520b875b56
user: jgoecks
date: 2012-02-02 15:26:01
summary: Add num_threads parameter to cuffmerge wrapper script.
affected #: 1 file
diff -r 63bc8824dae5d8ab8cf45d86226fcd7f0508f46b -r 93520b875b56905f50dd7de2b09bab189f2429ad tools/ngs_rna/cuffmerge_wrapper.py
--- a/tools/ngs_rna/cuffmerge_wrapper.py
+++ b/tools/ngs_rna/cuffmerge_wrapper.py
@@ -25,6 +25,8 @@
parser = optparse.OptionParser()
parser.add_option( '-g', dest='ref_annotation', help='An optional "reference" annotation GTF. Each sample is matched against this file, and sample isoforms are tagged as overlapping, matching, or novel where appropriate. See the refmap and tmap output file descriptions below.' )
parser.add_option( '-s', dest='use_seq_data', action="store_true", help='Causes cuffmerge to look into for fasta files with the underlying genomic sequences (one file per contig) against which your reads were aligned for some optional classification functions. For example, Cufflinks transcripts consisting mostly of lower-case bases are classified as repeats. Note that <seq_dir> must contain one fasta file per reference chromosome, and each file must be named after the chromosome, and have a .fa or .fasta extension.')
+ parser.add_option( '-p', '--num-threads', dest='num_threads', help='Use this many threads to align reads. The default is 1.' )
+
# Wrapper / Galaxy options.
parser.add_option( '', '--dbkey', dest='dbkey', help='The build of the reference dataset' )
@@ -74,6 +76,8 @@
cmd = "cuffmerge -o cm_output "
# Add options.
+ if options.num_threads:
+ cmd += ( " -p %i" % int ( options.num_threads ) )
if options.ref_annotation:
cmd += " -g %s " % options.ref_annotation
if options.use_seq_data:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/63bc8824dae5/
changeset: 63bc8824dae5
user: jgoecks
date: 2012-02-02 15:15:19
summary: Add num-threads param to the cuffmerge wrapper (note: the diff below modifies cuffmerge_wrapper.xml, not cuffdiff).
affected #: 1 file
diff -r 724bbcc69c921ebe53b780d04f2699178201941e -r 63bc8824dae5d8ab8cf45d86226fcd7f0508f46b tools/ngs_rna/cuffmerge_wrapper.xml
--- a/tools/ngs_rna/cuffmerge_wrapper.xml
+++ b/tools/ngs_rna/cuffmerge_wrapper.xml
@@ -5,7 +5,9 @@
<requirement type="package">cufflinks</requirement></requirements><command interpreter="python">
- cuffmerge_wrapper.py
+ cuffmerge_wrapper.py
+
+ --num-threads="4"
## Use annotation reference?
#if $annotation.use_ref_annotation == "Yes":
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/315515ad52b0/
changeset: 315515ad52b0
user: jgoecks
date: 2012-02-01 21:27:53
summary: Add overwrite param when setting metadata in Trackster data subsets.
affected #: 1 file
diff -r a8eca62cd5304509a67e598d54d5203411aeb3df -r 315515ad52b04d15fe33476b7a33c1079712728c lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py
+++ b/lib/galaxy/web/controllers/tracks.py
@@ -970,7 +970,7 @@
# Set metadata.
if trans.app.config.set_metadata_externally:
- trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming = { 'input1':new_dataset } )
+ trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming = { 'input1':new_dataset }, overwrite=False )
else:
message = 'Attributes updated'
new_dataset.set_meta()
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/a8eca62cd530/
changeset: a8eca62cd530
user: jgoecks
date: 2012-02-01 20:00:21
summary: Fix bug: when setting chromInfo, do not assume a fasta file exists.
affected #: 1 file
diff -r 7d97c594b1639bc16aba60ae8a0bf9a2798dbde8 -r a8eca62cd5304509a67e598d54d5203411aeb3df lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -193,12 +193,15 @@
incoming[ "chromInfo" ] = db_dataset.file_name
else:
# For custom builds, chrom info resides in converted dataset; for built-in builds, chrom info resides in tool-data/shared.
+ chrom_info = None
if trans.user and ( 'dbkeys' in trans.user.preferences ) and ( input_dbkey in trans.user.preferences[ 'dbkeys' ] ):
# Custom build.
custom_build_dict = from_json_string( trans.user.preferences[ 'dbkeys' ] )[ input_dbkey ]
- build_fasta_dataset = trans.app.model.HistoryDatasetAssociation.get( custom_build_dict[ 'fasta' ] )
- chrom_info = build_fasta_dataset.get_converted_dataset( trans, 'len' ).file_name
- else:
+ if 'fasta' in custom_build_dict:
+ build_fasta_dataset = trans.app.model.HistoryDatasetAssociation.get( custom_build_dict[ 'fasta' ] )
+ chrom_info = build_fasta_dataset.get_converted_dataset( trans, 'len' ).file_name
+
+ if not chrom_info:
# Default to built-in build.
chrom_info = os.path.join( trans.app.config.tool_data_path, 'shared','ucsc','chrom', "%s.len" % input_dbkey )
incoming[ "chromInfo" ] = chrom_info
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.