1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/ebe882ab26c4/
Changeset: ebe882ab26c4
User: greg
Date: 2013-05-20 21:51:10
Summary: Change the tool shed grid ToolsFunctionallyCorrectColumn column type.
Affected #: 1 file
diff -r ae1fb20222cc7d075009b59295e247c00616bc15 -r ebe882ab26c4abbd7b3fa85161864257a3f785a6 lib/tool_shed/grids/repository_grids.py
--- a/lib/tool_shed/grids/repository_grids.py
+++ b/lib/tool_shed/grids/repository_grids.py
@@ -121,7 +121,7 @@
return escape_html( repository.revision( trans.app ) )
- class ToolsFunctionallyCorrectColumn( grids.BooleanColumn ):
+ class ToolsFunctionallyCorrectColumn( grids.TextColumn ):
def get_value( self, trans, grid, repository ):
# This column will display the value associated with the currently displayed metadata revision.
@@ -1130,7 +1130,7 @@
return ''
- class ToolsFunctionallyCorrectColumn( grids.BooleanColumn ):
+ class ToolsFunctionallyCorrectColumn( grids.TextColumn ):
def get_value( self, trans, grid, repository ):
# This column will display the value associated with the currently displayed metadata revision.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
4 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/a709ca0ea728/
Changeset: a709ca0ea728
User: jmchilton
Date: 2013-05-13 20:59:16
Summary: Refactor logic for setting metadata externally used by the local job runner out into a method on the BaseJobRunner class. This improves the readability and allows other job runners to use this logic.
Rework LWR to use this new method. Over time the lwr.py and local.py versions of that code had diverged, and it had never been noticed because I guess no one had been using the LWR with set_metadata_externally = True, which is no longer an option.
Affected #: 3 files
diff -r d92704335a2774e1bde2be72f9f78ce63c0447fd -r a709ca0ea7289833b8016613a646c88bbf04cef9 lib/galaxy/jobs/runners/__init__.py
--- a/lib/galaxy/jobs/runners/__init__.py
+++ b/lib/galaxy/jobs/runners/__init__.py
@@ -7,6 +7,7 @@
import string
import logging
import threading
+import subprocess
from Queue import Queue, Empty
@@ -235,6 +236,30 @@
log.exception( "from_work_dir specified a location not in the working directory: %s, %s" % ( source_file, job_wrapper.working_directory ) )
return output_pairs
+ def _handle_metadata_externally(self, job_wrapper):
+ """
+ Set metadata externally. Used by the local and lwr job runners where this
+ shouldn't be attached to command-line to execute.
+ """
+ #run the metadata setting script here
+ #this is terminate-able when output dataset/job is deleted
+ #so that long running set_meta()s can be canceled without having to reboot the server
+ if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
+ external_metadata_script = job_wrapper.setup_external_metadata( output_fnames=job_wrapper.get_output_fnames(),
+ set_extension=True,
+ tmp_dir=job_wrapper.working_directory,
+ #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
+ kwds={ 'overwrite' : False } )
+ log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
+ external_metadata_proc = subprocess.Popen( args=external_metadata_script,
+ shell=True,
+ env=os.environ,
+ preexec_fn=os.setpgrp )
+ job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
+ external_metadata_proc.wait()
+ log.debug( 'execution of external set_meta for job %d finished' % job_wrapper.job_id )
+
+
class AsynchronousJobState( object ):
"""
Encapsulate the state of an asynchronous job, this should be subclassed as
diff -r d92704335a2774e1bde2be72f9f78ce63c0447fd -r a709ca0ea7289833b8016613a646c88bbf04cef9 lib/galaxy/jobs/runners/local.py
--- a/lib/galaxy/jobs/runners/local.py
+++ b/lib/galaxy/jobs/runners/local.py
@@ -87,23 +87,7 @@
job_wrapper.fail( "failure running job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
return
- #run the metadata setting script here
- #this is terminate-able when output dataset/job is deleted
- #so that long running set_meta()s can be canceled without having to reboot the server
- if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
- external_metadata_script = job_wrapper.setup_external_metadata( output_fnames = job_wrapper.get_output_fnames(),
- set_extension = True,
- tmp_dir = job_wrapper.working_directory,
- kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
- log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
- external_metadata_proc = subprocess.Popen( args = external_metadata_script,
- shell = True,
- env = os.environ,
- preexec_fn = os.setpgrp )
- job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
- external_metadata_proc.wait()
- log.debug( 'execution of external set_meta for job %d finished' % job_wrapper.job_id )
-
+ self._handle_metadata_externally( job_wrapper )
# Finish the job!
try:
job_wrapper.finish( stdout, stderr, exit_code )
diff -r d92704335a2774e1bde2be72f9f78ce63c0447fd -r a709ca0ea7289833b8016613a646c88bbf04cef9 lib/galaxy/jobs/runners/lwr.py
--- a/lib/galaxy/jobs/runners/lwr.py
+++ b/lib/galaxy/jobs/runners/lwr.py
@@ -1,5 +1,4 @@
import logging
-import subprocess
from galaxy import model
from galaxy.jobs.runners import AsynchronousJobState, AsynchronousJobRunner
@@ -7,6 +6,7 @@
import errno
from time import sleep
+import os
from lwr_client import FileStager, Client, url_to_destination_params
@@ -140,23 +140,8 @@
job_wrapper.fail( "failure running job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
return
- #run the metadata setting script here
- #this is terminate-able when output dataset/job is deleted
- #so that long running set_meta()s can be canceled without having to reboot the server
- if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
- external_metadata_script = job_wrapper.setup_external_metadata( output_fnames = job_wrapper.get_output_fnames(),
- set_extension = True,
- kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
- log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
- external_metadata_proc = subprocess.Popen( args = external_metadata_script,
- shell = True,
- env = os.environ,
- preexec_fn = os.setpgrp )
- job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
- external_metadata_proc.wait()
- log.debug( 'execution of external set_meta finished for job %d' % job_wrapper.job_id )
-
- # Finish the job
+ self._handle_metadata_externally( job_wrapper )
+ # Finish the job
try:
job_wrapper.finish( stdout, stderr )
except:
https://bitbucket.org/galaxy/galaxy-central/commits/e456392f0557/
Changeset: e456392f0557
User: jmchilton
Date: 2013-05-16 06:37:16
Summary: Implement queued state for LWR jobs - previously all queued and running LWR jobs were marked as running in Galaxy.
Affected #: 2 files
diff -r a709ca0ea7289833b8016613a646c88bbf04cef9 -r e456392f0557079488a901a2c0d90459919aa9ea lib/galaxy/jobs/runners/lwr.py
--- a/lib/galaxy/jobs/runners/lwr.py
+++ b/lib/galaxy/jobs/runners/lwr.py
@@ -34,15 +34,18 @@
def check_watched_item(self, job_state):
try:
client = self.get_client_from_state(job_state)
- complete = client.check_complete()
+ status = client.get_status()
except Exception:
# An orphaned job was put into the queue at app startup, so remote server went down
# either way we are done I guess.
self.mark_as_finished(job_state)
return None
- if complete:
+ if status == "complete":
self.mark_as_finished(job_state)
return None
+ if status == "running" and not job_state.running:
+ job_state.running = True
+ job_state.job_wrapper.change_state( model.Job.states.RUNNING )
return job_state
def queue_job(self, job_wrapper):
@@ -81,7 +84,7 @@
job_id = file_stager.job_id
client.launch( rebuilt_command_line )
job_wrapper.set_job_destination( job_destination, job_id )
- job_wrapper.change_state( model.Job.states.RUNNING )
+ job_wrapper.change_state( model.Job.states.QUEUED )
except Exception, exc:
job_wrapper.fail( "failure running job", exception=True )
@@ -92,7 +95,7 @@
lwr_job_state.job_wrapper = job_wrapper
lwr_job_state.job_id = job_id
lwr_job_state.old_state = True
- lwr_job_state.running = True
+ lwr_job_state.running = False
lwr_job_state.job_destination = job_destination
self.monitor_job(lwr_job_state)
@@ -207,12 +210,8 @@
job_state.job_destination = job_wrapper.job_destination
job_wrapper.command_line = job.get_command_line()
job_state.job_wrapper = job_wrapper
- if job.get_state() == model.Job.states.RUNNING:
+ if job.get_state() in [model.Job.states.RUNNING, model.Job.states.QUEUED]:
log.debug( "(LWR/%s) is still in running state, adding to the LWR queue" % ( job.get_id()) )
job_state.old_state = True
job_state.running = True
self.monitor_queue.put( job_state )
- elif job.get_state() == model.Job.states.QUEUED:
- # LWR doesn't queue currently, so this indicates galaxy was shutoff while
- # job was being staged. Not sure how to recover from that.
- job_state.job_wrapper.fail( "This job was killed when Galaxy was restarted. Please retry the job." )
diff -r a709ca0ea7289833b8016613a646c88bbf04cef9 -r e456392f0557079488a901a2c0d90459919aa9ea lib/galaxy/jobs/runners/lwr_client/client.py
--- a/lib/galaxy/jobs/runners/lwr_client/client.py
+++ b/lib/galaxy/jobs/runners/lwr_client/client.py
@@ -210,11 +210,25 @@
check_complete_response = self.__raw_execute("check_complete", {"job_id": self.job_id})
return check_complete_response
- def check_complete(self):
+ def check_complete(self, response=None):
"""
Return boolean indicating whether the job is complete.
"""
- return self.raw_check_complete()["complete"] == "true"
+ if response == None:
+ response = self.raw_check_complete()
+ return response["complete"] == "true"
+
+ def get_status(self):
+ check_complete_response = self.raw_check_complete()
+ # Older LWR instances won't set status so use 'complete', at some
+ # point drop backward compatibility.
+ complete = self.check_complete(check_complete_response)
+ old_status = "complete" if complete else "running"
+ status = check_complete_response.get("status", old_status)
+ # Bug in certains older LWR instances returned literal "status".
+ if status not in ["complete", "running", "queued"]:
+ status = old_status
+ return status
def clean(self):
"""
https://bitbucket.org/galaxy/galaxy-central/commits/2ffd8c25e9ce/
Changeset: 2ffd8c25e9ce
User: jmchilton
Date: 2013-05-16 06:50:19
Summary: Fix bug that was preventing LWR jobs from entering the RUNNING state post job "recover" on Galaxy startup.
Affected #: 1 file
diff -r e456392f0557079488a901a2c0d90459919aa9ea -r 2ffd8c25e9cea2c0f3b7e36373eeb5cac27357d2 lib/galaxy/jobs/runners/lwr.py
--- a/lib/galaxy/jobs/runners/lwr.py
+++ b/lib/galaxy/jobs/runners/lwr.py
@@ -210,8 +210,9 @@
job_state.job_destination = job_wrapper.job_destination
job_wrapper.command_line = job.get_command_line()
job_state.job_wrapper = job_wrapper
- if job.get_state() in [model.Job.states.RUNNING, model.Job.states.QUEUED]:
+ state = job.get_state()
+ if state in [model.Job.states.RUNNING, model.Job.states.QUEUED]:
log.debug( "(LWR/%s) is still in running state, adding to the LWR queue" % ( job.get_id()) )
job_state.old_state = True
- job_state.running = True
+ job_state.running = state == model.Job.states.RUNNING
self.monitor_queue.put( job_state )
https://bitbucket.org/galaxy/galaxy-central/commits/ae1fb20222cc/
Changeset: ae1fb20222cc
User: natefoo
Date: 2013-05-20 21:00:11
Summary: Merged in jmchilton/galaxy-central-lwr (pull request #166)
LWR fix for setting metadata externally.
Affected #: 4 files
diff -r 64c6e3dfaa2bd54f344026752f4ab7ae008a5094 -r ae1fb20222cc7d075009b59295e247c00616bc15 lib/galaxy/jobs/runners/__init__.py
--- a/lib/galaxy/jobs/runners/__init__.py
+++ b/lib/galaxy/jobs/runners/__init__.py
@@ -7,6 +7,7 @@
import string
import logging
import threading
+import subprocess
from Queue import Queue, Empty
@@ -236,6 +237,30 @@
log.exception( "from_work_dir specified a location not in the working directory: %s, %s" % ( source_file, job_wrapper.working_directory ) )
return output_pairs
+ def _handle_metadata_externally(self, job_wrapper):
+ """
+ Set metadata externally. Used by the local and lwr job runners where this
+ shouldn't be attached to command-line to execute.
+ """
+ #run the metadata setting script here
+ #this is terminate-able when output dataset/job is deleted
+ #so that long running set_meta()s can be canceled without having to reboot the server
+ if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
+ external_metadata_script = job_wrapper.setup_external_metadata( output_fnames=job_wrapper.get_output_fnames(),
+ set_extension=True,
+ tmp_dir=job_wrapper.working_directory,
+ #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
+ kwds={ 'overwrite' : False } )
+ log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
+ external_metadata_proc = subprocess.Popen( args=external_metadata_script,
+ shell=True,
+ env=os.environ,
+ preexec_fn=os.setpgrp )
+ job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
+ external_metadata_proc.wait()
+ log.debug( 'execution of external set_meta for job %d finished' % job_wrapper.job_id )
+
+
class AsynchronousJobState( object ):
"""
Encapsulate the state of an asynchronous job, this should be subclassed as
diff -r 64c6e3dfaa2bd54f344026752f4ab7ae008a5094 -r ae1fb20222cc7d075009b59295e247c00616bc15 lib/galaxy/jobs/runners/local.py
--- a/lib/galaxy/jobs/runners/local.py
+++ b/lib/galaxy/jobs/runners/local.py
@@ -88,23 +88,7 @@
job_wrapper.fail( "failure running job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
return
- #run the metadata setting script here
- #this is terminate-able when output dataset/job is deleted
- #so that long running set_meta()s can be canceled without having to reboot the server
- if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
- external_metadata_script = job_wrapper.setup_external_metadata( output_fnames = job_wrapper.get_output_fnames(),
- set_extension = True,
- tmp_dir = job_wrapper.working_directory,
- kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
- log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
- external_metadata_proc = subprocess.Popen( args = external_metadata_script,
- shell = True,
- env = os.environ,
- preexec_fn = os.setpgrp )
- job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
- external_metadata_proc.wait()
- log.debug( 'execution of external set_meta for job %d finished' % job_wrapper.job_id )
-
+ self._handle_metadata_externally( job_wrapper )
# Finish the job!
try:
job_wrapper.finish( stdout, stderr, exit_code )
diff -r 64c6e3dfaa2bd54f344026752f4ab7ae008a5094 -r ae1fb20222cc7d075009b59295e247c00616bc15 lib/galaxy/jobs/runners/lwr.py
--- a/lib/galaxy/jobs/runners/lwr.py
+++ b/lib/galaxy/jobs/runners/lwr.py
@@ -1,5 +1,4 @@
import logging
-import subprocess
from galaxy import model
from galaxy.jobs.runners import AsynchronousJobState, AsynchronousJobRunner
@@ -7,6 +6,7 @@
import errno
from time import sleep
+import os
from lwr_client import FileStager, Client, url_to_destination_params
@@ -34,15 +34,18 @@
def check_watched_item(self, job_state):
try:
client = self.get_client_from_state(job_state)
- complete = client.check_complete()
+ status = client.get_status()
except Exception:
# An orphaned job was put into the queue at app startup, so remote server went down
# either way we are done I guess.
self.mark_as_finished(job_state)
return None
- if complete:
+ if status == "complete":
self.mark_as_finished(job_state)
return None
+ if status == "running" and not job_state.running:
+ job_state.running = True
+ job_state.job_wrapper.change_state( model.Job.states.RUNNING )
return job_state
def queue_job(self, job_wrapper):
@@ -81,7 +84,7 @@
job_id = file_stager.job_id
client.launch( rebuilt_command_line )
job_wrapper.set_job_destination( job_destination, job_id )
- job_wrapper.change_state( model.Job.states.RUNNING )
+ job_wrapper.change_state( model.Job.states.QUEUED )
except Exception, exc:
job_wrapper.fail( "failure running job", exception=True )
@@ -92,7 +95,7 @@
lwr_job_state.job_wrapper = job_wrapper
lwr_job_state.job_id = job_id
lwr_job_state.old_state = True
- lwr_job_state.running = True
+ lwr_job_state.running = False
lwr_job_state.job_destination = job_destination
self.monitor_job(lwr_job_state)
@@ -140,23 +143,8 @@
job_wrapper.fail( "failure running job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
return
- #run the metadata setting script here
- #this is terminate-able when output dataset/job is deleted
- #so that long running set_meta()s can be canceled without having to reboot the server
- if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
- external_metadata_script = job_wrapper.setup_external_metadata( output_fnames = job_wrapper.get_output_fnames(),
- set_extension = True,
- kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
- log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
- external_metadata_proc = subprocess.Popen( args = external_metadata_script,
- shell = True,
- env = os.environ,
- preexec_fn = os.setpgrp )
- job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
- external_metadata_proc.wait()
- log.debug( 'execution of external set_meta finished for job %d' % job_wrapper.job_id )
-
- # Finish the job
+ self._handle_metadata_externally( job_wrapper )
+ # Finish the job
try:
job_wrapper.finish( stdout, stderr )
except:
@@ -222,12 +210,9 @@
job_state.job_destination = job_wrapper.job_destination
job_wrapper.command_line = job.get_command_line()
job_state.job_wrapper = job_wrapper
- if job.get_state() == model.Job.states.RUNNING:
+ state = job.get_state()
+ if state in [model.Job.states.RUNNING, model.Job.states.QUEUED]:
log.debug( "(LWR/%s) is still in running state, adding to the LWR queue" % ( job.get_id()) )
job_state.old_state = True
- job_state.running = True
+ job_state.running = state == model.Job.states.RUNNING
self.monitor_queue.put( job_state )
- elif job.get_state() == model.Job.states.QUEUED:
- # LWR doesn't queue currently, so this indicates galaxy was shutoff while
- # job was being staged. Not sure how to recover from that.
- job_state.job_wrapper.fail( "This job was killed when Galaxy was restarted. Please retry the job." )
diff -r 64c6e3dfaa2bd54f344026752f4ab7ae008a5094 -r ae1fb20222cc7d075009b59295e247c00616bc15 lib/galaxy/jobs/runners/lwr_client/client.py
--- a/lib/galaxy/jobs/runners/lwr_client/client.py
+++ b/lib/galaxy/jobs/runners/lwr_client/client.py
@@ -210,11 +210,25 @@
check_complete_response = self.__raw_execute("check_complete", {"job_id": self.job_id})
return check_complete_response
- def check_complete(self):
+ def check_complete(self, response=None):
"""
Return boolean indicating whether the job is complete.
"""
- return self.raw_check_complete()["complete"] == "true"
+ if response == None:
+ response = self.raw_check_complete()
+ return response["complete"] == "true"
+
+ def get_status(self):
+ check_complete_response = self.raw_check_complete()
+ # Older LWR instances won't set status so use 'complete', at some
+ # point drop backward compatibility.
+ complete = self.check_complete(check_complete_response)
+ old_status = "complete" if complete else "running"
+ status = check_complete_response.get("status", old_status)
+ # Bug in certains older LWR instances returned literal "status".
+ if status not in ["complete", "running", "queued"]:
+ status = old_status
+ return status
def clean(self):
"""
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/237209336f03/
Changeset: 237209336f03
User: jgoecks
Date: 2013-05-19 17:59:50
Summary: Add library normalization and dispersion options to Cuffdiff wrapper.
Affected #: 2 files
diff -r 0afdd38eb5ec63708a58b2e2bb8aadecf7f42f72 -r 237209336f0337ea9f47df39548df3c11900f182 tools/ngs_rna/cuffdiff_wrapper.py
--- a/tools/ngs_rna/cuffdiff_wrapper.py
+++ b/tools/ngs_rna/cuffdiff_wrapper.py
@@ -62,6 +62,8 @@
parser.add_option( '-c', '--min-alignment-count', dest='min_alignment_count', help='The minimum number of alignments in a locus for needed to conduct significance testing on changes in that locus observed between samples. If no testing is performed, changes in the locus are deemed not signficant, and the locus\' observed changes don\'t contribute to correction for multiple testing. The default is 1,000 fragment alignments (up to 2,000 paired reads).' )
parser.add_option( '--FDR', dest='FDR', help='The allowed false discovery rate. The default is 0.05.' )
parser.add_option( '-u', '--multi-read-correct', dest='multi_read_correct', action="store_true", help='Tells Cufflinks to do an initial estimation procedure to more accurately weight reads mapping to multiple locations in the genome')
+ parser.add_option( '--library-norm-method', dest='library_norm_method' )
+ parser.add_option( '--dispersion-method', dest='dispersion_method' )
# Advanced Options:
parser.add_option( '--num-importance-samples', dest='num_importance_samples', help='Sets the number of importance samples generated for each locus during abundance estimation. Default: 1000' )
@@ -143,6 +145,10 @@
cmd = "cuffdiff --no-update-check -q"
# Add options.
+ if options.library_norm_method:
+ cmd += ( " --library-norm-method %s" % options.library_norm_method )
+ if options.dispersion_method:
+ cmd += ( " --dispersion-method %s" % options.dispersion_method )
if options.inner_dist_std_dev:
cmd += ( " -s %i" % int ( options.inner_dist_std_dev ) )
if options.num_threads:
diff -r 0afdd38eb5ec63708a58b2e2bb8aadecf7f42f72 -r 237209336f0337ea9f47df39548df3c11900f182 tools/ngs_rna/cuffdiff_wrapper.xml
--- a/tools/ngs_rna/cuffdiff_wrapper.xml
+++ b/tools/ngs_rna/cuffdiff_wrapper.xml
@@ -9,6 +9,8 @@
--FDR=$fdr
--num-threads="4"
--min-alignment-count=$min_alignment_count
+ --library-norm-method=$library_norm_method
+ --dispersion-method=$dispersion_method
--isoforms_fpkm_tracking_output=$isoforms_fpkm_tracking
--genes_fpkm_tracking_output=$genes_fpkm_tracking
@@ -92,6 +94,18 @@
</when></conditional>
+ <param name="library_norm_method" type="select" label="Library normalization method">
+ <option value="geometric" selected="True">geometric</option>
+ <option value="classic-fpkm">classic-fpkm</option>
+ <option value="quartile">quartile</option>
+ </param>
+
+ <param name="dispersion_method" type="select" label="Dispersion estimation method" help="If using only one sample per condition, you must use 'blind.'">
+ <option value="pooled" selected="True">pooled</option>
+ <option value="per-condition">per-condition</option>
+ <option value="blind">blind</option>
+ </param>
+
<param name="fdr" type="float" value="0.05" label="False Discovery Rate" help="The allowed false discovery rate."/><param name="min_alignment_count" type="integer" value="10" label="Min Alignment Count" help="The minimum number of alignments in a locus for needed to conduct significance testing on changes in that locus observed between samples."/>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.