galaxy-commits
December 2012
- 1 participant
- 142 discussions
commit/galaxy-central: jgoecks: Update Cuff* labels to reflect GFF3 compatibility.
by Bitbucket 04 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/7bd130c33ebc/
changeset: 7bd130c33ebc
user: jgoecks
date: 2012-12-04 15:09:36
summary: Update Cuff* labels to reflect GFF3 compatibility.
affected #: 3 files
diff -r 0c1f8a06eb563f312d5e90818b34358430c6edf9 -r 7bd130c33ebc218d97df83a63555abed222b301b tools/ngs_rna/cuffcompare_wrapper.xml
--- a/tools/ngs_rna/cuffcompare_wrapper.xml
+++ b/tools/ngs_rna/cuffcompare_wrapper.xml
@@ -48,7 +48,7 @@
<option value="Yes">Yes</option>
</param>
<when value="Yes">
- <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/>
+ <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Requires an annotation file in GFF3 or GTF format."/>
<param name="ignore_nonoverlapping_reference" type="boolean" label="Ignore reference transcripts that are not overlapped by any transcript in input files"/>
</when>
<when value="No">
diff -r 0c1f8a06eb563f312d5e90818b34358430c6edf9 -r 7bd130c33ebc218d97df83a63555abed222b301b tools/ngs_rna/cuffdiff_wrapper.xml
--- a/tools/ngs_rna/cuffdiff_wrapper.xml
+++ b/tools/ngs_rna/cuffdiff_wrapper.xml
@@ -72,7 +72,7 @@
</command>
<inputs>
- <param format="gtf,gff3" name="gtf_input" type="data" label="Transcripts" help="A transcript GFF3/GTF file produced by cufflinks, cuffcompare, or other source."/>
+ <param format="gtf,gff3" name="gtf_input" type="data" label="Transcripts" help="A transcript GFF3 or GTF file produced by cufflinks, cuffcompare, or other source."/>
<conditional name="group_analysis">
<param name="do_groups" type="select" label="Perform replicate analysis" help="Perform cuffdiff with replicates in each group.">
<option value="No">No</option>
diff -r 0c1f8a06eb563f312d5e90818b34358430c6edf9 -r 7bd130c33ebc218d97df83a63555abed222b301b tools/ngs_rna/cuffmerge_wrapper.xml
--- a/tools/ngs_rna/cuffmerge_wrapper.xml
+++ b/tools/ngs_rna/cuffmerge_wrapper.xml
@@ -47,7 +47,7 @@
<option value="Yes">Yes</option>
</param>
<when value="Yes">
- <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/>
+ <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Requires an annotation file in GFF3 or GTF format."/>
</when>
<when value="No">
</when>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/088960e4d379/
changeset: 088960e4d379
user: Kyle Ellrott
date: 2012-11-30 19:58:39
summary: Patch to make the extended_metadata loader work better and to fix the library loader to catch non-ASCII errors
affected #: 2 files
diff -r 88aba66bb81351cbd625ca7fb9ed39874016b36a -r 088960e4d3793d473ac0b657215a3336c9f1d263 lib/galaxy/webapps/galaxy/api/library_contents.py
--- a/lib/galaxy/webapps/galaxy/api/library_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/library_contents.py
@@ -181,7 +181,7 @@
else:
#BUG: Everything is cast to string, which can lead to false positives
#for cross type comparisions, ie "True" == True
- yield prefix, str(meta)
+ yield prefix, ("%s" % (meta)).encode("utf8", errors='replace')
@web.expose_api
def update( self, trans, id, library_id, payload, **kwd ):
diff -r 88aba66bb81351cbd625ca7fb9ed39874016b36a -r 088960e4d3793d473ac0b657215a3336c9f1d263 scripts/api/load_data_with_metadata.py
--- a/scripts/api/load_data_with_metadata.py
+++ b/scripts/api/load_data_with_metadata.py
@@ -6,7 +6,7 @@
system in the library
Sample call:
-python load_data_with_metadata.py <api_key> <api_url> /tmp/g_inbox/ /tmp/g_inbox/done/ "API Imports"
+python load_data_with_metadata.py <api_key> <api_url> /data/folder "API Imports"
NOTE: The upload method used requires the data library filesystem upload allow_library_path_paste
"""
@@ -18,7 +18,7 @@
sys.path.insert( 0, os.path.dirname( __file__ ) )
from common import submit, display
-def main(api_key, api_url, in_folder, out_folder, data_library):
+def main(api_key, api_url, in_folder, data_library):
# Find/Create data library with the above name. Assume we're putting datasets in the root folder '/'
libs = display(api_key, api_url + 'libraries', return_formatted=False)
library_id = None
@@ -36,11 +36,11 @@
if not library_id or not library_folder_id:
print "Failure to configure library destination."
sys.exit(1)
- # Watch in_folder, upload anything that shows up there to data library and get ldda,
- # invoke workflow, move file to out_folder.
+
for fname in os.listdir(in_folder):
fullpath = os.path.join(in_folder, fname)
if os.path.isfile(fullpath) and os.path.exists(fullpath + ".json"):
+ print "Loading", fullpath
data = {}
data['folder_id'] = library_folder_id
data['file_type'] = 'auto'
@@ -49,6 +49,8 @@
data['filesystem_paths'] = fullpath
data['create_type'] = 'file'
+ data['link_data_only'] = 'link_to_files'
+
handle = open( fullpath + ".json" )
smeta = handle.read()
handle.close()
@@ -61,10 +63,9 @@
api_key = sys.argv[1]
api_url = sys.argv[2]
in_folder = sys.argv[3]
- out_folder = sys.argv[4]
- data_library = sys.argv[5]
+ data_library = sys.argv[4]
except IndexError:
- print 'usage: %s key url in_folder out_folder data_library' % os.path.basename( sys.argv[0] )
+ print 'usage: %s key url in_folder data_library' % os.path.basename( sys.argv[0] )
sys.exit( 1 )
- main(api_key, api_url, in_folder, out_folder, data_library )
+ main(api_key, api_url, in_folder, data_library )
https://bitbucket.org/galaxy/galaxy-central/changeset/0c1f8a06eb56/
changeset: 0c1f8a06eb56
user: dannon
date: 2012-12-04 15:07:31
summary: Merged in kellrott/galaxy-central (pull request #90)
affected #: 2 files
diff -r 89deda81b0c949de68941f37e77c4c55097199f0 -r 0c1f8a06eb563f312d5e90818b34358430c6edf9 lib/galaxy/webapps/galaxy/api/library_contents.py
--- a/lib/galaxy/webapps/galaxy/api/library_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/library_contents.py
@@ -181,7 +181,7 @@
else:
#BUG: Everything is cast to string, which can lead to false positives
#for cross type comparisions, ie "True" == True
- yield prefix, str(meta)
+ yield prefix, ("%s" % (meta)).encode("utf8", errors='replace')
@web.expose_api
def update( self, trans, id, library_id, payload, **kwd ):
diff -r 89deda81b0c949de68941f37e77c4c55097199f0 -r 0c1f8a06eb563f312d5e90818b34358430c6edf9 scripts/api/load_data_with_metadata.py
--- a/scripts/api/load_data_with_metadata.py
+++ b/scripts/api/load_data_with_metadata.py
@@ -6,7 +6,7 @@
system in the library
Sample call:
-python load_data_with_metadata.py <api_key> <api_url> /tmp/g_inbox/ /tmp/g_inbox/done/ "API Imports"
+python load_data_with_metadata.py <api_key> <api_url> /data/folder "API Imports"
NOTE: The upload method used requires the data library filesystem upload allow_library_path_paste
"""
@@ -18,7 +18,7 @@
sys.path.insert( 0, os.path.dirname( __file__ ) )
from common import submit, display
-def main(api_key, api_url, in_folder, out_folder, data_library):
+def main(api_key, api_url, in_folder, data_library):
# Find/Create data library with the above name. Assume we're putting datasets in the root folder '/'
libs = display(api_key, api_url + 'libraries', return_formatted=False)
library_id = None
@@ -36,11 +36,11 @@
if not library_id or not library_folder_id:
print "Failure to configure library destination."
sys.exit(1)
- # Watch in_folder, upload anything that shows up there to data library and get ldda,
- # invoke workflow, move file to out_folder.
+
for fname in os.listdir(in_folder):
fullpath = os.path.join(in_folder, fname)
if os.path.isfile(fullpath) and os.path.exists(fullpath + ".json"):
+ print "Loading", fullpath
data = {}
data['folder_id'] = library_folder_id
data['file_type'] = 'auto'
@@ -49,6 +49,8 @@
data['filesystem_paths'] = fullpath
data['create_type'] = 'file'
+ data['link_data_only'] = 'link_to_files'
+
handle = open( fullpath + ".json" )
smeta = handle.read()
handle.close()
@@ -61,10 +63,9 @@
api_key = sys.argv[1]
api_url = sys.argv[2]
in_folder = sys.argv[3]
- out_folder = sys.argv[4]
- data_library = sys.argv[5]
+ data_library = sys.argv[4]
except IndexError:
- print 'usage: %s key url in_folder out_folder data_library' % os.path.basename( sys.argv[0] )
+ print 'usage: %s key url in_folder data_library' % os.path.basename( sys.argv[0] )
sys.exit( 1 )
- main(api_key, api_url, in_folder, out_folder, data_library )
+ main(api_key, api_url, in_folder, data_library )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
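A minimal standalone sketch (Python 2, as in the codebase) of the failure mode the encode() change above guards against; the metadata value is hypothetical:

# -*- coding: utf-8 -*-
meta = u'café'  # hypothetical non-ASCII metadata value

try:
    str(meta)  # implicit ASCII encoding raises UnicodeEncodeError
except UnicodeEncodeError:
    print "str() fails on non-ASCII metadata"

# "%s" % meta yields a unicode object; encoding it explicitly as UTF-8
# with errors='replace' avoids raising for anything unencodable.
print ("%s" % (meta)).encode("utf8", errors='replace')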
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/cb81a84db021/
changeset: cb81a84db021
user: Kyle Ellrott
date: 2012-11-30 23:01:28
summary: Fixing missing JobWrapper import
affected #: 1 file
diff -r 03ec137ca8a3148aa771769e80963b65194b7895 -r cb81a84db021353757f1cdc1e40fb19e38662ec8 lib/galaxy/jobs/manager.py
--- a/lib/galaxy/jobs/manager.py
+++ b/lib/galaxy/jobs/manager.py
@@ -12,7 +12,7 @@
from sqlalchemy.sql.expression import and_, or_
from galaxy import model
-from galaxy.jobs import handler, Sleeper, NoopQueue
+from galaxy.jobs import handler, Sleeper, NoopQueue, JobWrapper
from galaxy.util.json import from_json_string
log = logging.getLogger( __name__ )
https://bitbucket.org/galaxy/galaxy-central/changeset/89deda81b0c9/
changeset: 89deda81b0c9
user: dannon
date: 2012-12-04 14:49:22
summary: Merged in kellrott/galaxy-central (pull request #91)
affected #: 1 file
diff -r 9aec3401a724eeb8e47a13de6de13c1eb122026c -r 89deda81b0c949de68941f37e77c4c55097199f0 lib/galaxy/jobs/manager.py
--- a/lib/galaxy/jobs/manager.py
+++ b/lib/galaxy/jobs/manager.py
@@ -12,7 +12,7 @@
from sqlalchemy.sql.expression import and_, or_
from galaxy import model
-from galaxy.jobs import handler, Sleeper, NoopQueue
+from galaxy.jobs import handler, Sleeper, NoopQueue, JobWrapper
from galaxy.util.json import from_json_string
log = logging.getLogger( __name__ )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
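A tiny sketch of the failure fixed above: referencing a name that was never imported fails at call time with NameError (the function here is hypothetical; in manager.py the missing name was JobWrapper):

def make_wrapper():
    return JobWrapper()  # NameError: 'JobWrapper' was never imported

try:
    make_wrapper()
except NameError, e:
    print e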
commit/galaxy-central: jgoecks: Enable and document GFF annotation support for Cuffcompare/merge/diff.
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/9aec3401a724/
changeset: 9aec3401a724
user: jgoecks
date: 2012-12-04 03:02:45
summary: Enable and document GFF annotation support for Cuffcompare/merge/diff.
affected #: 3 files
diff -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad -r 9aec3401a724eeb8e47a13de6de13c1eb122026c tools/ngs_rna/cuffcompare_wrapper.xml
--- a/tools/ngs_rna/cuffcompare_wrapper.xml
+++ b/tools/ngs_rna/cuffcompare_wrapper.xml
@@ -48,7 +48,7 @@
<option value="Yes">Yes</option>
</param>
<when value="Yes">
- <param format="gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/>
+ <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/>
<param name="ignore_nonoverlapping_reference" type="boolean" label="Ignore reference transcripts that are not overlapped by any transcript in input files"/>
</when>
<when value="No">
diff -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad -r 9aec3401a724eeb8e47a13de6de13c1eb122026c tools/ngs_rna/cuffdiff_wrapper.xml
--- a/tools/ngs_rna/cuffdiff_wrapper.xml
+++ b/tools/ngs_rna/cuffdiff_wrapper.xml
@@ -72,7 +72,7 @@
</command>
<inputs>
- <param format="gtf,gff3" name="gtf_input" type="data" label="Transcripts" help="A transcript GTF file produced by cufflinks, cuffcompare, or other source."/>
+ <param format="gtf,gff3" name="gtf_input" type="data" label="Transcripts" help="A transcript GFF3/GTF file produced by cufflinks, cuffcompare, or other source."/>
<conditional name="group_analysis">
<param name="do_groups" type="select" label="Perform replicate analysis" help="Perform cuffdiff with replicates in each group.">
<option value="No">No</option>
diff -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad -r 9aec3401a724eeb8e47a13de6de13c1eb122026c tools/ngs_rna/cuffmerge_wrapper.xml
--- a/tools/ngs_rna/cuffmerge_wrapper.xml
+++ b/tools/ngs_rna/cuffmerge_wrapper.xml
@@ -47,7 +47,7 @@
<option value="Yes">Yes</option>
</param>
<when value="Yes">
- <param format="gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/>
+ <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/>
</when>
<when value="No">
</when>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
6 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/c2c7e22ee51c/
changeset: c2c7e22ee51c
user: jmchilton
date: 2012-11-14 22:49:53
summary: Fixes for multiple input data parameters: they would fail when only a single input was supplied, and in production settings where multiple Galaxy processes are used (the input parameters must take a different path through the code in that case). This changeset addresses both issues. The introduction of the DatasetListWrapper class may seem like overkill right now, but I think it will prove useful over the coming months. Once there are multiple ways of selecting many files (be it this, by tag, or the composite dataset work I am doing), it will be valuable to have uniform ways of accessing those files in Cheetah templates; this class helps pave the way for that.
affected #: 2 files
diff -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 -r c2c7e22ee51c001e4a19ce397c90ccb61e4d4ca2 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2256,12 +2256,11 @@
current = values["__current_case__"]
wrap_values( input.cases[current].inputs, values )
elif isinstance( input, DataToolParameter ) and input.multiple:
- values = input_values[ input.name ]
input_values[ input.name ] = \
- [DatasetFilenameWrapper( value,
- datatypes_registry = self.app.datatypes_registry,
- tool = self,
- name = input.name ) for value in values]
+ DatasetListWrapper( input_values[ input.name ],
+ datatypes_registry = self.app.datatypes_registry,
+ tool = self,
+ name = input.name )
elif isinstance( input, DataToolParameter ):
## FIXME: We're populating param_dict with conversions when
## wrapping values, this should happen as a separate
@@ -2333,10 +2332,13 @@
# but this should be considered DEPRECATED, instead use:
# $dataset.get_child( 'name' ).filename
for name, data in input_datasets.items():
- param_dict[name] = DatasetFilenameWrapper( data,
- datatypes_registry = self.app.datatypes_registry,
- tool = self,
- name = name )
+ param_dict_value = param_dict.get(name, None)
+ if not isinstance(param_dict_value, (DatasetFilenameWrapper, DatasetListWrapper)):
+ param_dict[name] = DatasetFilenameWrapper( data,
+ datatypes_registry = self.app.datatypes_registry,
+ tool = self,
+ name = name )
+
if data:
for child in data.children:
param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child )
@@ -3102,7 +3104,16 @@
return getattr( self.dataset, key )
def __nonzero__( self ):
return bool( self.dataset )
-
+
+class DatasetListWrapper( list ):
+ """
+ """
+ def __init__( self, datasets, **kwargs ):
+ if not isinstance(datasets, list):
+ datasets = [datasets]
+ list.__init__( self, [DatasetFilenameWrapper(dataset, **kwargs) for dataset in datasets] )
+
+
def json_fix( val ):
if isinstance( val, list ):
return [ json_fix( v ) for v in val ]
diff -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 -r c2c7e22ee51c001e4a19ce397c90ccb61e4d4ca2 lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -145,6 +145,12 @@
values = input_values[ input.name ]
current = values[ "__current_case__" ]
wrap_values( input.cases[current].inputs, values, skip_missing_values = skip_missing_values )
+ elif isinstance( input, DataToolParameter ) and input.multiple:
+ input_values[ input.name ] = \
+ galaxy.tools.DatasetListWrapper( input_values[ input.name ],
+ datatypes_registry = trans.app.datatypes_registry,
+ tool = tool,
+ name = input.name )
elif isinstance( input, DataToolParameter ):
input_values[ input.name ] = \
galaxy.tools.DatasetFilenameWrapper( input_values[ input.name ],
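A minimal standalone sketch of the DatasetListWrapper pattern introduced above: normalize one-or-many datasets to a list and wrap every element the same way, so templates can treat a multiple-input parameter uniformly. wrap() and the dict datasets are hypothetical stand-ins for DatasetFilenameWrapper and real dataset objects:

def wrap(dataset, **kwargs):
    # Hypothetical stand-in for DatasetFilenameWrapper.
    return dataset['file_name']

class DatasetListWrapper(list):
    def __init__(self, datasets, **kwargs):
        # A single selected dataset still becomes a one-element list.
        if not isinstance(datasets, list):
            datasets = [datasets]
        list.__init__(self, [wrap(d, **kwargs) for d in datasets])

# Iteration behaves the same whether one or many datasets were selected:
print list(DatasetListWrapper({'file_name': 'a.bed'}))
print list(DatasetListWrapper([{'file_name': 'a.bed'}, {'file_name': 'b.bed'}]))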
https://bitbucket.org/galaxy/galaxy-central/changeset/e8c84dd71578/
changeset: e8c84dd71578
user: jmchilton
date: 2012-11-14 23:10:31
summary: Allow output data attributes `format` and `metadata_source` to work with multiple input data parameters - the first item selected will be used as the source. This decision was discussed with Dannon in IRC and he thought it was an acceptable approach.
affected #: 1 file
diff -r c2c7e22ee51c001e4a19ce397c90ccb61e4d4ca2 -r e8c84dd715782e7c1d709d8068e6033b835f7f39 lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -62,7 +62,11 @@
# If there are multiple inputs with the same name, they
# are stored as name1, name2, ...
for i, v in enumerate( value ):
- input_datasets[ prefix + input.name + str( i + 1 ) ] = process_dataset( v )
+ processed_dataset = process_dataset( v )
+ if i == 0:
+ # Allow copying metadata to output, first item will be source.
+ input_datasets[ prefix + input.name ] = processed_dataset
+ input_datasets[ prefix + input.name + str( i + 1 ) ] = processed_dataset
conversions = []
for conversion_name, conversion_extensions, conversion_datatypes in input.conversions:
new_data = process_dataset( input_datasets[ prefix + input.name + str( i + 1 ) ], conversion_datatypes )
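A short sketch of the convention added above: when a multiple-input parameter expands to name1, name2, ..., the first dataset is also stored under the bare name, so format= and metadata_source= lookups find a single source (names and values hypothetical):

input_datasets = {}
prefix, name = "", "reads"
values = ["dsA", "dsB"]  # hypothetical processed datasets
for i, v in enumerate(values):
    if i == 0:
        # The first item doubles as the metadata/format source.
        input_datasets[prefix + name] = v
    input_datasets[prefix + name + str(i + 1)] = v
print input_datasets  # contains 'reads': 'dsA', 'reads1': 'dsA', 'reads2': 'dsB'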
https://bitbucket.org/galaxy/galaxy-central/changeset/df2eb3960fd9/
changeset: df2eb3960fd9
user: jmchilton
date: 2012-11-15 05:17:55
summary: Fix another error encountered only in multiple process mode, this one related to job rerunning.
affected #: 1 file
diff -r e8c84dd715782e7c1d709d8068e6033b835f7f39 -r df2eb3960fd92e7877bdd31cf5368cb062a9471c lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1551,6 +1551,8 @@
raise ValueError( "History does not include a dataset of the required format / build" )
if value in [None, "None"]:
return None
+ if isinstance( value, str ) and value.find( "," ) > 0:
+ value = [ int( value_part ) for value_part in value.split( "," ) ]
if isinstance( value, list ):
rval = [ trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( v ) for v in value ]
elif isinstance( value, trans.app.model.HistoryDatasetAssociation ):
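A quick sketch of the rerun fix above: in multiple-process mode the selected dataset ids can come back as one comma-separated string instead of a list, so the value is split into ints before the HistoryDatasetAssociation query (the ids are hypothetical):

value = "12,15,19"  # hypothetical dataset ids posted by the rerun form
if isinstance(value, str) and value.find(",") > 0:
    value = [int(value_part) for value_part in value.split(",")]
print value  # [12, 15, 19]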
https://bitbucket.org/galaxy/galaxy-central/changeset/71dbc3556ee6/
changeset: 71dbc3556ee6
user: jmchilton
date: 2012-11-28 18:24:20
summary: Fix for "View Details" display of jobs run with multiple input data parameters.
affected #: 1 file
diff -r df2eb3960fd92e7877bdd31cf5368cb062a9471c -r 71dbc3556ee69959d35bae887e7b783561d779f6 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1600,8 +1600,10 @@
return value.file_name
def value_to_display_text( self, value, app ):
+ if not isinstance(value, list):
+ value = [ value ]
if value:
- return "%s: %s" % ( value.hid, value.name )
+ return ", ".join( [ "%s: %s" % ( item.hid, item.name ) for item in value ] )
else:
return "No dataset"
https://bitbucket.org/galaxy/galaxy-central/changeset/caa480e454b3/
changeset: caa480e454b3
user: jmchilton
date: 2012-12-03 20:10:30
summary: Fix for this issue: http://dev.list.galaxyproject.org/workflow-input-param-issue-td4657311.html. The reporter of the bug, Marc Logghe, verified that this fixes the immediate problem caused by my previous pull request. The underlying issues with workflows when parameter names are the same still appear to be a problem, but that is outside the scope of this pull request.
affected #: 2 files
diff -r 71dbc3556ee69959d35bae887e7b783561d779f6 -r caa480e454b377a94a5b1ebc545c253b0594643e templates/workflow/display.mako
--- a/templates/workflow/display.mako
+++ b/templates/workflow/display.mako
@@ -45,9 +45,12 @@
%if isinstance( param, DataToolParameter ):
%if ( prefix + param.name ) in step.input_connections_by_name:
<%
- conn = step.input_connections_by_name[ prefix + param.name ]
+ conns = step.input_connections_by_name[ prefix + param.name ]
+ if not isinstance(conns, list):
+ conns = [conns]
+ vals = ["Output dataset '%s' from step %d" % (conn.output_name, int(conn.output_step.order_index)+1) for conn in conns]
%>
- Output dataset '${conn.output_name}' from step ${int(conn.output_step.order_index)+1}
+ ${",".join(vals)}
%else:
<i>select at runtime</i>
%endif
diff -r 71dbc3556ee69959d35bae887e7b783561d779f6 -r caa480e454b377a94a5b1ebc545c253b0594643e templates/workflow/run.mako
--- a/templates/workflow/run.mako
+++ b/templates/workflow/run.mako
@@ -243,9 +243,12 @@
%if isinstance( param, DataToolParameter ):
%if ( prefix + param.name ) in step.input_connections_by_name:
<%
- conn = step.input_connections_by_name[ prefix + param.name ]
+ conns = step.input_connections_by_name[ prefix + param.name ]
+ if not isinstance(conns, list):
+ conns = [conns]
+ vals = ["Output dataset '%s' from step %d" % (conn.output_name, int(conn.output_step.order_index)+1) for conn in conns]
%>
- Output dataset '${conn.output_name}' from step ${int(conn.output_step.order_index)+1}
+ ${",".join(vals)}
%else:
## FIXME: Initialize in the controller
<%
https://bitbucket.org/galaxy/galaxy-central/changeset/1ac27213bafb/
changeset: 1ac27213bafb
user: dannon
date: 2012-12-03 23:19:52
summary: Merged in jmchilton/galaxy-central-multi-input-tool-fixes-2 (pull request #85)
affected #: 5 files
diff -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2256,12 +2256,11 @@
current = values["__current_case__"]
wrap_values( input.cases[current].inputs, values )
elif isinstance( input, DataToolParameter ) and input.multiple:
- values = input_values[ input.name ]
input_values[ input.name ] = \
- [DatasetFilenameWrapper( value,
- datatypes_registry = self.app.datatypes_registry,
- tool = self,
- name = input.name ) for value in values]
+ DatasetListWrapper( input_values[ input.name ],
+ datatypes_registry = self.app.datatypes_registry,
+ tool = self,
+ name = input.name )
elif isinstance( input, DataToolParameter ):
## FIXME: We're populating param_dict with conversions when
## wrapping values, this should happen as a separate
@@ -2333,10 +2332,13 @@
# but this should be considered DEPRECATED, instead use:
# $dataset.get_child( 'name' ).filename
for name, data in input_datasets.items():
- param_dict[name] = DatasetFilenameWrapper( data,
- datatypes_registry = self.app.datatypes_registry,
- tool = self,
- name = name )
+ param_dict_value = param_dict.get(name, None)
+ if not isinstance(param_dict_value, (DatasetFilenameWrapper, DatasetListWrapper)):
+ param_dict[name] = DatasetFilenameWrapper( data,
+ datatypes_registry = self.app.datatypes_registry,
+ tool = self,
+ name = name )
+
if data:
for child in data.children:
param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child )
@@ -3102,7 +3104,16 @@
return getattr( self.dataset, key )
def __nonzero__( self ):
return bool( self.dataset )
-
+
+class DatasetListWrapper( list ):
+ """
+ """
+ def __init__( self, datasets, **kwargs ):
+ if not isinstance(datasets, list):
+ datasets = [datasets]
+ list.__init__( self, [DatasetFilenameWrapper(dataset, **kwargs) for dataset in datasets] )
+
+
def json_fix( val ):
if isinstance( val, list ):
return [ json_fix( v ) for v in val ]
diff -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -62,7 +62,11 @@
# If there are multiple inputs with the same name, they
# are stored as name1, name2, ...
for i, v in enumerate( value ):
- input_datasets[ prefix + input.name + str( i + 1 ) ] = process_dataset( v )
+ processed_dataset = process_dataset( v )
+ if i == 0:
+ # Allow copying metadata to output, first item will be source.
+ input_datasets[ prefix + input.name ] = processed_dataset
+ input_datasets[ prefix + input.name + str( i + 1 ) ] = processed_dataset
conversions = []
for conversion_name, conversion_extensions, conversion_datatypes in input.conversions:
new_data = process_dataset( input_datasets[ prefix + input.name + str( i + 1 ) ], conversion_datatypes )
@@ -145,6 +149,12 @@
values = input_values[ input.name ]
current = values[ "__current_case__" ]
wrap_values( input.cases[current].inputs, values, skip_missing_values = skip_missing_values )
+ elif isinstance( input, DataToolParameter ) and input.multiple:
+ input_values[ input.name ] = \
+ galaxy.tools.DatasetListWrapper( input_values[ input.name ],
+ datatypes_registry = trans.app.datatypes_registry,
+ tool = tool,
+ name = input.name )
elif isinstance( input, DataToolParameter ):
input_values[ input.name ] = \
galaxy.tools.DatasetFilenameWrapper( input_values[ input.name ],
diff -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1551,6 +1551,8 @@
raise ValueError( "History does not include a dataset of the required format / build" )
if value in [None, "None"]:
return None
+ if isinstance( value, str ) and value.find( "," ) > 0:
+ value = [ int( value_part ) for value_part in value.split( "," ) ]
if isinstance( value, list ):
rval = [ trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( v ) for v in value ]
elif isinstance( value, trans.app.model.HistoryDatasetAssociation ):
@@ -1598,8 +1600,10 @@
return value.file_name
def value_to_display_text( self, value, app ):
+ if not isinstance(value, list):
+ value = [ value ]
if value:
- return "%s: %s" % ( value.hid, value.name )
+ return ", ".join( [ "%s: %s" % ( item.hid, item.name ) for item in value ] )
else:
return "No dataset"
diff -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad templates/workflow/display.mako
--- a/templates/workflow/display.mako
+++ b/templates/workflow/display.mako
@@ -45,9 +45,12 @@
%if isinstance( param, DataToolParameter ):
%if ( prefix + param.name ) in step.input_connections_by_name:
<%
- conn = step.input_connections_by_name[ prefix + param.name ]
+ conns = step.input_connections_by_name[ prefix + param.name ]
+ if not isinstance(conns, list):
+ conns = [conns]
+ vals = ["Output dataset '%s' from step %d" % (conn.output_name, int(conn.output_step.order_index)+1) for conn in conns]
%>
- Output dataset '${conn.output_name}' from step ${int(conn.output_step.order_index)+1}
+ ${",".join(vals)}
%else:
<i>select at runtime</i>
%endif
diff -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad templates/workflow/run.mako
--- a/templates/workflow/run.mako
+++ b/templates/workflow/run.mako
@@ -243,9 +243,12 @@
%if isinstance( param, DataToolParameter ):
%if ( prefix + param.name ) in step.input_connections_by_name:
<%
- conn = step.input_connections_by_name[ prefix + param.name ]
+ conns = step.input_connections_by_name[ prefix + param.name ]
+ if not isinstance(conns, list):
+ conns = [conns]
+ vals = ["Output dataset '%s' from step %d" % (conn.output_name, int(conn.output_step.order_index)+1) for conn in conns]
%>
- Output dataset '${conn.output_name}' from step ${int(conn.output_step.order_index)+1}
+ ${",".join(vals)}
%else:
## FIXME: Initialize in the controller
<%
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: inithello: Added tool shed functional test script for repository dependencies.
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/8a3f874b8e0a/
changeset: 8a3f874b8e0a
user: inithello
date: 2012-12-03 20:09:23
summary: Added tool shed functional test script for repository dependencies.
affected #: 6 files
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/base/twilltestcase.py
--- a/test/tool_shed/base/twilltestcase.py
+++ b/test/tool_shed/base/twilltestcase.py
@@ -1,6 +1,7 @@
from base.twilltestcase import *
from galaxy.webapps.community.util.hgweb_config import *
from test_db_util import *
+import string
from galaxy import eggs
eggs.require('mercurial')
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/functional/test_0000_basic_repository_features.py
--- a/test/tool_shed/functional/test_0000_basic_repository_features.py
+++ b/test/tool_shed/functional/test_0000_basic_repository_features.py
@@ -6,10 +6,10 @@
admin_email = 'test@bx.psu.edu'
admin_username = 'admin-user'
-regular_user = None
-regular_user_private_role = None
-regular_email = 'test-1@bx.psu.edu'
-regular_username = 'user1'
+test_user_1 = None
+test_user_1_private_role = None
+test_user_1_email = 'test-1@bx.psu.edu'
+test_user_1_name = 'user1'
repository_name = 'filtering'
repository_description = "Galaxy's filtering tool"
@@ -19,10 +19,10 @@
def test_0000_initiate_users( self ):
"""Create necessary user accounts and login as an admin user."""
- self.login( email=regular_email, username=regular_username )
- regular_user = get_user( regular_email )
- assert regular_user is not None, 'Problem retrieving user with email %s from the database' % regular_email
- regular_user_private_role = get_private_role( regular_user )
+ self.login( email=test_user_1_email, username=test_user_1_name )
+ test_user_1 = get_user( test_user_1_email )
+ assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % test_user_1_email
+ test_user_1_private_role = get_private_role( test_user_1 )
self.logout()
self.login( email=admin_email, username=admin_username )
admin_user = get_user( admin_email )
@@ -55,8 +55,8 @@
def test_0025_grant_write_access( self ):
'''Grant write access to another user'''
repository = get_repository_by_name_and_owner( repository_name, admin_username )
- self.grant_write_access( repository, usernames=[ regular_username ] )
- self.revoke_write_access( repository, regular_username )
+ self.grant_write_access( repository, usernames=[ test_user_1_name ] )
+ self.revoke_write_access( repository, test_user_1_name )
def test_0030_upload_filtering_1_1_0( self ):
"""Upload filtering_1.1.0.tar to the repository"""
repository = get_repository_by_name_and_owner( repository_name, admin_username )
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
--- a/test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
+++ b/test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
@@ -6,10 +6,10 @@
admin_email = 'test@bx.psu.edu'
admin_username = 'admin-user'
-regular_user = None
-regular_user_private_role = None
-regular_email = 'test-1@bx.psu.edu'
-regular_username = 'user1'
+test_user_1 = None
+test_user_1_private_role = None
+test_user_1_email = 'test-1@bx.psu.edu'
+test_user_1_name = 'user1'
repository_name = 'freebayes'
repository_description = "Galaxy's freebayes tool"
@@ -18,17 +18,21 @@
class TestFreebayesRepository( ShedTwillTestCase ):
'''Testing freebayes with tool data table entries, .loc files, and tool dependencies.'''
def test_0000_create_or_login_admin_user( self ):
+ """Create necessary user accounts and login as an admin user."""
self.logout()
self.login( email=admin_email, username=admin_username )
admin_user = get_user( admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
admin_user_private_role = get_private_role( admin_user )
+ def test_0005_create_categories( self ):
+ """Create categories"""
+ self.create_category( 'SNP Analysis', 'Tools for single nucleotide polymorphism data such as WGA' )
def test_0005_create_freebayes_repository_and_upload_tool_xml( self ):
'''Upload freebayes.xml without tool_data_table_conf.xml.sample. This should result in an error and invalid tool.'''
self.create_repository( repository_name,
repository_description,
repository_long_description=repository_long_description,
- categories=[ 'Text Manipulation' ],
+ categories=[ 'SNP Analysis' ],
strings_displayed=[] )
repository = get_repository_by_name_and_owner( repository_name, admin_username )
self.upload_file( repository,
@@ -36,7 +40,7 @@
valid_tools_only=False,
strings_displayed=[ 'Metadata was defined', 'This file requires an entry', 'tool_data_table_conf' ],
commit_message='Uploaded the tool xml.' )
- self.display_manage_repository_page( repository, strings_not_displayed=[ 'Valid tools' ] )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'Invalid tools' ], strings_not_displayed=[ 'Valid tools' ] )
tip = self.get_repository_tip( repository )
self.check_repository_invalid_tools_for_changeset_revision( repository,
tip,
@@ -50,6 +54,8 @@
commit_message='Uploaded the tool data table sample file.' )
def test_0015_upload_missing_sample_loc_file( self ):
'''Upload the missing sam_fa_indices.loc.sample file to the repository.'''
+ # Freebayes does not generate an error when the loc file is missing.
+ # TODO: Generate a test case for that situation.
repository = get_repository_by_name_and_owner( repository_name, admin_username )
self.upload_file( repository,
'freebayes/sam_fa_indices.loc.sample',
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/functional/test_0020_emboss_repository_dependencies.py
--- /dev/null
+++ b/test/tool_shed/functional/test_0020_emboss_repository_dependencies.py
@@ -0,0 +1,95 @@
+from tool_shed.base.twilltestcase import *
+from tool_shed.base.test_db_util import *
+
+admin_user = None
+admin_user_private_role = None
+admin_email = 'test@bx.psu.edu'
+admin_username = 'admin-user'
+
+test_user_1 = None
+test_user_1_private_role = None
+test_user_1_email = 'test-1@bx.psu.edu'
+test_user_1_name = 'user1'
+
+datatypes_repository_name = 'emboss_datatypes'
+datatypes_repository_description = "Galaxy applicable data formats used by Emboss tools."
+datatypes_repository_long_description = "Galaxy applicable data formats used by Emboss tools. This repository contains no tools."
+
+emboss_repository_name = 'emboss_5'
+emboss_repository_description = 'Galaxy wrappers for Emboss version 5.0.0 tools'
+emboss_repository_long_description = 'Galaxy wrappers for Emboss version 5.0.0 tools'
+
+new_repository_dependencies_xml = '''<?xml version="1.0"?>
+<repositories>
+ <repository toolshed="${toolshed_url}" name="${repository_name}" owner="${owner}" changeset_revision="${changeset_revision}" />
+</repositories>
+'''
+
+class TestEmbossRepositoryDependencies( ShedTwillTestCase ):
+ '''Testing emboss 5 with repository dependencies.'''
+ def test_0000_initiate_users( self ):
+ """Create necessary user accounts and login as an admin user."""
+ self.logout()
+ self.login( email=admin_email, username=admin_username )
+ admin_user = get_user( admin_email )
+ assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
+ admin_user_private_role = get_private_role( admin_user )
+ self.logout()
+ self.login( email=test_user_1_email, username=test_user_1_name )
+ test_user_1 = get_user( test_user_1_email )
+ assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % test_user_1_email
+ test_user_1_private_role = get_private_role( test_user_1 )
+ def test_0005_create_categories( self ):
+ """Create categories"""
+ self.logout()
+ self.login( email=admin_email, username=admin_username )
+ self.create_category( 'Sequence Analysis', 'Tools for performing Protein and DNA/RNA analysis' )
+ def test_0010_create_emboss_datatypes_repository_and_upload_tarball( self ):
+ '''Create the emboss_datatypes repository and upload the tarball.'''
+ self.logout()
+ self.login( email=test_user_1_email, username=test_user_1_name )
+ self.create_repository( datatypes_repository_name,
+ datatypes_repository_description,
+ repository_long_description=datatypes_repository_long_description,
+ categories=[ 'Sequence Analysis' ],
+ strings_displayed=[] )
+ repository = get_repository_by_name_and_owner( datatypes_repository_name, test_user_1_name )
+ self.upload_file( repository,
+ 'emboss_5/datatypes_conf.xml',
+ commit_message='Uploaded datatypes_conf.xml.' )
+ def test_0015_verify_datatypes_in_datatypes_repository( self ):
+ '''Verify that the emboss_datatypes repository contains datatype entries.'''
+ repository = get_repository_by_name_and_owner( datatypes_repository_name, test_user_1_name )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'Datatypes', 'equicktandem', 'hennig86', 'vectorstrip' ] )
+ def test_0020_generate_repository_dependencies_xml( self ):
+ '''Generate the repository_dependencies.xml file for the emboss_5 repository.'''
+ datatypes_repository = get_repository_by_name_and_owner( datatypes_repository_name, test_user_1_name )
+ changeset_revision = self.get_repository_tip( datatypes_repository )
+ template_parser = string.Template( new_repository_dependencies_xml )
+ repository_dependency_xml = template_parser.safe_substitute( toolshed_url=self.url,
+ owner=test_user_1_name,
+ repository_name=datatypes_repository.name,
+ changeset_revision=changeset_revision )
+ # Save the generated xml to test-data/emboss_5/repository_dependencies.xml.
+ file( self.get_filename( 'emboss_5/repository_dependencies.xml' ), 'w' ).write( repository_dependency_xml )
+ def test_0025_create_emboss_5_repository_and_upload_files( self ):
+ '''Create the emboss_5 repository and upload a tool tarball, then generate and upload repository_dependencies.xml.'''
+ self.create_repository( emboss_repository_name,
+ emboss_repository_description,
+ repository_long_description=emboss_repository_long_description,
+ categories=[ 'Text Manipulation' ],
+ strings_displayed=[] )
+ repository = get_repository_by_name_and_owner( emboss_repository_name, test_user_1_name )
+ self.upload_file( repository, 'emboss_5/emboss_5.tar', commit_message='Uploaded emboss_5.tar' )
+ self.upload_file( repository, 'emboss_5/repository_dependencies.xml', commit_message='Uploaded repository_dependencies.xml' )
+ def test_0030_verify_emboss_5_repository_dependency_on_emboss_datatypes( self ):
+ '''Verify that the emboss_5 repository now depends on the emboss_datatypes repository with correct name, owner, and changeset revision.'''
+ repository = get_repository_by_name_and_owner( emboss_repository_name, test_user_1_name )
+ datatypes_repository = get_repository_by_name_and_owner( datatypes_repository_name, test_user_1_name )
+ changeset_revision = self.get_repository_tip( datatypes_repository )
+ strings_displayed = [ datatypes_repository_name, test_user_1_name, changeset_revision, 'Repository dependencies' ]
+ self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
+ def test_0035_cleanup( self ):
+ '''Clean up generated test data.'''
+ if os.path.exists( self.get_filename( 'emboss_5/repository_dependencies.xml' ) ):
+ os.remove( self.get_filename( 'emboss_5/repository_dependencies.xml' ) )
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/test_data/emboss_5/datatypes_conf.xml
--- /dev/null
+++ b/test/tool_shed/test_data/emboss_5/datatypes_conf.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0"?>
+<datatypes>
+ <registration>
+ <datatype extension="acedb" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="asn1" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="btwisted" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="cai" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="charge" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="checktrans" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="chips" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="clustal" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="codata" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="codcmp" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="coderet" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="compseq" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="cpgplot" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="cpgreport" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="cusp" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="cut" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="dan" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="dbmotif" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="diffseq" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="digest" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="dreg" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="einverted" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="embl" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="epestfind" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="equicktandem" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="est2genome" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="etandem" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="excel" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="feattable" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="fitch" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="freak" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="fuzznuc" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="fuzzpro" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="fuzztran" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="garnier" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="gcg" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="geecee" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="genbank" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="helixturnhelix" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="hennig86" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="hmoment" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="ig" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="isochore" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="jackknifer" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="jackknifernon" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="markx0" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="markx1" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="markx10" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="markx2" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="markx3" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="match" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="mega" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="meganon" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="motif" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="msf" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="nametable" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="ncbi" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="needle" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="newcpgreport" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="newcpgseek" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="nexus" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="nexusnon" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="noreturn" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="pair" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="palindrome" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="pepcoil" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="pepinfo" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="pepstats" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="phylip" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="phylipnon" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="pir" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="polydot" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="preg" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="prettyseq" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="primersearch" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="regions" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="score" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="selex" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="seqtable" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="showfeat" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="showorf" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="simple" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="sixpack" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="srs" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="srspair" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="staden" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="strider" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="supermatcher" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="swiss" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="syco" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="table" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="tagseq" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="textsearch" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="vectorstrip" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="wobble" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="wordcount" type="galaxy.datatypes.data:Text" subclass="True"/>
+ </registration>
+</datatypes>
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/test_data/emboss_5/emboss_5.tar
Binary file test/tool_shed/test_data/emboss_5/emboss_5.tar has changed
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
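A runnable sketch of how the new emboss test above generates repository_dependencies.xml with string.Template; the toolshed URL, owner, and changeset revision are hypothetical values:

import string

xml_template = '''<?xml version="1.0"?>
<repositories>
    <repository toolshed="${toolshed_url}" name="${repository_name}" owner="${owner}" changeset_revision="${changeset_revision}" />
</repositories>
'''

# safe_substitute leaves any unmatched ${...} placeholders intact instead of raising.
template_parser = string.Template(xml_template)
print template_parser.safe_substitute(toolshed_url='http://localhost:9009',
                                      repository_name='emboss_datatypes',
                                      owner='user1',
                                      changeset_revision='0123456789ab')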
commit/galaxy-central: james_taylor: History: Restore underline to history item titles
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/8510ddb2a507/
changeset: 8510ddb2a507
user: james_taylor
date: 2012-12-03 19:22:18
summary: History: Restore underline to history item titles
affected #: 1 file
diff -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b -r 8510ddb2a507285890f95af5d2a8d88606d99d47 templates/root/alternate_history.mako
--- a/templates/root/alternate_history.mako
+++ b/templates/root/alternate_history.mako
@@ -454,7 +454,7 @@
}
.historyItemTitle {
- text-decoration: none;
+ text-decoration: underline;
cursor: pointer;
-webkit-user-select: none;
-moz-user-select: none;
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: inithello: Tool shed functional tests for repository with tool dependencies.
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/83e46d828ddc/
changeset: 83e46d828ddc
user: inithello
date: 2012-12-03 17:02:22
summary: Tool shed functional tests for repository with tool dependencies.
affected #: 6 files
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
--- a/test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
+++ b/test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
@@ -15,5 +15,62 @@
repository_description = "Galaxy's freebayes tool"
repository_long_description = "Long description of Galaxy's freebayes tool"
-class TestRepositoryWithToolDependencies( ShedTwillTestCase ):
- pass
\ No newline at end of file
+class TestFreebayesRepository( ShedTwillTestCase ):
+ '''Testing freebayes with tool data table entries, .loc files, and tool dependencies.'''
+ def test_0000_create_or_login_admin_user( self ):
+ self.logout()
+ self.login( email=admin_email, username=admin_username )
+ admin_user = get_user( admin_email )
+ assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
+ admin_user_private_role = get_private_role( admin_user )
+ def test_0005_create_freebayes_repository_and_upload_tool_xml( self ):
+ '''Upload freebayes.xml without tool_data_table_conf.xml.sample. This should result in an error and invalid tool.'''
+ self.create_repository( repository_name,
+ repository_description,
+ repository_long_description=repository_long_description,
+ categories=[ 'Text Manipulation' ],
+ strings_displayed=[] )
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.upload_file( repository,
+ 'freebayes/freebayes.xml',
+ valid_tools_only=False,
+ strings_displayed=[ 'Metadata was defined', 'This file requires an entry', 'tool_data_table_conf' ],
+ commit_message='Uploaded the tool xml.' )
+ self.display_manage_repository_page( repository, strings_not_displayed=[ 'Valid tools' ] )
+ tip = self.get_repository_tip( repository )
+ self.check_repository_invalid_tools_for_changeset_revision( repository,
+ tip,
+ strings_displayed=[ 'requires an entry', 'tool_data_table_conf.xml' ] )
+ def test_0010_upload_missing_tool_data_table_conf_file( self ):
+ '''Upload the missing tool_data_table_conf.xml.sample file to the repository.'''
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.upload_file( repository,
+ 'freebayes/tool_data_table_conf.xml.sample',
+ strings_displayed=[],
+ commit_message='Uploaded the tool data table sample file.' )
+ def test_0015_upload_missing_sample_loc_file( self ):
+ '''Upload the missing sam_fa_indices.loc.sample file to the repository.'''
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.upload_file( repository,
+ 'freebayes/sam_fa_indices.loc.sample',
+ strings_displayed=[],
+ commit_message='Uploaded tool data table .loc file.' )
+ def test_0020_upload_invalid_tool_dependency_xml( self ):
+ '''Upload tool_dependencies.xml defining version 0.9.5 of the freebayes package.'''
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.upload_file( repository,
+ os.path.join( 'freebayes', 'invalid_deps', 'tool_dependencies.xml' ),
+ strings_displayed=[ 'Name, version and type from a tool requirement tag does not match' ],
+ commit_message='Uploaded invalid tool dependency XML.' )
+ def test_0025_upload_valid_tool_dependency_xml( self ):
+ '''Upload tool_dependencies.xml defining version 0.9.4_9696d0ce8a962f7bb61c4791be5ce44312b81cf8 of the freebayes package.'''
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.upload_file( repository,
+ os.path.join( 'freebayes', 'tool_dependencies.xml' ),
+ commit_message='Uploaded valid tool dependency XML.' )
+ def test_0030_verify_tool_dependencies( self ):
+ '''Verify that the uploaded tool_dependencies.xml specifies the correct package versions.'''
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.display_manage_repository_page( repository,
+ strings_displayed=[ 'freebayes', '0.9.4_9696d0ce8a9', 'samtools', '0.1.18', 'Valid tools' ],
+ strings_not_displayed=[ 'Invalid tools' ] )
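For reference, the invalid-tool assertions used in test_0005 reduce to the loop below: a minimal sketch of the check_repository_invalid_tools_for_changeset_revision helper as refactored in changeset 12f55a688c82 (later in this digest). The metadata accessor and load_invalid_tool_page are the real helpers from test/tool_shed/base/twilltestcase.py; the standalone function wrapper is illustrative.

# Sketch: how the harness verifies error messages for invalid tools in a
# given changeset revision (mirrors twilltestcase.py in this digest).
def check_invalid_tools( test_case, repository, changeset_revision,
                         strings_displayed=[], strings_not_displayed=[] ):
    repository_metadata = get_repository_metadata_by_repository_id_changeset_revision(
        repository.id, changeset_revision )
    metadata = repository_metadata.metadata
    if 'invalid_tools' not in metadata:
        return  # nothing was flagged invalid in this revision
    for tool_xml in metadata[ 'invalid_tools' ]:
        # Each invalid tool gets its own page; assert the expected error text appears.
        test_case.load_invalid_tool_page( repository,
                                          tool_xml=tool_xml,
                                          changeset_revision=changeset_revision,
                                          strings_displayed=strings_displayed,
                                          strings_not_displayed=strings_not_displayed )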
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/test_data/freebayes/freebayes.xml
--- /dev/null
+++ b/test/tool_shed/test_data/freebayes/freebayes.xml
@@ -0,0 +1,669 @@
+<?xml version="1.0"?>
+<tool id="freebayes" name="FreeBayes" version="0.0.2">
+ <requirements>
+ <requirement type="package" version="0.9.4_9696d0ce8a962f7bb61c4791be5ce44312b81cf8">freebayes</requirement>
+ <requirement type="package" version="0.1.18">samtools</requirement>
+ </requirements>
+ <description> - Bayesian genetic variant detector</description>
+ <command>
+ ##set up input files
+ #set $reference_fasta_filename = "localref.fa"
+ #if str( $reference_source.reference_source_selector ) == "history":
+ ln -s "${reference_source.ref_file}" "${reference_fasta_filename}" &&
+ samtools faidx "${reference_fasta_filename}" 2>&1 || echo "Error running samtools faidx for FreeBayes" >&2 &&
+ #else:
+ #set $reference_fasta_filename = str( $reference_source.ref_file.fields.path )
+ #end if
+ #for $bam_count, $input_bam in enumerate( $reference_source.input_bams ):
+ ln -s "${input_bam.input_bam}" "localbam_${bam_count}.bam" &&
+ ln -s "${input_bam.input_bam.metadata.bam_index}" "localbam_${bam_count}.bam.bai" &&
+ #end for
+ ##finished setting up inputs
+
+ ##start FreeBayes commandline
+ freebayes
+ #for $bam_count, $input_bam in enumerate( $reference_source.input_bams ):
+ --bam "localbam_${bam_count}.bam"
+ #end for
+ --fasta-reference "${reference_fasta_filename}"
+
+ ##outputs
+ --vcf "${output_vcf}"
+
+ ##advanced options
+ #if str( $options_type.options_type_selector ) == "advanced":
+ ##additional outputs
+ #if $options_type.output_trace_option:
+ --trace "${output_trace}"
+ #end if
+ #if $options_type.output_failed_alleles_option:
+ --failed-alleles "${output_failed_alleles_bed}"
+ #end if
+
+ ##additional inputs
+ #if str( $options_type.target_limit_type.target_limit_type_selector ) == "limit_by_target_file":
+ --targets "${options_type.target_limit_type.input_target_bed}"
+ #elif str( $options_type.target_limit_type.target_limit_type_selector ) == "limit_by_region":
+ --region "${options_type.target_limit_type.region_chromosome}:${options_type.target_limit_type.region_start}..${options_type.target_limit_type.region_end}"
+ #end if
+ #if $options_type.input_sample_file:
+ --samples "${options_type.input_sample_file}"
+ #end if
+ #if $options_type.input_populations_file:
+ --populations "${options_type.input_populations_file}"
+ #end if
+ #if $options_type.input_cnv_map_bed:
+ --cnv-map "${options_type.input_cnv_map_bed}"
+ #end if
+ #if str( $options_type.input_variant_type.input_variant_type_selector ) == "provide_vcf":
+ --variant-input "${options_type.input_variant_type.input_variant_vcf}"
+ ${options_type.input_variant_type.only_use_input_alleles}
+ #end if
+
+ ##reporting
+ #if str( $options_type.section_reporting_type.section_reporting_type_selector ) == "set":
+ --pvar "${options_type.section_reporting_type.pvar}"
+ ${options_type.section_reporting_type.show_reference_repeats}
+ #end if
+
+ ##population model
+ #if str( $options_type.section_population_model_type.section_population_model_type_selector ) == "set":
+ --theta "${options_type.section_population_model_type.theta}"
+ --ploidy "${options_type.section_population_model_type.ploidy}"
+ ${options_type.section_population_model_type.pooled}
+ #end if
+
+ ##reference allele
+ #if str( $options_type.use_reference_allele_type.use_reference_allele_type_selector ) == "include_reference_allele":
+ --use-reference-allele
+ ${options_type.use_reference_allele_type.diploid_reference}
+ --reference-quality "${options_type.use_reference_allele_type.reference_quality_mq},${options_type.use_reference_allele_type.reference_quality_bq}"
+ #end if
+
+ ##allele scope
+ #if str( $options_type.section_allele_scope_type.section_allele_scope_type_selector ) == "set":
+ ${options_type.section_allele_scope_type.no_snps}
+ ${options_type.section_allele_scope_type.no_indels}
+ ${options_type.section_allele_scope_type.no_mnps}
+ ${options_type.section_allele_scope_type.no_complex}
+ --use-best-n-alleles "${options_type.section_allele_scope_type.use_best_n_alleles}"
+ #if $options_type.section_allele_scope_type.max_complex_gap:
+ --max-complex-gap "${options_type.section_allele_scope_type.max_complex_gap}"
+ #end if
+ #end if
+
+ ##indel realignment
+ ${options_type.left_align_indels}
+
+ ##input filters
+ #if str( $options_type.section_input_filters_type.section_input_filters_type_selector ) == "set":
+ ${options_type.section_input_filters_type.use_duplicate_reads}
+ #if str( $options_type.section_input_filters_type.no_filter_type.no_filter_type_selector ) == "apply_filters":
+ --min-mapping-quality "${options_type.section_input_filters_type.no_filter_type.min_mapping_quality}"
+ --min-base-quality "${options_type.section_input_filters_type.no_filter_type.min_base_quality}"
+ --min-supporting-quality "${options_type.section_input_filters_type.no_filter_type.min_supporting_quality_mq},${options_type.section_input_filters_type.no_filter_type.min_supporting_quality_bq}"
+ #else:
+ --no-filters
+ #end if
+ --mismatch-base-quality-threshold "${options_type.section_input_filters_type.mismatch_base_quality_threshold}"
+ #if $options_type.section_input_filters_type.read_mismatch_limit:
+ --read-mismatch-limit "${options_type.section_input_filters_type.read_mismatch_limit}"
+ #end if
+ --read-max-mismatch-fraction "${options_type.section_input_filters_type.read_max_mismatch_fraction}"
+ #if $options_type.section_input_filters_type.read_snp_limit:
+ --read-snp-limit "${options_type.section_input_filters_type.read_snp_limit}"
+ #end if
+ #if $options_type.section_input_filters_type.read_indel_limit:
+ --read-indel-limit "${options_type.section_input_filters_type.read_indel_limit}"
+ #end if
+ --indel-exclusion-window "${options_type.section_input_filters_type.indel_exclusion_window}"
+ --min-alternate-fraction "${options_type.section_input_filters_type.min_alternate_fraction}"
+ --min-alternate-count "${options_type.section_input_filters_type.min_alternate_count}"
+ --min-alternate-qsum "${options_type.section_input_filters_type.min_alternate_qsum}"
+ --min-alternate-total "${options_type.section_input_filters_type.min_alternate_total}"
+ --min-coverage "${options_type.section_input_filters_type.min_coverage}"
+ #end if
+
+ ##bayesian priors
+ #if str( $options_type.section_bayesian_priors_type.section_bayesian_priors_type_selector ) == "set":
+ ${options_type.section_bayesian_priors_type.no_ewens_priors}
+ ${options_type.section_bayesian_priors_type.no_population_priors}
+ ${options_type.section_bayesian_priors_type.hwe_priors}
+ #end if
+
+ ##observation prior expectations
+ #if str( $options_type.section_observation_prior_expectations_type.section_observation_prior_expectations_type_selector ) == "set":
+ ${options_type.section_observation_prior_expectations_type.binomial_obs_priors}
+ ${options_type.section_observation_prior_expectations_type.allele_balance_priors}
+ #end if
+
+ ##algorithmic features
+ #if str( $options_type.section_algorithmic_features_type.section_algorithmic_features_type_selector ) == "set":
+ --site-selection-max-iterations "${options_type.section_algorithmic_features_type.site_selection_max_iterations}"
+ --genotyping-max-iterations "${options_type.section_algorithmic_features_type.genotyping_max_iterations}"
+ --genotyping-max-banddepth "${options_type.section_algorithmic_features_type.genotyping_max_banddepth}"
+ --posterior-integration-limits "${options_type.section_algorithmic_features_type.posterior_integration_limits_n},${options_type.section_algorithmic_features_type.posterior_integration_limits_m}"
+ ${options_type.section_algorithmic_features_type.no_permute}
+ ${options_type.section_algorithmic_features_type.exclude_unobserved_genotypes}
+ #if $options_type.section_algorithmic_features_type.genotype_variant_threshold:
+ --genotype-variant-threshold "${options_type.section_algorithmic_features_type.genotype_variant_threshold}"
+ #end if
+ ${options_type.section_algorithmic_features_type.use_mapping_quality}
+ --read-dependence-factor "${options_type.section_algorithmic_features_type.read_dependence_factor}"
+ ${options_type.section_algorithmic_features_type.no_marginals}
+ #end if
+
+ #end if
+ </command>
+ <inputs>
+ <conditional name="reference_source">
+ <param name="reference_source_selector" type="select" label="Choose the source for the reference list">
+ <option value="cached">Locally cached</option>
+ <option value="history">History</option>
+ </param>
+ <when value="cached">
+ <repeat name="input_bams" title="Sample BAM file" min="1">
+ <param name="input_bam" type="data" format="bam" label="BAM file">
+ <validator type="unspecified_build" />
+ </param>
+ </repeat>
+ <param name="ref_file" type="select" label="Using reference genome">
+ <options from_data_table="sam_fa_indexes">
+ <!-- <filter type="sam_fa_indexes" key="dbkey" ref="input_bam" column="value"/> does not yet work in a repeat...-->
+ </options>
+ <validator type="no_options" message="A built-in reference genome is not available for the build associated with the selected input file"/>
+ </param>
+ </when>
+ <when value="history"><!-- FIX ME!!!! -->
+ <repeat name="input_bams" title="Sample BAM file" min="1">
+ <param name="input_bam" type="data" format="bam" label="BAM file" />
+ </repeat>
+ <param name="ref_file" type="data" format="fasta" label="Using reference file" />
+ </when>
+ </conditional>
+
+ <conditional name="options_type">
+ <param name="options_type_selector" type="select" label="Basic or Advanced options">
+ <option value="basic" selected="True">Basic</option>
+ <option value="advanced">Advanced</option>
+ </param>
+ <when value="basic">
+ <!-- Do nothing here -->
+ </when>
+ <when value="advanced">
+
+ <!-- output -->
+ <param name="output_failed_alleles_option" type="boolean" truevalue="--failed-alleles" falsevalue="" checked="False" label="Write out failed alleles file" />
+ <param name="output_trace_option" type="boolean" truevalue="--trace" falsevalue="" checked="False" label="Write out algorithm trace file" />
+
+
+ <!-- input -->
+ <conditional name="target_limit_type">
+ <param name="target_limit_type_selector" type="select" label="Limit analysis to listed targets">
+ <option value="do_not_limit" selected="True">Do not limit</option>
+ <option value="limit_by_target_file">Limit by target file</option>
+ <option value="limit_by_region">Limit to region</option>
+ </param>
+ <when value="do_not_limit">
+ <!-- Do nothing here -->
+ </when>
+ <when value="limit_by_target_file">
+ <param name="input_target_bed" type="data" format="bed" label="Limit analysis to targets listed in the BED-format FILE." />
+ </when>
+ <when value="limit_by_region">
+ <param name="region_chromosome" type="text" label="Region Chromosome" value="" /><!--only once? -->
+ <param name="region_start" type="integer" label="Region Start" value="" />
+ <param name="region_end" type="integer" label="Region End" value="" />
+ </when>
+ </conditional>
+ <param name="input_sample_file" type="data" format="txt" label="Limit analysis to samples listed (one per line) in the FILE" optional="True" />
+ <param name="input_populations_file" type="data" format="txt" label="Populations File" optional="True" />
+ <param name="input_cnv_map_bed" type="data" format="bed" label="Read a copy number map from the BED file FILE" optional="True" />
+ <conditional name="input_variant_type">
+ <param name="input_variant_type_selector" type="select" label="Provide variants file">
+ <option value="do_not_provide" selected="True">Do not provide</option>
+ <option value="provide_vcf">Provide VCF file</option>
+ </param>
+ <when value="do_not_provide">
+ <!-- Do nothing here -->
+ </when>
+ <when value="provide_vcf">
+ <param name="input_variant_vcf" type="data" format="vcf" label="Use variants reported in VCF file as input to the algorithm" />
+ <param name="only_use_input_alleles" type="boolean" truevalue="--only-use-input-alleles" falsevalue="" checked="False" label="Only provide variant calls and genotype likelihoods for sites in VCF" />
+ </when>
+ </conditional>
+
+
+ <!-- reporting -->
+ <conditional name="section_reporting_type">
+ <param name="section_reporting_type_selector" type="select" label="Set Reporting options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="pvar" type="float" label="Report sites if the probability that there is a polymorphism at the site is greater" value="0.0001" />
+ <param name="show_reference_repeats" type="boolean" truevalue="--show-reference-repeats" falsevalue="" checked="False" label="Calculate and show information about reference repeats" />
+ </when>
+ </conditional>
+
+
+ <!-- population model -->
+ <conditional name="section_population_model_type">
+ <param name="section_population_model_type_selector" type="select" label="Set population model options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="theta" type="float" label="expected mutation rate or pairwise nucleotide diversity among the population" value="0.001" help="This serves as the single parameter to the Ewens Sampling Formula prior model"/>
+ <param name="ploidy" type="integer" label="default ploidy for the analysis" value="2" />
+ <param name="pooled" type="boolean" truevalue="--pooled" falsevalue="" checked="False" label="Assume that samples result from pooled sequencing" help="When using this flag, set --ploidy to the number of alleles in each sample." />
+ </when>
+ </conditional>
+
+ <!-- reference allele -->
+ <conditional name="use_reference_allele_type">
+ <param name="use_reference_allele_type_selector" type="select" label="Include the reference allele in the analysis">
+ <option value="do_not_include_reference_allele" selected="True">Do not include</option>
+ <option value="include_reference_allele">Include</option>
+ </param>
+ <when value="do_not_include_reference_allele">
+ <!-- Do nothing here -->
+ </when>
+ <when value="include_reference_allele">
+ <param name="diploid_reference" type="boolean" truevalue="--diploid-reference" falsevalue="" checked="False" label="Treat reference as diploid" />
+ <param name="reference_quality_mq" type="integer" label="Assign mapping quality" value="100" />
+ <param name="reference_quality_bq" type="integer" label="Assign base quality" value="60" />
+ </when>
+ </conditional>
+
+ <!-- allele scope -->
+ <conditional name="section_allele_scope_type">
+ <param name="section_allele_scope_type_selector" type="select" label="Set allele scope options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="no_snps" type="boolean" truevalue="--no-snps" falsevalue="" checked="False" label="Ignore SNP alleles" />
+ <param name="no_indels" type="boolean" truevalue="--no-indels" falsevalue="" checked="False" label="Ignore insertion and deletion alleles" />
+ <param name="no_mnps" type="boolean" truevalue="--no-mnps" falsevalue="" checked="False" label="Ignore multi-nuceotide polymorphisms, MNPs" />
+ <param name="no_complex" type="boolean" truevalue="--no-complex" falsevalue="" checked="False" label="Ignore complex events (composites of other classes)" />
+ <param name="use_best_n_alleles" type="integer" label="Evaluate only the best N SNP alleles" value="0" min="0" help="Ranked by sum of supporting quality scores; Set to 0 to use all" />
+ <param name="max_complex_gap" type="integer" label="Allow complex alleles with contiguous embedded matches of up to this length" value="" optional="True"/>
+ </when>
+ </conditional>
+
+ <!-- indel realignment -->
+ <param name="left_align_indels" type="boolean" truevalue="--left-align-indels" falsevalue="" checked="False" label="Left-realign and merge gaps embedded in reads" />
+
+ <!-- input filters -->
+ <conditional name="section_input_filters_type">
+ <param name="section_input_filters_type_selector" type="select" label="Set input filters options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="use_duplicate_reads" type="boolean" truevalue="--use-duplicate-reads" falsevalue="" checked="False" label="Include duplicate-marked alignments in the analysis" />
+ <conditional name="no_filter_type">
+ <param name="no_filter_type_selector" type="select" label="Apply filters">
+ <option value="apply_filters" selected="True">Apply</option>
+ <option value="no_filters">Do not apply</option>
+ </param>
+ <when value="no_filters">
+ <!-- Do nothing here --><!-- no-filters -->
+ </when>
+ <when value="apply_filters">
+ <param name="min_mapping_quality" type="integer" label="Exclude alignments from analysis if they have a mapping quality less than" value="30" />
+ <param name="min_base_quality" type="integer" label="Exclude alleles from analysis if their supporting base quality less than" value="20" />
+ <param name="min_supporting_quality_mq" type="integer" label="In order to consider an alternate allele, at least one supporting alignment must have mapping quality" value="0" />
+ <param name="min_supporting_quality_bq" type="integer" label="In order to consider an alternate allele, at least one supporting alignment must have base quality" value="0" />
+ </when>
+ </conditional>
+ <param name="mismatch_base_quality_threshold" type="integer" label="Count mismatches toward read-mismatch-limit if the base quality of the mismatch is >=" value="10" />
+ <param name="read_mismatch_limit" type="integer" label="Exclude reads with more than N mismatches where each mismatch has base quality >= mismatch-base-quality-threshold" value="" optional="True" />
+ <param name="read_max_mismatch_fraction" type="float" label="Exclude reads with more than N [0,1] fraction of mismatches where each mismatch has base quality >= mismatch-base-quality-threshold" value="1.0" />
+ <param name="read_snp_limit" type="integer" label="Exclude reads with more than N base mismatches, ignoring gaps with quality >= mismatch-base-quality-threshold" value="" optional="True" />
+ <param name="read_indel_limit" type="integer" label="Exclude reads with more than N separate gaps" value="" optional="True" />
+ <param name="indel_exclusion_window" type="integer" label="Ignore portions of alignments this many bases from a putative insertion or deletion allele" value="0" />
+ <param name="min_alternate_fraction" type="float" label="Require at least this fraction of observations supporting an alternate allele within a single individual in the in order to evaluate the position" value="0" />
+ <param name="min_alternate_count" type="integer" label="Require at least this count of observations supporting an alternate allele within a single individual in order to evaluate the position" value="1" />
+ <param name="min_alternate_qsum" type="integer" label="Require at least this sum of quality of observations supporting an alternate allele within a single individual in order to evaluate the position" value="0" />
+ <param name="min_alternate_total" type="integer" label="Require at least this count of observations supporting an alternate allele within the total population in order to use the allele in analysis" value="1" />
+ <param name="min_coverage" type="integer" label="Require at least this coverage to process a site" value="0" />
+ </when>
+ </conditional>
+
+
+ <!-- bayesian priors -->
+ <conditional name="section_bayesian_priors_type">
+ <param name="section_bayesian_priors_type_selector" type="select" label="Set bayesian priors options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="no_ewens_priors" type="boolean" truevalue="--no-ewens-priors" falsevalue="" checked="False" label="Turns off the Ewens' Sampling Formula component of the priors" />
+ <param name="no_population_priors" type="boolean" truevalue="--no-population-priors" falsevalue="" checked="False" label="No population priors" help="Equivalent to --pooled --no-ewens-priors" />
+ <param name="hwe_priors" type="boolean" truevalue="--hwe-priors" falsevalue="" checked="False" label="Use the probability of the combination arising under HWE given the allele frequency as estimated by observation frequency" />
+ </when>
+ </conditional>
+
+ <!-- observation prior expectations -->
+ <conditional name="section_observation_prior_expectations_type">
+ <param name="section_observation_prior_expectations_type_selector" type="select" label="Set observation prior expectations options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="binomial_obs_priors" type="boolean" truevalue="--binomial-obs-priors" falsevalue="" checked="False" label="Incorporate expectations about osbervations into the priors, Uses read placement probability, strand balance probability, and read position (5'-3') probability" />
+ <param name="allele_balance_priors" type="boolean" truevalue="--allele-balance-priors" falsevalue="" checked="False" label="Use aggregate probability of observation balance between alleles as a component of the priors. Best for observations with minimal inherent reference bias" />
+ </when>
+ </conditional>
+
+
+ <!-- algorithmic features -->
+ <conditional name="section_algorithmic_features_type">
+ <param name="section_algorithmic_features_type_selector" type="select" label="Set algorithmic features options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="site_selection_max_iterations" type="integer" label="Uses hill-climbing algorithm to search posterior space for N iterations to determine if the site should be evaluated." value="5" help="Set to 0 to prevent use of this algorithm for site selection, and to a low integer for improvide site selection at a slight performance penalty" />
+ <param name="genotyping_max_iterations" type="integer" label="Iterate no more than N times during genotyping step" value="25" />
+ <param name="genotyping_max_banddepth" type="integer" label="Integrate no deeper than the Nth best genotype by likelihood when genotyping" value="6" />
+ <param name="posterior_integration_limits_n" type="integer" label="Posteriror integration limit N" help="Integrate all genotype combinations in our posterior space which include no more than N samples with their Mth best data likelihood." value="1" />
+ <param name="posterior_integration_limits_m" type="integer" label="Posteriror integration limit M" help="Integrate all genotype combinations in our posterior space which include no more than N samples with their Mth best data likelihood." value="3" />
+ <param name="no_permute" type="boolean" truevalue="--no-permute" falsevalue="" checked="False" label="Do not scale prior probability of genotype combination given allele frequency by the number of permutations of included genotypes" />
+ <param name="exclude_unobserved_genotypes" type="boolean" truevalue="--exclude-unobserved-genotypes" falsevalue="" checked="False" label="Skip sample genotypings for which the sample has no supporting reads" />
+ <param name="genotype_variant_threshold" type="integer" label="Limit posterior integration to samples where the second-best genotype likelihood is no more than log(N) from the highest genotype likelihood for the sample" value="" optional="True" />
+ <param name="use_mapping_quality" type="boolean" truevalue="--use-mapping-quality" falsevalue="" checked="False" label="Use mapping quality of alleles when calculating data likelihoods" />
+ <param name="read_dependence_factor" type="float" label="Incorporate non-independence of reads by scaling successive observations by this factor during data likelihood calculations" value="0.9" />
+ <param name="no_marginals" type="boolean" truevalue="--no-marginals" falsevalue="" checked="False" label="Do not calculate the marginal probability of genotypes. Saves time and improves scaling performance in large populations" />
+ </when>
+ </conditional>
+
+
+ </when>
+ </conditional>
+
+ </inputs>
+ <outputs>
+ <data format="vcf" name="output_vcf" label="${tool.name} on ${on_string} (variants)" />
+ <data format="bed" name="output_failed_alleles_bed" label="${tool.name} on ${on_string} (failed alleles)">
+ <filter>options_type['options_type_selector'] == "advanced" and options_type['output_failed_alleles_option'] is True</filter>
+ </data>
+ <data format="txt" name="output_trace" label="${tool.name} on ${on_string} (trace)">
+ <filter>options_type['options_type_selector'] == "advanced" and options_type['output_trace_option'] is True</filter>
+ </data>
+ </outputs>
+ <tests>
+ <test>
+ <param name="reference_source_selector" value="history" />
+ <param name="ref_file" ftype="fasta" value="phiX.fasta"/>
+ <param name="input_bam" ftype="bam" value="gatk/fake_phiX_reads_1.bam"/>
+ <param name="options_type_selector" value="basic"/>
+ <output name="output_vcf" file="variant_detection/freebayes/freebayes_out_1.vcf.contains" compare="contains"/>
+ <!-- <output name="output_failed_alleles_bed" file="empty_file.dat" />
+ <output name="output_trace" file="variant_detection/freebayes/freebayes_out_1.output_trace" /> -->
+ </test>
+ </tests>
+ <help>
+**What it does**
+
+This tool uses FreeBayes to call SNPs given a reference sequence and a BAM alignment file.
+
+FreeBayes is a high-performance, flexible, and open-source Bayesian genetic variant detector. It operates on BAM alignment files, which are produced by most contemporary short-read aligners.
+
+In addition to substantial performance improvements over its predecessors (PolyBayes, GigaBayes, and BamBayes), it expands the scope of SNP and small-indel variant calling to populations of individuals with heterogeneous copy number. FreeBayes is currently under active development.
+
+See the `FreeBayes site <http://bioinformatics.bc.edu/marthlab/FreeBayes>`_ for details on FreeBayes.
+
+------
+
+**Inputs**
+
+FreeBayes accepts aligned BAM files as input.
+
+
+**Outputs**
+
+The output is in the VCF format.
+
+-------
+
+**Settings**::
+
+ input and output:
+
+ -b --bam FILE Add FILE to the set of BAM files to be analyzed.
+ -c --stdin Read BAM input on stdin.
+ -v --vcf FILE Output VCF-format results to FILE.
+ -f --fasta-reference FILE
+ Use FILE as the reference sequence for analysis.
+ An index file (FILE.fai) will be created if none exists.
+ If neither --targets nor --region are specified, FreeBayes
+ will analyze every position in this reference.
+ -t --targets FILE
+ Limit analysis to targets listed in the BED-format FILE.
+ -r --region <chrom>:<start_position>..<end_position>
+ Limit analysis to the specified region, 0-based coordinates,
+ end_position not included (same as BED format).
+ -s --samples FILE
+ Limit analysis to samples listed (one per line) in the FILE.
+ By default FreeBayes will analyze all samples in its input
+ BAM files.
+ --populations FILE
+ Each line of FILE should list a sample and a population which
+ it is part of. The population-based bayesian inference model
+ will then be partitioned on the basis of the populations.
+ -A --cnv-map FILE
+ Read a copy number map from the BED file FILE, which has
+ the format:
+ reference sequence, start, end, sample name, copy number
+ ... for each region in each sample which does not have the
+ default copy number as set by --ploidy.
+ -L --trace FILE Output an algorithmic trace to FILE.
+ --failed-alleles FILE
+ Write a BED file of the analyzed positions which do not
+ pass --pvar to FILE.
+ -@ --variant-input VCF
+ Use variants reported in VCF file as input to the algorithm.
+ A report will be generated for every record in the VCF file.
+ -l --only-use-input-alleles
+ Only provide variant calls and genotype likelihoods for sites
+ and alleles which are provided in the VCF input, and provide
+ output in the VCF for all input alleles, not just those which
+ have support in the data.
+
+ reporting:
+
+ -P --pvar N Report sites if the probability that there is a polymorphism
+ at the site is greater than N. default: 0.0001
+ -_ --show-reference-repeats
+ Calculate and show information about reference repeats in
+ the VCF output.
+
+ population model:
+
+ -T --theta N The expected mutation rate or pairwise nucleotide diversity
+ among the population under analysis. This serves as the
+ single parameter to the Ewens Sampling Formula prior model
+ default: 0.001
+ -p --ploidy N Sets the default ploidy for the analysis to N. default: 2
+ -J --pooled Assume that samples result from pooled sequencing.
+ When using this flag, set --ploidy to the number of
+ alleles in each sample.
+
+ reference allele:
+
+ -Z --use-reference-allele
+ This flag includes the reference allele in the analysis as
+ if it is another sample from the same population.
+ -H --diploid-reference
+ If using the reference sequence as a sample (-Z),
+ treat it as diploid. default: false (reference is haploid)
+ --reference-quality MQ,BQ
+ Assign mapping quality of MQ to the reference allele at each
+ site and base quality of BQ. default: 100,60
+
+ allele scope:
+
+ -I --no-snps Ignore SNP alleles.
+ -i --no-indels Ignore insertion and deletion alleles.
+ -X --no-mnps Ignore multi-nucleotide polymorphisms, MNPs.
+ -u --no-complex Ignore complex events (composites of other classes).
+ -n --use-best-n-alleles N
+ Evaluate only the best N SNP alleles, ranked by sum of
+ supporting quality scores. (Set to 0 to use all; default: all)
+ -E --max-complex-gap N
+ Allow complex alleles with contiguous embedded matches of up
+ to this length.
+
+ indel realignment:
+
+ -O --left-align-indels
+ Left-realign and merge gaps embedded in reads. default: false
+
+ input filters:
+
+ -4 --use-duplicate-reads
+ Include duplicate-marked alignments in the analysis.
+ default: exclude duplicates
+ -m --min-mapping-quality Q
+ Exclude alignments from analysis if they have a mapping
+ quality less than Q. default: 30
+ -q --min-base-quality Q
+ Exclude alleles from analysis if their supporting base
+ quality is less than Q. default: 20
+ -R --min-supporting-quality MQ,BQ
+ In order to consider an alternate allele, at least one supporting
+ alignment must have mapping quality MQ, and one supporting
+ allele must have base quality BQ. default: 0,0, unset
+ -Q --mismatch-base-quality-threshold Q
+ Count mismatches toward --read-mismatch-limit if the base
+ quality of the mismatch is >= Q. default: 10
+ -U --read-mismatch-limit N
+ Exclude reads with more than N mismatches where each mismatch
+ has base quality >= mismatch-base-quality-threshold.
+ default: ~unbounded
+ -z --read-max-mismatch-fraction N
+ Exclude reads with more than N [0,1] fraction of mismatches where
+ each mismatch has base quality >= mismatch-base-quality-threshold
+ default: 1.0
+ -$ --read-snp-limit N
+ Exclude reads with more than N base mismatches, ignoring gaps
+ with quality >= mismatch-base-quality-threshold.
+ default: ~unbounded
+ -e --read-indel-limit N
+ Exclude reads with more than N separate gaps.
+ default: ~unbounded
+ -0 --no-filters Do not use any input base and mapping quality filters
+ Equivalent to -m 0 -q 0 -R 0 -S 0
+ -x --indel-exclusion-window
+ Ignore portions of alignments this many bases from a
+ putative insertion or deletion allele. default: 0
+ -F --min-alternate-fraction N
+ Require at least this fraction of observations supporting
+ an alternate allele within a single individual in order
+ to evaluate the position. default: 0.0
+ -C --min-alternate-count N
+ Require at least this count of observations supporting
+ an alternate allele within a single individual in order
+ to evaluate the position. default: 1
+ -3 --min-alternate-qsum N
+ Require at least this sum of quality of observations supporting
+ an alternate allele within a single individual in order
+ to evaluate the position. default: 0
+ -G --min-alternate-total N
+ Require at least this count of observations supporting
+ an alternate allele within the total population in order
+ to use the allele in analysis. default: 1
+ -! --min-coverage N
+ Require at least this coverage to process a site. default: 0
+
+ bayesian priors:
+
+ -Y --no-ewens-priors
+ Turns off the Ewens' Sampling Formula component of the priors.
+ -k --no-population-priors
+ Equivalent to --pooled --no-ewens-priors
+ -w --hwe-priors Use the probability of the combination arising under HWE given
+ the allele frequency as estimated by observation frequency.
+
+ observation prior expectations:
+
+ -V --binomial-obs-priors
+ Incorporate expectations about observations into the priors.
+ Uses read placement probability, strand balance probability,
+ and read position (5'-3') probability.
+ -a --allele-balance-priors
+ Use aggregate probability of observation balance between alleles
+ as a component of the priors. Best for observations with minimal
+ inherent reference bias.
+
+ algorithmic features:
+
+ -M --site-selection-max-iterations N
+ Uses hill-climbing algorithm to search posterior space for N
+ iterations to determine if the site should be evaluated. Set to 0
+ to prevent use of this algorithm for site selection, and
+ to a low integer for improved site selection at a slight
+ performance penalty. default: 5.
+ -B --genotyping-max-iterations N
+ Iterate no more than N times during genotyping step. default: 25.
+ --genotyping-max-banddepth N
+ Integrate no deeper than the Nth best genotype by likelihood when
+ genotyping. default: 6.
+ -W --posterior-integration-limits N,M
+ Integrate all genotype combinations in our posterior space
+ which include no more than N samples with their Mth best
+ data likelihood. default: 1,3.
+ -K --no-permute
+ Do not scale prior probability of genotype combination given allele
+ frequency by the number of permutations of included genotypes.
+ -N --exclude-unobserved-genotypes
+ Skip sample genotypings for which the sample has no supporting reads.
+ -S --genotype-variant-threshold N
+ Limit posterior integration to samples where the second-best
+ genotype likelihood is no more than log(N) from the highest
+ genotype likelihood for the sample. default: ~unbounded
+ -j --use-mapping-quality
+ Use mapping quality of alleles when calculating data likelihoods.
+ -D --read-dependence-factor N
+ Incorporate non-independence of reads by scaling successive
+ observations by this factor during data likelihood
+ calculations. default: 0.9
+ -= --no-marginals
+ Do not calculate the marginal probability of genotypes. Saves
+ time and improves scaling performance in large populations.
+
+
+------
+
+**Citation**
+
+For the underlying tool, please cite `FreeBayes <http://bioinformatics.bc.edu/marthlab/FreeBayes>`_.
+
+If you use this tool in Galaxy, please cite Blankenberg D, et al. *In preparation.*
+
+ </help>
+</tool>
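In basic mode with a history (FASTA) reference, the Cheetah <command> template above boils down to a single shell pipeline. A minimal sketch of that assembly in Python follows; the localref.fa/localbam_N.bam names come from the template, while deriving the .bai path from the BAM path is a simplification (the template actually uses Galaxy's bam_index metadata).

# Sketch: the shell line the wrapper builds for a basic-mode run.
def freebayes_basic_command( ref_fasta, bam_paths, output_vcf ):
    steps = [ 'ln -s "%s" "localref.fa"' % ref_fasta,
              'samtools faidx "localref.fa"' ]
    for i, bam in enumerate( bam_paths ):
        steps.append( 'ln -s "%s" "localbam_%d.bam"' % ( bam, i ) )
        steps.append( 'ln -s "%s.bai" "localbam_%d.bam.bai"' % ( bam, i ) )  # simplification
    bam_flags = ' '.join( '--bam "localbam_%d.bam"' % i for i in range( len( bam_paths ) ) )
    steps.append( 'freebayes %s --fasta-reference "localref.fa" --vcf "%s"'
                  % ( bam_flags, output_vcf ) )
    return ' && '.join( steps )

# e.g. freebayes_basic_command( 'phiX.fasta', [ 'fake_phiX_reads_1.bam' ], 'out.vcf' )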
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/test_data/freebayes/invalid_deps/tool_dependencies.xml
--- /dev/null
+++ b/test/tool_shed/test_data/freebayes/invalid_deps/tool_dependencies.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<tool_dependency>
+ <package name="freebayes" version="0.9.5">
+ <install version="1.0">
+ <actions>
+ <action type="shell_command">git clone --recursive git://github.com/ekg/freebayes.git</action>
+ <action type="shell_command">git reset --hard 9696d0ce8a962f7bb61c4791be5ce44312b81cf8</action>
+ <action type="shell_command">make</action>
+ <action type="move_directory_files">
+ <source_directory>bin</source_directory>
+ <destination_directory>$INSTALL_DIR/bin</destination_directory>
+ </action>
+ <action type="set_environment">
+ <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable>
+ </action>
+ </actions>
+ </install>
+ <readme>
+FreeBayes requires g++ and the standard C and C++ development libraries.
+Additionally, cmake is required for building the BamTools API.
+ </readme>
+ </package>
+ <package name="samtools" version="0.2.15">
+ <install version="1.0">
+ <actions>
+ <action type="download_by_url">http://sourceforge.net/projects/samtools/files/samtools/0.1.18/samtools-0.1…</action>
+ <action type="shell_command">sed -i .bak -e 's/-lcurses/-lncurses/g' Makefile</action>
+ <action type="shell_command">make</action>
+ <action type="move_file">
+ <source>samtools</source>
+ <destination>$INSTALL_DIR/bin</destination>
+ </action>
+ <action type="move_file">
+ <source>misc/maq2sam-long</source>
+ <destination>$INSTALL_DIR/bin</destination>
+ </action>
+ <action type="set_environment">
+ <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable>
+ </action>
+ </actions>
+ </install>
+ <readme>
+Compiling SAMtools requires the ncurses and zlib development libraries.
+ </readme>
+ </package>
+</tool_dependency>
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/test_data/freebayes/sam_fa_indices.loc.sample
--- /dev/null
+++ b/test/tool_shed/test_data/freebayes/sam_fa_indices.loc.sample
@@ -0,0 +1,28 @@
+#This is a sample file distributed with Galaxy that enables tools
+#to use a directory of Samtools indexed sequence data files. You will need
+#to create these data files and then create a sam_fa_indices.loc file
+#similar to this one (store it in this directory) that points to
+#the directories in which those files are stored. The sam_fa_indices.loc
+#file has this format (white space characters are TAB characters):
+#
+#index <seq> <location>
+#
+#So, for example, if you had hg18 indexed stored in
+#/depot/data2/galaxy/sam/,
+#then the sam_fa_indices.loc entry would look like this:
+#
+#index hg18 /depot/data2/galaxy/sam/hg18.fa
+#
+#and your /depot/data2/galaxy/sam/ directory
+#would contain hg18.fa and hg18.fa.fai files:
+#
+#-rw-r--r-- 1 james universe 830134 2005-09-13 10:12 hg18.fa
+#-rw-r--r-- 1 james universe 527388 2005-09-13 10:12 hg18.fa.fai
+#
+#Your sam_fa_indices.loc file should include an entry per line for
+#each index set you have stored. The file in the path does actually
+#exist, but it should never be directly used. Instead, the name serves
+#as a prefix for the index file. For example:
+#
+#index hg18 /depot/data2/galaxy/sam/hg18.fa
+#index hg19 /depot/data2/galaxy/sam/hg19.fa
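The TAB-separated format documented above is straightforward to consume; a minimal illustrative reader (the function name and return shape are assumptions, not Galaxy API):

# Sketch: parse sam_fa_indices.loc lines of the form
#   index<TAB><seq><TAB><location>
def read_sam_fa_indices( path ):
    entries = {}
    for line in open( path ):
        line = line.strip()
        if not line or line.startswith( '#' ):
            continue  # skip comments and blank lines
        line_type, seq, location = line.split( '\t' )
        entries[ seq ] = location  # e.g. entries[ 'hg18' ] = '/depot/data2/galaxy/sam/hg18.fa'
    return entries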
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/test_data/freebayes/tool_data_table_conf.xml.sample
--- /dev/null
+++ b/test/tool_shed/test_data/freebayes/tool_data_table_conf.xml.sample
@@ -0,0 +1,8 @@
+<!-- Use the file tool_data_table_conf.xml.oldlocstyle if you don't want to update your loc files as changed in revision 4550:535d276c92bc-->
+<tables>
+ <!-- Location of SAMTools indexes and other files -->
+ <table name="sam_fa_indexes" comment_char="#">
+ <columns>line_type, value, path</columns>
+ <file path="tool-data/sam_fa_indices.loc" />
+ </table>
+</tables>
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/test_data/freebayes/tool_dependencies.xml
--- /dev/null
+++ b/test/tool_shed/test_data/freebayes/tool_dependencies.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<tool_dependency>
+ <package name="freebayes" version="0.9.4_9696d0ce8a962f7bb61c4791be5ce44312b81cf8">
+ <install version="1.0">
+ <actions>
+ <action type="shell_command">git clone --recursive git://github.com/ekg/freebayes.git</action>
+ <action type="shell_command">git reset --hard 9696d0ce8a962f7bb61c4791be5ce44312b81cf8</action>
+ <action type="shell_command">make</action>
+ <action type="move_directory_files">
+ <source_directory>bin</source_directory>
+ <destination_directory>$INSTALL_DIR/bin</destination_directory>
+ </action>
+ <action type="set_environment">
+ <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable>
+ </action>
+ </actions>
+ </install>
+ <readme>
+FreeBayes requires g++ and the standard C and C++ development libraries.
+Additionally, cmake is required for building the BamTools API.
+ </readme>
+ </package>
+ <package name="samtools" version="0.1.18">
+ <install version="1.0">
+ <actions>
+ <action type="download_by_url">http://sourceforge.net/projects/samtools/files/samtools/0.1.18/samtools-0.1…</action>
+ <action type="shell_command">sed -i .bak -e 's/-lcurses/-lncurses/g' Makefile</action>
+ <action type="shell_command">make</action>
+ <action type="move_file">
+ <source>samtools</source>
+ <destination>$INSTALL_DIR/bin</destination>
+ </action>
+ <action type="move_file">
+ <source>misc/maq2sam-long</source>
+ <destination>$INSTALL_DIR/bin</destination>
+ </action>
+ <action type="set_environment">
+ <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable>
+ </action>
+ </actions>
+ </install>
+ <readme>
+Compiling SAMtools requires the ncurses and zlib development libraries.
+ </readme>
+ </package>
+</tool_dependency>
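Read as a recipe, the freebayes <actions> above clone a pinned revision, build it, move the binaries into $INSTALL_DIR/bin, and prepend that directory to PATH. A rough Python rendering, purely for illustration (the real work is done by the tool shed's action framework, which also supplies $INSTALL_DIR; install_dir stands in for it here):

# Sketch: approximate effect of the freebayes <install> actions.
import os, shutil, subprocess

def install_freebayes( install_dir ):
    subprocess.check_call( [ 'git', 'clone', '--recursive',
                             'git://github.com/ekg/freebayes.git' ] )
    os.chdir( 'freebayes' )
    subprocess.check_call( [ 'git', 'reset', '--hard',
                             '9696d0ce8a962f7bb61c4791be5ce44312b81cf8' ] )
    subprocess.check_call( [ 'make' ] )
    bin_dir = os.path.join( install_dir, 'bin' )
    if not os.path.exists( bin_dir ):
        os.makedirs( bin_dir )
    for name in os.listdir( 'bin' ):  # <move_directory_files>
        shutil.move( os.path.join( 'bin', name ), bin_dir )
    # <set_environment>: prepend $INSTALL_DIR/bin to PATH
    os.environ[ 'PATH' ] = bin_dir + os.pathsep + os.environ[ 'PATH' ]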
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: inithello: Don't allow uploading files to a deprecated repository.
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/3ff1e4ec000a/
changeset: 3ff1e4ec000a
user: inithello
date: 2012-12-03 16:41:21
summary: Don't allow uploading files to a deprecated repository.
affected #: 1 file
diff -r 12f55a688c82bb3e26de0ebba3cb202b16cf82e3 -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 templates/webapps/community/repository/browse_repository.mako
--- a/templates/webapps/community/repository/browse_repository.mako
+++ b/templates/webapps/community/repository/browse_repository.mako
@@ -9,7 +9,7 @@
is_new = repository.is_new( trans.app )
can_contact_owner = trans.user and trans.user != repository.user
can_push = trans.app.security_agent.can_push( trans.app, trans.user, repository )
- can_upload = can_push
+ can_upload = can_push and ( not repository.deprecated )
can_download = not is_new and ( not is_malicious or can_push )
can_browse_contents = not is_new
can_rate = trans.user and repository.user != trans.user
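The one-line change tightens the permission logic: pushing requires write access, and uploading now additionally requires a non-deprecated repository. In plain Python the resulting predicate reads as follows (names mirror the Mako template variables; the function wrapper is illustrative):

# Sketch of the browse-page upload permission after this change.
def compute_upload_permission( app, user, repository ):
    can_push = app.security_agent.can_push( app, user, repository )
    can_upload = can_push and ( not repository.deprecated )
    return can_upload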
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: inithello: More tool shed functional test enhancements.
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/12f55a688c82/
changeset: 12f55a688c82
user: inithello
date: 2012-12-03 16:07:31
summary: More tool shed functional test enhancements.
affected #: 2 files
diff -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 -r 12f55a688c82bb3e26de0ebba3cb202b16cf82e3 test/tool_shed/base/twilltestcase.py
--- a/test/tool_shed/base/twilltestcase.py
+++ b/test/tool_shed/base/twilltestcase.py
@@ -40,7 +40,7 @@
self.check_string_not_in_page( string )
def check_for_valid_tools( self, repository, strings_displayed=[], strings_not_displayed=[] ):
strings_displayed.append( 'Valid tools' )
- self.manage_repository( repository, strings_displayed, strings_not_displayed )
+ self.display_manage_repository_page( repository, strings_displayed, strings_not_displayed )
def check_count_of_metadata_revisions_associated_with_repository( self, repository, metadata_count ):
self.check_repository_changelog( repository )
self.check_string_count_in_page( 'Repository metadata is associated with this change set.', metadata_count )
@@ -63,6 +63,8 @@
'''
repository_metadata = get_repository_metadata_by_repository_id_changeset_revision( repository.id, changeset_revision )
metadata = repository_metadata.metadata
+ if 'tools' not in metadata:
+ raise AssertionError( 'No tools in %s revision %s.' % ( repository.name, changeset_revision ) )
for tool_dict in metadata[ 'tools' ]:
metadata_strings_displayed = [ tool_dict[ 'guid' ],
tool_dict[ 'version' ],
@@ -78,18 +80,18 @@
changeset_revision=changeset_revision,
strings_displayed=[ '%s (version %s)' % ( tool_dict[ 'name' ], tool_dict[ 'version' ] ) ],
strings_not_displayed=[] )
- def check_repository_invalid_tools_for_changeset_revision( self, repository, changeset_revision ):
+ def check_repository_invalid_tools_for_changeset_revision( self, repository, changeset_revision, strings_displayed=[], strings_not_displayed=[] ):
+ '''Load the invalid tool page for each invalid tool associated with this changeset revision and verify the received error messages.'''
repository_metadata = get_repository_metadata_by_repository_id_changeset_revision( repository.id, changeset_revision )
metadata = repository_metadata.metadata
if 'invalid_tools' not in metadata:
return
- for tool_xml in metadata[ 'tools' ]:
- tool_path = '%s/%s' % ( self.get_repo_path( repository ), tool_xml )
- self.load_display_tool_page( repository,
- tool_xml_path=tool_path,
+ for tool_xml in metadata[ 'invalid_tools' ]:
+ self.load_invalid_tool_page( repository,
+ tool_xml=tool_xml,
changeset_revision=changeset_revision,
- strings_displayed=[ 'properly loaded' ],
- strings_not_displayed=[] )
+ strings_displayed=strings_displayed,
+ strings_not_displayed=strings_not_displayed )
def check_string_count_in_page( self, pattern, min_count, max_count=None ):
"""Checks the number of 'pattern' occurrences in the current browser page"""
page = self.last_page()
@@ -260,19 +262,23 @@
invalid_tools.append( dict( tools=repository_metadata.metadata[ 'invalid_tools' ], changeset_revision=repository_metadata.changeset_revision ) )
return valid_tools, invalid_tools
def grant_write_access( self, repository, usernames=[], strings_displayed=[], strings_not_displayed=[] ):
- self.manage_repository( repository )
+ self.display_manage_repository_page( repository )
tc.fv( "3", "allow_push", '-Select one' )
for username in usernames:
tc.fv( "3", "allow_push", '+%s' % username )
tc.submit( 'user_access_button' )
self.check_for_strings( strings_displayed, strings_not_displayed )
- def load_display_tool_page( self, repository, tool_xml_path, changeset_revision, strings_displayed=[], strings_not_displayed=[] ):
- repository_id = self.security.encode_id( repository.id )
- url = '/repository/display_tool?repository_id=%s&tool_config=%s&changeset_revision=%s' % \
- ( repository_id, tool_xml_path, changeset_revision )
+ def load_invalid_tool_page( self, repository, tool_xml, changeset_revision, strings_displayed=[], strings_not_displayed=[] ):
+ url = '/repository/load_invalid_tool?repository_id=%s&tool_config=%s&changeset_revision=%s' % \
+ ( self.security.encode_id( repository.id ), tool_xml, changeset_revision )
self.visit_url( url )
self.check_for_strings( strings_displayed, strings_not_displayed )
- def manage_repository( self, repository, strings_displayed=[], strings_not_displayed=[] ):
+ def load_display_tool_page( self, repository, tool_xml_path, changeset_revision, strings_displayed=[], strings_not_displayed=[] ):
+ url = '/repository/display_tool?repository_id=%s&tool_config=%s&changeset_revision=%s' % \
+ ( self.security.encode_id( repository.id ), tool_xml_path, changeset_revision )
+ self.visit_url( url )
+ self.check_for_strings( strings_displayed, strings_not_displayed )
+ def display_manage_repository_page( self, repository, strings_displayed=[], strings_not_displayed=[] ):
url = '/repository/manage_repository?id=%s' % self.security.encode_id( repository.id )
self.visit_url( url )
self.check_for_strings( strings_displayed, strings_not_displayed )
@@ -280,21 +286,20 @@
url = '/repository/manage_repository?user_access_button=Remove&id=%s&remove_auth=%s' % \
( self.security.encode_id( repository.id ), username )
self.visit_url( url )
- def search_for_valid_tools( self, search_options={}, strings_displayed=[], strings_not_displayed=[] ):
- for exact_matches in [ True, False ]:
- for key, value in search_options.items():
- url = '/repository/find_tools'
- self.visit_url( url )
- tc.fv( "1", "exact_matches", exact_matches )
- tc.fv( "1", key, value )
- tc.submit()
- self.check_for_strings( strings_displayed, strings_not_displayed )
+ def search_for_valid_tools( self, search_fields={}, exact_matches=False, strings_displayed=[], strings_not_displayed=[] ):
+ for field_name, search_string in search_fields.items():
+ url = '/repository/find_tools'
+ self.visit_url( url )
+ tc.fv( "1", "exact_matches", exact_matches )
+ tc.fv( "1", field_name, search_string )
+ tc.submit()
+ self.check_for_strings( strings_displayed, strings_not_displayed )
def set_repository_deprecated( self, repository, set_deprecated=True, strings_displayed=[], strings_not_displayed=[] ):
url = '/repository/deprecate?id=%s&mark_deprecated=%s' % ( self.security.encode_id( repository.id ), set_deprecated )
self.visit_url( url )
self.check_for_strings( strings_displayed, strings_not_displayed )
def set_repository_malicious( self, repository, set_malicious=True, strings_displayed=[], strings_not_displayed=[] ):
- self.manage_repository( repository )
+ self.display_manage_repository_page( repository )
tc.fv( "malicious", "malicious", set_malicious )
tc.submit( "malicious_button" )
self.check_for_strings( strings_displayed, strings_not_displayed )
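With this refactor, callers supply the search form fields as a dict plus an explicit exact_matches flag, rather than having the helper try both settings internally. A usage sketch, as it would appear inside a ShedTwillTestCase test method (values taken from test_0000 below):

# Sketch: calling the refactored helper. Runs inside a test method, so
# self is a ShedTwillTestCase and tip_changeset was obtained earlier via
# self.get_repository_tip( repository ).
search_fields = dict( tool_id='Filter1', tool_name='filter', tool_version='2.2.0' )
self.search_for_valid_tools( search_fields=search_fields,
                             exact_matches=False,
                             strings_displayed=[ tip_changeset ],
                             strings_not_displayed=[] )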
diff -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 -r 12f55a688c82bb3e26de0ebba3cb202b16cf82e3 test/tool_shed/functional/test_0000_basic_repository_features.py
--- a/test/tool_shed/functional/test_0000_basic_repository_features.py
+++ b/test/tool_shed/functional/test_0000_basic_repository_features.py
@@ -83,9 +83,10 @@
strings_displayed=[ 'The repository tip has been defined as <b>not</b> malicious.' ] )
self.set_repository_deprecated( repository,
strings_displayed=[ 'has been marked as deprecated', 'Mark as not deprecated' ] )
- self.manage_repository( repository,
+ self.display_manage_repository_page( repository,
strings_displayed=[ 'This repository has been marked as deprecated' ],
strings_not_displayed=[ 'Upload files', 'Reset all repository metadata' ] )
+ self.browse_repository( repository, strings_not_displayed=[ 'Upload files' ] )
self.set_repository_deprecated( repository,
strings_displayed=[ 'has been marked as not deprecated', 'Mark as deprecated' ],
set_deprecated=False )
@@ -105,7 +106,7 @@
commit_message="Uploaded filtering.txt",
uncompress_file='No',
remove_repo_files_not_in_tar='No' )
- self.manage_repository( repository, strings_displayed=[ 'Readme file for filtering 1.1.0' ] )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'Readme file for filtering 1.1.0' ] )
def test_0055_upload_filtering_test_data( self ):
'''Upload filtering test data.'''
repository = get_repository_by_name_and_owner( repository_name, admin_username )
@@ -129,7 +130,7 @@
tip = self.get_repository_tip( repository )
self.check_for_valid_tools( repository )
strings_displayed = self.get_repository_metadata_revisions( repository ).append( 'Select a revision' )
- self.manage_repository( repository, strings_displayed=strings_displayed )
+ self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
self.check_count_of_metadata_revisions_associated_with_repository( repository, metadata_count=2 )
self.check_repository_tools_for_changeset_revision( repository, tip )
self.check_repository_metadata( repository, tip_only=False )
@@ -137,19 +138,19 @@
'''Upload readme.txt file associated with tool version 2.2.0.'''
repository = get_repository_by_name_and_owner( repository_name, admin_username )
self.upload_file( repository, 'readme.txt', commit_message="Uploaded readme.txt" )
- self.manage_repository( repository, strings_displayed=[ 'This is a readme file.' ] )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'This is a readme file.' ] )
# Verify that there is a different readme file for each metadata revision.
metadata_revisions = self.get_repository_metadata_revisions( repository )
- self.manage_repository( repository, strings_displayed=[ 'Readme file for filtering 1.1.0', 'This is a readme file.' ] )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'Readme file for filtering 1.1.0', 'This is a readme file.' ] )
def test_0075_delete_readme_txt_file( self ):
'''Delete the readme.txt file.'''
repository = get_repository_by_name_and_owner( repository_name, admin_username )
self.delete_files_from_repository( repository, filenames=[ 'readme.txt' ] )
self.check_count_of_metadata_revisions_associated_with_repository( repository, metadata_count=2 )
- self.manage_repository( repository, strings_displayed=[ 'Readme file for filtering 1.1.0' ] )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'Readme file for filtering 1.1.0' ] )
def test_0080_search_for_valid_filter_tool( self ):
- '''Verify that the "search for valid tool" feature finds the filtering tool.'''
+ '''Search for the filtering tool by tool ID, name, and version.'''
repository = get_repository_by_name_and_owner( repository_name, admin_username )
tip_changeset = self.get_repository_tip( repository )
- search_options = dict( tool_id='Filter1', tool_name='filter', tool_version='2.2.0' )
- self.search_for_valid_tools( search_options=search_options, strings_displayed=[ tip_changeset ], strings_not_displayed=[] )
+ search_fields = dict( tool_id='Filter1', tool_name='filter', tool_version='2.2.0' )
+ self.search_for_valid_tools( search_fields=search_fields, strings_displayed=[ tip_changeset ], strings_not_displayed=[] )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.