1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/5432c6f3fe42/
Changeset: 5432c6f3fe42
User: jgoecks
Date: 2013-09-11 22:27:16
Summary: Trackster: make compatible with recent Backbone-relational update by using findOrCreate rather than creating new dataset objects each time. This fixes composite tracks and re-adding tracks.
Affected #: 1 file
diff -r bfcb397e240947dead624e730c3bc1ebb9fbf933 -r 5432c6f3fe421b820673d6096cc039085555b44c static/scripts/viz/trackster/tracks.js
--- a/static/scripts/viz/trackster/tracks.js
+++ b/static/scripts/viz/trackster/tracks.js
@@ -2321,7 +2321,7 @@
this.dataset = null;
if (obj_dict.dataset) {
// Dataset can be a Backbone model or a dict that can be used to create a model.
- this.dataset = (obj_dict.dataset instanceof Backbone.Model ? obj_dict.dataset : new data.Dataset(obj_dict.dataset) );
+ this.dataset = (obj_dict.dataset instanceof Backbone.Model ? obj_dict.dataset : data.Dataset.findOrCreate(obj_dict.dataset) );
}
this.dataset_check_type = 'converted_datasets_state';
this.data_url_extra_params = {};
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/4ff34b0a0f9d/
Changeset: 4ff34b0a0f9d
User: jgoecks
Date: 2013-09-11 20:45:06
Summary: Remove TODO because it is not needed--any Backbone object can be a mixin.
Affected #: 1 file
diff -r 5c606445f35e6328e92e70a2c55eb148086696e1 -r 4ff34b0a0f9dc763b87ce20bcb39c75447aed94d static/scripts/viz/circster.js
--- a/static/scripts/viz/circster.js
+++ b/static/scripts/viz/circster.js
@@ -3,7 +3,6 @@
/**
* Utility class for working with SVG.
*/
-// TODO: make into a mixin.
var SVGUtils = Backbone.Model.extend({
/**
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/63c492b8f6da/
Changeset: 63c492b8f6da
User: dannon
Date: 2013-09-11 20:35:33
Summary: Touch.
Affected #: 1 file
diff -r fe1a800dfcb6d842c3769c3334d602ce95095570 -r 63c492b8f6da79d7ecc4459419c4e90fe4bf4fc6 universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample
+++ b/universe_wsgi.ini.sample
@@ -42,7 +42,7 @@
# Set the number of seconds a thread can work before you should kill it (assuming it will never finish) to 3 hours.
threadpool_kill_thread_limit = 10800
-
+
# ---- Filters --------------------------------------------------------------
# Filters sit between Galaxy and the HTTP server.
https://bitbucket.org/galaxy/galaxy-central/commits/5c606445f35e/
Changeset: 5c606445f35e
User: dannon
Date: 2013-09-11 20:36:00
Summary: Merge
Affected #: 1 file
diff -r 63c492b8f6da79d7ecc4459419c4e90fe4bf4fc6 -r 5c606445f35e6328e92e70a2c55eb148086696e1 lib/tool_shed/util/common_install_util.py
--- a/lib/tool_shed/util/common_install_util.py
+++ b/lib/tool_shed/util/common_install_util.py
@@ -344,7 +344,7 @@
required_repository_tups.append( components_list )
else:
# We have a single repository with no dependencies.
- components_list = [ tool_shed_url, repository_name, repository_owner, changeset_revision, 'False', 'False' ]
+ components_list = [ tool_shed_url, repository_name, repository_owner, changeset_revision ]
required_repository_tups.append( components_list )
if required_repository_tups:
# The value of required_repository_tups is a list of tuples, so we need to encode it.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/48191107fd2d/
Changeset: 48191107fd2d
User: Dave Bouvier
Date: 2013-09-11 20:16:52
Summary: Backward compatibility fix for installing repositories without repository dependencies.
Affected #: 1 file
diff -r fe1a800dfcb6d842c3769c3334d602ce95095570 -r 48191107fd2da06e10af5e7d83e197585e859f06 lib/tool_shed/util/common_install_util.py
--- a/lib/tool_shed/util/common_install_util.py
+++ b/lib/tool_shed/util/common_install_util.py
@@ -344,7 +344,7 @@
required_repository_tups.append( components_list )
else:
# We have a single repository with no dependencies.
- components_list = [ tool_shed_url, repository_name, repository_owner, changeset_revision, 'False', 'False' ]
+ components_list = [ tool_shed_url, repository_name, repository_owner, changeset_revision ]
required_repository_tups.append( components_list )
if required_repository_tups:
# The value of required_repository_tups is a list of tuples, so we need to encode it.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
25 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/0a37731994c9/
Changeset: 0a37731994c9
User: saketkc
Date: 2013-06-19 08:51:46
Summary: Adding a small check to skip the assertion of tagNumber being
an integer for VCF4.1 files.
I am not sure if there are better ways or better checks that can be implemented to
do this. So, this should require discussions.
The description of ##INFO and ##FORMAT tag as given on
[http://www.1000genomes.org/wiki/Analysis/Variant%20Call%20Format/vcf-variant-call-format-version-41]
is :
##INFO=<ID=ID,Number=number,Type=type,Description="description">
Possible Types for INFO fields are: Integer, Float, Flag, Character, and String.
The Number entry is an Integer that describes the number of values that can be included with the INFO field.
For example, if the INFO field contains a single number, then this value should be 1;
if the INFO field describes a pair of numbers, then this value should be 2 and so on.
If the field has one value per alternate allele then this value should be 'A';
if the field has one value for each possible genotype (more relevant to the FORMAT tags) then this value should be 'G'.
If the number of possible values varies, is unknown, or is unbounded, then this value should be '.'.
The 'Flag' type indicates that the INFO field does not contain a Value entry, and hence the Number should
be 0 in this case. The Description value must be surrounded by double-quotes.
Double-quote character can be escaped with backslash (\") and backslash as \\.
Affected #: 1 file
diff -r 81a2bb351c1a0f545a1f69b5514a29d96eb4575a -r 0a37731994c91fd59d2d31f=
79156abb58cc76d75 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/ebdaa310677b/
Changeset: ebdaa310677b
User: saketkc
Date: 2013-06-19 08:52:22
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 170dd4c157b8b5e010804ba4a1ef3b5da08fa49d -r ebdaa310677b57337f5fcc0=
aa2ac475379bf271c tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/493bee44220a/
Changeset: 493bee44220a
User: saketkc
Date: 2013-06-20 05:58:14
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 36d9e5bcd2cbcd5b34b2ae0e7839a71a55350011 -r 493bee44220a54f4d0595bf=
3fd49efdf8a8795bd tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/a52434fb61a8/
Changeset: a52434fb61a8
User: saketkc
Date: 2013-06-20 18:32:29
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 7362302b2c638d377931fa710d40ba2f86f25dba -r a52434fb61a8968735ed1b5=
03b6a5b2b253963ea tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/b1bb5b64dd8e/
Changeset: b1bb5b64dd8e
User: saketkc
Date: 2013-06-20 18:58:11
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 13b5283f964cfa7364b653f2c578b4d7cc27c6e5 -r b1bb5b64dd8eb871451b6bd=
489cc26a6c08dbb7e tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/caa8fd7a0dcf/
Changeset: caa8fd7a0dcf
User: saketkc
Date: 2013-06-20 19:02:55
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 89e7db1cd8098fd6191ab3b4af92e4af32e4c651 -r caa8fd7a0dcf279fec70c3d=
1581296610fc6f636 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/adb63b769897/
Changeset: adb63b769897
User: saketkc
Date: 2013-06-21 07:01:54
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r cf58e35b3a973311ab291e90b501c6f4bc31b6d3 -r adb63b769897f2d2f34b935d1a9597df6d1a9cdf tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/b092c9d9a17b/
Changeset: b092c9d9a17b
User: saketkc
Date: 2013-06-21 12:18:18
Summary: Automated merge with ssh://bitbucket.org/saketkc/galaxy-central
Affected #: 1 file
diff -r adb63b769897f2d2f34b935d1a9597df6d1a9cdf -r b092c9d9a17b448e5cb7249828dbcd90ab27f1f1 lib/galaxy/workflow/modules.py
--- a/lib/galaxy/workflow/modules.py
+++ b/lib/galaxy/workflow/modules.py
@@ -246,8 +246,8 @@
return module_factory.from_dict(trans, from_json_string(st=
ep.config), secure=3DFalse)
module =3D Class( trans, tool_id )
module.state =3D galaxy.tools.DefaultToolState()
- if step.tool_version and (step.tool_version !=3D tool.version=
):
- module.version_changes.append("%s: using version '%s' inst=
ead of version '%s' indicated in this workflow." % (tool_id, tool.version, =
step.tool_version))
+ if step.tool_version and (step.tool_version !=3D module.tool.v=
ersion):
+ module.version_changes.append("%s: using version '%s' inst=
ead of version '%s' indicated in this workflow." % (tool_id, module.tool.ve=
rsion, step.tool_version))
module.state.inputs =3D module.tool.params_from_strings( step.=
tool_inputs, trans.app, ignore_errors=3DTrue )
module.errors =3D step.tool_errors
# module.post_job_actions =3D step.post_job_actions
https://bitbucket.org/galaxy/galaxy-central/commits/46f4beebb766/
Changeset: 46f4beebb766
User: saketkc
Date: 2013-06-22 12:02:56
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 4f7b7e4ca213498824d5fba7526676ddf976b823 -r 46f4beebb766fd75edb59bc3598342ef95775af9 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/1ff57c7a9deb/
Changeset: 1ff57c7a9deb
User: saketkc
Date: 2013-06-26 21:44:43
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 62 files
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216a33789218190f1f47 lib/galaxy/datatypes/binary.py
--- a/lib/galaxy/datatypes/binary.py
+++ b/lib/galaxy/datatypes/binary.py
@@ -267,25 +267,25 @@
# bam does not use '#' to indicate comments/headers - we need to strip=
out those headers from the std. providers
#TODO:?? seems like there should be an easier way to do/inherit this -=
metadata.comment_char?
#TODO: incorporate samtools options to control output: regions first, =
then flags, etc.
- @dataproviders.decorators.dataprovider_factory( 'line' )
+ @dataproviders.decorators.dataprovider_factory( 'line', dataproviders.=
line.FilteredLineDataProvider.settings )
def line_dataprovider( self, dataset, **settings ):
samtools_source =3D dataproviders.dataset.SamtoolsDataProvider( da=
taset )
settings[ 'comment_char' ] =3D '@'
return dataproviders.line.FilteredLineDataProvider( samtools_sourc=
e, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'regex-line' )
+ @dataproviders.decorators.dataprovider_factory( 'regex-line', dataprov=
iders.line.RegexLineDataProvider.settings )
def regex_line_dataprovider( self, dataset, **settings ):
samtools_source =3D dataproviders.dataset.SamtoolsDataProvider( da=
taset )
settings[ 'comment_char' ] =3D '@'
return dataproviders.line.RegexLineDataProvider( samtools_source, =
**settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'column' )
+ @dataproviders.decorators.dataprovider_factory( 'column', dataprovider=
s.column.ColumnarDataProvider.settings )
def column_dataprovider( self, dataset, **settings ):
samtools_source =3D dataproviders.dataset.SamtoolsDataProvider( da=
taset )
settings[ 'comment_char' ] =3D '@'
return dataproviders.column.ColumnarDataProvider( samtools_source,=
**settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'map' )
+ @dataproviders.decorators.dataprovider_factory( 'map', dataproviders.c=
olumn.MapDataProvider.settings )
def map_dataprovider( self, dataset, **settings ):
samtools_source =3D dataproviders.dataset.SamtoolsDataProvider( da=
taset )
settings[ 'comment_char' ] =3D '@'
@@ -293,30 +293,30 @@
=20
# these can't be used directly - may need BamColumn, BamMap (Bam metad=
ata -> column/map)
# OR - see genomic_region_dataprovider
- #(a)dataproviders.decorators.dataprovider_factory( 'dataset-column' )
+ #(a)dataproviders.decorators.dataprovider_factory( 'dataset-column', dat=
aproviders.column.ColumnarDataProvider.settings )
#def dataset_column_dataprovider( self, dataset, **settings ):
# settings[ 'comment_char' ] =3D '@'
# return super( Sam, self ).dataset_column_dataprovider( dataset, *=
*settings )
=20
- #(a)dataproviders.decorators.dataprovider_factory( 'dataset-map' )
+ #(a)dataproviders.decorators.dataprovider_factory( 'dataset-map', datapr=
oviders.column.MapDataProvider.settings )
#def dataset_map_dataprovider( self, dataset, **settings ):
# settings[ 'comment_char' ] =3D '@'
# return super( Sam, self ).dataset_map_dataprovider( dataset, **se=
ttings )
=20
- @dataproviders.decorators.dataprovider_factory( 'header' )
+ @dataproviders.decorators.dataprovider_factory( 'header', dataprovider=
s.line.RegexLineDataProvider.settings )
def header_dataprovider( self, dataset, **settings ):
# in this case we can use an option of samtools view to provide ju=
st what we need (w/o regex)
samtools_source =3D dataproviders.dataset.SamtoolsDataProvider( da=
taset, '-H' )
return dataproviders.line.RegexLineDataProvider( samtools_source, =
**settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'id-seq-qual' )
+ @dataproviders.decorators.dataprovider_factory( 'id-seq-qual', datapro=
viders.column.MapDataProvider.settings )
def id_seq_qual_dataprovider( self, dataset, **settings ):
settings[ 'indeces' ] =3D [ 0, 9, 10 ]
settings[ 'column_types' ] =3D [ 'str', 'str', 'str' ]
settings[ 'column_names' ] =3D [ 'id', 'seq', 'qual' ]
return self.map_dataprovider( dataset, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'genomic-region' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region', data=
providers.column.ColumnarDataProvider.settings )
def genomic_region_dataprovider( self, dataset, **settings ):
# GenomicRegionDataProvider currently requires a dataset as source=
- may not be necc.
#TODO:?? consider (at least) the possible use of a kwarg: metadata=
_source (def. to source.dataset),
@@ -330,7 +330,7 @@
settings[ 'column_types' ] =3D [ 'str', 'int', 'int' ]
return self.column_dataprovider( dataset, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region-map', =
dataproviders.column.MapDataProvider.settings )
def genomic_region_map_dataprovider( self, dataset, **settings ):
settings[ 'indeces' ] =3D [ 2, 3, 3 ]
settings[ 'column_types' ] =3D [ 'str', 'int', 'int' ]
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216a33789218190f1f47 lib/galaxy/datatypes/data.py
--- a/lib/galaxy/datatypes/data.py
+++ b/lib/galaxy/datatypes/data.py
@@ -593,7 +593,6 @@
Base dataprovider factory for all datatypes that returns the prope=
r provider
for the given `data_format` or raises a `NoProviderAvailable`.
"""
- #TODO:?? is this handling super class providers?
if self.has_dataprovider( data_format ):
return self.dataproviders[ data_format ]( self, dataset, **set=
tings )
raise dataproviders.exceptions.NoProviderAvailable( self, data_for=
mat )
@@ -603,12 +602,12 @@
dataset_source =3D dataproviders.dataset.DatasetDataProvider( data=
set )
return dataproviders.base.DataProvider( dataset_source, **settings=
)
=20
- @dataproviders.decorators.dataprovider_factory( 'chunk' )
+ @dataproviders.decorators.dataprovider_factory( 'chunk', dataproviders=
.chunk.ChunkDataProvider.settings )
def chunk_dataprovider( self, dataset, **settings ):
dataset_source =3D dataproviders.dataset.DatasetDataProvider( data=
set )
return dataproviders.chunk.ChunkDataProvider( dataset_source, **se=
ttings )
=20
- @dataproviders.decorators.dataprovider_factory( 'chunk64' )
+ @dataproviders.decorators.dataprovider_factory( 'chunk64', dataprovide=
rs.chunk.Base64ChunkDataProvider.settings )
def chunk64_dataprovider( self, dataset, **settings ):
dataset_source =3D dataproviders.dataset.DatasetDataProvider( data=
set )
return dataproviders.chunk.Base64ChunkDataProvider( dataset_source=
, **settings )
@@ -785,7 +784,7 @@
split =3D classmethod(split)
=20
# ------------- Dataproviders
- @dataproviders.decorators.dataprovider_factory( 'line' )
+ @dataproviders.decorators.dataprovider_factory( 'line', dataproviders.=
line.FilteredLineDataProvider.settings )
def line_dataprovider( self, dataset, **settings ):
"""
Returns an iterator over the dataset's lines (that have been `stri=
p`ed)
@@ -794,7 +793,7 @@
dataset_source =3D dataproviders.dataset.DatasetDataProvider( data=
set )
return dataproviders.line.FilteredLineDataProvider( dataset_source=
, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'regex-line' )
+ @dataproviders.decorators.dataprovider_factory( 'regex-line', dataprov=
iders.line.RegexLineDataProvider.settings )
def regex_line_dataprovider( self, dataset, **settings ):
"""
Returns an iterator over the dataset's lines
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216a33789218190f1f47 lib/galaxy/datatypes/dataproviders/base.py
--- a/lib/galaxy/datatypes/dataproviders/base.py
+++ b/lib/galaxy/datatypes/dataproviders/base.py
@@ -22,8 +22,13 @@
=20
icorporate existing visualization/dataproviders
some of the sources (esp. in datasets) don't need to be re-created
+YAGNI: InterleavingMultiSourceDataProvider, CombiningMultiSourceDataProvid=
er
=20
-YAGNI: InterleavingMultiSourceDataProvider, CombiningMultiSourceDataProvid=
er
+datasets API entry point:
+ kwargs should be parsed from strings 2 layers up (in the DatasetsAPI) =
- that's the 'proper' place for that.
+ but how would it know how/what to parse if it doesn't have access to t=
he classes used in the provider?
+ Building a giant list by sweeping all possible dprov classes doesn=
't make sense
+ For now - I'm burying them in the class __init__s - but I don't like t=
hat
"""
=20
import logging
@@ -31,6 +36,31 @@
=20
=20
# ------------------------------------------------------------------------=
----- base classes
+class HasSettings( type ):
+ """
+ Metaclass for data providers that allows defining and inheriting
+ a dictionary named 'settings'.
+
+ Useful for allowing class level access to expected variable types
+ passed to class `__init__` functions so they can be parsed from a quer=
y string.
+ """
+ # yeah - this is all too acrobatic
+ def __new__( cls, name, base_classes, attributes ):
+ settings =3D {}
+ # get settings defined in base classes
+ for base_class in base_classes:
+ base_settings =3D getattr( base_class, 'settings', None )
+ if base_settings:
+ settings.update( base_settings )
+ # get settings defined in this class
+ new_settings =3D attributes.pop( 'settings', None )
+ if new_settings:
+ settings.update( new_settings )
+ attributes[ 'settings' ] =3D settings
+ return type.__new__( cls, name, base_classes, attributes )
+
+
+# ------------------------------------------------------------------------=
----- base classes
class DataProvider( object ):
"""
Base class for all data providers. Data providers:
@@ -39,6 +69,12 @@
(c) do not allow write methods
(but otherwise implement the other file object interface metho=
ds)
"""
+ # a definition of expected types for keyword arguments sent to __init__
+ # useful for controlling how query string dictionaries can be parsed=
into correct types for __init__
+ # empty in this base class
+ __metaclass__ =3D HasSettings
+ settings =3D {}
+
def __init__( self, source, **kwargs ):
"""
:param source: the source that this iterator will loop over.
@@ -130,13 +166,16 @@
- `num_valid_data_read`: how many data have been returned from `fi=
lter`.
- `num_data_returned`: how many data has this provider yielded.
"""
+ # not useful here - we don't want functions over the query string
+ #settings.update({ 'filter_fn': 'function' })
+
def __init__( self, source, filter_fn=3DNone, **kwargs ):
"""
:param filter_fn: a lambda or function that will be passed a datum=
and
return either the (optionally modified) datum or None.
"""
super( FilteredDataProvider, self ).__init__( source, **kwargs )
- self.filter_fn =3D filter_fn
+ self.filter_fn =3D filter_fn if hasattr( filter_fn, '__call__' ) e=
lse None
# count how many data we got from the source
self.num_data_read =3D 0
# how many valid data have we gotten from the source
@@ -179,6 +218,12 @@
=20
Useful for grabbing sections from a source (e.g. pagination).
"""
+ # define the expected types of these __init__ arguments so they can be=
parsed out from query strings
+ settings =3D {
+ 'limit' : 'int',
+ 'offset': 'int'
+ }
+
#TODO: may want to squash this into DataProvider
def __init__( self, source, offset=3D0, limit=3DNone, **kwargs ):
"""
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216a33789218190f1f47 lib/galaxy/datatypes/dataproviders/chunk.py
--- a/lib/galaxy/datatypes/dataproviders/chunk.py
+++ b/lib/galaxy/datatypes/dataproviders/chunk.py
@@ -26,6 +26,10 @@
"""
MAX_CHUNK_SIZE =3D 2**16
DEFAULT_CHUNK_SIZE =3D MAX_CHUNK_SIZE
+ settings =3D {
+ 'chunk_index' : 'int',
+ 'chunk_size' : 'int'
+ }
=20
#TODO: subclass from LimitedOffsetDataProvider?
# see web/framework/base.iterate_file, util/__init__.file_reader, and =
datatypes.tabular
@@ -38,8 +42,8 @@
(gen. in bytes).
"""
super( ChunkDataProvider, self ).__init__( source, **kwargs )
- self.chunk_size =3D chunk_size
- self.chunk_pos =3D chunk_index * self.chunk_size
+ self.chunk_size =3D int( chunk_size )
+ self.chunk_pos =3D int( chunk_index ) * self.chunk_size
=20
def validate_source( self, source ):
"""
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216a33789218190f1f47 lib/galaxy/datatypes/dataproviders/column.py
--- a/lib/galaxy/datatypes/dataproviders/column.py
+++ b/lib/galaxy/datatypes/dataproviders/column.py
@@ -29,6 +29,14 @@
the same number of columns as the number of indeces asked for (even if=
they
are filled with None).
"""
+ settings =3D {
+ 'indeces' : 'list:int',
+ 'column_count' : 'int',
+ 'column_types' : 'list:str',
+ 'parse_columns' : 'bool',
+ 'deliminator' : 'str'
+ }
+
def __init__( self, source, indeces=3DNone,
column_count=3DNone, column_types=3DNone, parsers=3DNone, pars=
e_columns=3DTrue,
deliminator=3D'\t', **kwargs ):
@@ -91,11 +99,11 @@
# how/whether to parse each column value
self.parsers =3D {}
if parse_columns:
- self.parsers =3D self._get_default_parsers()
+ self.parsers =3D self.get_default_parsers()
# overwrite with user desired parsers
self.parsers.update( parsers or {} )
=20
- def _get_default_parsers( self ):
+ def get_default_parsers( self ):
"""
Return parser dictionary keyed for each columnar type
(as defined in datatypes).
@@ -132,7 +140,7 @@
#'gffstrand': # -, +, ?, or '.' for None, etc.
}
=20
- def _parse_value( self, val, type ):
+ def parse_value( self, val, type ):
"""
Attempt to parse and return the given value based on the given typ=
e.
=20
@@ -153,7 +161,7 @@
return None
return val
=20
- def _get_column_type( self, index ):
+ def get_column_type( self, index ):
"""
Get the column type for the parser from `self.column_types` or `No=
ne`
if the type is unavailable.
@@ -165,18 +173,18 @@
except IndexError, ind_err:
return None
=20
- def _parse_column_at_index( self, columns, parser_index, index ):
+ def parse_column_at_index( self, columns, parser_index, index ):
"""
Get the column type for the parser from `self.column_types` or `No=
ne`
if the type is unavailable.
"""
try:
- return self._parse_value( columns[ index ], self._get_column_t=
ype( parser_index ) )
+ return self.parse_value( columns[ index ], self.get_column_typ=
e( parser_index ) )
# if a selected index is not within columns, return None
except IndexError, index_err:
return None
=20
- def _parse_columns_from_line( self, line ):
+ def parse_columns_from_line( self, line ):
"""
Returns a list of the desired, parsed columns.
:param line: the line to parse
@@ -188,13 +196,13 @@
selected_indeces =3D self.selected_column_indeces or list( xrange(=
len( all_columns ) ) )
parsed_columns =3D []
for parser_index, column_index in enumerate( selected_indeces ):
- parsed_columns.append( self._parse_column_at_index( all_column=
s, parser_index, column_index ) )
+ parsed_columns.append( self.parse_column_at_index( all_columns=
, parser_index, column_index ) )
return parsed_columns
=20
def __iter__( self ):
parent_gen =3D super( ColumnarDataProvider, self ).__iter__()
for line in parent_gen:
- columns =3D self._parse_columns_from_line( line )
+ columns =3D self.parse_columns_from_line( line )
yield columns
=20
#TODO: implement column filters here and not below - flatten hierarchy
@@ -223,6 +231,10 @@
.. note: that the subclass constructors are passed kwargs - so they're
params (limit, offset, etc.) are also applicable here.
"""
+ settings =3D {
+ 'column_names' : 'list:str',
+ }
+
def __init__( self, source, column_names=3DNone, **kwargs ):
"""
:param column_names: an ordered list of strings that will be used =
as the keys
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216a33789218190f1f47 lib/galaxy/datatypes/dataproviders/dataset.py
--- a/lib/galaxy/datatypes/dataproviders/dataset.py
+++ b/lib/galaxy/datatypes/dataproviders/dataset.py
@@ -141,7 +141,7 @@
"""
# metadata columns are 1-based indeces
column =3D getattr( self.dataset.metadata, name )
- return ( column - 1 ) if isinstance( column, int ) else None
+ return ( column - 1 ) if ( isinstance( column, int ) and column > =
0 ) else None
=20
def get_genomic_region_indeces( self, check=3DFalse ):
"""
@@ -271,6 +271,12 @@
"""
# dictionary keys when named_columns=3DTrue
COLUMN_NAMES =3D [ 'chrom', 'start', 'end' ]
+ settings =3D {
+ 'chrom_column' : 'int',
+ 'start_column' : 'int',
+ 'end_column' : 'int',
+ 'named_columns' : 'bool',
+ }
=20
def __init__( self, dataset, chrom_column=3DNone, start_column=3DNone,=
end_column=3DNone, named_columns=3DFalse, **kwargs ):
"""
@@ -333,6 +339,14 @@
'chrom', 'start', 'end' (and 'strand' and 'name' if available).
"""
COLUMN_NAMES =3D [ 'chrom', 'start', 'end', 'strand', 'name' ]
+ settings =3D {
+ 'chrom_column' : 'int',
+ 'start_column' : 'int',
+ 'end_column' : 'int',
+ 'strand_column' : 'int',
+ 'name_column' : 'int',
+ 'named_columns' : 'bool',
+ }
=20
def __init__( self, dataset, chrom_column=3DNone, start_column=3DNone,=
end_column=3DNone,
strand_column=3DNone, name_column=3DNone, named_columns=
=3DFalse, **kwargs ):
@@ -349,25 +363,40 @@
dataset_source =3D DatasetDataProvider( dataset )
=20
# get genomic indeces and add strand and name
+ self.column_names =3D []
+ indeces =3D []
+ #TODO: this is sort of involved and oogly
if chrom_column =3D=3D None:
chrom_column =3D dataset_source.get_metadata_column_index_by_n=
ame( 'chromCol' )
+ if chrom_column !=3D None:
+ self.column_names.append( 'chrom' )
+ indeces.append( chrom_column )
if start_column =3D=3D None:
start_column =3D dataset_source.get_metadata_column_index_by_n=
ame( 'startCol' )
+ if start_column !=3D None:
+ self.column_names.append( 'start' )
+ indeces.append( start_column )
if end_column =3D=3D None:
end_column =3D dataset_source.get_metadata_column_index_by_nam=
e( 'endCol' )
+ if end_column !=3D None:
+ self.column_names.append( 'end' )
+ indeces.append( end_column )
if strand_column =3D=3D None:
strand_column =3D dataset_source.get_metadata_column_index_by_=
name( 'strandCol' )
+ if strand_column !=3D None:
+ self.column_names.append( 'strand' )
+ indeces.append( strand_column )
if name_column =3D=3D None:
name_column =3D dataset_source.get_metadata_column_index_by_na=
me( 'nameCol' )
- indeces =3D [ chrom_column, start_column, end_column, strand_colum=
n, name_column ]
+ if name_column !=3D None:
+ self.column_names.append( 'name' )
+ indeces.append( name_column )
+
kwargs.update({ 'indeces' : indeces })
-
if not kwargs.get( 'column_types', None ):
kwargs.update({ 'column_types' : dataset_source.get_metadata_c=
olumn_types( indeces=3Dindeces ) })
=20
self.named_columns =3D named_columns
- if self.named_columns:
- self.column_names =3D self.COLUMN_NAMES
=20
super( IntervalDataProvider, self ).__init__( dataset_source, **kw=
args )
=20
@@ -390,6 +419,10 @@
sequence: <joined lines of nucleotide/amino data>
}
"""
+ settings =3D {
+ 'ids' : 'list:str',
+ }
+
def __init__( self, source, ids=3DNone, **kwargs ):
"""
:param ids: optionally return only ids (and sequences) that are in=
this list.
@@ -419,6 +452,10 @@
sequence: <joined lines of nucleotide/amino data>
}
"""
+ settings =3D {
+ 'ids' : 'list:str',
+ }
+
def __init__( self, source, ids=3DNone, **kwargs ):
"""
:param ids: optionally return only ids (and sequences) that are in=
this list.
@@ -445,6 +482,10 @@
Class that returns chrom, pos, data from a wiggle source.
"""
COLUMN_NAMES =3D [ 'chrom', 'pos', 'value' ]
+ settings =3D {
+ 'named_columns' : 'bool',
+ 'column_names' : 'list:str',
+ }
=20
def __init__( self, source, named_columns=3DFalse, column_names=3DNone=
, **kwargs ):
"""
@@ -483,6 +524,10 @@
Class that returns chrom, pos, data from a wiggle source.
"""
COLUMN_NAMES =3D [ 'chrom', 'pos', 'value' ]
+ settings =3D {
+ 'named_columns' : 'bool',
+ 'column_names' : 'list:str',
+ }
=20
def __init__( self, source, chrom, start, end, named_columns=3DFalse, =
column_names=3DNone, **kwargs ):
"""
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216a33789218190f1f47 lib/galaxy/datatypes/dataproviders/decorators.py
--- a/lib/galaxy/datatypes/dataproviders/decorators.py
+++ b/lib/galaxy/datatypes/dataproviders/decorators.py
@@ -87,17 +87,40 @@
# log.debug( '\t\t ', fn.__doc__ )
return cls
=20
-def dataprovider_factory( name ):
+def dataprovider_factory( name, settings=3DNone ):
"""
- Wraps a class method and marks it as a dataprovider factory.
+ Wraps a class method and marks it as a dataprovider factory and create=
s a
+ function to parse query strings to __init__ arguments as the
+ `parse_query_string_settings` attribute of the factory function.
+
+ An example use of the `parse_query_string_settings`:
+ ..example::
+ kwargs =3D dataset.datatype.dataproviders[ provider ].parse_query_stri=
ng_settings( query_kwargs )
+ return list( dataset.datatype.dataprovider( dataset, provider, **kwarg=
s ) )
=20
:param name: what name/key to register the factory under in `cls.datap=
roviders`
- :param type: any hashable var
+ :type name: any hashable var
+ :param settings: dictionary containing key/type pairs for parsing quer=
y strings
+ to __init__ arguments
+ :type settings: dictionary
"""
+ #TODO:?? use *args for settings allowing mulitple dictionaries
+ # make a function available through the name->provider dispatch to par=
se query strings
+ # callable like:
+ # settings_dict =3D dataproviders[ provider_name ].parse_query_string_=
settings( query_kwargs )
+ #TODO: ugh - overly complicated but the best I could think of
+ def parse_query_string_settings( query_kwargs ):
+ return _parse_query_string_settings( query_kwargs, settings )
+
#log.debug( 'dataprovider:', name )
def named_dataprovider_factory( func ):
#log.debug( 'named_dataprovider_factory:', name, '->', func.__name=
__ )
setattr( func, _DATAPROVIDER_METHOD_NAME_KEY, name )
+
+ setattr( func, 'parse_query_string_settings', parse_query_string_s=
ettings )
+ setattr( func, 'settings', settings )
+ #TODO: I want a way to inherit settings from the previous provider=
( this_name ) instead of defining over and over
+
#log.debug( '\t setting:', getattr( func, _DATAPROVIDER_METHOD_NAM=
E_KEY ) )
@wraps( func )
def wrapped_dataprovider_factory( self, *args, **kwargs ):
@@ -105,3 +128,38 @@
return func( self, *args, **kwargs )
return wrapped_dataprovider_factory
return named_dataprovider_factory
+
+def _parse_query_string_settings( query_kwargs, settings=3DNone ):
+ """
+ Parse the values in `query_kwargs` from strings to the proper types
+ listed in the same key in `settings`.
+ """
+ def list_from_query_string( s ):
+ # assume csv
+ return s.split( ',' )
+
+ parsers =3D {
+ 'int' : int,
+ 'float' : float,
+ 'bool' : bool,
+ 'list:str' : lambda s: list_from_query_string( s ),
+ 'list:int' : lambda s: [ int( i ) for i in list_from_query_string=
( s ) ],
+ }
+ settings =3D settings or {}
+ # yay! yet another set of query string parsers! <-- sarcasm
+ # work through the keys in settings finding matching keys in query_kwa=
rgs
+ # if found in both, get the expected/needed type from settings and s=
tore the new parsed value
+ # if we can't parse it (no parser, bad value), delete the key from q=
uery_kwargs so the provider will use the defaults
+ for key in settings:
+ if key in query_kwargs:
+ #TODO: this would be the place to sanitize any strings
+ query_value =3D query_kwargs[ key ]
+ needed_type =3D settings[ key ]
+ try:
+ query_kwargs[ key ] =3D parsers[ needed_type ]( query_valu=
e )
+ except ( KeyError, ValueError ):
+ del query_kwargs[ key ]
+
+ #TODO:?? do we want to remove query_kwarg entries NOT in settings?
+ return query_kwargs
+
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/galaxy/datatypes/dataproviders/line.py
--- a/lib/galaxy/datatypes/dataproviders/line.py
+++ b/lib/galaxy/datatypes/dataproviders/line.py
@@ -27,6 +27,12 @@
to return.
"""
DEFAULT_COMMENT_CHAR =3D '#'
+ settings =3D {
    +        'strip_lines'   : 'bool',
+ 'provide_blank' : 'bool',
+ 'comment_char' : 'str',
+ }
+
def __init__( self, source, strip_lines=3DTrue, provide_blank=3DFalse,=
comment_char=3DDEFAULT_COMMENT_CHAR, **kwargs ):
"""
    :param strip_lines: remove whitespace from the beginning and ending
@@ -78,6 +84,11 @@
.. note:: the regex matches are effectively OR'd (if **any** regex mat=
ches
the line it is considered valid and will be provided).
"""
+ settings =3D {
+ 'regex_list' : 'list:str',
+ 'invert' : 'bool',
+ }
+
def __init__( self, source, regex_list=3DNone, invert=3DFalse, **kwarg=
s ):
"""
:param regex_list: list of strings or regular expression strings t=
hat will
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/galaxy/datatypes/interval.py
--- a/lib/galaxy/datatypes/interval.py
+++ b/lib/galaxy/datatypes/interval.py
@@ -334,20 +334,24 @@
return None
=20
# ------------- Dataproviders
- @dataproviders.decorators.dataprovider_factory( 'genomic-region' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region',
+ dataproviders.dataset.=
GenomicRegionDataProvider.settings )
def genomic_region_dataprovider( self, dataset, **settings ):
return dataproviders.dataset.GenomicRegionDataProvider( dataset, *=
*settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region-map',
+ dataproviders.dataset.=
GenomicRegionDataProvider.settings )
def genomic_region_map_dataprovider( self, dataset, **settings ):
settings[ 'named_columns' ] =3D True
return self.genomic_region_dataprovider( dataset, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'interval' )
+ @dataproviders.decorators.dataprovider_factory( 'interval',
+ dataproviders.dataset.=
IntervalDataProvider.settings )
def interval_dataprovider( self, dataset, **settings ):
return dataproviders.dataset.IntervalDataProvider( dataset, **sett=
ings )
=20
- @dataproviders.decorators.dataprovider_factory( 'interval-map' )
+ @dataproviders.decorators.dataprovider_factory( 'interval-map',
+ dataproviders.dataset.=
IntervalDataProvider.settings )
def interval_map_dataprovider( self, dataset, **settings ):
settings[ 'named_columns' ] =3D True
return self.interval_dataprovider( dataset, **settings )
@@ -809,20 +813,24 @@
=20
# ------------- Dataproviders
# redefine bc super is Tabular
- @dataproviders.decorators.dataprovider_factory( 'genomic-region' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region',
+ dataproviders.dataset.=
GenomicRegionDataProvider.settings )
def genomic_region_dataprovider( self, dataset, **settings ):
return dataproviders.dataset.GenomicRegionDataProvider( dataset, 0=
, 3, 4, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region-map',
+ dataproviders.dataset.=
GenomicRegionDataProvider.settings )
def genomic_region_map_dataprovider( self, dataset, **settings ):
settings[ 'named_columns' ] =3D True
return self.genomic_region_dataprovider( dataset, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'interval' )
+ @dataproviders.decorators.dataprovider_factory( 'interval',
+ dataproviders.dataset.=
IntervalDataProvider.settings )
def interval_dataprovider( self, dataset, **settings ):
return dataproviders.dataset.IntervalDataProvider( dataset, 0, 3, =
4, 6, 2, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'interval-map' )
+ @dataproviders.decorators.dataprovider_factory( 'interval-map',
+ dataproviders.dataset.=
IntervalDataProvider.settings )
def interval_map_dataprovider( self, dataset, **settings ):
settings[ 'named_columns' ] =3D True
return self.interval_dataprovider( dataset, **settings )
@@ -1193,12 +1201,12 @@
return resolution
=20
# ------------- Dataproviders
- @dataproviders.decorators.dataprovider_factory( 'wiggle' )
+ @dataproviders.decorators.dataprovider_factory( 'wiggle', dataprovider=
s.dataset.WiggleDataProvider.settings )
def wiggle_dataprovider( self, dataset, **settings ):
dataset_source =3D dataproviders.dataset.DatasetDataProvider( data=
set )
return dataproviders.dataset.WiggleDataProvider( dataset_source, *=
*settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'wiggle-map' )
+ @dataproviders.decorators.dataprovider_factory( 'wiggle-map', dataprov=
iders.dataset.WiggleDataProvider.settings )
def wiggle_map_dataprovider( self, dataset, **settings ):
dataset_source =3D dataproviders.dataset.DatasetDataProvider( data=
set )
settings[ 'named_columns' ] =3D True
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/galaxy/datatypes/sequence.py
--- a/lib/galaxy/datatypes/sequence.py
+++ b/lib/galaxy/datatypes/sequence.py
@@ -15,8 +15,6 @@
from galaxy import util
from sniff import *
=20
-from galaxy.datatypes import dataproviders
-
import pkg_resources
pkg_resources.require("simplejson")
import simplejson
@@ -399,15 +397,6 @@
f.close()
_count_split =3D classmethod(_count_split)
=20
- def provider( self, dataset, data_format, **settings ):
- from galaxy.dataproviders import dataset as dataset_providers
-
- if data_format =3D=3D 'id_seq':
- source =3D dataset_providers.DatasetDataProvider( dataset )
- return dataset_providers.FastaDataProvider( source, **settings=
)
-
- return super( Fasta, self ).provider( dataset, data_format, **sett=
ings )
-
=20
class csFasta( Sequence ):
""" Class representing the SOLID Color-Space sequence ( csfasta ) """
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/galaxy/datatypes/sniff.py
--- a/lib/galaxy/datatypes/sniff.py
+++ b/lib/galaxy/datatypes/sniff.py
@@ -6,6 +6,7 @@
from galaxy import util
from galaxy.datatypes.checkers import *
from encodings import search_function as encodings_search_function
+from binary import Binary
=20
log =3D logging.getLogger(__name__)
=20
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/galaxy/datatypes/tabular.py
--- a/lib/galaxy/datatypes/tabular.py
+++ b/lib/galaxy/datatypes/tabular.py
@@ -345,26 +345,25 @@
return vizs
=20
# ------------- Dataproviders
- @dataproviders.decorators.dataprovider_factory( 'column' )
+ @dataproviders.decorators.dataprovider_factory( 'column', dataprovider=
s.column.ColumnarDataProvider.settings )
def column_dataprovider( self, dataset, **settings ):
"""Uses column settings that are passed in"""
- print 'Tabular.comment_char:', settings.get( 'comment_char', None )
-
dataset_source =3D dataproviders.dataset.DatasetDataProvider( data=
set )
return dataproviders.column.ColumnarDataProvider( dataset_source, =
**settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'dataset-column' )
+ @dataproviders.decorators.dataprovider_factory( 'dataset-column',
+ dataproviders.column.C=
olumnarDataProvider.settings )
def dataset_column_dataprovider( self, dataset, **settings ):
"""Attempts to get column settings from dataset.metadata"""
return dataproviders.dataset.DatasetColumnarDataProvider( dataset,=
**settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'map' )
+ @dataproviders.decorators.dataprovider_factory( 'map', dataproviders.c=
olumn.MapDataProvider.settings )
def map_dataprovider( self, dataset, **settings ):
"""Uses column settings that are passed in"""
dataset_source =3D dataproviders.dataset.DatasetDataProvider( data=
set )
return dataproviders.column.MapDataProvider( dataset_source, **set=
tings )
=20
- @dataproviders.decorators.dataprovider_factory( 'dataset-map' )
+ @dataproviders.decorators.dataprovider_factory( 'dataset-map', datapro=
viders.column.MapDataProvider.settings )
def dataset_map_dataprovider( self, dataset, **settings ):
"""Attempts to get column settings from dataset.metadata"""
return dataproviders.dataset.DatasetMapDataProvider( dataset, **se=
ttings )
@@ -502,55 +501,58 @@
# ------------- Dataproviders
# sam does not use '#' to indicate comments/headers - we need to strip=
out those headers from the std. providers
#TODO:?? seems like there should be an easier way to do this - metadat=
a.comment_char?
- @dataproviders.decorators.dataprovider_factory( 'line' )
+ @dataproviders.decorators.dataprovider_factory( 'line', dataproviders.=
line.FilteredLineDataProvider.settings )
def line_dataprovider( self, dataset, **settings ):
settings[ 'comment_char' ] =3D '@'
return super( Sam, self ).line_dataprovider( dataset, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'regex-line' )
+ @dataproviders.decorators.dataprovider_factory( 'regex-line', dataprov=
iders.line.RegexLineDataProvider.settings )
def regex_line_dataprovider( self, dataset, **settings ):
settings[ 'comment_char' ] =3D '@'
return super( Sam, self ).regex_line_dataprovider( dataset, **sett=
ings )
=20
- @dataproviders.decorators.dataprovider_factory( 'column' )
+ @dataproviders.decorators.dataprovider_factory( 'column', dataprovider=
s.column.ColumnarDataProvider.settings )
def column_dataprovider( self, dataset, **settings ):
settings[ 'comment_char' ] =3D '@'
return super( Sam, self ).column_dataprovider( dataset, **settings=
)
=20
- @dataproviders.decorators.dataprovider_factory( 'dataset-column' )
+ @dataproviders.decorators.dataprovider_factory( 'dataset-column',
+ dataproviders.column.C=
olumnarDataProvider.settings )
def dataset_column_dataprovider( self, dataset, **settings ):
settings[ 'comment_char' ] =3D '@'
return super( Sam, self ).dataset_column_dataprovider( dataset, **=
settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'map' )
+ @dataproviders.decorators.dataprovider_factory( 'map', dataproviders.c=
olumn.MapDataProvider.settings )
def map_dataprovider( self, dataset, **settings ):
settings[ 'comment_char' ] =3D '@'
return super( Sam, self ).map_dataprovider( dataset, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'dataset-map' )
+ @dataproviders.decorators.dataprovider_factory( 'dataset-map', datapro=
viders.column.MapDataProvider.settings )
def dataset_map_dataprovider( self, dataset, **settings ):
settings[ 'comment_char' ] =3D '@'
return super( Sam, self ).dataset_map_dataprovider( dataset, **set=
tings )
=20
- @dataproviders.decorators.dataprovider_factory( 'header' )
+ @dataproviders.decorators.dataprovider_factory( 'header', dataprovider=
s.line.RegexLineDataProvider.settings )
def header_dataprovider( self, dataset, **settings ):
dataset_source =3D dataproviders.dataset.DatasetDataProvider( data=
set )
headers_source =3D dataproviders.line.RegexLineDataProvider( datas=
et_source, regex_list=3D[ '^@' ] )
return dataproviders.line.RegexLineDataProvider( headers_source, *=
*settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'id-seq-qual' )
+ @dataproviders.decorators.dataprovider_factory( 'id-seq-qual', map_dat=
aprovider.settings )
def id_seq_qual_dataprovider( self, dataset, **settings ):
# provided as an example of a specified column map (w/o metadata)
settings[ 'indeces' ] =3D [ 0, 9, 10 ]
settings[ 'column_names' ] =3D [ 'id', 'seq', 'qual' ]
return self.map_dataprovider( dataset, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'genomic-region' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region',
+ dataproviders.dataset.=
GenomicRegionDataProvider.settings )
def genomic_region_dataprovider( self, dataset, **settings ):
settings[ 'comment_char' ] =3D '@'
return dataproviders.dataset.GenomicRegionDataProvider( dataset, 2=
, 3, 3, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region-map',
+ dataproviders.dataset.=
GenomicRegionDataProvider.settings )
def genomic_region_map_dataprovider( self, dataset, **settings ):
settings[ 'comment_char' ] =3D '@'
return dataproviders.dataset.GenomicRegionDataProvider( dataset, 2=
, 3, 3, True, **settings )
@@ -621,11 +623,13 @@
return False
=20
# ------------- Dataproviders
- @dataproviders.decorators.dataprovider_factory( 'genomic-region' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region',
+ dataproviders.dataset.=
GenomicRegionDataProvider.settings )
def genomic_region_dataprovider( self, dataset, **settings ):
return dataproviders.dataset.GenomicRegionDataProvider( dataset, *=
*settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region-map',
+ dataproviders.dataset.=
GenomicRegionDataProvider.settings )
def genomic_region_map_dataprovider( self, dataset, **settings ):
settings[ 'named_columns' ] =3D True
return self.genomic_region_dataprovider( dataset, **settings )
@@ -668,11 +672,13 @@
dataset.metadata.sample_names =3D line.split()[ 9: ]
=20
# ------------- Dataproviders
- @dataproviders.decorators.dataprovider_factory( 'genomic-region' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region',
+ dataproviders.dataset.=
GenomicRegionDataProvider.settings )
def genomic_region_dataprovider( self, dataset, **settings ):
return dataproviders.dataset.GenomicRegionDataProvider( dataset, 0=
, 1, 1, **settings )
=20
- @dataproviders.decorators.dataprovider_factory( 'genomic-region-map' )
+ @dataproviders.decorators.dataprovider_factory( 'genomic-region-map',
+ dataproviders.dataset.=
GenomicRegionDataProvider.settings )
def genomic_region_map_dataprovider( self, dataset, **settings ):
settings[ 'named_columns' ] =3D True
return self.genomic_region_dataprovider( dataset, **settings )
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/galaxy/visualization/data_providers/registry.py
--- a/lib/galaxy/visualization/data_providers/registry.py
+++ b/lib/galaxy/visualization/data_providers/registry.py
@@ -32,7 +32,7 @@
"bigwig": genome.BigWigDataProvider,
"bigbed": genome.BigBedDataProvider,
=20
- "column": ColumnDataProvider
+ "column_with_stats": ColumnDataProvider
}
=20
def get_data_provider( self, trans, name=3DNone, source=3D'data', raw=
=3DFalse, original_dataset=3DNone ):
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/galaxy/visualization/registry.py
--- a/lib/galaxy/visualization/registry.py
+++ b/lib/galaxy/visualization/registry.py
@@ -15,22 +15,27 @@
log =3D logging.getLogger( __name__ )
=20
__TODO__ =3D """
- BUGS:
- anon users clicking a viz link gets 'must be' msg in galaxy_main (=
w/ masthead)
- should not show visualizations (no icon)?
- newick files aren't being sniffed prop? - datatype is txt
+BUGS:
+ anon users clicking a viz link gets 'must be' msg in galaxy_main (w/ m=
asthead)
+ should not show visualizations (no icon)?
+ newick files aren't being sniffed prop? - datatype is txt
=20
- have parsers create objects instead of dicts
- allow data_sources with no model_class but have tests (isAdmin, etc.)
- maybe that's an instance of User model_class?
- some confused vocabulary in docs, var names
- tests:
- anding, grouping, not
- data_sources:
- lists of
- add description element to visualization.
+have parsers create objects instead of dicts
+allow data_sources with no model_class but have tests (isAdmin, etc.)
+ maybe that's an instance of User model_class?
+some confused vocabulary in docs, var names
+tests:
+ anding, grouping, not
+data_sources:
+ lists of
+add description element to visualization.
+
+TESTS to add:
+ has dataprovider
+ user is admin
"""
=20
+# ------------------------------------------------------------------- the =
registry
class VisualizationsRegistry( object ):
"""
Main responsibilities are:
@@ -93,6 +98,45 @@
"""
self.listings =3D VisualizationsConfigParser.parse( self.configura=
tion_filepath )
=20
+ def get_visualization( self, trans, visualization_name, target_object =
):
+ """
+ Return data to build a url to the visualization with the given
+ `visualization_name` if it's applicable to `target_object` or
+ `None` if it's not.
+ """
+ # a little weird to pass trans because this registry is part of th=
e trans.app
+ listing_data =3D self.listings.get( visualization_name, None )
+ if not listing_data:
+ return None
+
+ data_sources =3D listing_data[ 'data_sources' ]
+ for data_source in data_sources:
+ # currently a model class is required
+ model_class =3D data_source[ 'model_class' ]
+ if not isinstance( target_object, model_class ):
+ continue
+
+ # tests are optional - default is the above class test
+ tests =3D data_source[ 'tests' ]
+ if tests and not self.is_object_applicable( trans, target_obje=
ct, tests ):
+ continue
+
+ param_data =3D data_source[ 'to_params' ]
+ url =3D self.get_visualization_url( trans, target_object, visu=
alization_name, param_data )
+ link_text =3D listing_data.get( 'link_text', None )
+ if not link_text:
+ # default to visualization name, titlecase, and replace un=
derscores
+ link_text =3D visualization_name.title().replace( '_', ' '=
)
+ render_location =3D listing_data.get( 'render_location' )
+ # remap some of these vars for direct use in ui.js, PopupMenu =
(e.g. text->html)
+ return {
+ 'href' : url,
+ 'html' : link_text,
+ 'target': render_location
+ }
+
+ return None
+
# -- building links to visualizations from objects --
def get_visualizations( self, trans, target_object ):
"""
@@ -100,36 +144,11 @@
the urls to call in order to render the visualizations.
"""
#TODO:?? a list of objects? YAGNI?
- # a little weird to pass trans because this registry is part of th=
e trans.app
applicable_visualizations =3D []
- for vis_name, listing_data in self.listings.items():
-
- data_sources =3D listing_data[ 'data_sources' ]
- for data_source in data_sources:
- # currently a model class is required
- model_class =3D data_source[ 'model_class' ]
- if not isinstance( target_object, model_class ):
- continue
-
- # tests are optional - default is the above class test
- tests =3D data_source[ 'tests' ]
- if tests and not self.is_object_applicable( trans, target_=
object, tests ):
- continue
-
- param_data =3D data_source[ 'to_params' ]
- url =3D self.get_visualization_url( trans, target_object, =
vis_name, param_data )
- link_text =3D listing_data.get( 'link_text', None )
- if not link_text:
- # default to visualization name, titlecase, and replac=
e underscores
- link_text =3D vis_name.title().replace( '_', ' ' )
- render_location =3D listing_data.get( 'render_location' )
- # remap some of these vars for direct use in ui.js, PopupM=
enu (e.g. text->html)
- applicable_visualizations.append({
- 'href' : url,
- 'html' : link_text,
- 'target': render_location
- })
-
+ for vis_name in self.listings:
+ url_data =3D self.get_visualization( trans, vis_name, target_o=
bject )
+ if url_data:
+ applicable_visualizations.append( url_data )
return applicable_visualizations
=20
def is_object_applicable( self, trans, target_object, data_source_test=
s ):
@@ -151,10 +170,11 @@
# convert datatypes to their actual classes (for use w=
ith isinstance)
test_result =3D trans.app.datatypes_registry.get_datat=
ype_class_by_name( test_result )
if not test_result:
- # warn if can't find class, but continue
+ # warn if can't find class, but continue (with oth=
er tests)
log.warn( 'visualizations_registry cannot find cla=
ss (%s) for applicability test', test_result )
continue
=20
+ #NOTE: tests are OR'd, if any test passes - the visualization =
can be applied
if test_fn( target_object, test_result ):
#log.debug( 'test passed' )
return True
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/galaxy/webapps/galaxy/api/datasets.py
--- a/lib/galaxy/webapps/galaxy/api/datasets.py
+++ b/lib/galaxy/webapps/galaxy/api/datasets.py
@@ -6,6 +6,7 @@
from galaxy.web.base.controller import BaseAPIController, UsesVisualizatio=
nMixin, UsesHistoryDatasetAssociationMixin
from galaxy.web.base.controller import UsesHistoryMixin
from galaxy.web.framework.helpers import is_true
+from galaxy.datatypes import dataproviders
=20
import logging
log =3D logging.getLogger( __name__ )
@@ -217,10 +218,24 @@
return msg
=20
registry =3D trans.app.data_provider_registry
+
    # allow the caller to specify which provider is used
- if provider and provider in registry.dataset_type_name_to_data_pro=
vider:
- data_provider =3D registry.dataset_type_name_to_data_provider[=
provider ]( dataset )
- # or have it look up by datatype
+ # pulling from the original providers if possible, then the new =
providers
+ if provider:
+ if provider in registry.dataset_type_name_to_data_provider:
+ data_provider =3D registry.dataset_type_name_to_data_provi=
der[ provider ]( dataset )
+
+ elif dataset.datatype.has_dataprovider( provider ):
+ kwargs =3D dataset.datatype.dataproviders[ provider ].pars=
e_query_string_settings( kwargs )
+ # use dictionary to allow more than the data itself to be =
returned (data totals, other meta, etc.)
+ return {
+ 'data': list( dataset.datatype.dataprovider( dataset, =
provider, **kwargs ) )
+ }
+
+ else:
+ raise dataproviders.exceptions.NoProviderAvailable( datase=
t.datatype, provider )
+
+ # no provider name: look up by datatype
else:
data_provider =3D registry.get_data_provider( trans, raw=3DTru=
e, original_dataset=3Ddataset )
=20
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
--- a/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
+++ b/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
@@ -179,6 +179,11 @@
# Get the information about the Galaxy components (e.g., tool pane=
section, tool config file, etc) that will contain the repository informati=
on.
install_repository_dependencies =3D payload.get( 'install_reposito=
ry_dependencies', False )
install_tool_dependencies =3D payload.get( 'install_tool_dependenc=
ies', False )
+ if install_tool_dependencies:
+ if trans.app.config.tool_dependency_dir is None:
+ no_tool_dependency_dir_message =3D "Tool dependencies can =
be automatically installed only if you set the value of your 'tool_dependen=
cy_dir' "
+ no_tool_dependency_dir_message +=3D "setting in your Galax=
y configuration file (universe_wsgi.ini) and restart your Galaxy server."
+ raise HTTPBadRequest( detail=3Dno_tool_dependency_dir_mess=
age )
new_tool_panel_section =3D payload.get( 'new_tool_panel_section_la=
bel', '' )
shed_tool_conf =3D payload.get( 'shed_tool_conf', None )
if shed_tool_conf:
@@ -211,13 +216,8 @@
tool_path=3Dtool_path,
tool_shed_url=3Dtool_shed_url )
# Create the tool_shed_repository database records and gather addi=
tional information for repository installation.
- created_or_updated_tool_shed_repositories, tool_panel_section_keys=
, repo_info_dicts, filtered_repo_info_dicts, message =3D \
+ created_or_updated_tool_shed_repositories, tool_panel_section_keys=
, repo_info_dicts, filtered_repo_info_dicts =3D \
repository_util.handle_tool_shed_repositories( trans, installa=
tion_dict, using_api=3DTrue )
- if message and len( repo_info_dicts ) =3D=3D 1:
- # We're attempting to install a single repository that has alr=
eady been installed into this Galaxy instance.
- log.error( message, exc_info=3DTrue )
- trans.response.status =3D 500
- return dict( status=3D'error', error=3Dmessage )
if created_or_updated_tool_shed_repositories:
# Build the dictionary of information necessary for installing=
the repositories.
installation_dict =3D dict( created_or_updated_tool_shed_repos=
itories=3Dcreated_or_updated_tool_shed_repositories,
@@ -266,11 +266,7 @@
acti=
on=3D'show',
id=
=3Dtrans.security.encode_id( tool_shed_repository.id ) )
installed_tool_shed_repositories.append( tool_shed_rep=
ository_dict )
- elif message:
- log.error( message, exc_info=3DTrue )
- trans.response.status =3D 500
- return dict( status=3D'error', error=3Dmessage )
- elif not created_or_updated_tool_shed_repositories and not message:
+ else:
# We're attempting to install more than 1 repository, and all =
of them have already been installed.
return dict( status=3D'error', error=3D'All repositories that =
you are attempting to install have been previously installed.' )
# Display the list of installed repositories.
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
--- a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
+++ b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
@@ -155,7 +155,8 @@
def browse_tool_dependency( self, trans, **kwd ):
message =3D kwd.get( 'message', '' )
status =3D kwd.get( 'status', 'done' )
- tool_dependency =3D tool_dependency_util.get_tool_dependency( tran=
s, kwd[ 'id' ] )
+ tool_dependency_ids =3D tool_dependency_util.get_tool_dependency_i=
ds( as_string=3DFalse, **kwd )
+ tool_dependency =3D tool_dependency_util.get_tool_dependency( tran=
s, tool_dependency_ids[ 0 ] )
if tool_dependency.in_error_state:
message =3D "This tool dependency is not installed correctly (=
see the <b>Tool dependency installation error</b> below). "
message +=3D "Choose <b>Uninstall this tool dependency</b> fro=
m the <b>Repository Actions</b> menu, correct problems "
@@ -482,9 +483,7 @@
def install_tool_dependencies( self, trans, **kwd ):
message =3D kwd.get( 'message', '' )
status =3D kwd.get( 'status', 'done' )
- tool_dependency_ids =3D util.listify( kwd.get( 'tool_dependency_id=
s', None ) )
- if not tool_dependency_ids:
- tool_dependency_ids =3D util.listify( kwd.get( 'id', None ) )
+ tool_dependency_ids =3D tool_dependency_util.get_tool_dependency_i=
ds( as_string=3DFalse, **kwd )
tool_dependencies =3D []
for tool_dependency_id in tool_dependency_ids:
tool_dependency =3D tool_dependency_util.get_tool_dependency( =
trans, tool_dependency_id )
@@ -731,11 +730,11 @@
kwd[ 'status' ] =3D 'error'
installed_tool_dependencies_select_field =3D suc.build_tool_depend=
encies_select_field( trans,
=
tool_shed_repository=3Dtool_shed_repository,
- =
name=3D'tool_dependency_ids',
+ =
name=3D'inst_td_ids',
=
uninstalled=3DFalse )
uninstalled_tool_dependencies_select_field =3D suc.build_tool_depe=
ndencies_select_field( trans,
=
tool_shed_repository=3Dtool_shed_repository,
- =
name=3D'tool_dependency_ids',
+ =
name=3D'uninstalled_tool_dependency_ids',
=
uninstalled=3DTrue )
return trans.fill_template( '/admin/tool_shed_repository/manage_re=
pository_tool_dependencies.mako',
repository=3Dtool_shed_repository,
@@ -886,15 +885,8 @@
tool_panel_section=3Dtool_panel_sect=
ion,
tool_path=3Dtool_path,
tool_shed_url=3Dtool_shed_url )
- created_or_updated_tool_shed_repositories, tool_panel_section_=
keys, repo_info_dicts, filtered_repo_info_dicts, message =3D \
+ created_or_updated_tool_shed_repositories, tool_panel_section_=
keys, repo_info_dicts, filtered_repo_info_dicts =3D \
repository_util.handle_tool_shed_repositories( trans, inst=
allation_dict, using_api=3DFalse )
- if message and len( repo_info_dicts ) =3D=3D 1:
- # We're undoubtedly attempting to install a repository tha=
t has been previously installed.
- return trans.response.send_redirect( web.url_for( controll=
er=3D'admin_toolshed',
- action=
=3D'browse_repositories',
- message=
=3Dmessage,
- status=
=3D'error' ) )
-
if created_or_updated_tool_shed_repositories:
installation_dict =3D dict( created_or_updated_tool_shed_r=
epositories=3Dcreated_or_updated_tool_shed_repositories,
filtered_repo_info_dicts=3Dfilte=
red_repo_info_dicts,
@@ -1128,7 +1120,7 @@
reposi=
tory_dependencies=3Drepository_dependencies )
repo_info_dicts.append( repo_info_dict )
# Make sure all tool_shed_repository records exist.
- created_or_updated_tool_shed_repositories, tool_panel_section_keys=
, repo_info_dicts, filtered_repo_info_dicts, message =3D \
+ created_or_updated_tool_shed_repositories, tool_panel_section_keys=
, repo_info_dicts, filtered_repo_info_dicts =3D \
repository_dependency_util.create_repository_dependency_object=
s( trans=3Dtrans,
=
tool_path=3Dtool_path,
=
tool_shed_url=3Dtool_shed_url,
@@ -1175,6 +1167,18 @@
initiate_repository_installation_ids=
=3Dencoded_repository_ids,
reinstalling=3DTrue )
=20
+ @web.expose
+ @web.require_admin
+ def repair_repository( self, trans, **kwd ):
+ """
+ Inspect the repository dependency hierarchy for a specified reposi=
tory and attempt to make sure they are all properly installed as well as
+ each repository's tool dependencies.
+ """
+ message =3D kwd.get( 'message', '' )
+ status =3D kwd.get( 'status', 'done' )
+ repository_id =3D kwd[ 'id' ]
+ tool_shed_repository =3D suc.get_installed_tool_shed_repository( t=
rans, repository_id )
+
@web.json
def repository_installation_status_updates( self, trans, ids=3DNone, s=
tatus_list=3DNone ):
# Avoid caching
@@ -1517,7 +1521,7 @@
def uninstall_tool_dependencies( self, trans, **kwd ):
message =3D kwd.get( 'message', '' )
status =3D kwd.get( 'status', 'done' )
- tool_dependency_ids =3D util.listify( kwd.get( 'tool_dependency_id=
s', None ) )
+ tool_dependency_ids =3D tool_dependency_util.get_tool_dependency_i=
ds( as_string=3DFalse, **kwd )
if not tool_dependency_ids:
tool_dependency_ids =3D util.listify( kwd.get( 'id', None ) )
tool_dependencies =3D []
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/tool_shed/galaxy_install/repository_util.py
--- a/lib/tool_shed/galaxy_install/repository_util.py
+++ b/lib/tool_shed/galaxy_install/repository_util.py
@@ -358,7 +358,7 @@
tool_panel_section =3D installation_dict[ 'tool_panel_section' ]
tool_path =3D installation_dict[ 'tool_path' ]
tool_shed_url =3D installation_dict[ 'tool_shed_url' ]
- created_or_updated_tool_shed_repositories, tool_panel_section_keys, re=
po_info_dicts, filtered_repo_info_dicts, message =3D \
+ created_or_updated_tool_shed_repositories, tool_panel_section_keys, re=
po_info_dicts, filtered_repo_info_dicts =3D \
repository_dependency_util.create_repository_dependency_objects( t=
rans=3Dtrans,
t=
ool_path=3Dtool_path,
t=
ool_shed_url=3Dtool_shed_url,
@@ -368,11 +368,7 @@
n=
o_changes_checked=3Dno_changes_checked,
t=
ool_panel_section=3Dtool_panel_section,
n=
ew_tool_panel_section=3Dnew_tool_panel_section )
- if message and len( repo_info_dicts ) =3D=3D 1 and not using_api:
- installed_tool_shed_repository =3D created_or_updated_tool_shed_re=
positories[ 0 ]
- message +=3D 'Click <a href=3D"%s">here</a> to manage the reposito=
ry. ' % \
- ( web.url_for( controller=3D'admin_toolshed', action=3D'manage=
_repository', id=3Dtrans.security.encode_id( installed_tool_shed_repository=
.id ) ) )
- return created_or_updated_tool_shed_repositories, tool_panel_section_k=
eys, repo_info_dicts, filtered_repo_info_dicts, message
+ return created_or_updated_tool_shed_repositories, tool_panel_section_k=
eys, repo_info_dicts, filtered_repo_info_dicts
=20
def initiate_repository_installation( trans, installation_dict ):
# The following installation_dict entries are all required.
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/tool_shed/galaxy_install/tool_dependencies/common_uti=
l.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/common_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/common_util.py
@@ -216,7 +216,7 @@
return os.path.abspath( file_path )
raise ValueError( 'Could not find path to file %s' % os.path.abspath( =
os.path.join( file_path, file_name ) ) )
=20
-def url_download( install_dir, downloaded_file_name, download_url ):
+def url_download( install_dir, downloaded_file_name, download_url, extract=
=3DTrue ):
file_path =3D os.path.join( install_dir, downloaded_file_name )
src =3D None
dst =3D None
@@ -236,7 +236,22 @@
src.close()
if dst:
dst.close()
- return os.path.abspath( file_path )
+ if extract:
+ if istar( file_path ):
+ # <action type=3D"download_by_url">http://sourceforge.net/proj=
ects/samtools/files/samtools/0.1.18/samtools-0.1.18.tar.bz2</action>
+ extract_tar( file_path, install_dir )
+ dir =3D tar_extraction_directory( install_dir, downloaded_file=
_name )
+ elif isjar( file_path ):
+ dir =3D os.path.curdir
+ elif iszip( file_path ):
+ # <action type=3D"download_by_url">http://downloads.sourceforg=
e.net/project/picard/picard-tools/1.56/picard-tools-1.56.zip</action>
+ zip_archive_extracted =3D extract_zip( file_path, install_dir )
+ dir =3D zip_extraction_directory( install_dir, downloaded_file=
_name )
+ else:
+ dir =3D install_dir
+ else:
+ dir =3D install_dir
+ return dir
=20
def zip_extraction_directory( file_path, file_name ):
"""Try to return the correct extraction directory."""
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/tool_shed/galaxy_install/tool_dependencies/fabric_uti=
l.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -180,19 +180,7 @@
downloaded_filename =3D action_dict[ 'target_filen=
ame' ]
else:
downloaded_filename =3D os.path.split( url )[ -1 ]
- downloaded_file_path =3D common_util.url_download( wor=
k_dir, downloaded_filename, url )
- if common_util.istar( downloaded_file_path ):
- # <action type=3D"download_by_url">http://sourcefo=rge.net/projects/samtools/files/samtools/0.1.18/samtools-0.1.18.tar.bz2</ac=
tion>
- common_util.extract_tar( downloaded_file_path, wor=
k_dir )
- dir =3D common_util.tar_extraction_directory( work=
_dir, downloaded_filename )
- elif common_util.isjar( downloaded_file_path ):
- dir =3D os.path.curdir
- elif common_util.iszip( downloaded_file_path ):
- # <action type=3D"download_by_url">http://download=s.sourceforge.net/project/picard/picard-tools/1.56/picard-tools-1.56.zip</a=
ction>
- zip_archive_extracted =3D common_util.extract_zip(=
downloaded_file_path, work_dir )
- dir =3D common_util.zip_extraction_directory( work=
_dir, downloaded_filename )
- else:
- dir =3D os.path.curdir
+ dir =3D common_util.url_download( work_dir, downloaded=
_filename, url, extract=3DTrue )
elif action_type =3D=3D 'shell_command':
# <action type=3D"shell_command">git clone --recursive=
git://github.com/ekg/freebayes.git</action>
# Eliminate the shell_command clone action so remainin=
g actions can be processed correctly.
@@ -206,7 +194,7 @@
# Download a single file to the working directory.
filtered_actions =3D actions[ 1: ]
url =3D action_dict[ 'url' ]
- if action_dict[ 'target_filename' ]:
+ if 'target_filename' in action_dict:
# Sometimes compressed archives extracts their con=
tent to a folder other than the default defined file name. Using this
# attribute will ensure that the file name is set =
appropriately and can be located after download, decompression and extracti=
on.
filename =3D action_dict[ 'target_filename' ]
@@ -227,10 +215,10 @@
if not os.path.exists( full_path_to_dir ):
os.makedirs( full_path_to_dir )
# The package has been down-loaded, so we can now perform =
all of the actions defined for building it.
- with lcd( dir ):
- for action_tup in filtered_actions:
+ for action_tup in filtered_actions:
+ current_dir =3D os.path.abspath( os.path.join( work_di=
r, dir ) )
+ with lcd( current_dir ):
action_type, action_dict =3D action_tup
- current_dir =3D os.path.abspath( os.path.join( wor=
k_dir, dir ) )
if action_type =3D=3D 'make_directory':
common_util.make_directory( full_path=3Daction=
_dict[ 'full_path' ] )
elif action_type =3D=3D 'move_directory_files':
@@ -316,13 +304,20 @@
if return_code:
return
elif action_type =3D=3D 'download_file':
- # Download a single file to the current direct=
ory.
+ # Download a single file to the current workin=
g directory.
url =3D action_dict[ 'url' ]
- if action_dict[ 'target_filename' ]:
+ if 'target_filename' in action_dict:
filename =3D action_dict[ 'target_filename=
' ]
else:
filename =3D url.split( '/' )[ -1 ]
- common_util.url_download( current_dir, filenam=
e, url )
+ extract =3D action_dict.get( 'extract', False )
+ common_util.url_download( current_dir, filenam=
e, url, extract=3Dextract )
+ elif action_type =3D=3D 'change_directory':
+ target_directory =3D os.path.realpath( os.path=
.join( current_dir, action_dict[ 'directory' ] ) )
+ if target_directory.startswith( os.path.realpa=
th( current_dir ) ) and os.path.exists( target_directory ):
+ dir =3D target_directory
+ else:
+ log.error( 'Invalid or nonexistent directo=
ry %s specified, ignoring change_directory action.', target_directory )
=20
def log_results( command, fabric_AttributeString, file_path ):
"""
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/tool_shed/galaxy_install/tool_dependencies/install_ut=
il.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
@@ -13,6 +13,7 @@
from tool_shed.util import xml_util
from galaxy.model.orm import and_
from galaxy.web import url_for
+from galaxy.util import asbool
=20
log =3D logging.getLogger( __name__ )
=20
@@ -390,15 +391,19 @@
# <action type=3D"download_by_url">http://sourceforge.net/proj=
ects/samtools/files/samtools/0.1.18/samtools-0.1.18.tar.bz2</action>
if action_elem.text:
action_dict[ 'url' ] =3D action_elem.text
- if 'target_filename' in action_elem.attrib:
- action_dict[ 'target_filename' ] =3D action_elem.attri=
b[ 'target_filename' ]
+ target_filename =3D action_elem.get( 'target_filename', No=
ne )
+ if target_filename:
+ action_dict[ 'target_filename' ] =3D target_filename
else:
continue
elif action_type =3D=3D 'download_file':
# <action type=3D"download_file">http://effectors.org/download=
/version/TTSS_GUI-1.0.1.jar</action>
if action_elem.text:
action_dict[ 'url' ] =3D action_elem.text
- action_dict[ 'target_filename' ] =3D action_elem.attrib.ge=
t( 'target_filename', None )
+ target_filename =3D action_elem.get( 'target_filename', No=
ne )
+ if target_filename:
+ action_dict[ 'target_filename' ] =3D target_filename
+ action_dict[ 'extract' ] =3D asbool( action_elem.get( 'ext=
ract', False ) )
else:
continue
elif action_type =3D=3D 'make_directory':
@@ -407,6 +412,12 @@
action_dict[ 'full_path' ] =3D evaluate_template( action_e=
lem.text )
else:
continue
+ elif action_type =3D=3D 'change_directory':
+ # <action type=3D"change_directory">PHYLIP-3.6b</action>
+ if action_elem.text:
+ action_dict[ 'directory' ] =3D action_elem.text
+ else:
+ continue
elif action_type in [ 'move_directory_files', 'move_file' ]:
# <action type=3D"move_file">
# <source>misc/some_file</source>
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/tool_shed/util/repository_dependency_util.py
--- a/lib/tool_shed/util/repository_dependency_util.py
+++ b/lib/tool_shed/util/repository_dependency_util.py
@@ -103,7 +103,6 @@
the dependency relationships between installed repositories. This met=
hod is called when new repositories are being installed into a Galaxy
instance and when uninstalled repositories are being reinstalled.
"""
- message =3D ''
# The following list will be maintained within this method to contain =
all created or updated tool shed repositories, including repository depende=
ncies
# that may not be installed.
all_created_or_updated_tool_shed_repositories =3D []
@@ -241,7 +240,7 @@
filtered_repo_info_dicts.append( repo_info_dict )
# Build repository dependency relationships even if the user chose to =
not install repository dependencies.
build_repository_dependency_relationships( trans, all_repo_info_dicts,=
all_created_or_updated_tool_shed_repositories )
- return created_or_updated_tool_shed_repositories, tool_panel_section_k=
eys, all_repo_info_dicts, filtered_repo_info_dicts, message
+ return created_or_updated_tool_shed_repositories, tool_panel_section_k=
eys, all_repo_info_dicts, filtered_repo_info_dicts
=20
def generate_message_for_invalid_repository_dependencies( metadata_dict ):
"""Return the error message associated with an invalid repository depe=
ndency for display in the caller."""
diff -r 46f4beebb766fd75edb59bc3598342ef95775af9 -r 1ff57c7a9debe0ced8bd216=
a33789218190f1f47 lib/tool_shed/util/tool_dependency_util.py
--- a/lib/tool_shed/util/tool_dependency_util.py
+++ b/lib/tool_shed/util/tool_dependency_util.py
@@ -220,9 +220,16 @@
=20
def get_tool_dependency_ids( as_string=3DFalse, **kwd ):
tool_dependency_id =3D kwd.get( 'tool_dependency_id', None )
- tool_dependency_ids =3D util.listify( kwd.get( 'tool_dependency_ids', =
None ) )
- if not tool_dependency_ids:
- tool_dependency_ids =3D util.listify( kwd.get( 'id', None ) )
+ if 'tool_dependency_ids' in kwd:
+ tool_dependency_ids =3D util.listify( kwd[ 'tool_dependency_ids' ]=
)
+ elif 'id' in kwd:
+ tool_dependency_ids =3D util.listify( kwd[ 'id' ] )
+ elif 'inst_td_ids' in kwd:
+ tool_dependency_ids =3D util.listify( kwd[ 'inst_td_ids' ] )
+ elif 'uninstalled_tool_dependency_ids' in kwd:
+ tool_dependency_ids =3D util.listify( kwd[ 'uninstalled_tool_depen=
dency_ids' ] )
+ else:
+ tool_dependency_ids =3D []
if tool_dependency_id and tool_dependency_id not in tool_dependency_id=
s:
tool_dependency_ids.append( tool_dependency_id )
if as_string:
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/galaxy/galaxy-central/commits/f1e5dfbbea46/
Changeset: f1e5dfbbea46
User: saketkc
Date: 2013-06-27 06:35:20
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 3fa9df444b4b81f94b1c42a033c685a6e23827be -r f1e5dfbbea46f0957ce4849=
4996444eb61f4a818 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/7e13235ae59a/
Changeset: 7e13235ae59a
User: saketkc
Date: 2013-06-28 07:00:22
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r b89b721a0b3db47cdeddef35f03ce4c2ffcb47b5 -r 7e13235ae59a1fb5eaa795c=
0797b4afa38001a38 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/8a88c559d596/
Changeset: 8a88c559d596
User: saketkc
Date: 2013-06-29 11:41:22
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 019ad31c3c2502005846dadc5c0457bbb6f80712 -r 8a88c559d5961d41c630b8e=
2dd1dafe6f275b9b2 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/13dc3f5865da/
Changeset: 13dc3f5865da
User: saketkc
Date: 2013-07-01 22:14:34
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 2cabbf3687634090fbbc024726f15f43db4ff314 -r 13dc3f5865da864c3823b31=
df455b5f6f1acb9f3 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/868bfd34f465/
Changeset: 868bfd34f465
User: saketkc
Date: 2013-07-09 10:41:15
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 86efa5ac1fae6fb46e7af9804e036a7ab44b0e26 -r 868bfd34f465dc1b6176d02=
fce93baecf3129279 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/f8c0c28b902e/
Changeset: f8c0c28b902e
User: saketkc
Date: 2013-07-10 21:30:38
Summary: vcfclass change merged
Affected #: 1 file
diff -r 4cc057df762c219406af27dd04ac725a07a5a6eb -r f8c0c28b902ecad5821ece5=
5030a9e7d02f779cf tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/4ff5c8a3ff34/
Changeset: 4ff5c8a3ff34
User: saketkc
Date: 2013-07-24 20:59:58
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r b42dfe74e237ea7f9c77059e427db92d9859bc67 -r 4ff5c8a3ff347824b430c99=
3303f2d97d907e2ed tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/28bad82d523b/
Changeset: 28bad82d523b
User: saketkc
Date: 2013-07-25 15:07:54
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 2abd0819d354d3d11182297c7206408d299f0d16 -r 28bad82d523b2053fa69c70=
301de69a74e57f323 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/a360e1b7b506/
Changeset: a360e1b7b506
User: saketkc
Date: 2013-07-31 15:48:50
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 951e853b0bcd2c62cedee0b95d46c9e36ab6c605 -r a360e1b7b506450385be74b=
2c6b7762d3e794bbd tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/79ae7df72fba/
Changeset: 79ae7df72fba
User: saketkc
Date: 2013-08-02 19:20:58
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 24 files
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -85,6 +85,7 @@
.coverage
htmlcov
run_unit_tests.html
+test/unit/**.log
=20
# Project files
*.kpf
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/galaxy/datatypes/binary.py
--- a/lib/galaxy/datatypes/binary.py
+++ b/lib/galaxy/datatypes/binary.py
@@ -475,6 +475,9 @@
=20
def sniff(self, filename):
try:
+ # All twobit files start with a 16-byte header. If the file is=
smaller than 16 bytes, it's obviously not a valid twobit file.
+ if os.path.getsize(filename) < 16:
+ return False
input =3D file(filename)
magic =3D struct.unpack(">L", input.read(TWOBIT_MAGIC_SIZE))[0]
if magic =3D=3D TWOBIT_MAGIC_NUMBER or magic =3D=3D TWOBIT_MAG=
IC_NUMBER_SWAP:
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1514,8 +1514,8 @@
NOTE: This is wasteful since dynamic options and dataset collection
happens twice (here and when generating HTML).=20
"""
- # Can't look at history in workflow mode
- if trans is None or trans.workflow_building_mode:
+ # Can't look at history in workflow mode. Tool shed has no histori=
es.
+ if trans is None or trans.workflow_building_mode or trans.webapp.n=
ame =3D=3D 'tool_shed':
return DummyDataset()
assert trans is not None, "DataToolParameter requires a trans"
history =3D trans.get_history()
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/galaxy/visualization/data_providers/genome.py
--- a/lib/galaxy/visualization/data_providers/genome.py
+++ b/lib/galaxy/visualization/data_providers/genome.py
@@ -13,6 +13,7 @@
from galaxy.util.json import from_json_string
from bx.interval_index_file import Indexes
from bx.bbi.bigwig_file import BigWigFile
+from bx.bbi.bigbed_file import BigBedFile
from galaxy.util.lrucache import LRUCache
from galaxy.visualization.data_providers.basic import BaseDataProvider
from galaxy.visualization.data_providers.cigar import get_ref_based_read_s=
eq_and_cigar
@@ -861,14 +862,14 @@
"""
Returns an iterator that provides data in the region chrom:start-e=
nd
"""
- start, end =3D int(start), int(end)
+ start, end =3D int( start ), int( end )
orig_data_filename =3D self.original_dataset.file_name
index_filename =3D self.converted_dataset.file_name
=20
# Attempt to open the BAM file with index
bamfile =3D csamtools.Samfile( filename=3Dorig_data_filename, mode=
=3D'rb', index_filename=3Dindex_filename )
try:
- data =3D bamfile.fetch(start=3Dstart, end=3Dend, reference=3Dc=
hrom)
+ data =3D bamfile.fetch( start=3Dstart, end=3Dend, reference=3D=
chrom )
except ValueError, e:
# Try alternative chrom naming.
chrom =3D _convert_between_ucsc_and_ensemble_naming( chrom )
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/galaxy/webapps/galaxy/api/histories.py
--- a/lib/galaxy/webapps/galaxy/api/histories.py
+++ b/lib/galaxy/webapps/galaxy/api/histories.py
@@ -1,5 +1,7 @@
"""
API operations on a history.
+
+.. seealso:: :class:`galaxy.model.History`
"""
=20
import pkg_resources
@@ -21,17 +23,28 @@
@web.expose_api_anonymous
def index( self, trans, deleted=3D'False', **kwd ):
"""
- GET /api/histories
- GET /api/histories/deleted
- Displays a collection (list) of histories.
+ index( trans, deleted=3D'False' )
+ * GET /api/histories:
+ return undeleted histories for the current user
+ * GET /api/histories/deleted:
+ return deleted histories for the current user
+ .. note:: Anonymous users are allowed to get their current history
+
+ :type deleted: boolean
+ :param deleted: if True, show only deleted histories, if False, n=
on-deleted
+
+ :rtype: list
+ :returns: list of dictionaries containing summary history inform=
ation
"""
#TODO: query (by name, date, etc.)
rval =3D []
deleted =3D string_as_bool( deleted )
try:
if trans.user:
- query =3D trans.sa_session.query(trans.app.model.History )=
.filter_by( user=3Dtrans.user, deleted=3Ddeleted ).order_by(
- desc(trans.app.model.History.table.c.update_time)).all=
()
+ query =3D ( trans.sa_session.query( trans.app.model.Histor=
y )
+ .filter_by( user=3Dtrans.user, deleted=3Ddelet=
ed )
+ .order_by( desc( trans.app.model.History.table=
.c.update_time ) )
+ .all() )
for history in query:
item =3D history.get_api_value(value_mapper=3D{'id':tr=
ans.security.encode_id})
item['url'] =3D url_for( 'history', id=3Dtrans.securit=
y.encode_id( history.id ) )
@@ -52,11 +65,25 @@
=20
@web.expose_api_anonymous
def show( self, trans, id, deleted=3D'False', **kwd ):
+ # oh, sphinx - you bastard
"""
- GET /api/histories/{encoded_history_id}
- GET /api/histories/deleted/{encoded_history_id}
- GET /api/histories/most_recently_used
- Displays information about a history.
+ show( trans, id, deleted=3D'False' )
+ * GET /api/histories/{id}:
+ return the history with ``id``
+ * GET /api/histories/deleted/{id}:
+ return the deleted history with ``id``
+ * GET /api/histories/most_recently_used:
+ return the most recently used history
+ .. note:: Anonymous users are allowed to get their current history
+
+ :type id: an encoded id string
+ :param id: the encoded id of the history to query or the str=
ing 'most_recently_used'
+ :type deleted: boolean
+ :param deleted: if True, allow information on a deleted history t=
o be shown.
+
+ :rtype: dictionary
+ :returns: detailed history information from
+ :func:`galaxy.web.base.controller.UsesHistoryDatasetAssociatio=
nMixin.get_history_dict`
"""
#TODO: GET /api/histories/{encoded_history_id}?as_archive=3DTrue
#TODO: GET /api/histories/s/{username}/{slug}
@@ -94,8 +121,16 @@
@web.expose_api
def create( self, trans, payload, **kwd ):
"""
- POST /api/histories
- Creates a new history.
+ create( trans, payload )
+ * POST /api/histories:
+ create a new history
+
+ :type payload: dict
+ :param payload: (optional) dictionary structure containing:
+ * name: the new history's name
+ =20
+ :rtype: dict
+ :returns: element view of new history
"""
hist_name =3D None
if payload.get( 'name', None ):
@@ -115,8 +150,24 @@
@web.expose_api
def delete( self, trans, id, **kwd ):
"""
- DELETE /api/histories/{encoded_history_id}
- Deletes a history
+ delete( self, trans, id, **kwd )
+ * DELETE /api/histories/{id}
+ delete the history with the given ``id``
+ .. note:: Currently does not stop any active jobs in the history.
+
+ :type id: str
+ :param id: the encoded id of the history to delete
+ :type kwd: dict
+ :param kwd: (optional) dictionary structure containing:
+ =20
+ * payload: a dictionary itself containing:
+ * purge: if True, purge the history and all of it's HDAs
+
+ :rtype: dict
+ :returns: an error object if an error occurred or a dictionary c=
ontaining:
+ * id: the encoded id of the history,
+ * deleted: if the history was marked as deleted,
+ * purged: if the history was purged
"""
history_id =3D id
# a request body is optional here
@@ -175,8 +226,15 @@
@web.expose_api
def undelete( self, trans, id, **kwd ):
"""
- POST /api/histories/deleted/{encoded_history_id}/undelete
- Undeletes a history
+ undelete( self, trans, id, **kwd )
+ * POST /api/histories/deleted/{id}/undelete:
+ undelete history (that hasn't been purged) with the given ``id=
``
+
+ :type id: str
+ :param id: the encoded id of the history to undelete
+
+ :rtype: str
+ :returns: 'OK' if the history was undeleted
"""
history_id =3D id
history =3D self.get_history( trans, history_id, check_ownership=
=3DTrue, check_accessible=3DFalse, deleted=3DTrue )
@@ -188,8 +246,21 @@
@web.expose_api
def update( self, trans, id, payload, **kwd ):
"""
- PUT /api/histories/{encoded_history_id}
- Changes an existing history.
+ update( self, trans, id, payload, **kwd )
+ * PUT /api/histories/{id}
+ updates the values for the history with the given ``id``
+
+ :type id: str
+ :param id: the encoded id of the history to undelete
+ :type payload: dict
+ :param payload: a dictionary containing any or all the
+ fields in :func:`galaxy.model.History.get_api_value` and/or th=
e following:
+ =20
+ * annotation: an annotation for the history
+
+ :rtype: dict
+ :returns: an error object if an error occurred or a dictionary c=
ontaining
+ any values that were different from the original and, therefor=
e, updated
"""
#TODO: PUT /api/histories/{encoded_history_id} payload =3D { ratin=
g: rating } (w/ no security checks)
try:
@@ -255,6 +326,6 @@
raise ValueError( 'annotation must be a string or unic=
ode: %s' %( str( type( val ) ) ) )
validated_payload[ 'annotation' ] =3D sanitize_html( val, =
'utf-8' )
elif key not in valid_but_uneditable_keys:
- raise AttributeError( 'unknown key: %s' %( str( key ) ) )
+ pass
+ #log.warn( 'unknown key: %s', str( key ) )
return validated_payload
-
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/galaxy/webapps/galaxy/api/history_contents.py
--- a/lib/galaxy/webapps/galaxy/api/history_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/history_contents.py
@@ -15,21 +15,27 @@
@web.expose_api_anonymous
def index( self, trans, history_id, ids=3DNone, **kwd ):
"""
- GET /api/histories/{encoded_history_id}/contents
- Displays a collection (list) of history contents (HDAs)
+ index( self, trans, history_id, ids=3DNone, **kwd )
+ * GET /api/histories/{history_id}/contents
+ return a list of HDA data for the history with the given ``id``
+ .. note:: Anonymous users are allowed to get their current history=
contents
=20
- :param history_id: an encoded id string of the `History` to search
- :param ids: (optional) a comma separated list of encoded `HDA` ids
-
- If Ids is not given, index returns a list of *summary* json object=
s for
- every `HDA` associated with the given `history_id`.
- See _summary_hda_dict.
+ If Ids is not given, index returns a list of *summary* objects for
+ every HDA associated with the given `history_id`.
=20
If ids is given, index returns a *more complete* json object for e=
ach
HDA in the ids list.
=20
- Note: Anonymous users are allowed to get their current history con=
tents
- (generally useful for browser UI access of the api)
+ :type history_id: str
+ :param history_id: encoded id string of the HDA's History
+ :type ids: str
+ :param ids: (optional) a comma separated list of encoded `=
HDA` ids
+
+ :rtype: list
+ :returns: dictionaries containing summary or detailed HDA inform=
ation
+ .. seealso::
+ :func:`_summary_hda_dict` and
+ :func:`galaxy.web.base.controller.UsesHistoryDatasetAssociatio=
nMixin.get_hda_dict`
"""
rval =3D []
try:
@@ -78,13 +84,13 @@
#TODO: move to model or Mixin
def _summary_hda_dict( self, trans, history_id, hda ):
"""
- Returns a dictionary based on the HDA in .. _summary form::
- {
- 'id' : < the encoded dataset id >,
- 'name' : < currently only returns 'file' >,
- 'type' : < name of the dataset >,
- 'url' : < api url to retrieve this datasets full data >,
- }
+ Returns a dictionary based on the HDA in summary form::
+ {
+ 'id' : < the encoded dataset id >,
+ 'name' : < currently only returns 'file' >,
+ 'type' : < name of the dataset >,
+ 'url' : < api url to retrieve this datasets full data >,
+ }
"""
api_type =3D "file"
encoded_id =3D trans.security.encode_id( hda.id )
@@ -98,8 +104,19 @@
@web.expose_api_anonymous
def show( self, trans, id, history_id, **kwd ):
"""
- GET /api/histories/{encoded_history_id}/contents/{encoded_content_=
id}
- Displays information about a history content (dataset).
+ show( self, trans, id, history_id, **kwd )
+ * GET /api/histories/{history_id}/contents/{id}
+ return detailed information about an HDA within a history
+ .. note:: Anonymous users are allowed to get their current history=
contents
+
+ :type id: str
+ :param ids: the encoded id of the HDA to return
+ :type history_id: str
+ :param history_id: encoded id string of the HDA's History
+
+ :rtype: dict
+ :returns: dictionary containing detailed HDA information
+ .. seealso:: :func:`galaxy.web.base.controller.UsesHistoryDatasetA=
ssociationMixin.get_hda_dict`
"""
hda_dict =3D {}
try:
@@ -135,8 +152,18 @@
@web.expose_api
def create( self, trans, history_id, payload, **kwd ):
"""
- POST /api/histories/{encoded_history_id}/contents
- Creates a new history content item (file, aka HistoryDatasetAssoci=
ation).
+ create( self, trans, history_id, payload, **kwd )
+ * POST /api/histories/{history_id}/contents
+ create a new HDA by copying an accessible LibraryDataset
+
+ :type history_id: str
+ :param history_id: encoded id string of the new HDA's History
+ :type payload: dict
+ :param payload: dictionary structure containing::
+ 'from_ld_id': the encoded id of the LibraryDataset to copy
+
+ :rtype: dict
+ :returns: dictionary containing detailed information for the new=
HDA
"""
#TODO: copy existing, accessible hda - dataset controller, copy_da=
tasets
#TODO: convert existing, accessible hda - model.DatasetInstance(or=
hda.datatype).get_converter_types
@@ -173,8 +200,24 @@
@web.expose_api
def update( self, trans, history_id, id, payload, **kwd ):
"""
- PUT /api/histories/{encoded_history_id}/contents/{encoded_content_=
id}
- Changes an existing history dataset.
+ update( self, trans, history_id, id, payload, **kwd )
+ * PUT /api/histories/{history_id}/contents/{id}
+ updates the values for the HDA with the given ``id``
+
+ :type history_id: str
+ :param history_id: encoded id string of the HDA's History
+ :type id: str
+ :param id: the encoded id of the HDA to update
+ :type payload: dict
+ :param payload: a dictionary containing any or all the
+ fields in :func:`galaxy.model.HistoryDatasetAssociation.get_ap=
i_value`
+ and/or the following:
+
+ * annotation: an annotation for the HDA
+
+ :rtype: dict
+ :returns: an error object if an error occurred or a dictionary c=
ontaining
+ any values that were different from the original and, therefor=
e, updated
"""
#TODO: PUT /api/histories/{encoded_history_id} payload =3D { ratin=
g: rating } (w/ no security checks)
changed =3D {}
@@ -251,6 +294,7 @@
raise ValueError( 'misc_info must be a string or unico=
de: %s' %( str( type( val ) ) ) )
validated_payload[ 'info' ] =3D util.sanitize_html.sanitiz=
e_html( val, 'utf-8' )
elif key not in valid_but_uneditable_keys:
- raise AttributeError( 'unknown key: %s' %( str( key ) ) )
+ pass
+ #log.warn( 'unknown key: %s', str( key ) )
return validated_payload
=20
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/galaxy/webapps/galaxy/api/libraries.py
--- a/lib/galaxy/webapps/galaxy/api/libraries.py
+++ b/lib/galaxy/webapps/galaxy/api/libraries.py
@@ -15,9 +15,18 @@
@web.expose_api
def index( self, trans, deleted=3D'False', **kwd ):
"""
- GET /api/libraries
- GET /api/libraries/deleted
- Displays a collection (list) of libraries.
+ index( self, trans, deleted=3D'False', **kwd )
+ * GET /api/libraries:
+ returns a list of summary data for libraries
+ * GET /api/libraries/deleted:
+ returns a list of summary data for deleted libraries
+
+ :type deleted: boolean
+ :param deleted: if True, show only deleted libraries, if False, n=
on-deleted
+
+ :rtype: list
+ :returns: list of dictionaries containing library information
+ .. seealso:: :attr:`galaxy.model.Library.api_collection_visible_ke=
ys`
"""
log.debug( "LibrariesController.index: enter" )
query =3D trans.sa_session.query( trans.app.model.Library )
@@ -49,9 +58,20 @@
@web.expose_api
def show( self, trans, id, deleted=3D'False', **kwd ):
"""
- GET /api/libraries/{encoded_library_id}
- GET /api/libraries/deleted/{encoded_library_id}
- Displays information about a library.
+ show( self, trans, id, deleted=3D'False', **kwd )
+ * GET /api/libraries/{id}:
+ returns detailed information about a library
+ * GET /api/libraries/deleted/{id}:
+ returns detailed information about a deleted library
+
+ :type id: an encoded id string
+ :param id: the encoded id of the library
+ :type deleted: boolean
+ :param deleted: if True, allow information on a deleted library
+
+ :rtype: dictionary
+ :returns: detailed library information
+ .. seealso:: :attr:`galaxy.model.Library.api_element_visible_keys`
"""
log.debug( "LibraryContentsController.show: enter" )
library_id =3D id
@@ -75,8 +95,20 @@
@web.expose_api
def create( self, trans, payload, **kwd ):
"""
- POST /api/libraries
- Creates a new library.
+ create( self, trans, payload, **kwd )
+ * POST /api/libraries:
+ create a new library
+ .. note:: Currently, only admin users can create libraries.
+
+ :type payload: dict
+ :param payload: (optional) dictionary structure containing::
+ 'name': the new library's name
+ 'description': the new library's description
+ 'synopsis': the new library's synopsis
+
+ :rtype: dict
+ :returns: a dictionary containing the id, name, and 'show' url
+ of the new library
"""
if not trans.user_is_admin():
raise HTTPForbidden( detail=3D'You are not authorized to creat=
e a new library.' )
@@ -102,6 +134,19 @@
=20
@web.expose_api
def delete( self, trans, id, **kwd ):
+ """
+ delete( self, trans, id, **kwd )
+ * DELETE /api/libraries/{id}
+ mark the library with the given ``id`` as deleted
+ .. note:: Currently, only admin users can delete libraries.
+
+ :type id: str
+ :param id: the encoded id of the library to delete
+
+ :rtype: dictionary
+ :returns: detailed library information
+ .. seealso:: :attr:`galaxy.model.Library.api_element_visible_keys`
+ """
if not trans.user_is_admin():
raise HTTPForbidden( detail=3D'You are not authorized to delet=
e libraries.' )
try:
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/galaxy/webapps/galaxy/api/library_contents.py
--- a/lib/galaxy/webapps/galaxy/api/library_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/library_contents.py
@@ -19,8 +19,21 @@
# TODO: Add parameter to only get top level of datasets/subfolders.
def index( self, trans, library_id, **kwd ):
"""
- GET /api/libraries/{encoded_library_id}/contents
- Displays a collection (list) of library contents (files and folder=
s).
+ index( self, trans, library_id, **kwd )
+ * GET /api/libraries/{library_id}/contents:
+ return a list of library files and folders
+
+ :type library_id: str
+ :param library_id: encoded id string of the library that contains=
this item
+
+ :rtype: list
+ :returns: list of dictionaries of the form:
+
+ * id: the encoded id of the library item
+ * name: the 'library path'
+ or relationship of the library item to the root
+ * type: 'file' or 'folder'
+ * url: the url to get detailed information on the library item
"""
rval =3D []
current_user_roles =3D trans.get_current_user_roles()
@@ -80,8 +93,20 @@
@web.expose_api
def show( self, trans, id, library_id, **kwd ):
"""
- GET /api/libraries/{encoded_library_id}/contents/{encoded_content_=
id}
- Displays information about a library content (file or folder).
+ show( self, trans, id, library_id, **kwd )
+ * GET /api/libraries/{library_id}/contents/{id}
+ return information about library file or folder
+
+ :type id: str
+ :param id: the encoded id of the library item to return
+ :type library_id: str
+ :param library_id: encoded id string of the library that contains=
this item
+
+ :rtype: dict
+ :returns: detailed library item information
+ .. seealso::
+ :func:`galaxy.model.LibraryDataset.get_api_value` and
+ :attr:`galaxy.model.LibraryFolder.api_element_visible_keys`
"""
class_name, content_id =3D self.__decode_library_content_id( trans=
, id )
if class_name =3D=3D 'LibraryFolder':
@@ -93,8 +118,29 @@
@web.expose_api
def create( self, trans, library_id, payload, **kwd ):
"""
- POST /api/libraries/{encoded_library_id}/contents
- Creates a new library content item (file or folder).
+ create( self, trans, library_id, payload, **kwd )
+ * POST /api/libraries/{library_id}/contents:
+ create a new library file or folder
+
+ To copy an HDA into a library send ``create_type`` of 'file' and
+ the HDA's encoded id in ``from_hda_id`` (and optionally ``ldda_mes=
sage``).
+
+ :type library_id: str
+ :param library_id: encoded id string of the library that contains=
this item
+ :type payload: dict
+ :param payload: dictionary structure containing:
+ =20
+ * folder_id: the parent folder of the new item
+ * create_type: the type of item to create ('file' or 'folder')
+ * from_hda_id: (optional) the id of an accessible HDA to copy=
into the
+ library
+ * ldda_message: (optional) the new message attribute of the LD=
DA created
+ * extended_metadata: (optional) sub-dictionary containing any =
extended
+ metadata to associate with the item
+
+ :rtype: dict
+ :returns: a dictionary containing the id, name,
+ and 'show' url of the new item
"""
create_type =3D None
if 'create_type' not in payload:
@@ -195,10 +241,10 @@
=20
def _copy_hda_to_library_folder( self, trans, from_hda_id, library_id,=
folder_id, ldda_message=3D'' ):
"""
- Copies hda `from_hda_id` to library folder `library_folder_id` opt=
ionally
- adding `ldda_message` to the new ldda's `message`.
+ Copies hda ``from_hda_id`` to library folder ``library_folder_id``=
optionally
+ adding ``ldda_message`` to the new ldda's ``message``.
=20
- `library_contents.create` will branch to this if called with 'from=
_hda_id'
+ ``library_contents.create`` will branch to this if called with 'fr=
om_hda_id'
in it's payload.
"""
log.debug( '_copy_hda_to_library_folder: %s' %( str(( from_hda_id,=
library_id, folder_id, ldda_message )) ) )
@@ -236,10 +282,23 @@
return rval
=20
@web.expose_api
- def update( self, trans, id, library_id, payload, **kwd ):
+ def update( self, trans, id, library_id, payload, **kwd ):
"""
- PUT /api/libraries/{encoded_library_id}/contents/{encoded_content_=
type_and_id}
- Sets relationships among items
+ update( self, trans, id, library_id, payload, **kwd )
+ * PUT /api/libraries/{library_id}/contents/{id}
+ create an ImplicitlyConvertedDatasetAssociation
+ .. seealso:: :class:`galaxy.model.ImplicitlyConvertedDatasetAssoci=
ation`
+
+ :type id: str
+ :param id: the encoded id of the library item to return
+ :type library_id: str
+ :param library_id: encoded id string of the library that contains=
this item
+ :type payload: dict
+ :param payload: dictionary structure containing::
+ 'converted_dataset_id':
+
+ :rtype: None
+ :returns: None
"""
if 'converted_dataset_id' in payload:
converted_id =3D payload.pop( 'converted_dataset_id' )
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/tool_shed/galaxy_install/tool_dependencies/common_uti=
l.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/common_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/common_util.py
@@ -8,6 +8,7 @@
import zipfile
import tool_shed.util.shed_util_common as suc
from galaxy.datatypes import checkers
+from urllib2 import HTTPError
=20
log =3D logging.getLogger( __name__ )
=20
@@ -70,6 +71,23 @@
__shellquote(env_shell_file_path=
))
return cmd
=20
+def download_binary_from_url( url, work_dir, install_dir ):
+ '''
+ Download a pre-compiled binary from the specified URL. If the download=
ed file is an archive,
+ extract it into install_dir and delete the archive.
+ '''
+ downloaded_filename =3D os.path.split( url )[ -1 ]
+ try:
+ dir =3D url_download( work_dir, downloaded_filename, url, extract=
=3DTrue )
+ downloaded_filepath =3D os.path.join( work_dir, downloaded_filenam=
e )
+ if is_compressed( downloaded_filepath ):
+ os.remove( downloaded_filepath )
+ move_directory_files( current_dir=3Dwork_dir,
+ source_dir=3Ddir,
+ destination_dir=3Dinstall_dir )
+ return True
+ except HTTPError:
+ return False
=20
def extract_tar( file_name, file_path ):
if isgzip( file_name ) or isbz2( file_name ):
@@ -190,6 +208,12 @@
def iszip( file_path ):
return checkers.check_zip( file_path )
=20
+def is_compressed( file_path ):
+ if isjar( file_path ):
+ return False
+ else:
+ return iszip( file_path ) or isgzip( file_path ) or istar( file_pa=
th ) or isbz2( file_path )
+
def make_directory( full_path ):
if not os.path.exists( full_path ):
os.makedirs( full_path )
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/tool_shed/galaxy_install/tool_dependencies/fabric_uti=
l.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -165,12 +165,29 @@
actions =3D actions_dict.get( 'actions', None )
filtered_actions =3D []
env_shell_file_paths =3D []
+ # Default to false so that the install process will default to compili=
ng.
+ binary_found =3D False
if actions:
with make_tmp_dir() as work_dir:
with lcd( work_dir ):
# The first action in the list of actions will be the one =
that defines the installation process. There
# are currently only two supported processes; download_by_=
url and clone via a "shell_command" action type.
action_type, action_dict =3D actions[ 0 ]
+ if action_type =3D=3D 'download_binary':
+ # Eliminate the download_binary action so remaining ac=
tions can be processed correctly.
+ filtered_actions =3D actions[ 1: ]
+ url =3D action_dict[ 'url' ]
+ # Attempt to download a binary from the specified URL.
+ log.debug( 'Attempting to download from %s', url )
+ binary_found =3D common_util.download_binary_from_url(=
url, work_dir, install_dir )
+ if binary_found:
+ # If the attempt succeeded, set the action_type to=
binary_found, in order to skip any download_by_url or shell_command action=
s.
+ actions =3D filtered_actions
+ action_type =3D 'binary_found'
+ else:
+ # No binary exists, or there was an error download=
ing the binary from the generated URL. Proceed with the remaining actions.
+ del actions[ 0 ]
+ action_type, action_dict =3D actions[ 0 ]
if action_type =3D=3D 'download_by_url':
# Eliminate the download_by_url action so remaining ac=
tions can be processed correctly.
filtered_actions =3D actions[ 1: ]
@@ -220,6 +237,9 @@
current_dir =3D os.path.abspath( os.path.join( work_di=
r, dir ) )
with lcd( current_dir ):
action_type, action_dict =3D action_tup
+ # If a binary was found, we only need to process e=
nvironment variables, file permissions, and any other binary downloads.
+ if binary_found and action_type not in [ 'set_envi=
ronment', 'chmod', 'download_binary' ]:
+ continue
if action_type =3D=3D 'make_directory':
common_util.make_directory( full_path=3Daction=
_dict[ 'full_path' ] )
elif action_type =3D=3D 'move_directory_files':
@@ -348,6 +368,18 @@
dir =3D target_directory.replace( os.path.=
realpath( work_dir ), '' ).lstrip( '/' )
else:
log.error( 'Invalid or nonexistent directo=
ry %s specified, ignoring change_directory action.', target_directory )
+ elif action_type =3D=3D 'chmod':
+ for target_file, mode in action_dict[ 'change_=
modes' ]:
+ if os.path.exists( target_file ):
+ os.chmod( target_file, mode )
+ elif action_type =3D=3D 'download_binary':
+ url =3D action_dict[ 'url' ]
+ binary_found =3D common_util.download_binary_f=
rom_url( url, work_dir, install_dir )
+ if binary_found:
+ log.debug( 'Successfully downloaded binary=
from %s', url )
+ else:
+ log.error( 'Unable to download binary from=
%s', url )
+ =20
=20
def log_results( command, fabric_AttributeString, file_path ):
"""
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/tool_shed/galaxy_install/tool_dependencies/install_ut=
il.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
@@ -1,6 +1,7 @@
import logging
import os
import sys
+import stat
import subprocess
import tempfile
from string import Template
@@ -379,7 +380,22 @@
for action_elem in actions_elem.findall( 'action' ):
action_dict =3D {}
action_type =3D action_elem.get( 'type', 'shell_command' )
- if action_type =3D=3D 'shell_command':
+ if action_type =3D=3D 'download_binary':
+ platform_info_dict =3D tool_dependency_util.get_platform_info_=
dict()
+ platform_info_dict[ 'name' ] =3D tool_dependency.name
+ platform_info_dict[ 'version' ] =3D tool_dependency.version
+ url_template_elems =3D action_elem.findall( 'url_template' )
+ # Check if there are multiple url_template elements, each with=
attrib entries for a specific platform.
+ if len( url_template_elems ) > 1:
+ # <base_url os=3D"darwin" extract=3D"false">http://hgdownl=oad.cse.ucsc.edu/admin/exe/macOSX.${architecture}/faToTwoBit</base_url>
+ # This method returns the url_elem that best matches the c=
urrent platform as received from os.uname().
+ # Currently checked attributes are os and architecture.
+ # These correspond to the values sysname and processor fro=
m the Python documentation for os.uname().
+ url_template_elem =3D tool_dependency_util.get_download_ur=
l_for_platform( url_template_elems, platform_info_dict )
+ else:
+ url_template_elem =3D url_template_elems[ 0 ]
+ action_dict[ 'url' ] =3D Template( url_template_elem.text ).sa=
fe_substitute( platform_info_dict )
+ elif action_type =3D=3D 'shell_command':
# <action type=3D"shell_command">make</action>
action_elem_text =3D evaluate_template( action_elem.text )
if action_elem_text:
@@ -492,6 +508,27 @@
# lxml=3D=3D2.3.0</action>
## Manually specify contents of requirements.txt file to creat=
e dynamically.
action_dict[ 'requirements' ] =3D evaluate_template( action_el=
em.text or 'requirements.txt' )
+ elif action_type =3D=3D 'chmod':
+ # Change the read, write, and execute bits on a file.
+ file_elems =3D action_elem.findall( 'file' )
+ chmod_actions =3D []
+ # A unix octal mode is the sum of the following values:
+ # Owner:
+ # 400 Read 200 Write 100 Execute
+ # Group:
+ # 040 Read 020 Write 010 Execute
+ # World:
+ # 004 Read 002 Write 001 Execute
+ for file_elem in file_elems:
+ # So by the above table, owner read/write/execute and grou=
p read permission would be 740.
+ # Python's os.chmod uses base 10 modes, convert received u=
nix-style octal modes to base 10.
+ received_mode =3D int( file_elem.get( 'mode', 600 ), base=
=3D8 )
+ # For added security, ensure that the setuid and setgid bi=
ts are not set.
+ mode =3D received_mode & ~( stat.S_ISUID | stat.S_ISGID )
+ file =3D evaluate_template( file_elem.text )
+ chmod_tuple =3D ( file, mode )
+ chmod_actions.append( chmod_tuple )
+ action_dict[ 'change_modes' ] =3D chmod_actions
else:
log.debug( "Unsupported action type '%s'. Not proceeding." % s=
tr( action_type ) )
raise Exception( "Unsupported action type '%s' in tool depende=
ncy definition." % str( action_type ) )
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c lib/tool_shed/util/tool_dependency_util.py
--- a/lib/tool_shed/util/tool_dependency_util.py
+++ b/lib/tool_shed/util/tool_dependency_util.py
@@ -39,6 +39,37 @@
tool_dependencies[ dependency_key ] =3D requirements_dict
return tool_dependencies
=20
+def get_download_url_for_platform( url_templates, platform_info_dict ):
+ '''
+ Compare the dict returned by get_platform_info() with the values speci=
fied in the base_url element. Return
+ true if and only if all defined attributes match the corresponding dic=
t entries. If an entry is not
+ defined in the base_url element, it is assumed to be irrelevant at thi=
s stage. For example,
+ <base_url os=3D"darwin">http://hgdownload.cse.ucsc.edu/admin/exe/macOS=
X.${architecture}/faToTwoBit</base_url>
+ where the OS must be 'darwin', but the architecture is filled in later=
using string.Template.
+ '''
+ os_ok =3D False
+ architecture_ok =3D False
+ for url_template in url_templates:
+ os_name =3D url_template.get( 'os', None )
+ architecture =3D url_template.get( 'architecture', None )
+ if os_name:
+ if os_name.lower() =3D=3D platform_info_dict[ 'os' ]:
+ os_ok =3D True
+ else:
+ os_ok =3D False
+ else:
+ os_ok =3D True
+ if architecture:
+ if architecture.lower() =3D=3D platform_info_dict[ 'architectu=
re' ]:
+ architecture_ok =3D True
+ else:
+ architecture_ok =3D False
+ else:
+ architecture_ok =3D True
+ if os_ok and architecture_ok:
+ return url_template
+ return None
+
def create_or_update_tool_dependency( app, tool_shed_repository, name, ver=
sion, type, status, set_status=3DTrue ):
# Called from Galaxy (never the tool shed) when a new repository is be=
ing installed or when an uninstalled repository is being reinstalled.
sa_session =3D app.model.context.current
@@ -204,6 +235,14 @@
missing_tool_dependencies =3D None
return tool_dependencies, missing_tool_dependencies
=20
+def get_platform_info_dict():
+ '''Return a dict with information about the current platform.'''
+ platform_dict =3D {}
+ sysname, nodename, release, version, machine =3D os.uname()
+ platform_dict[ 'os' ] =3D sysname.lower()
+ platform_dict[ 'architecture' ] =3D machine.lower()
+ return platform_dict
+
def get_tool_dependency( trans, id ):
"""Get a tool_dependency from the database via id"""
return trans.sa_session.query( trans.model.ToolDependency ).get( trans=
.security.decode_id( id ) )
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c static/scripts/galaxy.pages.js
--- a/static/scripts/galaxy.pages.js
+++ b/static/scripts/galaxy.pages.js
@@ -164,18 +164,19 @@
{
"Make link": function() {
// Get URL, name/title.
- var sUrl =3D $(wym._options.hrefSelector).val(),
- sName =3D $(".wym_title").val();
+ var sUrl =3D $(wym._options.hrefSelector).val() || '',
+ sId =3D $(".wym_id").val() || '',
+ sName =3D $(wym._options.titleSelector).val() || '=
';
=20
- if (sUrl && sName) {
+ if (sUrl || sId) {
// Create link.
wym._exec(WYMeditor.CREATE_LINK, sStamp);
=20
// Set link attributes.
var link =3D $("a[href=3D" + sStamp + "]", wym._do=
c.body);
link.attr(WYMeditor.HREF, sUrl)
- .attr(WYMeditor.TITLE, $(wym._options.titleSel=
ector).val())
- .attr("id", sName);
+ .attr(WYMeditor.TITLE, sName)
+ .attr("id", sId);
=20
// If link's text is default (wym-...), change it =
to the title.
if (link.text().indexOf('wym-') =3D=3D=3D 0) {
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c static/scripts/packed/galaxy.pages.js
--- a/static/scripts/packed/galaxy.pages.js
+++ b/static/scripts/packed/galaxy.pages.js
@@ -1,1 +1,1 @@
-var Galaxy=3D{ITEM_HISTORY:"item_history",ITEM_DATASET:"item_dataset",ITEM=
_WORKFLOW:"item_workflow",ITEM_PAGE:"item_page",ITEM_VISUALIZATION:"item_vi=
sualization",DIALOG_HISTORY_LINK:"link_history",DIALOG_DATASET_LINK:"link_d=
ataset",DIALOG_WORKFLOW_LINK:"link_workflow",DIALOG_PAGE_LINK:"link_page",D=
IALOG_VISUALIZATION_LINK:"link_visualization",DIALOG_EMBED_HISTORY:"embed_h=
istory",DIALOG_EMBED_DATASET:"embed_dataset",DIALOG_EMBED_WORKFLOW:"embed_w=
orkflow",DIALOG_EMBED_PAGE:"embed_page",DIALOG_EMBED_VISUALIZATION:"embed_v=
isualization",DIALOG_HISTORY_ANNOTATE:"history_annotate",};function init_ga=
laxy_elts(a){$(".annotation",a._doc.body).each(function(){$(this).click(fun=
ction(){var b=3Da._doc.createRange();b.selectNodeContents(this);var d=3Dwin=
dow.getSelection();d.removeAllRanges();d.addRange(b);var c=3D""})})}functio=
n get_item_info(d){var f,c,b;switch(d){case (Galaxy.ITEM_HISTORY):f=3D"Hist=
ory";c=3D"Histories";b=3D"history";item_class=3D"History";break;case (Galax=
y.ITEM_DATASET):f=3D"Dataset";c=3D"Datasets";b=3D"dataset";item_class=3D"Hi=
storyDatasetAssociation";break;case (Galaxy.ITEM_WORKFLOW):f=3D"Workflow";c=
=3D"Workflows";b=3D"workflow";item_class=3D"StoredWorkflow";break;case (Gal=
axy.ITEM_PAGE):f=3D"Page";c=3D"Pages";b=3D"page";item_class=3D"Page";break;=
case (Galaxy.ITEM_VISUALIZATION):f=3D"Visualization";c=3D"Visualizations";b=
=3D"visualization";item_class=3D"Visualization";break}var e=3D"list_"+c.toL=
owerCase()+"_for_selection";var a=3Dlist_objects_url.replace("LIST_ACTION",=
e);return{singular:f,plural:c,controller:b,iclass:item_class,list_ajax_url:=
a}}function make_item_importable(a,c,b){ajax_url=3Dset_accessible_url.repla=
ce("ITEM_CONTROLLER",a);$.ajax({type:"POST",url:ajax_url,data:{id:c,accessi=
ble:"True"},error:function(){alert("Making "+b+" accessible failed")}})}WYM=
editor.editor.prototype.dialog=3Dfunction(i,e,g){var a=3Dthis;var b=3Da.uni=
queStamp();var f=3Da.selected();function h(){$("#set_link_id").click(functi=
on(){$("#link_attribute_label").text("ID/Name");var k=3D$(".wym_href");k.ad=
dClass("wym_id").removeClass("wym_href");if(f){k.val($(f).attr("id"))}$(thi=
s).remove()})}if(i=3D=3DWYMeditor.DIALOG_LINK){if(f){$(a._options.hrefSelec=
tor).val($(f).attr(WYMeditor.HREF));$(a._options.srcSelector).val($(f).attr=
(WYMeditor.SRC));$(a._options.titleSelector).val($(f).attr(WYMeditor.TITLE)=
);$(a._options.altSelector).val($(f).attr(WYMeditor.ALT))}var c,d;if(f){c=
=3D$(f).attr("href");if(c=3D=3Dundefined){c=3D""}d=3D$(f).attr("title");if(=
d=3D=3Dundefined){d=3D""}}show_modal("Create Link","<div><div><label id=3D'=
link_attribute_label'>URL <span style=3D'float: right; font-size: 90%'><a h=
ref=3D'#' id=3D'set_link_id'>Create in-page anchor</a></span></label><br><i=
nput type=3D'text' class=3D'wym_href' value=3D'"+c+"' size=3D'40' /></div><=
div><label>Title</label><br><input type=3D'text' class=3D'wym_title' value=
=3D'"+d+"' size=3D'40' /></div><div>",{"Make link":function(){var l=3D$(a._=
options.hrefSelector).val(),m=3D$(".wym_title").val();if(l&&m){a._exec(WYMe=
ditor.CREATE_LINK,b);var k=3D$("a[href=3D"+b+"]",a._doc.body);k.attr(WYMedi=
tor.HREF,l).attr(WYMeditor.TITLE,$(a._options.titleSelector).val()).attr("i=
d",m);if(k.text().indexOf("wym-")=3D=3D=3D0){k.text(m)}}hide_modal()},Cance=
l:function(){hide_modal()}},{},h)}if(i=3D=3DWYMeditor.DIALOG_IMAGE){if(a._s=
elected_image){$(a._options.dialogImageSelector+" "+a._options.srcSelector)=
.val($(a._selected_image).attr(WYMeditor.SRC));$(a._options.dialogImageSele=
ctor+" "+a._options.titleSelector).val($(a._selected_image).attr(WYMeditor.=
TITLE));$(a._options.dialogImageSelector+" "+a._options.altSelector).val($(=
a._selected_image).attr(WYMeditor.ALT))}show_modal("Image","<div class=3D'r=
ow'><label>URL</label><br><input type=3D'text' class=3D'wym_src' value=3D''=
size=3D'40' /></div><div class=3D'row'><label>Alt text</label><br><input t=
ype=3D'text' class=3D'wym_alt' value=3D'' size=3D'40' /></div><div class=3D=
'row'><label>Title</label><br><input type=3D'text' class=3D'wym_title' valu=
e=3D'' size=3D'40' /></div>",{Insert:function(){var k=3D$(a._options.srcSel=
ector).val();if(k.length>0){a._exec(WYMeditor.INSERT_IMAGE,b);$("img[src$=
=3D"+b+"]",a._doc.body).attr(WYMeditor.SRC,k).attr(WYMeditor.TITLE,$(a._opt=
ions.titleSelector).val()).attr(WYMeditor.ALT,$(a._options.altSelector).val=
())}hide_modal()},Cancel:function(){hide_modal()}});return}if(i=3D=3DWYMedi=
tor.DIALOG_TABLE){show_modal("Table","<div class=3D'row'><label>Caption</la=
bel><br><input type=3D'text' class=3D'wym_caption' value=3D'' size=3D'40' /=
></div><div class=3D'row'><label>Summary</label><br><input type=3D'text' cl=
ass=3D'wym_summary' value=3D'' size=3D'40' /></div><div class=3D'row'><labe=
l>Number Of Rows<br></label><input type=3D'text' class=3D'wym_rows' value=
=3D'3' size=3D'3' /></div><div class=3D'row'><label>Number Of Cols<br></lab=
el><input type=3D'text' class=3D'wym_cols' value=3D'2' size=3D'3' /></div>"=
,{Insert:function(){var o=3D$(a._options.rowsSelector).val();var r=3D$(a._o=
ptions.colsSelector).val();if(o>0&&r>0){var n=3Da._doc.createElement(WYMedi=
tor.TABLE);var l=3Dnull;var q=3Dnull;var k=3D$(a._options.captionSelector).=
val();var p=3Dn.createCaption();p.innerHTML=3Dk;for(x=3D0;x<o;x++){l=3Dn.in=
sertRow(x);for(y=3D0;y<r;y++){l.insertCell(y)}}$(n).attr("summary",$(a._opt=
ions.summarySelector).val());var m=3D$(a.findUp(a.container(),WYMeditor.MAI=
N_CONTAINERS)).get(0);if(!m||!m.parentNode){$(a._doc.body).append(n)}else{$=
(m).after(n)}}hide_modal()},Cancel:function(){hide_modal()}})}if(i=3D=3DGal=
axy.DIALOG_HISTORY_LINK||i=3D=3DGalaxy.DIALOG_DATASET_LINK||i=3D=3DGalaxy.D=
IALOG_WORKFLOW_LINK||i=3D=3DGalaxy.DIALOG_PAGE_LINK||i=3D=3DGalaxy.DIALOG_V=
ISUALIZATION_LINK){var j;switch(i){case (Galaxy.DIALOG_HISTORY_LINK):j=3Dge=
t_item_info(Galaxy.ITEM_HISTORY);break;case (Galaxy.DIALOG_DATASET_LINK):j=
=3Dget_item_info(Galaxy.ITEM_DATASET);break;case (Galaxy.DIALOG_WORKFLOW_LI=
NK):j=3Dget_item_info(Galaxy.ITEM_WORKFLOW);break;case (Galaxy.DIALOG_PAGE_=
LINK):j=3Dget_item_info(Galaxy.ITEM_PAGE);break;case (Galaxy.DIALOG_VISUALI=
ZATION_LINK):j=3Dget_item_info(Galaxy.ITEM_VISUALIZATION);break}$.ajax({url=
:j.list_ajax_url,data:{},error:function(){alert("Failed to list "+j.plural.=
toLowerCase()+" for selection")},success:function(k){show_modal("Insert Lin=
k to "+j.singular,k+"<div><input id=3D'make-importable' type=3D'checkbox' c=
hecked/>Make the selected "+j.plural.toLowerCase()+" accessible so that the=
y can viewed by everyone.</div>",{Insert:function(){var m=3Dfalse;if($("#ma=
ke-importable:checked").val()!=3D=3Dnull){m=3Dtrue}var l=3Dnew Array();$("i=
nput[name=3Did]:checked").each(function(){var n=3D$(this).val();if(m){make_=
item_importable(j.controller,n,j.singular)}url_template=3Dget_name_and_link=
_url+n;ajax_url=3Durl_template.replace("ITEM_CONTROLLER",j.controller);$.ge=
tJSON(ajax_url,function(p){a._exec(WYMeditor.CREATE_LINK,b);var o=3D$("a[hr=
ef=3D"+b+"]",a._doc.body).text();if(o=3D=3D""||o=3D=3Db){a.insert("<a href=
=3D'"+p.link+"'>"+j.singular+" '"+p.name+"'</a>")}else{$("a[href=3D"+b+"]",=
a._doc.body).attr(WYMeditor.HREF,p.link).attr(WYMeditor.TITLE,j.singular+n)=
}})});hide_modal()},Cancel:function(){hide_modal()}})}})}if(i=3D=3DGalaxy.D=
IALOG_EMBED_HISTORY||i=3D=3DGalaxy.DIALOG_EMBED_DATASET||i=3D=3DGalaxy.DIAL=
OG_EMBED_WORKFLOW||i=3D=3DGalaxy.DIALOG_EMBED_PAGE||i=3D=3DGalaxy.DIALOG_EM=
BED_VISUALIZATION){var j;switch(i){case (Galaxy.DIALOG_EMBED_HISTORY):j=3Dg=
et_item_info(Galaxy.ITEM_HISTORY);break;case (Galaxy.DIALOG_EMBED_DATASET):=
j=3Dget_item_info(Galaxy.ITEM_DATASET);break;case (Galaxy.DIALOG_EMBED_WORK=
FLOW):j=3Dget_item_info(Galaxy.ITEM_WORKFLOW);break;case (Galaxy.DIALOG_EMB=
ED_PAGE):j=3Dget_item_info(Galaxy.ITEM_PAGE);break;case (Galaxy.DIALOG_EMBE=
D_VISUALIZATION):j=3Dget_item_info(Galaxy.ITEM_VISUALIZATION);break}$.ajax(=
{url:j.list_ajax_url,data:{},error:function(){alert("Failed to list "+j.plu=
ral.toLowerCase()+" for selection")},success:function(k){if(i=3D=3DGalaxy.D=
IALOG_EMBED_HISTORY||i=3D=3DGalaxy.DIALOG_EMBED_WORKFLOW||i=3D=3DGalaxy.DIA=
LOG_EMBED_VISUALIZATION){k=3Dk+"<div><input id=3D'make-importable' type=3D'=
checkbox' checked/>Make the selected "+j.plural.toLowerCase()+" accessible =
so that they can viewed by everyone.</div>"}show_modal("Embed "+j.plural,k,=
{Embed:function(){var l=3Dfalse;if($("#make-importable:checked").val()!=3Dn=
ull){l=3Dtrue}$("input[name=3Did]:checked").each(function(){var m=3D$(this)=
.val();var p=3D$("label[for=3D'"+m+"']:first").text();if(l){make_item_impor=
table(j.controller,m,j.singular)}var n=3Dj.iclass+"-"+m;var o=3D"<p><div id=
=3D'"+n+"' class=3D'embedded-item "+j.singular.toLowerCase()+" placeholder'=
><p class=3D'title'>Embedded Galaxy "+j.singular+" '"+p+"'</p><p class=3D'c=
ontent'> [Do not edit this bloc=
k; Galaxy will fill it in with the annotated "+j.singular.toLowerCase()+" w=
hen it is displayed.] </p></div></p=
>";a.insert(" ");a.insert(o);$("#"+n,a._doc.body).each(function(){var =
q=3Dtrue;while(q){var r=3D$(this).prev();if(r.length!=3D0&&jQuery.trim(r.te=
xt())=3D=3D""){r.remove()}else{q=3Dfalse}}})});hide_modal()},Cancel:functio=
n(){hide_modal()}})}})}if(i=3D=3DGalaxy.DIALOG_ANNOTATE_HISTORY){$.ajax({ur=
l:list_histories_for_selection_url,data:{},error:function(){alert("Grid ref=
resh failed")},success:function(k){show_modal("Insert Link to History",k,{A=
nnotate:function(){var l=3Dnew Array();$("input[name=3Did]:checked").each(f=
unction(){var m=3D$(this).val();$.ajax({url:get_history_annotation_table_ur=
l,data:{id:m},error:function(){alert("Grid refresh failed")},success:functi=
on(n){a.insert(n);init_galaxy_elts(a)}})});hide_modal()},Cancel:function(){=
hide_modal()}})}})}};$(function(){$(document).ajaxError(function(i,g){var h=
=3Dg.responseText||g.statusText||"Could not connect to server";show_modal("=
Server error",h,{"Ignore error":hide_modal});return false});$("[name=3Dpage=
_content]").wymeditor({skin:"galaxy",basePath:editor_base_path,iframeBasePa=
th:iframe_base_path,boxHtml:"<table class=3D'wym_box' width=3D'100%' height=
=3D'100%'><tr><td><div class=3D'wym_area_top'>"+WYMeditor.TOOLS+"</div></td=
></tr><tr height=3D'100%'><td><div class=3D'wym_area_main' style=3D'height:=
100%;'>"+WYMeditor.IFRAME+WYMeditor.STATUS+"</div></div></td></tr></table>=
",toolsItems:[{name:"Bold",title:"Strong",css:"wym_tools_strong"},{name:"It=
alic",title:"Emphasis",css:"wym_tools_emphasis"},{name:"Superscript",title:=
"Superscript",css:"wym_tools_superscript"},{name:"Subscript",title:"Subscri=
pt",css:"wym_tools_subscript"},{name:"InsertOrderedList",title:"Ordered_Lis=
t",css:"wym_tools_ordered_list"},{name:"InsertUnorderedList",title:"Unorder=
ed_List",css:"wym_tools_unordered_list"},{name:"Indent",title:"Indent",css:=
"wym_tools_indent"},{name:"Outdent",title:"Outdent",css:"wym_tools_outdent"=
},{name:"Undo",title:"Undo",css:"wym_tools_undo"},{name:"Redo",title:"Redo"=
,css:"wym_tools_redo"},{name:"CreateLink",title:"Link",css:"wym_tools_link"=
},{name:"Unlink",title:"Unlink",css:"wym_tools_unlink"},{name:"InsertImage"=
,title:"Image",css:"wym_tools_image"},{name:"InsertTable",title:"Table",css=
:"wym_tools_table"},]});var d=3D$.wymeditors(0);var f=3Dfunction(g){show_mo=
dal("Saving page","progress");$.ajax({url:save_url,type:"POST",data:{id:pag=
e_id,content:d.xhtml(),annotations:JSON.stringify(new Object()),_:"true"},s=
uccess:function(){g()}})};$("#save-button").click(function(){f(function(){h=
ide_modal()})});$("#close-button").click(function(){var h=3Dfalse;if(h){var=
g=3Dfunction(){window.onbeforeunload=3Dundefined;window.document.location=
=3Dpage_list_url};show_modal("Close editor","There are unsaved changes to y=
our page which will be lost.",{Cancel:hide_modal,"Save Changes":function(){=
f(g)}},{"Don't Save":g})}else{window.document.location=3Dpage_list_url}});v=
ar a=3D$("<div class=3D'galaxy-page-editor-button'><a id=3D'insert-galaxy-l=
ink' class=3D'action-button popup' href=3D'#'>Paragraph type</a></div>");$(=
".wym_area_top").append(a);var b=3D{};$.each(d._options.containersItems,fun=
ction(h,g){var i=3Dg.name;b[g.title.replace("_"," ")]=3Dfunction(){d.contai=
ner(i)}});make_popupmenu(a,b);var c=3D$("<div><a id=3D'insert-galaxy-link' =
class=3D'action-button popup' href=3D'#'>Insert Link to Galaxy Object</a></=
div>").addClass("galaxy-page-editor-button");$(".wym_area_top").append(c);m=
ake_popupmenu(c,{"Insert History Link":function(){d.dialog(Galaxy.DIALOG_HI=
STORY_LINK)},"Insert Dataset Link":function(){d.dialog(Galaxy.DIALOG_DATASE=
T_LINK)},"Insert Workflow Link":function(){d.dialog(Galaxy.DIALOG_WORKFLOW_=
LINK)},"Insert Page Link":function(){d.dialog(Galaxy.DIALOG_PAGE_LINK)},"In=
sert Visualization Link":function(){d.dialog(Galaxy.DIALOG_VISUALIZATION_LI=
NK)},});var e=3D$("<div><a id=3D'embed-galaxy-object' class=3D'action-butto=
n popup' href=3D'#'>Embed Galaxy Object</a></div>").addClass("galaxy-page-e=
ditor-button");$(".wym_area_top").append(e);make_popupmenu(e,{"Embed Histor=
y":function(){d.dialog(Galaxy.DIALOG_EMBED_HISTORY)},"Embed Dataset":functi=
on(){d.dialog(Galaxy.DIALOG_EMBED_DATASET)},"Embed Workflow":function(){d.d=
ialog(Galaxy.DIALOG_EMBED_WORKFLOW)},"Embed Visualization":function(){d.dia=
log(Galaxy.DIALOG_EMBED_VISUALIZATION)},})});
\ No newline at end of file
+var Galaxy=3D{ITEM_HISTORY:"item_history",ITEM_DATASET:"item_dataset",ITEM=
_WORKFLOW:"item_workflow",ITEM_PAGE:"item_page",ITEM_VISUALIZATION:"item_vi=
sualization",DIALOG_HISTORY_LINK:"link_history",DIALOG_DATASET_LINK:"link_d=
ataset",DIALOG_WORKFLOW_LINK:"link_workflow",DIALOG_PAGE_LINK:"link_page",D=
IALOG_VISUALIZATION_LINK:"link_visualization",DIALOG_EMBED_HISTORY:"embed_h=
istory",DIALOG_EMBED_DATASET:"embed_dataset",DIALOG_EMBED_WORKFLOW:"embed_w=
orkflow",DIALOG_EMBED_PAGE:"embed_page",DIALOG_EMBED_VISUALIZATION:"embed_v=
isualization",DIALOG_HISTORY_ANNOTATE:"history_annotate",};function init_ga=
laxy_elts(a){$(".annotation",a._doc.body).each(function(){$(this).click(fun=
ction(){var b=3Da._doc.createRange();b.selectNodeContents(this);var d=3Dwin=
dow.getSelection();d.removeAllRanges();d.addRange(b);var c=3D""})})}functio=
n get_item_info(d){var f,c,b;switch(d){case (Galaxy.ITEM_HISTORY):f=3D"Hist=
ory";c=3D"Histories";b=3D"history";item_class=3D"History";break;case (Galax=
y.ITEM_DATASET):f=3D"Dataset";c=3D"Datasets";b=3D"dataset";item_class=3D"Hi=
storyDatasetAssociation";break;case (Galaxy.ITEM_WORKFLOW):f=3D"Workflow";c=
=3D"Workflows";b=3D"workflow";item_class=3D"StoredWorkflow";break;case (Gal=
axy.ITEM_PAGE):f=3D"Page";c=3D"Pages";b=3D"page";item_class=3D"Page";break;=
case (Galaxy.ITEM_VISUALIZATION):f=3D"Visualization";c=3D"Visualizations";b=
=3D"visualization";item_class=3D"Visualization";break}var e=3D"list_"+c.toL=
owerCase()+"_for_selection";var a=3Dlist_objects_url.replace("LIST_ACTION",=
e);return{singular:f,plural:c,controller:b,iclass:item_class,list_ajax_url:=
a}}function make_item_importable(a,c,b){ajax_url=3Dset_accessible_url.repla=
ce("ITEM_CONTROLLER",a);$.ajax({type:"POST",url:ajax_url,data:{id:c,accessi=
ble:"True"},error:function(){alert("Making "+b+" accessible failed")}})}WYM=
editor.editor.prototype.dialog=3Dfunction(i,e,g){var a=3Dthis;var b=3Da.uni=
queStamp();var f=3Da.selected();function h(){$("#set_link_id").click(functi=
on(){$("#link_attribute_label").text("ID/Name");var k=3D$(".wym_href");k.ad=
dClass("wym_id").removeClass("wym_href");if(f){k.val($(f).attr("id"))}$(thi=
s).remove()})}if(i=3D=3DWYMeditor.DIALOG_LINK){if(f){$(a._options.hrefSelec=
tor).val($(f).attr(WYMeditor.HREF));$(a._options.srcSelector).val($(f).attr=
(WYMeditor.SRC));$(a._options.titleSelector).val($(f).attr(WYMeditor.TITLE)=
);$(a._options.altSelector).val($(f).attr(WYMeditor.ALT))}var c,d;if(f){c=
=3D$(f).attr("href");if(c=3D=3Dundefined){c=3D""}d=3D$(f).attr("title");if(=
d=3D=3Dundefined){d=3D""}}show_modal("Create Link","<div><div><label id=3D'=
link_attribute_label'>URL <span style=3D'float: right; font-size: 90%'><a h=
ref=3D'#' id=3D'set_link_id'>Create in-page anchor</a></span></label><br><i=
nput type=3D'text' class=3D'wym_href' value=3D'"+c+"' size=3D'40' /></div><=
div><label>Title</label><br><input type=3D'text' class=3D'wym_title' value=
=3D'"+d+"' size=3D'40' /></div><div>",{"Make link":function(){var m=3D$(a._=
options.hrefSelector).val()||"",k=3D$(".wym_id").val()||"",n=3D$(a._options=
.titleSelector).val()||"";if(m||k){a._exec(WYMeditor.CREATE_LINK,b);var l=
=3D$("a[href=3D"+b+"]",a._doc.body);l.attr(WYMeditor.HREF,m).attr(WYMeditor=
.TITLE,n).attr("id",k);if(l.text().indexOf("wym-")=3D=3D=3D0){l.text(n)}}hi=
de_modal()},Cancel:function(){hide_modal()}},{},h)}if(i=3D=3DWYMeditor.DIAL=
OG_IMAGE){if(a._selected_image){$(a._options.dialogImageSelector+" "+a._opt=
ions.srcSelector).val($(a._selected_image).attr(WYMeditor.SRC));$(a._option=
s.dialogImageSelector+" "+a._options.titleSelector).val($(a._selected_image=
).attr(WYMeditor.TITLE));$(a._options.dialogImageSelector+" "+a._options.al=
tSelector).val($(a._selected_image).attr(WYMeditor.ALT))}show_modal("Image"=
,"<div class=3D'row'><label>URL</label><br><input type=3D'text' class=3D'wy=
m_src' value=3D'' size=3D'40' /></div><div class=3D'row'><label>Alt text</l=
abel><br><input type=3D'text' class=3D'wym_alt' value=3D'' size=3D'40' /></=
div><div class=3D'row'><label>Title</label><br><input type=3D'text' class=
=3D'wym_title' value=3D'' size=3D'40' /></div>",{Insert:function(){var k=3D=
$(a._options.srcSelector).val();if(k.length>0){a._exec(WYMeditor.INSERT_IMA=
GE,b);$("img[src$=3D"+b+"]",a._doc.body).attr(WYMeditor.SRC,k).attr(WYMedit=
or.TITLE,$(a._options.titleSelector).val()).attr(WYMeditor.ALT,$(a._options=
.altSelector).val())}hide_modal()},Cancel:function(){hide_modal()}});return=
}if(i=3D=3DWYMeditor.DIALOG_TABLE){show_modal("Table","<div class=3D'row'><=
label>Caption</label><br><input type=3D'text' class=3D'wym_caption' value=
=3D'' size=3D'40' /></div><div class=3D'row'><label>Summary</label><br><inp=
ut type=3D'text' class=3D'wym_summary' value=3D'' size=3D'40' /></div><div =
class=3D'row'><label>Number Of Rows<br></label><input type=3D'text' class=
=3D'wym_rows' value=3D'3' size=3D'3' /></div><div class=3D'row'><label>Numb=
er Of Cols<br></label><input type=3D'text' class=3D'wym_cols' value=3D'2' s=
ize=3D'3' /></div>",{Insert:function(){var o=3D$(a._options.rowsSelector).v=
al();var r=3D$(a._options.colsSelector).val();if(o>0&&r>0){var n=3Da._doc.c=
reateElement(WYMeditor.TABLE);var l=3Dnull;var q=3Dnull;var k=3D$(a._option=
s.captionSelector).val();var p=3Dn.createCaption();p.innerHTML=3Dk;for(x=3D=
0;x<o;x++){l=3Dn.insertRow(x);for(y=3D0;y<r;y++){l.insertCell(y)}}$(n).attr=
("summary",$(a._options.summarySelector).val());var m=3D$(a.findUp(a.contai=
ner(),WYMeditor.MAIN_CONTAINERS)).get(0);if(!m||!m.parentNode){$(a._doc.bod=
y).append(n)}else{$(m).after(n)}}hide_modal()},Cancel:function(){hide_modal=
()}})}if(i=3D=3DGalaxy.DIALOG_HISTORY_LINK||i=3D=3DGalaxy.DIALOG_DATASET_LI=
NK||i=3D=3DGalaxy.DIALOG_WORKFLOW_LINK||i=3D=3DGalaxy.DIALOG_PAGE_LINK||i=
=3D=3DGalaxy.DIALOG_VISUALIZATION_LINK){var j;switch(i){case (Galaxy.DIALOG=
_HISTORY_LINK):j=3Dget_item_info(Galaxy.ITEM_HISTORY);break;case (Galaxy.DI=
ALOG_DATASET_LINK):j=3Dget_item_info(Galaxy.ITEM_DATASET);break;case (Galax=
y.DIALOG_WORKFLOW_LINK):j=3Dget_item_info(Galaxy.ITEM_WORKFLOW);break;case =
(Galaxy.DIALOG_PAGE_LINK):j=3Dget_item_info(Galaxy.ITEM_PAGE);break;case (G=
alaxy.DIALOG_VISUALIZATION_LINK):j=3Dget_item_info(Galaxy.ITEM_VISUALIZATIO=
N);break}$.ajax({url:j.list_ajax_url,data:{},error:function(){alert("Failed=
to list "+j.plural.toLowerCase()+" for selection")},success:function(k){sh=
ow_modal("Insert Link to "+j.singular,k+"<div><input id=3D'make-importable'=
type=3D'checkbox' checked/>Make the selected "+j.plural.toLowerCase()+" ac=
cessible so that they can viewed by everyone.</div>",{Insert:function(){var=
m=3Dfalse;if($("#make-importable:checked").val()!=3D=3Dnull){m=3Dtrue}var =
l=3Dnew Array();$("input[name=3Did]:checked").each(function(){var n=3D$(thi=
s).val();if(m){make_item_importable(j.controller,n,j.singular)}url_template=
=3Dget_name_and_link_url+n;ajax_url=3Durl_template.replace("ITEM_CONTROLLER=
",j.controller);$.getJSON(ajax_url,function(p){a._exec(WYMeditor.CREATE_LIN=
K,b);var o=3D$("a[href=3D"+b+"]",a._doc.body).text();if(o=3D=3D""||o=3D=3Db=
){a.insert("<a href=3D'"+p.link+"'>"+j.singular+" '"+p.name+"'</a>")}else{$=
("a[href=3D"+b+"]",a._doc.body).attr(WYMeditor.HREF,p.link).attr(WYMeditor.=
TITLE,j.singular+n)}})});hide_modal()},Cancel:function(){hide_modal()}})}})=
}if(i=3D=3DGalaxy.DIALOG_EMBED_HISTORY||i=3D=3DGalaxy.DIALOG_EMBED_DATASET|=
|i=3D=3DGalaxy.DIALOG_EMBED_WORKFLOW||i=3D=3DGalaxy.DIALOG_EMBED_PAGE||i=3D=
=3DGalaxy.DIALOG_EMBED_VISUALIZATION){var j;switch(i){case (Galaxy.DIALOG_E=
MBED_HISTORY):j=3Dget_item_info(Galaxy.ITEM_HISTORY);break;case (Galaxy.DIA=
LOG_EMBED_DATASET):j=3Dget_item_info(Galaxy.ITEM_DATASET);break;case (Galax=
y.DIALOG_EMBED_WORKFLOW):j=3Dget_item_info(Galaxy.ITEM_WORKFLOW);break;case=
(Galaxy.DIALOG_EMBED_PAGE):j=3Dget_item_info(Galaxy.ITEM_PAGE);break;case =
(Galaxy.DIALOG_EMBED_VISUALIZATION):j=3Dget_item_info(Galaxy.ITEM_VISUALIZA=
TION);break}$.ajax({url:j.list_ajax_url,data:{},error:function(){alert("Fai=
led to list "+j.plural.toLowerCase()+" for selection")},success:function(k)=
{if(i=3D=3DGalaxy.DIALOG_EMBED_HISTORY||i=3D=3DGalaxy.DIALOG_EMBED_WORKFLOW=
||i=3D=3DGalaxy.DIALOG_EMBED_VISUALIZATION){k=3Dk+"<div><input id=3D'make-i=
mportable' type=3D'checkbox' checked/>Make the selected "+j.plural.toLowerC=
ase()+" accessible so that they can viewed by everyone.</div>"}show_modal("=
Embed "+j.plural,k,{Embed:function(){var l=3Dfalse;if($("#make-importable:c=
hecked").val()!=3Dnull){l=3Dtrue}$("input[name=3Did]:checked").each(functio=
n(){var m=3D$(this).val();var p=3D$("label[for=3D'"+m+"']:first").text();if=
(l){make_item_importable(j.controller,m,j.singular)}var n=3Dj.iclass+"-"+m;=
var o=3D"<p><div id=3D'"+n+"' class=3D'embedded-item "+j.singular.toLowerCa=
se()+" placeholder'><p class=3D'title'>Embedded Galaxy "+j.singular+" '"+p+=
"'</p><p class=3D'content'> [Do=
not edit this block; Galaxy will fill it in with the annotated "+j.singula=
r.toLowerCase()+" when it is displayed.] =
</p></div></p>";a.insert(" ");a.insert(o);$("#"+n,a._doc.body).e=
ach(function(){var q=3Dtrue;while(q){var r=3D$(this).prev();if(r.length!=3D=
0&&jQuery.trim(r.text())=3D=3D""){r.remove()}else{q=3Dfalse}}})});hide_moda=
l()},Cancel:function(){hide_modal()}})}})}if(i=3D=3DGalaxy.DIALOG_ANNOTATE_=
HISTORY){$.ajax({url:list_histories_for_selection_url,data:{},error:functio=
n(){alert("Grid refresh failed")},success:function(k){show_modal("Insert Li=
nk to History",k,{Annotate:function(){var l=3Dnew Array();$("input[name=3Di=
d]:checked").each(function(){var m=3D$(this).val();$.ajax({url:get_history_=
annotation_table_url,data:{id:m},error:function(){alert("Grid refresh faile=
d")},success:function(n){a.insert(n);init_galaxy_elts(a)}})});hide_modal()}=
,Cancel:function(){hide_modal()}})}})}};$(function(){$(document).ajaxError(=
function(i,g){var h=3Dg.responseText||g.statusText||"Could not connect to s=
erver";show_modal("Server error",h,{"Ignore error":hide_modal});return fals=
e});$("[name=3Dpage_content]").wymeditor({skin:"galaxy",basePath:editor_bas=
e_path,iframeBasePath:iframe_base_path,boxHtml:"<table class=3D'wym_box' wi=
dth=3D'100%' height=3D'100%'><tr><td><div class=3D'wym_area_top'>"+WYMedito=
r.TOOLS+"</div></td></tr><tr height=3D'100%'><td><div class=3D'wym_area_mai=
n' style=3D'height: 100%;'>"+WYMeditor.IFRAME+WYMeditor.STATUS+"</div></div=
></td></tr></table>",toolsItems:[{name:"Bold",title:"Strong",css:"wym_tools=
_strong"},{name:"Italic",title:"Emphasis",css:"wym_tools_emphasis"},{name:"=
Superscript",title:"Superscript",css:"wym_tools_superscript"},{name:"Subscr=
ipt",title:"Subscript",css:"wym_tools_subscript"},{name:"InsertOrderedList"=
,title:"Ordered_List",css:"wym_tools_ordered_list"},{name:"InsertUnorderedL=
ist",title:"Unordered_List",css:"wym_tools_unordered_list"},{name:"Indent",=
title:"Indent",css:"wym_tools_indent"},{name:"Outdent",title:"Outdent",css:=
"wym_tools_outdent"},{name:"Undo",title:"Undo",css:"wym_tools_undo"},{name:=
"Redo",title:"Redo",css:"wym_tools_redo"},{name:"CreateLink",title:"Link",c=
ss:"wym_tools_link"},{name:"Unlink",title:"Unlink",css:"wym_tools_unlink"},=
{name:"InsertImage",title:"Image",css:"wym_tools_image"},{name:"InsertTable=
",title:"Table",css:"wym_tools_table"},]});var d=3D$.wymeditors(0);var f=3D=
function(g){show_modal("Saving page","progress");$.ajax({url:save_url,type:=
"POST",data:{id:page_id,content:d.xhtml(),annotations:JSON.stringify(new Ob=
ject()),_:"true"},success:function(){g()}})};$("#save-button").click(functi=
on(){f(function(){hide_modal()})});$("#close-button").click(function(){var =
h=3Dfalse;if(h){var g=3Dfunction(){window.onbeforeunload=3Dundefined;window=
.document.location=3Dpage_list_url};show_modal("Close editor","There are un=
saved changes to your page which will be lost.",{Cancel:hide_modal,"Save Ch=
anges":function(){f(g)}},{"Don't Save":g})}else{window.document.location=3D=
page_list_url}});var a=3D$("<div class=3D'galaxy-page-editor-button'><a id=
=3D'insert-galaxy-link' class=3D'action-button popup' href=3D'#'>Paragraph =
type</a></div>");$(".wym_area_top").append(a);var b=3D{};$.each(d._options.=
containersItems,function(h,g){var i=3Dg.name;b[g.title.replace("_"," ")]=3D=
function(){d.container(i)}});make_popupmenu(a,b);var c=3D$("<div><a id=3D'i=
nsert-galaxy-link' class=3D'action-button popup' href=3D'#'>Insert Link to =
Galaxy Object</a></div>").addClass("galaxy-page-editor-button");$(".wym_are=
a_top").append(c);make_popupmenu(c,{"Insert History Link":function(){d.dial=
og(Galaxy.DIALOG_HISTORY_LINK)},"Insert Dataset Link":function(){d.dialog(G=
alaxy.DIALOG_DATASET_LINK)},"Insert Workflow Link":function(){d.dialog(Gala=
xy.DIALOG_WORKFLOW_LINK)},"Insert Page Link":function(){d.dialog(Galaxy.DIA=
LOG_PAGE_LINK)},"Insert Visualization Link":function(){d.dialog(Galaxy.DIAL=
OG_VISUALIZATION_LINK)},});var e=3D$("<div><a id=3D'embed-galaxy-object' cl=
ass=3D'action-button popup' href=3D'#'>Embed Galaxy Object</a></div>").addC=
lass("galaxy-page-editor-button");$(".wym_area_top").append(e);make_popupme=
nu(e,{"Embed History":function(){d.dialog(Galaxy.DIALOG_EMBED_HISTORY)},"Em=
bed Dataset":function(){d.dialog(Galaxy.DIALOG_EMBED_DATASET)},"Embed Workf=
low":function(){d.dialog(Galaxy.DIALOG_EMBED_WORKFLOW)},"Embed Visualizatio=
n":function(){d.dialog(Galaxy.DIALOG_EMBED_VISUALIZATION)},})});
\ No newline at end of file
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c static/scripts/packed/mvc/tools.js
--- a/static/scripts/packed/mvc/tools.js
+++ b/static/scripts/packed/mvc/tools.js
@@ -1,1 +1,1 @@
-define(["libs/underscore","viz/trackster/util","mvc/data","libs/backbone/b=
ackbone-relational"],function(q,a,r){var f=3DBackbone.RelationalModel.exten=
d({defaults:{name:null,hidden:false},show:function(){this.set("hidden",fals=
e)},hide:function(){this.set("hidden",true)},is_visible:function(){return !=
this.attributes.hidden}});var k=3DBackbone.RelationalModel.extend({defaults=
:{name:null,label:null,type:null,value:null,num_samples:5},initialize:funct=
ion(){this.attributes.html=3Dunescape(this.attributes.html)},copy:function(=
){return new k(this.toJSON())},get_samples:function(){var u=3Dthis.get("typ=
e"),t=3Dnull;if(u=3D=3D=3D"number"){t=3Dd3.scale.linear().domain([this.get(=
"min"),this.get("max")]).ticks(this.get("num_samples"))}else{if(u=3D=3D=3D"=
select"){t=3Dq.map(this.get("options"),function(v){return v[0]})}}return t}=
});var e=3Df.extend({defaults:{description:null,target:null,inputs:[]},rela=
tions:[{type:Backbone.HasMany,key:"inputs",relatedModel:k,reverseRelation:{=
key:"tool",includeInJSON:false}}],urlRoot:galaxy_paths.get("tool_url"),copy=
:function(u){var v=3Dnew e(this.toJSON());if(u){var t=3Dnew Backbone.Collec=
tion();v.get("inputs").each(function(w){if(w.get_samples()){t.push(w)}});v.=
set("inputs",t)}return v},apply_search_results:function(t){(q.indexOf(t,thi=
s.attributes.id)!=3D=3D-1?this.show():this.hide());return this.is_visible()=
},set_input_value:function(t,u){this.get("inputs").find(function(v){return =
v.get("name")=3D=3D=3Dt}).set("value",u)},set_input_values:function(u){var =
t=3Dthis;q.each(q.keys(u),function(v){t.set_input_value(v,u[v])})},run:func=
tion(){return this._run()},rerun:function(u,t){return this._run({action:"re=
run",target_dataset_id:u.id,regions:t})},get_inputs_dict:function(){var t=
=3D{};this.get("inputs").each(function(u){t[u.get("name")]=3Du.get("value")=
});return t},_run:function(v){var w=3Dq.extend({tool_id:this.id,inputs:this=
.get_inputs_dict()},v);var u=3D$.Deferred(),t=3Dnew a.ServerStateDeferred({=
ajax_settings:{url:this.urlRoot,data:JSON.stringify(w),dataType:"json",cont=
entType:"application/json",type:"POST"},interval:2000,success_fn:function(x=
){return x!=3D=3D"pending"}});$.when(t.go()).then(function(x){u.resolve(new=
r.DatasetCollection().reset(x))});return u}});var i=3DBackbone.Collection.=
extend({model:e});var m=3Df.extend({});var p=3Df.extend({defaults:{elems:[]=
,open:false},clear_search_results:function(){q.each(this.attributes.elems,f=
unction(t){t.show()});this.show();this.set("open",false)},apply_search_resu=
lts:function(u){var v=3Dtrue,t;q.each(this.attributes.elems,function(w){if(=
w instanceof m){t=3Dw;t.hide()}else{if(w instanceof e){if(w.apply_search_re=
sults(u)){v=3Dfalse;if(t){t.show()}}}}});if(v){this.hide()}else{this.show()=
;this.set("open",true)}}});var b=3Df.extend({defaults:{search_hint_string:"=
search tools",min_chars_for_search:3,spinner_url:"",clear_btn_url:"",search=
_url:"",visible:true,query:"",results:null,clear_key:27},initialize:functio=
n(){this.on("change:query",this.do_search)},do_search:function(){var v=3Dth=
is.attributes.query;if(v.length<this.attributes.min_chars_for_search){this.=
set("results",null);return}var u=3Dv+"*";if(this.timer){clearTimeout(this.t=
imer)}$("#search-clear-btn").hide();$("#search-spinner").show();var t=3Dthi=
s;this.timer=3DsetTimeout(function(){$.get(t.attributes.search_url,{query:u=
},function(w){t.set("results",w);$("#search-spinner").hide();$("#search-cle=
ar-btn").show()},"json")},200)},clear_search:function(){this.set("query",""=
);this.set("results",null)}});var j=3DBackbone.Collection.extend({url:"/too=
ls",tools:new i(),parse:function(t){var u=3Dfunction(x){var w=3Dx.type;if(w=
=3D=3D=3D"tool"){return new e(x)}else{if(w=3D=3D=3D"section"){var v=3Dq.map=
(x.elems,u);x.elems=3Dv;return new p(x)}else{if(w=3D=3D=3D"label"){return n=
ew m(x)}}}};return q.map(t,u)},initialize:function(t){this.tool_search=3Dt.=
tool_search;this.tool_search.on("change:results",this.apply_search_results,=
this);this.on("reset",this.populate_tools,this)},populate_tools:function(){=
var t=3Dthis;t.tools=3Dnew i();this.each(function(u){if(u instanceof p){q.e=
ach(u.attributes.elems,function(v){if(v instanceof e){t.tools.push(v)}})}el=
se{if(u instanceof e){t.tools.push(u)}}})},clear_search_results:function(){=
this.each(function(t){if(t instanceof p){t.clear_search_results()}else{t.sh=
ow()}})},apply_search_results:function(){var u=3Dthis.tool_search.attribute=
s.results;if(u=3D=3D=3Dnull){this.clear_search_results();return}var t=3Dnul=
l;this.each(function(v){if(v instanceof m){t=3Dv;t.hide()}else{if(v instanc=
eof e){if(v.apply_search_results(u)){if(t){t.show()}}}else{t=3Dnull;v.apply=
_search_results(u)}}})}});var n=3DBackbone.View.extend({initialize:function=
(){this.model.on("change:hidden",this.update_visible,this);this.update_visi=
ble()},update_visible:function(){(this.model.attributes.hidden?this.$el.hid=
e():this.$el.show())}});var h=3Dn.extend({tagName:"div",template:Handlebars=
.templates.tool_link,render:function(){this.$el.append(this.template(this.m=
odel.toJSON()));return this}});var c=3Dn.extend({tagName:"div",className:"t=
oolPanelLabel",render:function(){this.$el.append($("<span/>").text(this.mod=
el.attributes.name));return this}});var g=3Dn.extend({tagName:"div",classNa=
me:"toolSectionWrapper",template:Handlebars.templates.panel_section,initial=
ize:function(){n.prototype.initialize.call(this);this.model.on("change:open=
",this.update_open,this)},render:function(){this.$el.append(this.template(t=
his.model.toJSON()));var t=3Dthis.$el.find(".toolSectionBody");q.each(this.=
model.attributes.elems,function(u){if(u instanceof e){var v=3Dnew h({model:=
u,className:"toolTitle"});v.render();t.append(v.$el)}else{if(u instanceof m=
){var w=3Dnew c({model:u});w.render();t.append(w.$el)}else{}}});return this=
},events:{"click .toolSectionTitle > a":"toggle"},toggle:function(){this.mo=
del.set("open",!this.model.attributes.open)},update_open:function(){(this.m=
odel.attributes.open?this.$el.children(".toolSectionBody").slideDown("fast"=
):this.$el.children(".toolSectionBody").slideUp("fast"))}});var l=3DBackbon=
e.View.extend({tagName:"div",id:"tool-search",className:"bar",template:Hand=
lebars.templates.tool_search,events:{click:"focus_and_select","keyup :input=
":"query_changed","click #search-clear-btn":"clear"},render:function(){this=
.$el.append(this.template(this.model.toJSON()));if(!this.model.is_visible()=
){this.$el.hide()}this.$el.find(".tooltip").tooltip();return this},focus_an=
d_select:function(){this.$el.find(":input").focus().select()},clear:functio=
n(){this.model.clear_search();this.$el.find(":input").val(this.model.attrib=
utes.search_hint_string);this.focus_and_select();return false},query_change=
d:function(t){if((this.model.attributes.clear_key)&&(this.model.attributes.=
clear_key=3D=3D=3Dt.which)){this.clear();return false}this.model.set("query=
",this.$el.find(":input").val())}});var s=3DBackbone.View.extend({tagName:"=
div",className:"toolMenu",initialize:function(){this.collection.tool_search=
.on("change:results",this.handle_search_results,this)},render:function(){va=
r t=3Dthis;var u=3Dnew l({model:this.collection.tool_search});u.render();t.=
$el.append(u.$el);this.collection.each(function(w){if(w instanceof p){var v=
=3Dnew g({model:w});v.render();t.$el.append(v.$el)}else{if(w instanceof e){=
var x=3Dnew h({model:w,className:"toolTitleNoSection"});x.render();t.$el.ap=
pend(x.$el)}else{if(w instanceof m){var y=3Dnew c({model:w});y.render();t.$=
el.append(y.$el)}}}});t.$el.find("a.tool-link").click(function(x){var w=3D$=
(this).attr("class").split(/\s+/)[0],v=3Dt.collection.tools.get(w);t.trigge=
r("tool_link_click",x,v)});return this},handle_search_results:function(){va=
r t=3Dthis.collection.tool_search.attributes.results;if(t&&t.length=3D=3D=
=3D0){$("#search-no-results").show()}else{$("#search-no-results").hide()}}}=
);var o=3DBackbone.View.extend({className:"toolForm",template:Handlebars.te=
mplates.tool_form,render:function(){this.$el.children().remove();this.$el.a=
ppend(this.template(this.model.toJSON()))}});var d=3DBackbone.View.extend({=
className:"toolMenuAndView",initialize:function(){this.tool_panel_view=3Dne=
w s({collection:this.collection});this.tool_form_view=3Dnew o()},render:fun=
ction(){this.tool_panel_view.render();this.tool_panel_view.$el.css("float",=
"left");this.$el.append(this.tool_panel_view.$el);this.tool_form_view.$el.h=
ide();this.$el.append(this.tool_form_view.$el);var t=3Dthis;this.tool_panel=
_view.on("tool_link_click",function(v,u){v.preventDefault();t.show_tool(u)}=
)},show_tool:function(u){var t=3Dthis;u.fetch().done(function(){t.tool_form=
_view.model=3Du;t.tool_form_view.render();t.tool_form_view.$el.show();$("#l=
eft").width("650px")})}});return{Tool:e,ToolSearch:b,ToolPanel:j,ToolPanelV=
iew:s,ToolFormView:o}});
\ No newline at end of file
+define(["libs/underscore","viz/trackster/util","mvc/data","libs/backbone/b=
ackbone-relational"],function(q,a,r){var g=3DBackbone.RelationalModel.exten=
d({defaults:{name:null,hidden:false},show:function(){this.set("hidden",fals=
e)},hide:function(){this.set("hidden",true)},is_visible:function(){return !=
this.attributes.hidden}});var c=3DBackbone.RelationalModel.extend({defaults=
:{name:null,label:null,type:null,value:null,num_samples:5},initialize:funct=
ion(){this.attributes.html=3Dunescape(this.attributes.html)},copy:function(=
){return new c(this.toJSON())},get_samples:function(){var u=3Dthis.get("typ=
e"),t=3Dnull;if(u=3D=3D=3D"number"){t=3Dd3.scale.linear().domain([this.get(=
"min"),this.get("max")]).ticks(this.get("num_samples"))}else{if(u=3D=3D=3D"=
select"){t=3Dq.map(this.get("options"),function(v){return v[0]})}}return t}=
});var f=3Dg.extend({defaults:{description:null,target:null,inputs:[]},rela=
tions:[{type:Backbone.HasMany,key:"inputs",relatedModel:c,reverseRelation:{=
key:"tool",includeInJSON:false}}],urlRoot:galaxy_paths.get("tool_url"),copy=
:function(u){var v=3Dnew f(this.toJSON());if(u){var t=3Dnew Backbone.Collec=
tion();v.get("inputs").each(function(w){if(w.get_samples()){t.push(w)}});v.=
set("inputs",t)}return v},apply_search_results:function(t){(q.indexOf(t,thi=
s.attributes.id)!=3D=3D-1?this.show():this.hide());return this.is_visible()=
},set_input_value:function(t,u){this.get("inputs").find(function(v){return =
v.get("name")=3D=3D=3Dt}).set("value",u)},set_input_values:function(u){var =
t=3Dthis;q.each(q.keys(u),function(v){t.set_input_value(v,u[v])})},run:func=
tion(){return this._run()},rerun:function(u,t){return this._run({action:"re=
run",target_dataset_id:u.id,regions:t})},get_inputs_dict:function(){var t=
=3D{};this.get("inputs").each(function(u){t[u.get("name")]=3Du.get("value")=
});return t},_run:function(v){var w=3Dq.extend({tool_id:this.id,inputs:this=
.get_inputs_dict()},v);var u=3D$.Deferred(),t=3Dnew a.ServerStateDeferred({=
ajax_settings:{url:this.urlRoot,data:JSON.stringify(w),dataType:"json",cont=
entType:"application/json",type:"POST"},interval:2000,success_fn:function(x=
){return x!=3D=3D"pending"}});$.when(t.go()).then(function(x){u.resolve(new=
r.DatasetCollection().reset(x))});return u}});var j=3DBackbone.Collection.=
extend({model:f});var m=3Dg.extend({});var p=3Dg.extend({defaults:{elems:[]=
,open:false},clear_search_results:function(){q.each(this.attributes.elems,f=
unction(t){t.show()});this.show();this.set("open",false)},apply_search_resu=
lts:function(u){var v=3Dtrue,t;q.each(this.attributes.elems,function(w){if(=
w instanceof m){t=3Dw;t.hide()}else{if(w instanceof f){if(w.apply_search_re=
sults(u)){v=3Dfalse;if(t){t.show()}}}}});if(v){this.hide()}else{this.show()=
;this.set("open",true)}}});var b=3Dg.extend({defaults:{search_hint_string:"=
search tools",min_chars_for_search:3,spinner_url:"",clear_btn_url:"",search=
_url:"",visible:true,query:"",results:null,clear_key:27},initialize:functio=
n(){this.on("change:query",this.do_search)},do_search:function(){var v=3Dth=
is.attributes.query;if(v.length<this.attributes.min_chars_for_search){this.=
set("results",null);return}var u=3Dv+"*";if(this.timer){clearTimeout(this.t=
imer)}$("#search-clear-btn").hide();$("#search-spinner").show();var t=3Dthi=
s;this.timer=3DsetTimeout(function(){$.get(t.attributes.search_url,{query:u=
},function(w){t.set("results",w);$("#search-spinner").hide();$("#search-cle=
ar-btn").show()},"json")},200)},clear_search:function(){this.set("query",""=
);this.set("results",null)}});var k=3DBackbone.Collection.extend({url:"/too=
ls",tools:new j(),parse:function(t){var u=3Dfunction(x){var w=3Dx.type;if(w=
=3D=3D=3D"tool"){return new f(x)}else{if(w=3D=3D=3D"section"){var v=3Dq.map=
(x.elems,u);x.elems=3Dv;return new p(x)}else{if(w=3D=3D=3D"label"){return n=
ew m(x)}}}};return q.map(t,u)},initialize:function(t){this.tool_search=3Dt.=
tool_search;this.tool_search.on("change:results",this.apply_search_results,=
this);this.on("reset",this.populate_tools,this)},populate_tools:function(){=
var t=3Dthis;t.tools=3Dnew j();this.each(function(u){if(u instanceof p){q.e=
ach(u.attributes.elems,function(v){if(v instanceof f){t.tools.push(v)}})}el=
se{if(u instanceof f){t.tools.push(u)}}})},clear_search_results:function(){=
this.each(function(t){if(t instanceof p){t.clear_search_results()}else{t.sh=
ow()}})},apply_search_results:function(){var u=3Dthis.tool_search.attribute=
s.results;if(u=3D=3D=3Dnull){this.clear_search_results();return}var t=3Dnul=
l;this.each(function(v){if(v instanceof m){t=3Dv;t.hide()}else{if(v instanc=
eof f){if(v.apply_search_results(u)){if(t){t.show()}}}else{t=3Dnull;v.apply=
_search_results(u)}}})}});var n=3DBackbone.View.extend({initialize:function=
(){this.model.on("change:hidden",this.update_visible,this);this.update_visi=
ble()},update_visible:function(){(this.model.attributes.hidden?this.$el.hid=
e():this.$el.show())}});var i=3Dn.extend({tagName:"div",template:Handlebars=
.templates.tool_link,render:function(){this.$el.append(this.template(this.m=
odel.toJSON()));return this}});var d=3Dn.extend({tagName:"div",className:"t=
oolPanelLabel",render:function(){this.$el.append($("<span/>").text(this.mod=
el.attributes.name));return this}});var h=3Dn.extend({tagName:"div",classNa=
me:"toolSectionWrapper",template:Handlebars.templates.panel_section,initial=
ize:function(){n.prototype.initialize.call(this);this.model.on("change:open=
",this.update_open,this)},render:function(){this.$el.append(this.template(t=
his.model.toJSON()));var t=3Dthis.$el.find(".toolSectionBody");q.each(this.=
model.attributes.elems,function(u){if(u instanceof f){var v=3Dnew i({model:=
u,className:"toolTitle"});v.render();t.append(v.$el)}else{if(u instanceof m=
){var w=3Dnew d({model:u});w.render();t.append(w.$el)}else{}}});return this=
},events:{"click .toolSectionTitle > a":"toggle"},toggle:function(){this.mo=
del.set("open",!this.model.attributes.open)},update_open:function(){(this.m=
odel.attributes.open?this.$el.children(".toolSectionBody").slideDown("fast"=
):this.$el.children(".toolSectionBody").slideUp("fast"))}});var l=3DBackbon=
e.View.extend({tagName:"div",id:"tool-search",className:"bar",template:Hand=
lebars.templates.tool_search,events:{click:"focus_and_select","keyup :input=
":"query_changed","click #search-clear-btn":"clear"},render:function(){this=
.$el.append(this.template(this.model.toJSON()));if(!this.model.is_visible()=
){this.$el.hide()}this.$el.find(".tooltip").tooltip();return this},focus_an=
d_select:function(){this.$el.find(":input").focus().select()},clear:functio=
n(){this.model.clear_search();this.$el.find(":input").val(this.model.attrib=
utes.search_hint_string);this.focus_and_select();return false},query_change=
d:function(t){if((this.model.attributes.clear_key)&&(this.model.attributes.=
clear_key=3D=3D=3Dt.which)){this.clear();return false}this.model.set("query=
",this.$el.find(":input").val())}});var s=3DBackbone.View.extend({tagName:"=
div",className:"toolMenu",initialize:function(){this.collection.tool_search=
.on("change:results",this.handle_search_results,this)},render:function(){va=
r t=3Dthis;var u=3Dnew l({model:this.collection.tool_search});u.render();t.=
$el.append(u.$el);this.collection.each(function(w){if(w instanceof p){var v=
=3Dnew h({model:w});v.render();t.$el.append(v.$el)}else{if(w instanceof f){=
var x=3Dnew i({model:w,className:"toolTitleNoSection"});x.render();t.$el.ap=
pend(x.$el)}else{if(w instanceof m){var y=3Dnew d({model:w});y.render();t.$=
el.append(y.$el)}}}});t.$el.find("a.tool-link").click(function(x){var w=3D$=
(this).attr("class").split(/\s+/)[0],v=3Dt.collection.tools.get(w);t.trigge=
r("tool_link_click",x,v)});return this},handle_search_results:function(){va=
r t=3Dthis.collection.tool_search.attributes.results;if(t&&t.length=3D=3D=
=3D0){$("#search-no-results").show()}else{$("#search-no-results").hide()}}}=
);var o=3DBackbone.View.extend({className:"toolForm",template:Handlebars.te=
mplates.tool_form,render:function(){this.$el.children().remove();this.$el.a=
ppend(this.template(this.model.toJSON()))}});var e=3DBackbone.View.extend({=
className:"toolMenuAndView",initialize:function(){this.tool_panel_view=3Dne=
w s({collection:this.collection});this.tool_form_view=3Dnew o()},render:fun=
ction(){this.tool_panel_view.render();this.tool_panel_view.$el.css("float",=
"left");this.$el.append(this.tool_panel_view.$el);this.tool_form_view.$el.h=
ide();this.$el.append(this.tool_form_view.$el);var t=3Dthis;this.tool_panel=
_view.on("tool_link_click",function(v,u){v.preventDefault();t.show_tool(u)}=
)},show_tool:function(u){var t=3Dthis;u.fetch().done(function(){t.tool_form=
_view.model=3Du;t.tool_form_view.render();t.tool_form_view.$el.show();$("#l=
eft").width("650px")})}});return{Tool:f,ToolSearch:b,ToolPanel:k,ToolPanelV=
iew:s,ToolFormView:o}});
\ No newline at end of file
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c templates/webapps/tool_shed/repository/tool_form.mako
--- a/templates/webapps/tool_shed/repository/tool_form.mako
+++ b/templates/webapps/tool_shed/repository/tool_form.mako
@@ -70,6 +70,8 @@
=20
<%def name=3D"row_for_param( prefix, param, parent_state, other_va=
lues )"><%
+ # Disable refresh_on_change for select lists displayed in =
the tool shed.=20
+ param.refresh_on_change =3D False
label =3D param.get_label()
if isinstance( param, DataToolParameter ) or isinstance( p=
aram, ColumnListParameter ) or isinstance( param, GenomeBuildParameter ):
field =3D SelectField( param.name )
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c test/install_and_test_tool_shed_repositories/functional_t=
ests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -6,6 +6,7 @@
=20
import os, sys, shutil, tempfile, re, string, urllib, platform
from time import strftime
+from ConfigParser import SafeConfigParser
=20
# Assume we are run from the galaxy root directory, add lib to the python =
path
cwd =3D os.getcwd()
@@ -73,6 +74,40 @@
default_galaxy_test_port_max =3D 10999
default_galaxy_test_host =3D '127.0.0.1'
=20
+# should this serve static resources (scripts, images, styles, etc.)
+STATIC_ENABLED =3D True
+
+def get_static_settings():
+ """Returns dictionary of the settings necessary for a galaxy App
+ to be wrapped in the static middleware.
+
+ This mainly consists of the filesystem locations of url-mapped
+ static resources.
+ """
+ cwd =3D os.getcwd()
+ static_dir =3D os.path.join( cwd, 'static' )
+ #TODO: these should be copied from universe_wsgi.ini
+ return dict(
+ #TODO: static_enabled needed here?
+ static_enabled =3D True,
+ static_cache_time =3D 360,
+ static_dir =3D static_dir,
+ static_images_dir =3D os.path.join( static_dir, 'images', '' ),
+ static_favicon_dir =3D os.path.join( static_dir, 'favicon.ico' ),
+ static_scripts_dir =3D os.path.join( static_dir, 'scripts', '' ),
+ static_style_dir =3D os.path.join( static_dir, 'june_2007_style=
', 'blue' ),
+ static_robots_txt =3D os.path.join( static_dir, 'robots.txt' ),
+ )
+
+def get_webapp_global_conf():
+ """Get the global_conf dictionary sent as the first argument to app_fa=
ctory.
+ """
+ # (was originally sent 'dict()') - nothing here for now except static =
settings
+ global_conf =3D dict()
+ if STATIC_ENABLED:
+ global_conf.update( get_static_settings() )
+ return global_conf
+
# Optionally, set the environment variable GALAXY_INSTALL_TEST_TOOL_SHEDS_=
CONF
# to the location of a tool sheds configuration file that includes the too=
l shed
# that repositories will be installed from.
@@ -219,6 +254,36 @@
success =3D result.wasSuccessful()
return success
=20
+def generate_config_file( input_filename, output_filename, config_items ):
+ '''
+ Generate a config file with the configuration that has been defined fo=
r the embedded web application.
+ This is mostly relevant when setting metadata externally, since the sc=
ript for doing that does not
+ have access to app.config.
+ '''=20
+ cp =3D SafeConfigParser()
+ cp.read( input_filename )
+ config_items_by_section =3D []
+ for label, value in config_items:
+ found =3D False
+ # Attempt to determine the correct section for this configuration =
option.
+ for section in cp.sections():
+ if cp.has_option( section, label ):
+ config_tuple =3D section, label, value
+ config_items_by_section.append( config_tuple )
+ found =3D True
+ continue
+ # Default to app:main if no section was found.
+ if not found:
+ config_tuple =3D 'app:main', label, value
+ config_items_by_section.append( config_tuple )
+ # Replace the default values with the provided configuration.
+ for section, label, value in config_items_by_section:
+ cp.remove_option( section, label )
+ cp.set( section, label, str( value ) )
+ fh =3D open( output_filename, 'w' )
+ cp.write( fh )
+ fh.close()
+
def get_api_url( base, parts=3D[], params=3DNone, key=3DNone ):
if 'api' in parts and parts.index( 'api' ) !=3D 0:
parts.pop( parts.index( 'api' ) )
@@ -554,48 +619,64 @@
# Generate the migrated_tool_conf.xml file.
migrated_tool_conf_xml =3D tool_conf_template_parser.safe_substitute( =
shed_tool_path=3Dgalaxy_migrated_tool_path )
file( galaxy_migrated_tool_conf_file, 'w' ).write( migrated_tool_conf_=
xml )
-
+ # Write the embedded web application's specific configuration to a tem=
porary file. This is necessary in order for
+ # the external metadata script to find the right datasets.
+ kwargs =3D dict( admin_users =3D 'test(a)bx.psu.edu',
+ allow_user_creation =3D True,
+ allow_user_deletion =3D True,
+ allow_library_path_paste =3D True,
+ database_connection =3D database_connection,
+ datatype_converters_config_file =3D "datatype_converter=
s_conf.xml.sample",
+ file_path =3D galaxy_file_path,
+ id_secret =3D galaxy_encode_secret,
+ job_queue_workers =3D 5,
+ log_destination =3D "stdout",
+ migrated_tools_config =3D galaxy_migrated_tool_conf_fil=
e,
+ new_file_path =3D galaxy_tempfiles,
+ running_functional_tests =3D True,
+ shed_tool_data_table_config =3D shed_tool_data_table_co=
nf_file,
+ shed_tool_path =3D galaxy_shed_tool_path,
+ template_path =3D "templates",
+ tool_config_file =3D ','.join( [ galaxy_tool_conf_file,=
galaxy_shed_tool_conf_file ] ),
+ tool_data_path =3D tool_data_path,
+ tool_data_table_config_path =3D galaxy_tool_data_table_=
conf_file,
+ tool_dependency_dir =3D tool_dependency_dir,
+ tool_path =3D tool_path,
+ tool_parse_help =3D False,
+ tool_sheds_config_file =3D galaxy_tool_sheds_conf_file,
+ update_integrated_tool_panel =3D False,
+ use_heartbeat =3D False )
+ galaxy_config_file =3D os.environ.get( 'GALAXY_INSTALL_TEST_INI_FILE',=
None )
+ # If the user has passed in a path for the .ini file, do not overwrite=
it.
+ if not galaxy_config_file:
+ galaxy_config_file =3D os.path.join( galaxy_test_tmp_dir, 'install=
_test_tool_shed_repositories_wsgi.ini' )
+ config_items =3D []
+ for label in kwargs:
+ config_tuple =3D label, kwargs[ label ]
+ config_items.append( config_tuple )
+ # Write a temporary file, based on universe_wsgi.ini.sample, using=
the configuration options defined above.
+ generate_config_file( 'universe_wsgi.ini.sample', galaxy_config_fi=
le, config_items )
+ kwargs[ 'tool_config_file' ] =3D [ galaxy_tool_conf_file, galaxy_shed_=
tool_conf_file ]
+ # Set the global_conf[ '__file__' ] option to the location of the temp=
orary .ini file, which gets passed to set_metadata.sh.
+ kwargs[ 'global_conf' ] =3D get_webapp_global_conf()
+ kwargs[ 'global_conf' ][ '__file__' ] =3D galaxy_config_file
# ---- Build Galaxy Application --------------------------------------=
------------=20
- global_conf =3D { '__file__' : 'universe_wsgi.ini.sample' }
if not database_connection.startswith( 'sqlite://' ):
kwargs[ 'database_engine_option_max_overflow' ] =3D '20'
kwargs[ 'database_engine_option_pool_size' ] =3D '10'
- app =3D UniverseApplication( admin_users =3D 'test(a)bx.psu.edu',
- allow_user_creation =3D True,
- allow_user_deletion =3D True,
- allow_library_path_paste =3D True,
- database_connection =3D database_connection,
- datatype_converters_config_file =3D "dataty=
pe_converters_conf.xml.sample",
- file_path =3D galaxy_file_path,
- global_conf =3D global_conf,
- id_secret =3D galaxy_encode_secret,
- job_queue_workers =3D 5,
- log_destination =3D "stdout",
- migrated_tools_config =3D galaxy_migrated_t=
ool_conf_file,
- new_file_path =3D galaxy_tempfiles,
- running_functional_tests=3DTrue,
- shed_tool_data_table_config =3D shed_tool_d=
ata_table_conf_file,
- shed_tool_path =3D galaxy_shed_tool_path,
- template_path =3D "templates",
- tool_config_file =3D [ galaxy_tool_conf_fil=
e, galaxy_shed_tool_conf_file ],
- tool_data_path =3D tool_data_path,
- tool_data_table_config_path =3D galaxy_tool=
_data_table_conf_file,
- tool_dependency_dir =3D tool_dependency_dir,
- tool_path =3D tool_path,
- tool_parse_help =3D False,
- tool_sheds_config_file =3D galaxy_tool_shed=
s_conf_file,
- update_integrated_tool_panel =3D False,
- use_heartbeat =3D False,
- **kwargs )
+ kwargs[ 'config_file' ] =3D galaxy_config_file
+ app =3D UniverseApplication( **kwargs )
=20
log.info( "Embedded Galaxy application started" )
=20
# ---- Run galaxy webserver ------------------------------------------=
------------
server =3D None
- webapp =3D buildapp.app_factory( dict( database_file=3Ddatabase_connec=
tion ),
- use_translogger=3DFalse,
- static_enabled=3DFalse,
- app=3Dapp )
+ global_conf =3D get_webapp_global_conf()
+ global_conf[ 'database_file' ] =3D database_connection
+ webapp =3D buildapp.app_factory( global_conf,
+ use_translogger=3DFalse,
+ static_enabled=3DSTATIC_ENABLED,
+ app=3Dapp )
=20
# Serve the app on a specified or random port.
if galaxy_test_port is not None:
@@ -976,6 +1057,7 @@
repositories_passed.append( dict( name=3Dname, own=
er=3Downer, changeset_revision=3Dchangeset_revision ) )
params[ 'tools_functionally_correct' ] =3D True
params[ 'do_not_test' ] =3D False
+ params[ 'test_install_error' ] =3D False
register_test_result( galaxy_tool_shed_url,=20
metadata_revision_id,=20
repository_status,=20
@@ -1027,6 +1109,7 @@
repositories_failed.append( dict( name=3Dname, own=
er=3Downer, changeset_revision=3Dchangeset_revision ) )
set_do_not_test =3D not is_latest_downloadable_rev=
ision( galaxy_tool_shed_url, repository_info_dict )
params[ 'tools_functionally_correct' ] =3D False
+ params[ 'test_install_error' ] =3D False
params[ 'do_not_test' ] =3D str( set_do_not_test )
register_test_result( galaxy_tool_shed_url,=20
metadata_revision_id,=20
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c test/unit/datatypes/dataproviders/tempfilecache.py
--- /dev/null
+++ b/test/unit/datatypes/dataproviders/tempfilecache.py
@@ -0,0 +1,47 @@
+
+import os
+import tempfile
+
+import logging
+logging.getLogger( __name__ )
+log =3D logging
+
+class TempFileCache( object ):
+ """
+ Creates and caches tempfiles with/based-on the given contents.
+ """
+ def __init__( self, logger=3DNone ):
+ if logger:
+ global log
+ log =3D logger
+ super( TempFileCache, self ).__init__()
+ self.clear()
+
+ def clear( self ):
+ self.delete_tmpfiles()
+ self._content_dict =3D {}
+
+ def create_tmpfile( self, contents ):
+ if not hasattr( self, '_content_dict' ):
+ self.set_up_tmpfiles()
+
+ if contents not in self._content_dict:
+ # create a named tmp and write contents to it, return filename
+ tmpfile =3D tempfile.NamedTemporaryFile( delete=3DFalse )
+ tmpfile.write( contents )
+ tmpfile.close()
+ log.debug( 'created tmpfile.name: %s', tmpfile.name )
+ self._content_dict[ contents ] =3D tmpfile.name
+
+ else:
+ log.debug( '(cached): %s', self._content_dict[ contents ] )
+ return self._content_dict[ contents ]
+
+ def delete_tmpfiles( self ):
+ if not hasattr( self, '_content_dict' ) or not self._content_dict:
+ return
+ for tmpfile_contents in self._content_dict:
+ tmpfile =3D self._content_dict[ tmpfile_contents ]
+ if os.path.exists( tmpfile ):
+ log.debug( 'unlinking tmpfile: %s', tmpfile )
+ os.unlink( tmpfile )
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c test/unit/datatypes/dataproviders/test_base_dataproviders=
.py
--- /dev/null
+++ b/test/unit/datatypes/dataproviders/test_base_dataproviders.py
@@ -0,0 +1,370 @@
+"""
+Unit tests for base DataProviders.
+.. seealso:: galaxy.datatypes.dataproviders.base
+"""
+# currently because of dataproviders.dataset importing galaxy.model this d=
oesn't work
+#TODO: fix imports there after dist and retry
+
+#TODO: fix off by ones in FilteredDataProvider counters
+
+import unittest
+import StringIO
+
+import tempfilecache
+import utility
+
+log =3D utility.set_up_filelogger( __name__ + '.log' )
+
+utility.add_galaxy_lib_to_path( '/test/unit/datatypes/dataproviders' )
+from galaxy.datatypes import dataproviders
+
+
+class BaseTestCase( unittest.TestCase ):
+ default_file_contents =3D """
+ One
+ Two
+ Three
+ """
+
+ @classmethod
+ def setUpClass( cls ):
+ log.debug( 'CLASS %s %s', ( '_' * 40 ), cls.__name__ )
+
+ @classmethod
+ def tearDownClass( cls ):
+ log.debug( 'CLASS %s %s\n\n', ( '_' * 40 ), cls.__name__ )
+
+ def __init__( self, *args ):
+ unittest.TestCase.__init__( self, *args )
+ self.tmpfiles =3D tempfilecache.TempFileCache( log )
+
+ def setUp( self ):
+ log.debug( 'BEGIN %s %s', ( '.' * 40 ), self._testMethodName )
+ if self._testMethodDoc:
+ log.debug( ' """%s"""', self._testMethodDoc.strip() )
+
+ def tearDown( self ):
+ self.tmpfiles.clear()
+ log.debug( 'END\n' )
+
+ def format_tmpfile_contents( self, contents=3DNone ):
+ contents =3D contents or self.default_file_contents
+ contents =3D utility.clean_multiline_string( contents )
+ log.debug( 'file contents:\n%s', contents )
+ return contents
+
+
+class Test_BaseDataProvider( BaseTestCase ):
+ provider_class =3D dataproviders.base.DataProvider
+
+ def contents_provider_and_data( self,
+ filename=3DNone, contents=3DNone, source=3DNone, *provider_arg=
s, **provider_kwargs ):
+ # to remove boiler plate
+ # returns file content string, provider used, and data list
+ if not filename:
+ contents =3D self.format_tmpfile_contents( contents )
+ filename =3D self.tmpfiles.create_tmpfile( contents )
+ #TODO: if filename, contents =3D=3D None
+ if not source:
+ source =3D open( filename )
+ provider =3D self.provider_class( source, *provider_args, **provid=
er_kwargs )
+ log.debug( 'provider: %s', provider )
+ data =3D list( provider )
+ log.debug( 'data: %s', str( data ) )
+ return ( contents, provider, data )
+
+ def test_iterators( self ):
+ source =3D ( x for x in xrange( 1, 10 ) )
+ provider =3D self.provider_class( source )
+ data =3D list( provider )
+ log.debug( 'data: %s', str( data ) )
+ self.assertEqual( data, [ x for x in xrange( 1, 10 ) ] )
+
+ source =3D [ x for x in xrange( 1, 10 ) ]
+ provider =3D self.provider_class( source )
+ data =3D list( provider )
+ log.debug( 'data: %s', str( data ) )
+ self.assertEqual( data, [ x for x in xrange( 1, 10 ) ] )
+
+ source =3D ( x for x in xrange( 1, 10 ) )
+ provider =3D self.provider_class( source )
+ data =3D list( provider )
+ log.debug( 'data: %s', str( data ) )
+ self.assertEqual( data, [ x for x in xrange( 1, 10 ) ] )
+
+ def test_validate_source( self ):
+ """validate_source should throw an error if the source doesn't hav=
e attr '__iter__'
+ """
+ def non_iterator_dprov( source ):
+ return self.provider_class( source )
+ self.assertRaises( dataproviders.exceptions.InvalidDataProviderSou=
rce,
+ non_iterator_dprov, 'one two three' )
+ self.assertRaises( dataproviders.exceptions.InvalidDataProviderSou=
rce,
+ non_iterator_dprov, 40 )
+
+ def test_writemethods( self ):
+ """should throw an error if any write methods are called
+ """
+ source =3D ( x for x in xrange( 1, 10 ) )
+ provider =3D self.provider_class( source )
+ # should throw error
+ def call_method( provider, method_name, *args ):
+ method =3D getattr( provider, method_name )
+ return method( *args )
+ self.assertRaises( NotImplementedError, call_method, provider, 'tr=
uncate', 20 )
+ self.assertRaises( NotImplementedError, call_method, provider, 'wr=
ite', 'bler' )
+ self.assertRaises( NotImplementedError, call_method, provider, 'wr=
itelines', [ 'one', 'two' ] )
+
+ def test_readlines( self ):
+ """readlines should return all the data in list form
+ """
+ source =3D ( x for x in xrange( 1, 10 ) )
+ provider =3D self.provider_class( source )
+ data =3D provider.readlines()
+ log.debug( 'data: %s', str( data ) )
+ self.assertEqual( data, [ x for x in xrange( 1, 10 ) ] )
+
+ def test_stringio( self ):
+ """should work with StringIO
+ """
+ contents =3D utility.clean_multiline_string( """
+ One
+ Two
+ Three
+ """ )
+ source =3D StringIO.StringIO( contents )
+ provider =3D self.provider_class( source )
+ data =3D list( provider )
+ log.debug( 'data: %s', str( data ) )
+ # provider should call close on file
+ self.assertEqual( ''.join( data ), contents )
+ self.assertTrue( source.closed )
+
+ def test_file( self ):
+ """should work with files
+ """
+ ( contents, provider, data ) =3D self.contents_provider_and_data()
+ self.assertEqual( ''.join( data ), contents )
+ # provider should call close on file
+ self.assertTrue( isinstance( provider.source, file ) )
+ self.assertTrue( provider.source.closed )
+
+
+class Test_FilteredDataProvider( Test_BaseDataProvider ):
+ provider_class =3D dataproviders.base.FilteredDataProvider
+
+ def assertCounters( self, provider, read, valid, returned ):
+ self.assertEqual( provider.num_data_read, read )
+ self.assertEqual( provider.num_valid_data_read, valid )
+ self.assertEqual( provider.num_data_returned, returned )
+
+ def test_counters( self ):
+ """should count: lines read, lines that passed the filter, lines r=
eturned
+ """
+ ( contents, provider, data ) =3D self.contents_provider_and_data()
+ self.assertCounters( provider, 3, 3, 3 )
+
+ def test_filter_fn( self ):
+ """should filter out lines using filter_fn and set counters proper=
ly
+ based on filter
+ """
+ def filter_ts( string ):
+ if string.lower().startswith( 't' ):
+ return None
+ return string
+ ( contents, provider, data ) =3D self.contents_provider_and_data( =
filter_fn=3Dfilter_ts )
+ self.assertCounters( provider, 3, 1, 1 )
+
+
+class Test_LimitedOffsetDataProvider( Test_FilteredDataProvider ):
+ provider_class =3D dataproviders.base.LimitedOffsetDataProvider
+
+ def test_offset_1( self ):
+ """when offset is 1, should skip first
+ """
+ ( contents, provider, data ) =3D self.contents_provider_and_data( =
offset=3D1 )
+ self.assertEqual( data, [ 'Two\n', 'Three\n' ] )
+ self.assertCounters( provider, 3, 3, 2 )
+
+ def test_offset_all( self ):
+ """when offset >=3D num lines, should return empty list
+ """
+ ( contents, provider, data ) =3D self.contents_provider_and_data( =
offset=3D4 )
+ self.assertEqual( data, [] )
+ self.assertCounters( provider, 3, 3, 0 )
+
+ def test_offset_none( self ):
+ """when offset is 0, should return all
+ """
+ ( contents, provider, data ) =3D self.contents_provider_and_data( =
offset=3D0 )
+ self.assertEqual( ''.join( data ), contents )
+ self.assertCounters( provider, 3, 3, 3 )
+
+ def test_offset_negative( self ):
+ """when offset is negative, should return all
+ """
+ ( contents, provider, data ) =3D self.contents_provider_and_data( =
offset=3D-1 )
+ self.assertEqual( ''.join( data ), contents )
+ self.assertCounters( provider, 3, 3, 3 )
+
+ def test_limit_1( self ):
+ """when limit is one, should return first
+ """
+ ( contents, provider, data ) =3D self.contents_provider_and_data( =
limit=3D1 )
+ self.assertEqual( data, [ 'One\n' ] )
+ #TODO: currently reads 2 in all counters before ending
+ #self.assertCounters( provider, 1, 1, 1 )
+
+ def test_limit_all( self ):
+ """when limit >=3D num lines, should return all
+ """
+ ( contents, provider, data ) =3D self.contents_provider_and_data( =
limit=3D4 )
+ self.assertEqual( ''.join( data ), contents )
+ self.assertCounters( provider, 3, 3, 3 )
+
+ def test_limit_zero( self ):
+ """when limit >=3D num lines, should return empty list
+ """
+ ( contents, provider, data ) =3D self.contents_provider_and_data( =
limit=3D0 )
+ self.assertEqual( data, [] )
+ #TODO: currently reads 1 before ending
+ self.assertCounters( provider, 3, 0, 0 )
+
+ def test_limit_zero( self ):
+ """when limit is None, should return all
+ """
+ ( contents, provider, data ) =3D self.contents_provider_and_data( =
limit=3DNone )
+ self.assertEqual( ''.join( data ), contents )
+ self.assertCounters( provider, 3, 3, 3 )
+
+ #TODO: somehow re-use tmpfile here
+ def test_limit_with_offset( self ):
+ def limit_offset_combo( limit, offset, data_should_be, read, valid=
, returned ):
+ ( contents, provider, data ) =3D self.contents_provider_and_da=
ta( limit=3Dlimit, offset=3Doffset )
+ self.assertEqual( data, data_should_be )
+ #self.assertCounters( provider, read, valid, returned )
+ test_data =3D [
+ ( 0, 0, [], 0, 0, 0 ),
+ ( 1, 0, [ 'One\n' ], 1, 1, 1 ),
+ ( 2, 0, [ 'One\n', 'Two\n' ], 2, 2, 2 ),
+ ( 3, 0, [ 'One\n', 'Two\n', 'Three\n' ], 3, 3, 3 ),
+ ( 1, 1, [ 'Two\n' ], 1, 1, 1 ),
+ ( 2, 1, [ 'Two\n', 'Three\n' ], 2, 2, 2 ),
+ ( 3, 1, [ 'Two\n', 'Three\n' ], 2, 2, 2 ),
+ ( 1, 2, [ 'Three\n' ], 1, 1, 1 ),
+ ( 2, 2, [ 'Three\n' ], 1, 1, 1 ),
+ ( 3, 2, [ 'Three\n' ], 1, 1, 1 ),
+ ]
+ for test in test_data:
+ log.debug( 'limit_offset_combo: %s', ', '.join([ str( e ) for =
e in test ]) )
+ limit_offset_combo( *test )
+
+ def test_limit_with_offset_and_filter( self ):
+ def limit_offset_combo( limit, offset, data_should_be, read, valid=
, returned ):
+ def only_ts( string ):
+ if not string.lower().startswith( 't' ):
+ return None
+ return string
+ ( contents, provider, data ) =3D self.contents_provider_and_da=
ta(
+ limit=3Dlimit, offset=3Doffset, filter_fn=3Donly_ts )
+ self.assertEqual( data, data_should_be )
+ #self.assertCounters( provider, read, valid, returned )
+ test_data =3D [
+ ( 0, 0, [], 0, 0, 0 ),
+ ( 1, 0, [ 'Two\n' ], 1, 1, 1 ),
+ ( 2, 0, [ 'Two\n', 'Three\n' ], 2, 2, 2 ),
+ ( 3, 0, [ 'Two\n', 'Three\n' ], 2, 2, 2 ),
+ ( 1, 1, [ 'Three\n' ], 1, 1, 1 ),
+ ( 2, 1, [ 'Three\n' ], 1, 1, 1 ),
+ ( 1, 2, [], 0, 0, 0 ),
+ ]
+ for test in test_data:
+ log.debug( 'limit_offset_combo: %s', ', '.join([ str( e ) for =
e in test ]) )
+ limit_offset_combo( *test )
+
+
+class Test_MultiSourceDataProvider( BaseTestCase ):
+ provider_class =3D dataproviders.base.MultiSourceDataProvider
+
+ def contents_and_tmpfile( self, contents=3DNone ):
+ #TODO: hmmmm...
+ contents =3D contents or self.default_file_contents
+ contents =3D utility.clean_multiline_string( contents )
+ return ( contents, self.tmpfiles.create_tmpfile( contents ) )
+
+ def test_multiple_sources( self ):
+ # clean the following contents, write them to tmpfiles, open them,
+ # and pass as a list to the provider
+ contents =3D [
+ """
+ One
+ Two
+ Three
+ Four
+ Five
+ """,
+ """
+ Six
+ Seven
+ Eight
+ Nine
+ Ten
+ """,
+ """
+ Eleven
+ Twelve! (<-- http://youtu.be/JZshZp-cxKg)
+ """
+ ]
+ contents =3D [ utility.clean_multiline_string( c ) for c in conten=
ts ]
+ source_list =3D [ open( self.tmpfiles.create_tmpfile( c ) ) for c =
in contents ]
+
+ provider =3D self.provider_class( source_list )
+ log.debug( 'provider: %s', provider )
+ data =3D list( provider )
+ log.debug( 'data: %s', str( data ) )
+ self.assertEqual( ''.join( data ), ''.join( contents) )
+
+ def test_multiple_compound_sources( self ):
+ # clean the following contents, write them to tmpfiles, open them,
+ # and pass as a list to the provider
+ contents =3D [
+ """
+ One
+ Two
+ Three
+ Four
+ Five
+ """,
+ """
+ Six
+ Seven
+ Eight
+ Nine
+ Ten
+ """,
+ """
+ Eleven
+ Twelve! (<-- http://youtu.be/JZshZp-cxKg)
+ """
+ ]
+ contents =3D [ utility.clean_multiline_string( c ) for c in conten=
ts ]
+ source_list =3D [ open( self.tmpfiles.create_tmpfile( c ) ) for c =
in contents ]
+
+ def no_Fs( string ):
+ return None if string.startswith( 'F' ) else string
+ def no_youtube( string ):
+ return None if ( 'youtu.be' in string ) else string
+ source_list =3D [
+ dataproviders.base.LimitedOffsetDataProvider( source_list[0], =
filter_fn=3Dno_Fs, limit=3D2, offset=3D1 ),
+ dataproviders.base.LimitedOffsetDataProvider( source_list[1], =
limit=3D1, offset=3D3 ),
+ dataproviders.base.FilteredDataProvider( source_list[2], filte=
r_fn=3Dno_youtube ),
+ ]
+ provider =3D self.provider_class( source_list )
+ log.debug( 'provider: %s', provider )
+ data =3D list( provider )
+ log.debug( 'data: %s', str( data ) )
+ self.assertEqual( ''.join( data ), 'Two\nThree\nNine\nEleven\n' )
+
+
+if __name__ =3D=3D '__main__':
+ unittest.main()
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c test/unit/datatypes/dataproviders/utility.py
--- /dev/null
+++ b/test/unit/datatypes/dataproviders/utility.py
@@ -0,0 +1,45 @@
+"""
+Unit test utilities.
+"""
+
+import os
+import sys
+import logging
+import textwrap
+
+def set_up_filelogger( logname, level=3Dlogging.DEBUG ):
+ """
+ Sets up logging to a file named `logname`
+ (removing it first if it already exists).
+
+ Usable with 'nosetests' to get logging msgs from failed tests
+ (no logfile created).
+ Usable with 'nosetests --nologcapture' to get logging msgs for all tes=
ts
+ (in logfile).
+ """
+ if os.path.exists( logname ): os.unlink( logname )
+ logging.basicConfig( filename=3Dlogname, level=3Dlogging.DEBUG )
+ return logging
+
+def add_galaxy_lib_to_path( this_dir_relative_to_root ):
+ """
+ Adds `<galaxy>/lib` to `sys.path` given the scripts directory relative
+ to `<galaxy>`.
+ .. example::
+ utility.add_galaxy_lib_to_path( '/test/unit/datatypes/dataprovider=
s' )
+ """
+ glx_lib =3D os.path.join( os.getcwd().replace( this_dir_relative_to_ro=
ot, '' ), 'lib' )
+ sys.path.append( glx_lib )
+
+def clean_multiline_string( multiline_string, sep=3D'\n' ):
+ """
+ Dedent, split, remove first and last empty lines, rejoin.
+ """
+ multiline_string =3D textwrap.dedent( multiline_string )
+ string_list =3D multiline_string.split( sep )
+ if not string_list[0]:
+ string_list =3D string_list[1:]
+ if not string_list[-1]:
+ string_list =3D string_list[:-1]
+ #return '\n'.join( docstrings )
+ return ''.join([ ( s + '\n' ) for s in string_list ])
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c test/unit/test_dataproviders.pyc
Binary file test/unit/test_dataproviders.pyc has changed
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c test/unit/test_tool_loader.pyc
Binary file test/unit/test_tool_loader.pyc has changed
diff -r a360e1b7b506450385be74b2c6b7762d3e794bbd -r 79ae7df72fba2e141791bdb=
8fdf2bb372fa3787c tools/ngs_rna/cuffdiff_wrapper.py
--- a/tools/ngs_rna/cuffdiff_wrapper.py
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/env python
-
-# Wrapper supports Cuffdiff versions v1.3.0-v2.0
-
-import optparse, os, shutil, subprocess, sys, tempfile
-
-def group_callback( option, op_str, value, parser ):
- groups =3D []
- flist =3D []
- for arg in parser.rargs:
- arg =3D arg.strip()
- if arg[0] is "-":
- break
- elif arg[0] is ",":
- groups.append(flist)
- flist =3D []
- else:
- flist.append(arg)
- groups.append(flist)
-
- setattr(parser.values, option.dest, groups)
- =20
-def label_callback( option, op_str, value, parser ):
- labels =3D []
- for arg in parser.rargs:
- arg =3D arg.strip()
- if arg[0] is "-":
- break
- else:
- labels.append(arg)
-
- setattr(parser.values, option.dest, labels)
-
-def stop_err( msg ):
- sys.stderr.write( "%s\n" % msg )
- sys.exit()
- =20
-# Copied from sam_to_bam.py:
-def check_seq_file( dbkey, cached_seqs_pointer_file ):
- seq_path =3D ''
- for line in open( cached_seqs_pointer_file ):
- line =3D line.rstrip( '\r\n' )
- if line and not line.startswith( '#' ) and line.startswith( 'index=
' ):
- fields =3D line.split( '\t' )
- if len( fields ) < 3:
- continue
- if fields[1] =3D=3D dbkey:
- seq_path =3D fields[2].strip()
- break
- return seq_path
-
-def __main__():
- #Parse Command Line
- parser =3D optparse.OptionParser()
- =20
- # Cuffdiff options.
- parser.add_option( '-s', '--inner-dist-std-dev', dest=3D'inner_dist_st=
d_dev', help=3D'The standard deviation for the distribution on inner distan=
ces between mate pairs. The default is 20bp.' )
- parser.add_option( '-p', '--num-threads', dest=3D'num_threads', help=
=3D'Use this many threads to align reads. The default is 1.' )
- parser.add_option( '-m', '--inner-mean-dist', dest=3D'inner_mean_dist'=
, help=3D'This is the expected (mean) inner distance between mate pairs. \
- =
For, example, for paired end runs with fragments selected at 300bp, \
- =
where each end is 50bp, you should set -r to be 200. The default is 4=
5bp.')
- parser.add_option( '-c', '--min-alignment-count', dest=3D'min_alignmen=
t_count', help=3D'The minimum number of alignments in a locus for needed to=
conduct significance testing on changes in that locus observed between sam=
ples. If no testing is performed, changes in the locus are deemed not signf=
icant, and the locus\' observed changes don\'t contribute to correction for=
multiple testing. The default is 1,000 fragment alignments (up to 2,000 pa=
ired reads).' )
- parser.add_option( '--FDR', dest=3D'FDR', help=3D'The allowed false di=
scovery rate. The default is 0.05.' )
- parser.add_option( '-u', '--multi-read-correct', dest=3D'multi_read_co=
rrect', action=3D"store_true", help=3D'Tells Cufflinks to do an initial est=
imation procedure to more accurately weight reads mapping to multiple locat=
ions in the genome')
- parser.add_option( '--library-norm-method', dest=3D'library_norm_metho=
d' )
- parser.add_option( '--dispersion-method', dest=3D'dispersion_method' )
-
- # Advanced Options:=09
- parser.add_option( '--num-importance-samples', dest=3D'num_importance_=
samples', help=3D'Sets the number of importance samples generated for each =
locus during abundance estimation. Default: 1000' )
- parser.add_option( '--max-mle-iterations', dest=3D'max_mle_iterations'=
, help=3D'Sets the number of iterations allowed during maximum likelihood e=
stimation of abundances. Default: 5000' )
- =20
- # Wrapper / Galaxy options.
- parser.add_option( '-f', '--files', dest=3D'groups', action=3D"callbac=
k", callback=3Dgroup_callback, help=3D"Groups to be processed, groups are s=
eparated by spaces, replicates in a group comma separated. group1_rep1,grou=
p1_rep2 group2_rep1,group2_rep2, ..., groupN_rep1, groupN_rep2" )
- parser.add_option( '-A', '--inputA', dest=3D'inputA', help=3D'A transc=
ript GTF file produced by cufflinks, cuffcompare, or other source.')
- parser.add_option( '-1', '--input1', dest=3D'input1', help=3D'File of =
RNA-Seq read alignments in the SAM format. SAM is a standard short read ali=
gnment, that allows aligners to attach custom tags to individual alignments=
, and Cufflinks requires that the alignments you supply have some of these =
tags. Please see Input formats for more details.' )
- parser.add_option( '-2', '--input2', dest=3D'input2', help=3D'File of =
RNA-Seq read alignments in the SAM format. SAM is a standard short read ali=
gnment, that allows aligners to attach custom tags to individual alignments=
, and Cufflinks requires that the alignments you supply have some of these =
tags. Please see Input formats for more details.' )
-
- # Label options
- parser.add_option('-L', '--labels', dest=3D'labels', action=3D"callbac=
k", callback=3Dlabel_callback, help=3D"Labels for the groups the replicates=
are in.")
- =20
- # Normalization options.
- parser.add_option( "-N", "--quartile-normalization", dest=3D"do_normal=
ization", action=3D"store_true" )
-
- # Bias correction options.
- parser.add_option( '-b', dest=3D'do_bias_correction', action=3D"store_=
true", help=3D'Providing Cufflinks with a multifasta file via this option i=
nstructs it to run our new bias detection and correction algorithm which ca=
n significantly improve accuracy of transcript abundance estimates.')
- parser.add_option( '', '--dbkey', dest=3D'dbkey', help=3D'The build of=
the reference dataset' )
- parser.add_option( '', '--index_dir', dest=3D'index_dir', help=3D'GALA=
XY_DATA_INDEX_DIR' )
- parser.add_option( '', '--ref_file', dest=3D'ref_file', help=3D'The re=
ference dataset from the history' )
-
- # Outputs.
- parser.add_option( "--isoforms_fpkm_tracking_output", dest=3D"isoforms=
_fpkm_tracking_output" )
- parser.add_option( "--genes_fpkm_tracking_output", dest=3D"genes_fpkm_=
tracking_output" )
- parser.add_option( "--cds_fpkm_tracking_output", dest=3D"cds_fpkm_trac=
king_output" )
- parser.add_option( "--tss_groups_fpkm_tracking_output", dest=3D"tss_gr=
oups_fpkm_tracking_output" )
- parser.add_option( "--isoforms_exp_output", dest=3D"isoforms_exp_outpu=
t" )
- parser.add_option( "--genes_exp_output", dest=3D"genes_exp_output" )
- parser.add_option( "--tss_groups_exp_output", dest=3D"tss_groups_exp_o=
utput" )
- parser.add_option( "--cds_exp_fpkm_tracking_output", dest=3D"cds_exp_f=
pkm_tracking_output" )
- parser.add_option( "--splicing_diff_output", dest=3D"splicing_diff_out=
put" )
- parser.add_option( "--cds_diff_output", dest=3D"cds_diff_output" )
- parser.add_option( "--promoters_diff_output", dest=3D"promoters_diff_o=
utput" )
- =20
- (options, args) =3D parser.parse_args()
- =20
- # output version # of tool
- try:
- tmp =3D tempfile.NamedTemporaryFile().name
- tmp_stdout =3D open( tmp, 'wb' )
- proc =3D subprocess.Popen( args=3D'cuffdiff --no-update-check 2>&1=
', shell=3DTrue, stdout=3Dtmp_stdout )
- tmp_stdout.close()
- returncode =3D proc.wait()
- stdout =3D None
- for line in open( tmp_stdout.name, 'rb' ):
- if line.lower().find( 'cuffdiff v' ) >=3D 0:
- stdout =3D line.strip()
- break
- if stdout:
- sys.stdout.write( '%s\n' % stdout )
- else:
- raise Exception
- except:
- sys.stdout.write( 'Could not determine Cuffdiff version\n' )
- =20
- # If doing bias correction, set/link to sequence file.
- if options.do_bias_correction:
- if options.ref_file !=3D 'None':
- # Sequence data from history.
- # Create symbolic link to ref_file so that index will be creat=
ed in working directory.
- seq_path =3D "ref.fa"
- os.symlink( options.ref_file, seq_path )
- else:
- # Sequence data from loc file.
- cached_seqs_pointer_file =3D os.path.join( options.index_dir, =
'sam_fa_indices.loc' )
- if not os.path.exists( cached_seqs_pointer_file ):
- stop_err( 'The required file (%s) does not exist.' % cache=
d_seqs_pointer_file )
- # If found for the dbkey, seq_path will look something like /g=
alaxy/data/equCab2/sam_index/equCab2.fa,
- # and the equCab2.fa file will contain fasta sequences.
- seq_path =3D check_seq_file( options.dbkey, cached_seqs_pointe=
r_file )
- if seq_path =3D=3D '':
- stop_err( 'No sequence data found for dbkey %s, so bias co=
rrection cannot be used.' % options.dbkey ) =20
- =20
- # Build command.
- =20
- # Base; always use quiet mode to avoid problems with storing log outpu=
t.
- cmd =3D "cuffdiff --no-update-check -q"
- =20
- # Add options.
- if options.library_norm_method:
- cmd +=3D ( " --library-norm-method %s" % options.library_norm_meth=
od )
- if options.dispersion_method:
- cmd +=3D ( " --dispersion-method %s" % options.dispersion_method )
- if options.inner_dist_std_dev:
- cmd +=3D ( " -s %i" % int ( options.inner_dist_std_dev ) )
- if options.num_threads:
- cmd +=3D ( " -p %i" % int ( options.num_threads ) )
- if options.inner_mean_dist:
- cmd +=3D ( " -m %i" % int ( options.inner_mean_dist ) )
- if options.min_alignment_count:
- cmd +=3D ( " -c %i" % int ( options.min_alignment_count ) )
- if options.FDR:
- cmd +=3D ( " --FDR %f" % float( options.FDR ) )
- if options.multi_read_correct:
- cmd +=3D ( " -u" )
- if options.num_importance_samples:
- cmd +=3D ( " --num-importance-samples %i" % int ( options.num_impo=
rtance_samples ) )
- if options.max_mle_iterations:
- cmd +=3D ( " --max-mle-iterations %i" % int ( options.max_mle_iter=
ations ) )
- if options.do_normalization:
- cmd +=3D ( " -N" )
- if options.do_bias_correction:
- cmd +=3D ( " -b %s" % seq_path )
- =20
- # Add inputs.
- # For replicate analysis: group1_rep1,group1_rep2 groupN_rep1,groupN_r=
ep2
- if options.groups:
- cmd +=3D " --labels "
- for label in options.labels:
- cmd +=3D '"%s",' % label
- cmd =3D cmd[:-1]
-
- cmd +=3D " " + options.inputA + " "
-
- for group in options.groups:
- for filename in group:
- cmd +=3D filename + ","
- cmd =3D cmd[:-1] + " "
- else:=20
- cmd +=3D " " + options.inputA + " " + options.input1 + " " + optio=
ns.input2
- =20
- # Debugging.
- print cmd
-
- # Run command.
- try:
- tmp_name =3D tempfile.NamedTemporaryFile().name
- tmp_stderr =3D open( tmp_name, 'wb' )
- proc =3D subprocess.Popen( args=3Dcmd, shell=3DTrue, stderr=3Dtmp_=
stderr.fileno() )
- returncode =3D proc.wait()
- tmp_stderr.close()
- =20
- # Get stderr, allowing for case where it's very large.
- tmp_stderr =3D open( tmp_name, 'rb' )
- stderr =3D ''
- buffsize =3D 1048576
- try:
- while True:
- stderr +=3D tmp_stderr.read( buffsize )
- if not stderr or len( stderr ) % buffsize !=3D 0:
- break
- except OverflowError:
- pass
- tmp_stderr.close()
- =20
- # Error checking.
- if returncode !=3D 0:
- raise Exception, stderr
- =20
- # check that there are results in the output file
- if len( open( "isoforms.fpkm_tracking", 'rb' ).read().strip() ) =
=3D=3D 0:
- raise Exception, 'The main output file is empty, there may be =
an error with your input file or settings.'
- except Exception, e:
- stop_err( 'Error running cuffdiff. ' + str( e ) )
-
- =20
- # Copy output files to specified files.
- try:
- shutil.copyfile( "isoforms.fpkm_tracking", options.isoforms_fpkm_t=
racking_output )
- shutil.copyfile( "genes.fpkm_tracking", options.genes_fpkm_trackin=
g_output )
- shutil.copyfile( "cds.fpkm_tracking", options.cds_fpkm_tracking_ou=
tput )
- shutil.copyfile( "tss_groups.fpkm_tracking", options.tss_groups_fp=
km_tracking_output )
- shutil.copyfile( "isoform_exp.diff", options.isoforms_exp_output )
- shutil.copyfile( "gene_exp.diff", options.genes_exp_output )
- shutil.copyfile( "tss_group_exp.diff", options.tss_groups_exp_outp=
ut )
- shutil.copyfile( "splicing.diff", options.splicing_diff_output )
- shutil.copyfile( "cds.diff", options.cds_diff_output )
- shutil.copyfile( "cds_exp.diff", options.cds_exp_fpkm_tracking_out=
put )
- shutil.copyfile( "promoters.diff", options.promoters_diff_output )=
=20
- except Exception, e:
- stop_err( 'Error in cuffdiff:\n' + str( e ) )
-
-if __name__=3D=3D"__main__": __main__()
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/galaxy/galaxy-central/commits/40c9834811eb/
Changeset: 40c9834811eb
User: saketkc
Date: 2013-08-06 09:31:21
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r cf53a00bcf1d279072ec279e6531f211992b4f3e -r 40c9834811ebfbefb423155=
3c922ff4c44905c63 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/d33f8c5396a8/
Changeset: d33f8c5396a8
User: saketkc
Date: 2013-08-07 15:21:51
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r f33c054d6d5b75ae545248d71ec559d74b4fa636 -r d33f8c5396a8825b450432a=
8c443e1592a360885 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/a968c0cae563/
Changeset: a968c0cae563
User: saketkc
Date: 2013-08-28 19:15:32
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 22a4f03c1fe40dedcb7e6f11510d58ada8e9c458 -r a968c0cae5639f3be5ffdd9=
31df909c215f43a43 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/4c1eb0c91fc6/
Changeset: 4c1eb0c91fc6
User: saketkc
Date: 2013-08-31 18:30:53
Summary: Automated merge with ssh://bitbucket.org/galaxy/galaxy-central
Affected #: 1 file
diff -r 0470feeb593f9797fa9fd19dfdac4751a7ca788b -r 4c1eb0c91fc6e3085948ef1=
6eb88d18ea626e7d3 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
https://bitbucket.org/galaxy/galaxy-central/commits/dcd9809d0239/
Changeset: dcd9809d0239
User: jgoecks
Date: 2013-09-11 18:16:14
Summary: Merged in saketkc/galaxy-central (pull request #184)
VCFTools Incompatible with VCF4.1
Affected #: 1 file
diff -r 228f1b4066be9fc49f6f07ff6867566836134c67 -r dcd9809d0239c7dc80cec3b=
2a01d7bded4a8c088 tools/vcf_tools/vcfClass.py
--- a/tools/vcf_tools/vcfClass.py
+++ b/tools/vcf_tools/vcfClass.py
@@ -12,12 +12,13 @@
self.hasHeader =3D True
self.headerText =3D ""
self.headerTitles =3D ""
+ self.vcfFormat =3D ""
#self.headerInfoText =3D ""
#self.headerFormatText =3D ""
=20
# Store the info and format tags as well as the lines that describe
# them in a dictionary.
- self.numberDataSets =3D 0=20
+ self.numberDataSets =3D 0
self.includedDataSets =3D {}
self.infoHeaderTags =3D {}
self.infoHeaderString =3D {}
@@ -63,6 +64,7 @@
# Determine the type of information in the header line.
def getHeaderLine(self, filename, writeOut):
self.headerLine =3D self.filehandle.readline().rstrip("\n")
+ if self.headerLine.startswith("##fileformat"): success =3D self.getvcf=
Format()
if self.headerLine.startswith("##INFO"): success =3D self.headerInfo(w=
riteOut, "info")
elif self.headerLine.startswith("##FORMAT"): success =3D self.headerIn=
fo(writeOut, "format")
elif self.headerLine.startswith("##FILE"): success =3D self.headerFile=
s(writeOut)
@@ -72,6 +74,18 @@
=20
return success
=20
+# Read VCF format
+ def getvcfFormat(self):
+ try:
+ self.vcfFormat =3D self.headerLine.split("=3D",1)[1]
+ self.vcfFormat =3D float( self.vcfFormat.split("VCFv",1)[1] )## =
Extract the version number rather than the whole string
+ except IndexError:
+ print >> sys.stderr, "\nError parsing the fileformat"
+ print >> sys.stderr, "The following fileformat header is wrongly=
formatted: ", self.headerLine
+ exit(1)
+ return True
+
+
# Read information on an info field from the header line.
def headerInfo(self, writeOut, lineType):
tag =3D self.headerLine.split("=3D",1)
@@ -93,11 +107,15 @@
# an integer or a '.' to indicate variable number of entries.
if tagNumber =3D=3D ".": tagNumber =3D "variable"
else:
- try: tagNumber =3D int(tagNumber)
- except ValueError:
- print >> sys.stderr, "\nError parsing header. Problem with info t=
ag:", tagID
- print >> sys.stderr, "Number of fields associated with this tag is=
not an integer or '.'"
- exit(1)
+ if self.vcfFormat<4.1:
+
+ try:
+ tagNumber =3D int(tagNumber)
+
+ except ValueError:
+ print >> sys.stderr, "\nError parsing header. Problem with in=
fo tag:", tagID
+ print >> sys.stderr, "Number of fields associated with this ta=
g is not an integer or '.'"
+ exit(1)
=20
if lineType =3D=3D "info":
self.infoHeaderTags[tagID] =3D tagNumber, tagType, tagDescription
@@ -161,7 +179,7 @@
return False
=20
# If there is no header in the vcf file, close and reopen the
-# file so that the first line is avaiable for parsing as a=20
+# file so that the first line is avaiable for parsing as a
# vcf record.
def noHeader(self, filename, writeOut):
if writeOut: print >> sys.stdout, "No header lines present in", filena=
me
@@ -216,7 +234,7 @@
else: self.hasGenotypes =3D False
=20
# Add the reference sequence to the dictionary. If it didn't previously
-# exist append the reference sequence to the end of the list as well.=20
+# exist append the reference sequence to the end of the list as well.
# This ensures that the order in which the reference sequences appeared
# in the header can be preserved.
if self.referenceSequence not in self.referenceSequences:
@@ -274,7 +292,7 @@
# Check that there are as many fields as in the format field. If not, thi=
s must
# be because the information is not known. In this case, it is permitted =
that
# the genotype information is either . or ./.
- if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):=20
+ if genotypeInfo[0] =3D=3D "./." or genotypeInfo[0] =3D=3D "." and le=
n(self.genotypeFormats) !=3D len(genotypeInfo):
self.genotypeFields[ self.samplesList[i] ] =3D "."
else:
if len(self.genotypeFormats) !=3D len(genotypeInfo):
@@ -381,7 +399,7 @@
=20
# First check that the variant class (VC) is listed as SNP.
vc =3D self.info.split("VC=3D",1)
- if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)=20
+ if vc[1].find(";") !=3D -1: snp =3D vc[1].split(";",1)
else:
snp =3D []
snp.append(vc[1])
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.