galaxy-dev
details: http://www.bx.psu.edu/hg/galaxy/rev/30fde8c9b03e
changeset: 2774:30fde8c9b03e
user: jeremy goecks <jeremy.goecks at emory.edu>
date: Fri Sep 25 09:20:26 2009 -0400
description:
Ensure that bool filters work in grids
1 file(s) affected in this change:
lib/galaxy/web/framework/helpers/grids.py
diffs (15 lines):
diff -r 5db45e48f954 -r 30fde8c9b03e lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py Fri Sep 25 02:06:03 2009 -0400
+++ b/lib/galaxy/web/framework/helpers/grids.py Fri Sep 25 09:20:26 2009 -0400
@@ -56,7 +56,10 @@
query = column.filter( trans.sa_session, query, column_filter )
# Upate current filter dict.
cur_filter_dict[ column.key ] = column_filter
- # Carry filter along to newly generated urls.
+ # Carry filter along to newly generated urls; make sure filter is a string so
+ # that we can encode to UTF-8 and thus handle user input to filters.
+ if not isinstance( column_filter, basestring ):
+ column_filter = unicode(column_filter)
extra_url_args[ "f-" + column.key ] = column_filter.encode("utf-8")
# Process sort arguments
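The hunk above matters because grid filters can carry non-string values such as booleans. A minimal Python 2 sketch (hypothetical values, not Galaxy code) of the failure mode and the guard this changeset adds:

    column_filter = True                               # e.g. a boolean "deleted" filter
    # A bool has no encode() method, so column_filter.encode("utf-8") would raise
    # AttributeError. Coerce non-strings to unicode first, as the patch does.
    if not isinstance( column_filter, basestring ):
        column_filter = unicode( column_filter )       # True -> u'True'
    extra_url_arg = column_filter.encode( "utf-8" )    # 'True', safe to carry in a URL
    print extra_url_arg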
details: http://www.bx.psu.edu/hg/galaxy/rev/200e243a28e0
changeset: 2775:200e243a28e0
user: gua110
date: Fri Sep 25 10:45:10 2009 -0400
description:
Fix functional test for fastx_artifacts_filter tool.
1 file(s) affected in this change:
tools/fastx_toolkit/fastx_artifacts_filter.xml
diffs (21 lines):
diff -r 30fde8c9b03e -r 200e243a28e0 tools/fastx_toolkit/fastx_artifacts_filter.xml
--- a/tools/fastx_toolkit/fastx_artifacts_filter.xml Fri Sep 25 09:20:26 2009 -0400
+++ b/tools/fastx_toolkit/fastx_artifacts_filter.xml Fri Sep 25 10:45:10 2009 -0400
@@ -3,7 +3,7 @@
<command>zcat -f '$input' | fastx_artifacts_filter -v -o "$output"</command>
<inputs>
- <param format="fasta,fastqsanger" name="input" type="data" label="Library to filter" />
+ <param format="fasta,fastqsanger,fastqsolexa" name="input" type="data" label="Library to filter" />
</inputs>
@@ -15,7 +15,7 @@
</test>
<test>
<!-- Filter FASTQ file -->
- <param name="input" value="fastx_artifacts2.fastq" />
+ <param name="input" value="fastx_artifacts2.fastq" ftype="fastqsanger" />
<output name="output" file="fastx_artifacts2.out" />
</test>
</tests>
details: http://www.bx.psu.edu/hg/galaxy/rev/5db45e48f954
changeset: 2773:5db45e48f954
user: Kanwei Li <kanwei(a)gmail.com>
date: Fri Sep 25 02:06:03 2009 -0400
description:
fix some typos for tools in folders N-Z
34 file(s) affected in this change:
tools/new_operations/basecoverage.xml
tools/new_operations/cluster.xml
tools/new_operations/complement.xml
tools/new_operations/concat.xml
tools/new_operations/coverage.xml
tools/new_operations/get_flanks.xml
tools/new_operations/intersect.xml
tools/new_operations/join.xml
tools/new_operations/merge.xml
tools/new_operations/subtract.xml
tools/regVariation/microsats_alignment_level.xml
tools/regVariation/quality_filter.xml
tools/regVariation/substitution_rates.xml
tools/samtools/pileup_parser.xml
tools/samtools/sam2interval.xml
tools/samtools/sam_bitwise_flag_filter.xml
tools/samtools/sam_pileup.xml
tools/samtools/sam_to_bam.xml
tools/solid_tools/maq_cs_wrapper.xml
tools/solid_tools/solid_qual_boxplot.xml
tools/sr_mapping/bowtie_wrapper.xml
tools/sr_mapping/bwa_wrapper.xml
tools/sr_mapping/fastq_statistics.xml
tools/sr_mapping/lastz_wrapper.xml
tools/stats/aggregate_binned_scores_in_intervals.xml
tools/stats/filtering.xml
tools/stats/gsummary.xml
tools/stats/wiggle_to_simple.xml
tools/taxonomy/find_diag_hits.xml
tools/taxonomy/gi2taxonomy.xml
tools/taxonomy/t2ps_wrapper.xml
tools/taxonomy/t2t_report.xml
tools/visualization/LAJ.xml
tools/visualization/genetrack.xml
diffs (744 lines):
diff -r 210e048e7ec7 -r 5db45e48f954 tools/new_operations/basecoverage.xml
--- a/tools/new_operations/basecoverage.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/new_operations/basecoverage.xml Fri Sep 25 02:06:03 2009 -0400
@@ -24,7 +24,7 @@
.. class:: infomark
-**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns
+**TIP:** If your query does not appear in the pulldown menu, it means that it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns.
This operation counts the total bases covered by a set of intervals. Bases that are covered by more than one interval are **not** counted more than once towards the total.
diff -r 210e048e7ec7 -r 5db45e48f954 tools/new_operations/cluster.xml
--- a/tools/new_operations/cluster.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/new_operations/cluster.xml Fri Sep 25 02:06:03 2009 -0400
@@ -59,7 +59,7 @@
.. class:: infomark
-**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns
+**TIP:** If your query does not appear in the pulldown menu, it means that it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns.
-----
diff -r 210e048e7ec7 -r 5db45e48f954 tools/new_operations/complement.xml
--- a/tools/new_operations/complement.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/new_operations/complement.xml Fri Sep 25 02:06:03 2009 -0400
@@ -33,7 +33,7 @@
.. class:: infomark
-**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns
+**TIP:** If your query does not appear in the pulldown menu, it means that it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns.
This operation complements the regions of a set of intervals. Regions are returned that represent the empty space in the input interval.
diff -r 210e048e7ec7 -r 5db45e48f954 tools/new_operations/concat.xml
--- a/tools/new_operations/concat.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/new_operations/concat.xml Fri Sep 25 02:06:03 2009 -0400
@@ -27,7 +27,7 @@
.. class:: infomark
-**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns
+**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns.
-----
diff -r 210e048e7ec7 -r 5db45e48f954 tools/new_operations/coverage.xml
--- a/tools/new_operations/coverage.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/new_operations/coverage.xml Fri Sep 25 02:06:03 2009 -0400
@@ -34,7 +34,7 @@
.. class:: infomark
-**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns
+**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns.
Find the coverage of intervals in the first query on intervals in the second query. The coverage is added as two columns, the first being bases covered, and the second being the fraction of bases covered by that interval.
diff -r 210e048e7ec7 -r 5db45e48f954 tools/new_operations/get_flanks.xml
--- a/tools/new_operations/get_flanks.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/new_operations/get_flanks.xml Fri Sep 25 02:06:03 2009 -0400
@@ -41,7 +41,7 @@
</tests>
<help>
-This tool finds the upstream and/or downstream flanking region/s of all the selected regions in the input file.
+This tool finds the upstream and/or downstream flanking region(s) of all the selected regions in the input file.
**Note:** Every line should contain at least 3 columns: Chromosome number, Start and Stop co-ordinates. If any of these columns is missing or if start and stop co-ordinates are not numerical, the tool may encounter exceptions and such lines are skipped as invalid. The number of invalid skipped lines is documented in the resulting history item as a "Data issue".
diff -r 210e048e7ec7 -r 5db45e48f954 tools/new_operations/intersect.xml
--- a/tools/new_operations/intersect.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/new_operations/intersect.xml Fri Sep 25 02:06:03 2009 -0400
@@ -21,27 +21,27 @@
<data format="input" name="output" metadata_source="input1" />
</outputs>
<code file="operation_filter.py"/>
- <tests>
- <test>
- <param name="input1" value="1.bed" />
- <param name="input2" value="2.bed" />
- <param name="min" value="1" />
- <param name="returntype" value="" />
- <output name="output" file="gops_intersect_out.bed" />
+ <tests>
+ <test>
+ <param name="input1" value="1.bed" />
+ <param name="input2" value="2.bed" />
+ <param name="min" value="1" />
+ <param name="returntype" value="" />
+ <output name="output" file="gops_intersect_out.bed" />
</test>
<test>
<param name="input1" value="1.bed" />
<param name="input2" value="2_mod.bed" ftype="interval"/>
<param name="min" value="1" />
- <param name="returntype" value="" />
+ <param name="returntype" value="" />
<output name="output" file="gops_intersect_diffCols.bed" />
- </test>
- <test>
- <param name="input1" value="1.bed" />
- <param name="input2" value="2_mod.bed" ftype="interval"/>
- <param name="min" value="1" />
- <param name="returntype" value="Overlapping pieces of Intervals" />
- <output name="output" file="gops_intersect_p_diffCols.bed" />
+ </test>
+ <test>
+ <param name="input1" value="1.bed" />
+ <param name="input2" value="2_mod.bed" ftype="interval"/>
+ <param name="min" value="1" />
+ <param name="returntype" value="Overlapping pieces of Intervals" />
+ <output name="output" file="gops_intersect_p_diffCols.bed" />
</test>
<test>
<param name="input1" value="1.bed" />
@@ -76,7 +76,7 @@
.. class:: infomark
-**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns
+**TIP:** If your query does not appear in the pulldown menu, it means that it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns.
-----
diff -r 210e048e7ec7 -r 5db45e48f954 tools/new_operations/join.xml
--- a/tools/new_operations/join.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/new_operations/join.xml Fri Sep 25 02:06:03 2009 -0400
@@ -70,7 +70,7 @@
.. class:: infomark
-**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns
+**TIP:** If your query does not appear in the pulldown menu, it means that it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns.
-----
@@ -85,7 +85,7 @@
**Syntax**
- **Where overlap** specifies the minimum overlap between intervals that allows them to be joined.
-- **Return only records that are joined** returns only the records of the first query that join to a recond in the second query. This is analogous to an INNER JOIN.
+- **Return only records that are joined** returns only the records of the first query that join to a record in the second query. This is analogous to an INNER JOIN.
- **Return all records of first query (fill null with ".")** returns all intervals of the first query, and any intervals that do not join an interval from the second query are filled in with a period(.). This is analogous to a LEFT JOIN.
- **Return all records of second query (fill null with ".")** returns all intervals of the second query, and any intervals that do not join an interval from the first query are filled in with a period(.). **Note that this may produce an invalid interval file, since a period(.) is not a valid chrom, start, end or strand.**
- **Return all records of both queries (fill nulls with ".")** returns all records from both queries, and fills on either the right or left with periods. **Note that this may produce an invalid interval file, since a period(.) is not a valid chrom, start, end or strand.**
diff -r 210e048e7ec7 -r 5db45e48f954 tools/new_operations/merge.xml
--- a/tools/new_operations/merge.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/new_operations/merge.xml Fri Sep 25 02:06:03 2009 -0400
@@ -36,7 +36,7 @@
.. class:: infomark
-**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns
+**TIP:** If your query does not appear in the pulldown menu, it means that it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns.
-----
@@ -48,7 +48,7 @@
-----
-This operation merges all overlaping intervals into single intervals.
+This operation merges all overlapping intervals into single intervals.
**Example**
diff -r 210e048e7ec7 -r 5db45e48f954 tools/new_operations/subtract.xml
--- a/tools/new_operations/subtract.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/new_operations/subtract.xml Fri Sep 25 02:06:03 2009 -0400
@@ -58,7 +58,7 @@
.. class:: infomark
-**TIP:** If your query does not appear in the pulldown menu -> it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns
+**TIP:** If your query does not appear in the pulldown menu, it means that it is not in interval format. Use "edit attributes" to set chromosome, start, end, and strand columns.
-----
diff -r 210e048e7ec7 -r 5db45e48f954 tools/regVariation/microsats_alignment_level.xml
--- a/tools/regVariation/microsats_alignment_level.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/regVariation/microsats_alignment_level.xml Fri Sep 25 02:06:03 2009 -0400
@@ -6,8 +6,8 @@
<inputs>
<page>
<param format="fasta" name="input1" type="data" label="Select data"/>
- <param name="separation" size="10" type="integer" value="10" label="Minimum basepair distance between adjacent microsatellites"
- help="A value of 10 means: Adjacent microsatellites separated by less than 10 basepairs will be excluded from the output."/>
+ <param name="separation" size="10" type="integer" value="10" label="Minimum base pair distance between adjacent microsatellites"
+ help="A value of 10 means: Adjacent microsatellites separated by less than 10 base pairs will be excluded from the output."/>
<param name="mono_threshold" size="10" type="integer" value="9" label="Minimum Threshold for the number of repeats for mononucleotide microsatellites"
help="A value of 9 means: All mononucleotide microsatellites having fewer than 9 repeats will be excluded from the output."/>
<param name="non_mono_threshold" size="10" type="integer" value="4" label="Minimum Threshold for the number of repeats for non-mononucleotide microsatellites"
diff -r 210e048e7ec7 -r 5db45e48f954 tools/regVariation/quality_filter.xml
--- a/tools/regVariation/quality_filter.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/regVariation/quality_filter.xml Fri Sep 25 02:06:03 2009 -0400
@@ -91,8 +91,8 @@
**Note**
-Any block/s not containing the primary species(species whose quality scores is to be used), will be omitted.
-Also, any primary species whose quality scores are not available in galaxy, will be considered as a non-primary species. This info will appear as a message in the job history panel.
+Any block/s not containing the primary species (species whose quality scores is to be used), will be omitted.
+Also, any primary species whose quality scores are not available in Galaxy will be considered as a non-primary species. This info will appear as a message in the job history panel.
-----
diff -r 210e048e7ec7 -r 5db45e48f954 tools/regVariation/substitution_rates.xml
--- a/tools/regVariation/substitution_rates.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/regVariation/substitution_rates.xml Fri Sep 25 02:06:03 2009 -0400
@@ -43,7 +43,7 @@
**What it does**
-This tool takes a pairwise MAF file as input and estimates substitution rate according to Jukes-Cantor JC69 model. The 3 new columns appended to the output are explanied below:
+This tool takes a pairwise MAF file as input and estimates substitution rate according to Jukes-Cantor JC69 model. The 3 new columns appended to the output are explained below:
- L: number of nucleotides compared
- N: number of different nucleotides
diff -r 210e048e7ec7 -r 5db45e48f954 tools/samtools/pileup_parser.xml
--- a/tools/samtools/pileup_parser.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/samtools/pileup_parser.xml Fri Sep 25 02:06:03 2009 -0400
@@ -99,18 +99,18 @@
</tests>
<help>
-**What is does**
+**What it does**
-Allows to find sequence variants and/or sites covered by specified number of reads with bases above a set quality threshold. The tool works on six and ten column pileup formats produced with *samtools pileup* command. However, it also allows you to specify columns in the input file manually. The tool assumes the following:
+Allows one to find sequence variants and/or sites covered by a specified number of reads with bases above a set quality threshold. The tool works on six and ten column pileup formats produced with *samtools pileup* command. However, it also allows you to specify columns in the input file manually. The tool assumes the following:
- the quality scores follow phred33 convention, where input qualities are ASCII characters equal to the Phred quality plus 33.
-- the pileup dataset was produced by *samtools pileup* command (although you can override this by setting column assignments manually).
+- the pileup dataset was produced by the *samtools pileup* command (although you can override this by setting column assignments manually).
--------
**Types of pileup datasets**
-The description of pileup format below is largely based on information that can be found on SAMTools_ documentation page. The 6- and 10-column variants are described below.
+The descriptions of the following pileup formats are largely based on information that can be found on the SAMTools_ documentation page. The 6- and 10-column variants are described below.
.. _SAMTools: http://samtools.sourceforge.net/pileup.shtml
@@ -136,7 +136,7 @@
**Ten column pileup**
-The `ten-column`__ pileup incoroporates additional consensus information generated with *-c* option of *samtools pileup* command::
+The `ten-column`__ pileup incorporates additional consensus information generated with the *-c* option of the *samtools pileup* command::
1 2 3 4 5 6 7 8 9 10
@@ -187,7 +187,7 @@
chrM 414 C 4 ...a III2
chrM 415 C 4 TTTt III7
-you will get this::
+you will get::
chrM 413 G 4 ..t, IIIH 0 0 0 1 3
chrM 415 C 4 TTTt III7 0 0 0 4 4
@@ -232,26 +232,26 @@
12 Quality adjusted coverage
-Note that in this case coordinates of SNPs were converted to intervals, where the start coordinate is 0-based and the end coordinate in 1-based using the UCSC Table Browser convention.
+Note that in this case the coordinates of SNPs were converted to intervals, where the start coordinate is 0-based and the end coordinate in 1-based using the UCSC Table Browser convention.
-Although three positions have variants in the original file (413, 414, and 415), only 413 and 415 are reported, because the quality values associated with these two SNPs are above threshold of 20. In the case of 414 the **a** allele has quality value of 17 ( ord("2")-33 ), and therefore it is not reported. In each of the reported lines the program added five columns. Let's take a look at this line::
+Although three positions have variants in the original file (413, 414, and 415), only 413 and 415 are reported because the quality values associated with these two SNPs are above the threshold of 20. In the case of 414 the **a** allele has a quality value of 17 ( ord("2")-33 ), and is therefore not reported. Note that five columns have been added to each of the reported lines::
chrM 413 G 4 ..t, IIIH 0 0 0 1 3
-here there is one variant, and it is a **t**. Because the fourth column represents **T** counts, it is incremented by 1. The last column shows that at this position three reads has bases above the quality threshold of 20.
+Here, there is one variant, **t**. Because the fourth column represents **T** counts, it is incremented by 1. The last column shows that at this position, three reads have bases above the quality threshold of 20.
-----
**Example 1**: Just variants
-In this mode the tool outputs only those lines from the input datasets where at least one read contains a sequence variant with quality above the limit set by the **Do not consider read bases with quality lower than** option. For example, suppose one has a pileup dataset like this::
+In this mode, the tool only outputs the lines from the input datasets where at least one read contains a sequence variant with quality above the threshold set by the **Do not consider read bases with quality lower than** option. For example, suppose one has a pileup dataset like the following::
chrM 412 A 2 ., II
chrM 413 G 4 ..t, III2
chrM 414 C 4 ...a III2
chrM 415 C 4 TTTt III7
-to call all variants (with no restriction by coverage) with quality above phred value of 20 we will need to set parameters as follows:
+To call all variants (with no restriction by coverage) with quality above phred value of 20, we will need to set the parameters as follows:
.. image:: ../static/images/pileup_parser_help1.png
@@ -260,13 +260,13 @@
chrM 413 G 4 ..t, IIIH 0 0 0 1 3
chrM 415 C 4 TTTt III7 0 0 0 4 4
-**Note** that position 414 is not reported because the *a* variant has associated quality value of 17 (because ord('2')-33 = 17) in is below the phred threshold 20 set by the **Count variants with quality above this value** parameter.
+**Note** that position 414 is not reported because the *a* variant has associated quality value of 17 (because ord('2')-33 = 17) and is below the phred threshold of 20 set by the **Count variants with quality above this value** parameter.
-----
**Example 2**: Report everything
-In addition to calling variants it is often useful to know the quality adjusted coverage. Running the tool with these parameters:
+In addition to calling variants, it is often useful to know the quality adjusted coverage. Running the tool with these parameters:
.. image:: ../static/images/pileup_parser_help2.png
@@ -277,10 +277,9 @@
chrM 414 C 4 ...a III2 0 0 0 0 3
chrM 415 C 4 TTTt III7 0 0 0 4 4
-Here, for instance, you can see that although the total coverage at position 414 is 4 (column 4) the quality adjusted coverage is 3 (last column). This is because inly three reads out of four have bases with quality above the set threshold of 20 (the actual qualities are III2 or, after conversion, 40, 40, 40, 17).
+Here, you can see that although the total coverage at position 414 is 4 (column 4), the quality adjusted coverage is 3 (last column). This is because only three out of four reads have bases with quality above the set threshold of 20 (the actual qualities are III2 or, after conversion, 40, 40, 40, 17).
-Now, one can use the last column of this dataset to filter out (using Galaxy's filter tool) positions where quality adjusted coverage (last column) is below a set threshold.
-
+One can use the last column of this dataset to filter out (using Galaxy's **Filter** tool) positions where quality adjusted coverage (last column) is below a set threshold.
</help>
</tool>
diff -r 210e048e7ec7 -r 5db45e48f954 tools/samtools/sam2interval.xml
--- a/tools/samtools/sam2interval.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/samtools/sam2interval.xml Fri Sep 25 02:06:03 2009 -0400
@@ -31,7 +31,7 @@
**What it does**
-Converts positional information from a SAM dataset into interval format with 0-based start and 1-based end. CIGAR string of SAM format is usd to compute the end coordinate.
+Converts positional information from a SAM dataset into interval format with 0-based start and 1-based end. CIGAR string of SAM format is used to compute the end coordinate.
-----
diff -r 210e048e7ec7 -r 5db45e48f954 tools/samtools/sam_bitwise_flag_filter.xml
--- a/tools/samtools/sam_bitwise_flag_filter.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/samtools/sam_bitwise_flag_filter.xml Fri Sep 25 02:06:03 2009 -0400
@@ -46,7 +46,7 @@
**What it does**
-Allows parsing SAM datasets using bitwise flag (the second column). The bits in the flag are defined as follows::
+Allows parsing of SAM datasets using bitwise flag (the second column). The bits in the flag are defined as follows::
Bit Info
------ --------------------------------------------------------------------------
@@ -67,7 +67,7 @@
Note the following:
- Flag 0x02, 0x08, 0x20, 0x40 and 0x80 are only meaningful when flag 0x01 is present.
-- If in a read pair the information on which read is the first in the pair is lost in the upstream analysis, flag 0x01 should be present and 0x40 and 0x80 are both zero.
+- If in a read pair the information on which read is the first in the pair is lost in the upstream analysis, flag 0x01 should be set, while 0x40 and 0x80 should both be zero.
-----
@@ -82,12 +82,12 @@
r003 16 ref 29 30 6H5M * 0 0 TAGGC * NM:i:0
r001 83 ref 37 30 9M = 7 -39 CAGCGCCAT *
-To select properly mapped pairs click the **Add new Flag** button and set *Read mapped in a proper pair* to **Yes**. The following two reads will be returned::
+To select properly mapped pairs, click the **Add new Flag** button and set *Read mapped in a proper pair* to **Yes**. The following two reads will be returned::
r001 163 ref 7 30 8M2I4M1D3M = 37 39 TTAGATAAAGGATACTA *
r001 83 ref 37 30 9M = 7 -39 CAGCGCCAT *
-For more information please consult the `SAM format description`__.
+For more information, please consult the `SAM format description`__.
.. __: http://www.ncbi.nlm.nih.gov/pubmed/19505943
diff -r 210e048e7ec7 -r 5db45e48f954 tools/samtools/sam_pileup.xml
--- a/tools/samtools/sam_pileup.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/samtools/sam_pileup.xml Fri Sep 25 02:06:03 2009 -0400
@@ -77,7 +77,7 @@
**What it does**
-Uses SAMTools_' pileup command to produce a pileup dataset from a provided BAM dataset. It generated two types of pileup datasets depending on chosen options. If *Call consensus according to MAQ model?* option is set to **No**, the tool produces simple pileup. If the option is set to **Yes**, a ten column pileup dataset with consensus is generated. Both types of datasets are briefly summarized below.
+Uses SAMTools_' pileup command to produce a pileup dataset from a provided BAM dataset. It generates two types of pileup datasets depending on the specified options. If *Call consensus according to MAQ model?* option is set to **No**, the tool produces simple pileup. If the option is set to **Yes**, a ten column pileup dataset with consensus is generated. Both types of datasets are briefly summarized below.
.. _SAMTools: http://samtools.sourceforge.net/samtools.shtml
@@ -111,7 +111,7 @@
**Ten column pileup**
-The `ten-column`__ pileup incoroporates additional consensus information generated with *-c* option of *samtools pileup* command::
+The `ten-column`__ pileup incorporates additional consensus information generated with *-c* option of *samtools pileup* command::
1 2 3 4 5 6 7 8 9 10
diff -r 210e048e7ec7 -r 5db45e48f954 tools/samtools/sam_to_bam.xml
--- a/tools/samtools/sam_to_bam.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/samtools/sam_to_bam.xml Fri Sep 25 02:06:03 2009 -0400
@@ -51,7 +51,7 @@
**What it does**
-This tool uses the SAMTools_ toolkit to produce a indexed BAM file based on a sorted input SAM file.
+This tool uses the SAMTools_ toolkit to produce an indexed BAM file based on a sorted input SAM file.
.. _SAMTools: http://samtools.sourceforge.net/samtools.shtml
diff -r 210e048e7ec7 -r 5db45e48f954 tools/solid_tools/maq_cs_wrapper.xml
--- a/tools/solid_tools/maq_cs_wrapper.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/solid_tools/maq_cs_wrapper.xml Fri Sep 25 02:06:03 2009 -0400
@@ -71,7 +71,7 @@
**What it does**
-This tool maps SOLiD colour-space reads against the target genome using MAQ. It produces three output datasets:
+This tool maps SOLiD color-space reads against the target genome using MAQ. It produces three output datasets:
**ALIGNMENT INFO** : contains the read alignment information,
diff -r 210e048e7ec7 -r 5db45e48f954 tools/solid_tools/solid_qual_boxplot.xml
--- a/tools/solid_tools/solid_qual_boxplot.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/solid_tools/solid_qual_boxplot.xml Fri Sep 25 02:06:03 2009 -0400
@@ -26,7 +26,7 @@
* Black horizontal lines are medians
* Rectangular red boxes show the Inter-quartile Range (IQR) (top value is Q3, bottom value is Q1)
-* Whiskers show outlier at max. 1.5*IQR
+* Whiskers show outliers at max. 1.5*IQR
.. image:: ../static/images/solid_qual.png
diff -r 210e048e7ec7 -r 5db45e48f954 tools/sr_mapping/bowtie_wrapper.xml
--- a/tools/sr_mapping/bowtie_wrapper.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/sr_mapping/bowtie_wrapper.xml Fri Sep 25 02:06:03 2009 -0400
@@ -181,7 +181,7 @@
<param name="dcv" type="integer" value="1024" label="The period for the difference-cover sample (--dcv)" />
</when>
</conditional>
- <param name="nodc" type="select" label="Whether or not to disable the use of the difference-cover sample (--nodc)" help="Suffix sorting becomes quadratic-time in the worst case (a very repetetive reference)">
+ <param name="nodc" type="select" label="Whether or not to disable the use of the difference-cover sample (--nodc)" help="Suffix sorting becomes quadratic-time in the worst case (a very repetitive reference)">
<option value="dc">Use difference-cover sample</option>
<option value="nodc">Disable difference-cover sample</option>
</param>
diff -r 210e048e7ec7 -r 5db45e48f954 tools/sr_mapping/bwa_wrapper.xml
--- a/tools/sr_mapping/bwa_wrapper.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/sr_mapping/bwa_wrapper.xml Fri Sep 25 02:06:03 2009 -0400
@@ -141,7 +141,7 @@
<param name="mismatchPenalty" type="integer" value="3" label="Mismatch penalty" help="BWA will not search for suboptimal hits with a score lower than [value]" />
<param name="gapOpenPenalty" type="integer" value="11" label="Gap open penalty" />
<param name="gapExtensPenalty" type="integer" value="4" label="Gap extension penalty" />
- <param name="colorSpaceRev" type="select" label="Reverse query but don't compement it" help="Reverse query for all alignment in color space">
+ <param name="colorSpaceRev" type="select" label="Reverse query but don't complement it" help="Reverse query for all alignment in color space">
<option value="false">Don't reverse query</option>
<option value="true">Reverse query</option>
</param>
@@ -293,7 +293,7 @@
**What it does**
-**BWA** is a high performance sequence aligner that succeeds MAQ. It is based on BWT-SW but uses a completely different algorithm, and it is aimed toward short read alignments. It is fast--it can map the human genome in only 15-25 minutes. Heng Li of the Sanger Institute wrote the majority of the code, with contributions by Chi-Kwong Wong at the University of Hong Kong, Nong Ge at Sun Yat-Sen University, and Yuta Mori.
+**BWA** is a high performance sequence aligner that succeeds MAQ. It is based on BWT-SW but uses a completely different algorithm and is aimed towards short read alignments. It is fast--it can map the human genome in only 15-25 minutes. Heng Li of the Sanger Institute wrote the majority of the code, with contributions by Chi-Kwong Wong at the University of Hong Kong, Nong Ge at Sun Yat-Sen University, and Yuta Mori.
------
diff -r 210e048e7ec7 -r 5db45e48f954 tools/sr_mapping/fastq_statistics.xml
--- a/tools/sr_mapping/fastq_statistics.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/sr_mapping/fastq_statistics.xml Fri Sep 25 02:06:03 2009 -0400
@@ -2,7 +2,7 @@
<description>for Solexa file</description>
<command>cat $input | solexa_quality_statistics -o $output</command>
<inputs>
- <param format="fastqsolexa" name="input" type="data" label="Library to analyse" />
+ <param format="fastqsolexa" name="input" type="data" label="Library to analyze" />
</inputs>
<outputs>
<data format="txt" name="output" />
diff -r 210e048e7ec7 -r 5db45e48f954 tools/sr_mapping/lastz_wrapper.xml
--- a/tools/sr_mapping/lastz_wrapper.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/sr_mapping/lastz_wrapper.xml Fri Sep 25 02:06:03 2009 -0400
@@ -76,20 +76,20 @@
<param name="max_ident" type="integer" size="3" value="100" label="Do not report matches above this identity (%)"/>
<param name="min_cvrg" type="integer" size="3" value="0" label="Do not report matches that cover less than this fraction (%) of each read"/>
</inputs>
- <outputs>
+ <outputs>
<data format="tabular" name="output1">
<change_format>
<when input="out_format" value="maf" format="maf" />
</change_format>
</data>
- <data format="coverage" name="output2" />
+ <data format="coverage" name="output2" />
</outputs>
<requirements>
<requirement type="binary">lastz</requirement>
</requirements>
- <tests>
- <test>
- <param name="input1" value="phiX.fa" ftype="fasta" />
+ <tests>
+ <test>
+ <param name="input1" value="phiX.fa" ftype="fasta" />
<param name="input2" value="B1.fa" ftype="fasta" />
<param name="source_select" value="pre_set" />
<param name="pre_set_options" value="yasra95short" />
@@ -98,10 +98,10 @@
<param name="max_ident" value="100" />
<param name="min_cvrg" value="0" />
<param name="out_format" value="diffs" />
- <output name="output1" file="lastz_diffs.txt" />
+ <output name="output1" file="lastz_diffs.txt" />
</test>
- <test>
- <param name="input1" value="phiX.fa" ftype="fasta" />
+ <test>
+ <param name="input1" value="phiX.fa" ftype="fasta" />
<param name="input2" value="B1.fa" ftype="fasta" />
<param name="source_select" value="pre_set" />
<param name="pre_set_options" value="yasra95short" />
@@ -111,18 +111,18 @@
<param name="max_ident" value="100" />
<param name="min_cvrg" value="0" />
<param name="out_format" value="diffs" />
- <output name="output1" file="lastz_diffs_ref_name.txt" />
+ <output name="output1" file="lastz_diffs_ref_name.txt" />
</test>
- </tests>
+ </tests>
<help>
**What it does**
-**LASTZ** is a high perfomance pairwise sequence aligner derived from BLASTZ. It is written by Bob Harris in Webb Miller's laboratory at Penn State. Special scoring sets were derived to improve the performance, both in runtime and quality. The Galaxy version of LASTZ is geared towards aligning of short (Illumina/Solexa, AB/SOLiD) and medium (Roche/454) reads against a reference sequence.
+**LASTZ** is a high performance pairwise sequence aligner derived from BLASTZ. It is written by Bob Harris in Webb Miller's laboratory at Penn State. Special scoring sets were derived to improve runtime performance and quality. The Galaxy version of LASTZ is geared towards aligning of short (Illumina/Solexa, AB/SOLiD) and medium (Roche/454) reads against a reference sequence.
.. class:: warningmark
-At present this tools supports aligning reads against a single reference sequence no longer than 1 Megabase. This limitation will be lifted in the coming months as our short read analysis hardware capacity is expanding.
+This tool presently supports aligning reads against a single reference sequence no longer than 1 Megabase. This limitation will be lifted in the coming months as our short read analysis hardware capacity expands.
------
diff -r 210e048e7ec7 -r 5db45e48f954 tools/stats/aggregate_binned_scores_in_intervals.xml
--- a/tools/stats/aggregate_binned_scores_in_intervals.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/stats/aggregate_binned_scores_in_intervals.xml Fri Sep 25 02:06:03 2009 -0400
@@ -62,7 +62,7 @@
.. class:: warningmark
-This tool currently only has cached data for genome builds hg16, hg17 and hg18. However, you may use your own data point (wiggle) data, such as is available from UCSC. If you are trying to use your own data point file and it is not appearing as an option, make sure that the builds for your history items are the same.
+This tool currently only has cached data for genome builds hg16, hg17 and hg18. However, you may use your own data point (wiggle) data, such as those available from UCSC. If you are trying to use your own data point file and it is not appearing as an option, make sure that the builds for your history items are the same.
.. class:: warningmark
diff -r 210e048e7ec7 -r 5db45e48f954 tools/stats/filtering.xml
--- a/tools/stats/filtering.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/stats/filtering.xml Fri Sep 25 02:06:03 2009 -0400
@@ -42,7 +42,7 @@
**Syntax**
-The filter tool allows you to restrict the datset using simple conditional statements
+The filter tool allows you to restrict the dataset using simple conditional statements.
- Columns are referenced with **c** and a **number**. For example, **c1** refers to the first column of a tab-delimited file
- Make sure that multi-character operators contain no white space ( e.g., **<=** is valid while **< =** is not valid )
diff -r 210e048e7ec7 -r 5db45e48f954 tools/stats/gsummary.xml
--- a/tools/stats/gsummary.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/stats/gsummary.xml Fri Sep 25 02:06:03 2009 -0400
@@ -24,7 +24,7 @@
.. class:: warningmark
-This tool expects input datasets to consist of tab-delimited columns (blank or comment lines beginning with a # character are automatically skipped).
+This tool expects input datasets consisting of tab-delimited columns (blank or comment lines beginning with a # character are automatically skipped).
.. class:: infomark
@@ -48,7 +48,7 @@
- Columns are referenced with **c** and a **number**. For example, **c1** refers to the first column of a tab-delimited file.
-- Examples of expressions:
+- For example:
- **log(c5)** calculates the summary statistics for the natural log of column 5
- **(c5 + c6 + c7) / 3** calculates the summary statistics on the average of columns 5-7
diff -r 210e048e7ec7 -r 5db45e48f954 tools/stats/wiggle_to_simple.xml
--- a/tools/stats/wiggle_to_simple.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/stats/wiggle_to_simple.xml Fri Sep 25 02:06:03 2009 -0400
@@ -18,7 +18,7 @@
This tool converts wiggle data into interval type.
-- **Wiggle format**: The .wig format is line-oriented. Wiggle data is preceeded by a UCSC track definition line. Following the track definition line is the track data, which can be entered in three different formats described below.
+- **Wiggle format**: The .wig format is line-oriented. Wiggle data is preceded by a UCSC track definition line. Following the track definition line is the track data, which can be entered in three different formats described below.
- **BED format** with no declaration line and four columns of data::
diff -r 210e048e7ec7 -r 5db45e48f954 tools/taxonomy/find_diag_hits.xml
--- a/tools/taxonomy/find_diag_hits.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/taxonomy/find_diag_hits.xml Fri Sep 25 02:06:03 2009 -0400
@@ -67,7 +67,7 @@
* *Select column with sequence id* set to **c1**
* *Select taxonomic ranks* with **order**, and **genus** checked
- * *Output format* set to **Dignostic read list**
+ * *Output format* set to **Diagnostic read list**
will return::
@@ -89,7 +89,7 @@
.. class:: warningmark
-This tool omits "**n**" corresponding to ranks missing from NCBI taxonomy. In the above example *Home sapiens* conatains the order name (Primates) while *Bos taurus* does not.
+This tool omits "**n**" corresponding to ranks missing from NCBI taxonomy. In the above example *Home sapiens* contains the order name (Primates) while *Bos taurus* does not.
</help>
diff -r 210e048e7ec7 -r 5db45e48f954 tools/taxonomy/gi2taxonomy.xml
--- a/tools/taxonomy/gi2taxonomy.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/taxonomy/gi2taxonomy.xml Fri Sep 25 02:06:03 2009 -0400
@@ -45,7 +45,7 @@
| 1L_EYKX4VC01BXWX1_265 | 1430919 | 90.09 | 212 | 15 | 6 | 252.00 |
+-----------------------+----------+----------+-----------------+------------+------+--------+
-and you want to obtain full taxonomic representation for GIs listed in *targetGI* column. If you set paramenters as shown here:
+and you want to obtain full taxonomic representation for GIs listed in *targetGI* column. If you set parameters as shown here:
.. image:: ../static/images/fetchTax.png
diff -r 210e048e7ec7 -r 5db45e48f954 tools/taxonomy/t2ps_wrapper.xml
--- a/tools/taxonomy/t2ps_wrapper.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/taxonomy/t2ps_wrapper.xml Fri Sep 25 02:06:03 2009 -0400
@@ -108,7 +108,7 @@
**Explanation of phylogenetic tree markup**
-Branches of the tree are colored according to the heatmap below. The "bluer" the branch the lesser the numer of leaves it leads to and vice versa.
+Branches of the tree are colored according to the heatmap below. The "bluer" the branch the lesser the number of leaves it leads to and vice versa.
.. image:: ../static/images/t2ps_heatmap.png
diff -r 210e048e7ec7 -r 5db45e48f954 tools/taxonomy/t2t_report.xml
--- a/tools/taxonomy/t2t_report.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/taxonomy/t2t_report.xml Fri Sep 25 02:06:03 2009 -0400
@@ -30,19 +30,39 @@
Suppose the *Taxonomy manipulation->Fetch Taxonomic Ranks* generated the following taxonomy representation::
- 9916 2 root Eukaryota Metazoa n n Chordata Craniata Gnathostomata Mammalia n Laurasiatheria n Ruminantia n Bovidae Bovinae n n Bos n Bos taurus n
9606 12585 root Eukaryota Metazoa n n Chordata Craniata Gnathostomata Mammalia n Euarchontoglires Primates Haplorrhini Hominoidea Hominidae n n n Homo n Homo sapiens n
+ 9916 2 root Eukaryota Metazoa n n Chordata Craniata Gnathostomata Mammalia n Laurasiatheria n Ruminantia n Bovidae Bovinae n n Bos n Bos taurus n
+ 9606 12585 root Eukaryota Metazoa n n Chordata Craniata Gnathostomata Mammalia n Euarchontoglires Primates Haplorrhini Hominoidea Hominidae n n n Homo n Homo sapiens n
Running this tool will generate the following output::
Rank Rank Name Count
-------------------------------------
- root root 2
superkingdom Eukaryota 2
kingdom Metazoa 2
phylum Chordata 2
subphylum Craniata 2
superclass Gnathostomata 2
class Mammalia 2
superorder Euarchontoglires 1
superorder Laurasiatheria 1
order Primates 1
suborder Haplorrhini 1
suborder Ruminantia 1
superfamily Hominoidea 1
family Bovidae 1
family Hominidae 1
subfamily Bovinae 1
genus Bos 1
genus Homo 1
species Bos taurus 1
species Homo sapiens 1
+ root root 2
+ superkingdom Eukaryota 2
+ kingdom Metazoa 2
+ phylum Chordata 2
+ subphylum Craniata 2
+ superclass Gnathostomata 2
+ class Mammalia 2
+ superorder Euarchontoglires 1
+ superorder Laurasiatheria 1
+ order Primates 1
+ suborder Haplorrhini 1
+ suborder Ruminantia 1
+ superfamily Hominoidea 1
+ family Bovidae 1
+ family Hominidae 1
+ subfamily Bovinae 1
+ genus Bos 1
+ genus Homo 1
+ species Bos taurus 1
+ species Homo sapiens 1
-The outoput is sorted on Rank and then on Rank Name.
+The output is sorted on Rank and then on Rank Name.
.. class:: warningmark
-**Note** that this tool omits "**n**" corresponding to ranks missing from NCBI taxonomy. In the above example *Home sapiens* conatains the order name (Primates) while *Bos taurus* does not.
+**Note** that this tool omits "**n**" corresponding to ranks missing from NCBI taxonomy. In the above example *Home sapiens* contains the order name (Primates) while *Bos taurus* does not.
</help>
diff -r 210e048e7ec7 -r 5db45e48f954 tools/visualization/LAJ.xml
--- a/tools/visualization/LAJ.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/visualization/LAJ.xml Fri Sep 25 02:06:03 2009 -0400
@@ -15,7 +15,7 @@
<data name="out_file1" format="laj"/>
</outputs>
<help>
-You can use this tool to view a set of LAV alignments. You may include FASTA formated sequences for both species.
+You can use this tool to view a set of LAV alignments. You may include FASTA formatted sequences for both species.
For detailed information on LAJ, click here_.
diff -r 210e048e7ec7 -r 5db45e48f954 tools/visualization/genetrack.xml
--- a/tools/visualization/genetrack.xml Fri Sep 25 00:39:29 2009 -0400
+++ b/tools/visualization/genetrack.xml Fri Sep 25 02:06:03 2009 -0400
@@ -48,7 +48,7 @@
</requirements>
<help>
This tool takes the input Fit Data and creates a peak and curve plot
-showing the reads and fitness on each basepair. Features can be
+showing the reads and fitness on each base pair. Features can be
plotted below as tracks. Fit data is coverage output from tools like
the Lastz tool. Features are simply interval datasets that may be
plotted as tracks below the optional fit data. Both the fit data and
@@ -62,7 +62,7 @@
- **Track Label** is the name of the generated track.
- **Fit Data** is the dataset to calculate coverage/reads across
- basepairs and generate a curve. This is optional, and tracks may
+ base pairs and generate a curve. This is optional, and tracks may
be created simply showing features.
- **Features** are datasets (interval format) to be plotted as tracks.
details: http://www.bx.psu.edu/hg/galaxy/rev/210e048e7ec7
changeset: 2772:210e048e7ec7
user: Kanwei Li <kanwei(a)gmail.com>
date: Fri Sep 25 00:39:29 2009 -0400
description:
Style fixes
2 file(s) affected in this change:
static/june_2007_style/panel_layout.css.tmpl
templates/root/index.mako
diffs (25 lines):
diff -r aff67d57fd38 -r 210e048e7ec7 static/june_2007_style/panel_layout.css.tmpl
--- a/static/june_2007_style/panel_layout.css.tmpl Thu Sep 24 21:17:32 2009 -0400
+++ b/static/june_2007_style/panel_layout.css.tmpl Fri Sep 25 00:39:29 2009 -0400
@@ -233,7 +233,8 @@
position:absolute;
top:0;
left:0;
- width:100%;
+ width:100%;
+ min-width:900px;
height:32px;
background: ${masthead_bg};
color:#fff;
diff -r aff67d57fd38 -r 210e048e7ec7 templates/root/index.mako
--- a/templates/root/index.mako Thu Sep 24 21:17:32 2009 -0400
+++ b/templates/root/index.mako Fri Sep 25 00:39:29 2009 -0400
@@ -96,7 +96,7 @@
<div class="unified-panel-header" unselectable="on">
<div class="unified-panel-header-inner">
<div style="float: right">
- <a id="history-options-button" class='panel-header-button' href="${h.url_for( controller='root', action='history_options' )}" target="galaxy_main"><span style="padding:0 20px 0 5px;background:url(/images/dropdownarrow.png) no-repeat right center;">${_('Options')}</span></a>
+ <a id="history-options-button" class='panel-header-button' href="${h.url_for( controller='root', action='history_options' )}" target="galaxy_main"><span style="padding:0 20px 0 5px;background:url(${h.url_for( '/images/dropdownarrow.png')}) no-repeat right center;">${_('Options')}</span></a>
</div>
<div class="panel-header-text">${_('History')}</div>
</div>
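The index.mako hunk swaps a hardcoded image path for one built with h.url_for, so the dropdown arrow still resolves when Galaxy is mounted under a URL prefix. A minimal sketch of the idea (simplified stand-in helper, not Galaxy's actual h.url_for implementation):

    def url_for( path, prefix="/galaxy" ):
        # Prepend the application's mount point to an absolute static path.
        return prefix.rstrip( "/" ) + path

    print url_for( "/images/dropdownarrow.png" )   # -> /galaxy/images/dropdownarrow.png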
details: http://www.bx.psu.edu/hg/galaxy/rev/80735c6b6932
changeset: 2770:80735c6b6932
user: Kanwei Li <kanwei(a)gmail.com>
date: Thu Sep 24 19:58:35 2009 -0400
description:
Easier to see "new workflow" link in small window sizes. Proper dropdown arrow. Fixes #149
5 file(s) affected in this change:
static/images/dropdownarrow.png
static/june_2007_style/blue/panel_layout.css
templates/base_panels.mako
templates/root/index.mako
templates/workflow/list.mako
diffs (57 lines):
diff -r 40f8f713cbd8 -r 80735c6b6932 static/images/dropdownarrow.png
Binary file static/images/dropdownarrow.png has changed
diff -r 40f8f713cbd8 -r 80735c6b6932 static/june_2007_style/blue/panel_layout.css
--- a/static/june_2007_style/blue/panel_layout.css Thu Sep 24 19:00:44 2009 -0400
+++ b/static/june_2007_style/blue/panel_layout.css Thu Sep 24 19:58:35 2009 -0400
@@ -36,7 +36,7 @@
.panel-warning-message{background-image:url(warn_small.png);background-color:#FFFFCC;}
.panel-done-message{background-image:url(done_small.png);background-color:#CCFFCC;}
.panel-info-message{background-image:url(info_small.png);background-color:#CCCCFF;}
-#masthead{position:absolute;top:0;left:0;width:100%;height:32px;background:#2C3143;color:#fff;border-bottom:solid #444 1px;z-index:15000;padding:0;}
+#masthead{position:absolute;top:0;left:0;width:100%;min-width:900px;height:32px;background:#2C3143;color:#fff;border-bottom:solid #444 1px;z-index:15000;padding:0;}
#masthead a{color:#eeeeee;text-decoration:none;}
#masthead .title{padding:3px 10px;font-size:175%;font-weight:bold;}
#masthead a:hover{text-decoration:underline;}
diff -r 40f8f713cbd8 -r 80735c6b6932 templates/base_panels.mako
--- a/templates/base_panels.mako Thu Sep 24 19:00:44 2009 -0400
+++ b/templates/base_panels.mako Thu Sep 24 19:58:35 2009 -0400
@@ -282,7 +282,7 @@
</head>
<body scroll="no" class="${self.body_class}">
- <div id="everything" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; min-width: 960px;">
+ <div id="everything" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; min-width: 600px;">
## Background displays first
<div id="background"></div>
## Layer iframes over backgrounds
diff -r 40f8f713cbd8 -r 80735c6b6932 templates/root/index.mako
--- a/templates/root/index.mako Thu Sep 24 19:00:44 2009 -0400
+++ b/templates/root/index.mako Thu Sep 24 19:58:35 2009 -0400
@@ -96,7 +96,7 @@
<div class="unified-panel-header" unselectable="on">
<div class="unified-panel-header-inner">
<div style="float: right">
- <a id="history-options-button" class='panel-header-button' href="${h.url_for( controller='root', action='history_options' )}" target="galaxy_main"><span>${_('Options')}<span>▼</span></span></a>
+ <a id="history-options-button" class='panel-header-button' href="${h.url_for( controller='root', action='history_options' )}" target="galaxy_main"><span style="padding:0 20px 0 5px;background:url(/images/dropdownarrow.png) no-repeat right center;">${_('Options')}</span></a>
</div>
<div class="panel-header-text">${_('History')}</div>
</div>
diff -r 40f8f713cbd8 -r 80735c6b6932 templates/workflow/list.mako
--- a/templates/workflow/list.mako Thu Sep 24 19:00:44 2009 -0400
+++ b/templates/workflow/list.mako Thu Sep 24 19:58:35 2009 -0400
@@ -21,13 +21,13 @@
<li>
<a class="action-button" href="${h.url_for( action='create' )}">
<img src="${h.url_for('/static/images/silk/add.png')}" />
- <span>Add a new workflow</span>
+ <span>Create new workflow</span>
</a>
</li>
</ul>
%if workflows:
- <table class="mange-table colored" border="0" cellspacing="0" cellpadding="0" width="100%">
+ <table class="manage-table colored" border="0" cellspacing="0" cellpadding="0" style="width:100%;">
<tr class="header">
<th>Name</th>
<th># of Steps</th>
details: http://www.bx.psu.edu/hg/galaxy/rev/40f8f713cbd8
changeset: 2769:40f8f713cbd8
user: jeremy goecks <jeremy.goecks(a)emory.edu>
date: Thu Sep 24 19:00:44 2009 -0400
description:
Made history grid filterable by tags (and values) and by status.
9 file(s) affected in this change:
lib/galaxy/tags/tag_handler.py
lib/galaxy/web/controllers/history.py
lib/galaxy/web/controllers/tag.py
lib/galaxy/web/framework/helpers/grids.py
static/june_2007_style/autocomplete_tagging.css.tmpl
static/june_2007_style/blue/autocomplete_tagging.css
templates/history/grid.mako
templates/root/history.mako
templates/tagging_common.mako
diffs (837 lines):
diff -r 35dd55a7898e -r 40f8f713cbd8 lib/galaxy/tags/tag_handler.py
--- a/lib/galaxy/tags/tag_handler.py Thu Sep 24 16:52:15 2009 -0400
+++ b/lib/galaxy/tags/tag_handler.py Thu Sep 24 19:00:44 2009 -0400
@@ -21,8 +21,8 @@
def get_tag_assoc_class(self, entity_class):
return self.tag_assoc_classes[entity_class]
- # Remove a tag from an item.
def remove_item_tag(self, item, tag_name):
+ """Remove a tag from an item."""
# Get item tag association.
item_tag_assoc = self._get_item_tag_assoc(item, tag_name)
@@ -35,8 +35,8 @@
return False
- # Delete tags from an item.
def delete_item_tags(self, item):
+ """Delete tags from an item."""
# Delete item-tag associations.
for tag in item.tags:
tag.delete()
@@ -44,8 +44,8 @@
# Delete tags from item.
del item.tags[:]
- # Returns true if item is has a given tag.
def item_has_tag(self, item, tag):
+ """Returns true if item is has a given tag."""
# Get tag name.
if isinstance(tag, basestring):
tag_name = tag
@@ -59,22 +59,25 @@
return False
- # Apply tags to an item.
def apply_item_tags(self, db_session, item, tags_str):
+ """Apply tags to an item."""
# Parse tags.
- parsed_tags = self._parse_tags(tags_str)
+ parsed_tags = self.parse_tags(tags_str)
# Apply each tag.
for name, value in parsed_tags.items():
+ # Use lowercase name for searching/creating tag.
+ lc_name = name.lower()
+
# Get or create item-tag association.
- item_tag_assoc = self._get_item_tag_assoc(item, name)
+ item_tag_assoc = self._get_item_tag_assoc(item, lc_name)
if not item_tag_assoc:
#
# Create item-tag association.
#
# Create tag; if None, skip the tag (and log error).
- tag = self._get_or_create_tag(db_session, name)
+ tag = self._get_or_create_tag(db_session, lc_name)
if not tag:
# Log error?
continue
@@ -88,16 +91,15 @@
item_tag_assoc.tag = tag
# Apply attributes to item-tag association. Strip whitespace from user name and tag.
+ lc_value = None
if value:
- trimmed_value = value.strip()
- else:
- trimmed_value = value
- item_tag_assoc.user_tname = name.strip()
- item_tag_assoc.user_value = trimmed_value
- item_tag_assoc.value = self._scrub_tag_value(value)
+ lc_value = value.lower()
+ item_tag_assoc.user_tname = name
+ item_tag_assoc.user_value = value
+ item_tag_assoc.value = lc_value
- # Build a string from an item's tags.
def get_tags_str(self, tags):
+ """Build a string from an item's tags."""
# Return empty string if there are no tags.
if not tags:
return ""
@@ -111,16 +113,18 @@
tags_str_list.append(tag_str)
return ", ".join(tags_str_list)
- # Get a Tag object from a tag id.
def get_tag_by_id(self, db_session, tag_id):
+ """Get a Tag object from a tag id."""
return db_session.query(Tag).filter(Tag.id==tag_id).first()
- # Get a Tag object from a tag name (string).
def get_tag_by_name(self, db_session, tag_name):
- return db_session.query(Tag).filter(Tag.name==tag_name).first()
+ """Get a Tag object from a tag name (string)."""
+ if tag_name:
+ return db_session.query( Tag ).filter( Tag.name==tag_name.lower() ).first()
+ return None
- # Create a Tag object from a tag string.
def _create_tag(self, db_session, tag_str):
+ """Create a Tag object from a tag string."""
tag_hierarchy = tag_str.split(self.__class__.hierarchy_separator)
tag_prefix = ""
parent_tag = None
@@ -139,8 +143,8 @@
tag_prefix = tag.name + self.__class__.hierarchy_separator
return tag
- # Get or create a Tag object from a tag string.
def _get_or_create_tag(self, db_session, tag_str):
+ """Get or create a Tag object from a tag string."""
# Scrub tag; if tag is None after being scrubbed, return None.
scrubbed_tag_str = self._scrub_tag_name(tag_str)
if not scrubbed_tag_str:
@@ -155,18 +159,18 @@
return tag
- # Return ItemTagAssociation object for an item and a tag string; returns None if there is
- # no such tag.
def _get_item_tag_assoc(self, item, tag_name):
+ """Return ItemTagAssociation object for an item and a tag string; returns None if there is
+ no such tag."""
scrubbed_tag_name = self._scrub_tag_name(tag_name)
for item_tag_assoc in item.tags:
if item_tag_assoc.tag.name == scrubbed_tag_name:
return item_tag_assoc
return None
- # Returns a list of raw (tag-name, value) pairs derived from a string; method does not scrub tags.
- # Return value is a dictionary where tag-names are keys.
- def _parse_tags(self, tag_str):
+ def parse_tags(self, tag_str):
+ """Returns a list of raw (tag-name, value) pairs derived from a string; method scrubs tag names and values as well.
+ Return value is a dictionary where tag-names are keys."""
# Gracefully handle None.
if not tag_str:
return dict()
@@ -179,11 +183,13 @@
name_value_pairs = dict()
for raw_tag in raw_tags:
nv_pair = self._get_name_value_pair(raw_tag)
- name_value_pairs[nv_pair[0]] = nv_pair[1]
+ scrubbed_name = self._scrub_tag_name( nv_pair[0] )
+ scrubbed_value = self._scrub_tag_value( nv_pair[1] )
+ name_value_pairs[scrubbed_name] = scrubbed_value
return name_value_pairs
- # Scrub a tag value.
def _scrub_tag_value(self, value):
+ """Scrub a tag value."""
# Gracefully handle None:
if not value:
return None
@@ -192,11 +198,10 @@
reg_exp = re.compile('\s')
scrubbed_value = re.sub(reg_exp, "", value)
- # Lowercase and return.
- return scrubbed_value.lower()
+ return scrubbed_value
- # Scrub a tag name.
def _scrub_tag_name(self, name):
+ """Scrub a tag name."""
# Gracefully handle None:
if not name:
return None
@@ -213,21 +218,20 @@
if len(scrubbed_name) < 3 or len(scrubbed_name) > 255:
return None
- # Lowercase and return.
- return scrubbed_name.lower()
+ return scrubbed_name
- # Scrub a tag name list.
def _scrub_tag_name_list(self, tag_name_list):
+ """Scrub a tag name list."""
scrubbed_tag_list = list()
for tag in tag_name_list:
- scrubbed_tag_list.append(self._scrub_tag_name(tag))
+ scrubbed_tag_list.append( self._scrub_tag_name(tag) )
return scrubbed_tag_list
- # Get name, value pair from a tag string.
def _get_name_value_pair(self, tag_str):
+ """Get name, value pair from a tag string."""
# Use regular expression to parse name, value.
- reg_exp = re.compile("[" + self.__class__.key_value_separators + "]")
- name_value_pair = reg_exp.split(tag_str)
+ reg_exp = re.compile( "[" + self.__class__.key_value_separators + "]" )
+ name_value_pair = reg_exp.split( tag_str )
# Add empty slot if tag does not have value.
if len(name_value_pair) < 2:
diff -r 35dd55a7898e -r 40f8f713cbd8 lib/galaxy/web/controllers/history.py
--- a/lib/galaxy/web/controllers/history.py Thu Sep 24 16:52:15 2009 -0400
+++ b/lib/galaxy/web/controllers/history.py Thu Sep 24 19:00:44 2009 -0400
@@ -2,8 +2,11 @@
from galaxy.web.framework.helpers import time_ago, iff, grids
from galaxy import util
from galaxy.model.mapping import desc
+from galaxy.model import History
from galaxy.model.orm import *
from galaxy.util.json import *
+from galaxy.tags.tag_handler import TagHandler
+from sqlalchemy.sql.expression import ClauseElement
import webhelpers, logging, operator
from datetime import datetime
from cgi import escape
@@ -39,16 +42,55 @@
return dict( operation="sharing" )
return None
class TagsColumn( grids.GridColumn ):
- def __init__(self, col_name):
- grids.GridColumn.__init__(self, col_name)
+ def __init__(self, col_name, key, filterable):
+ grids.GridColumn.__init__(self, col_name, key=key, filterable=filterable)
+ # Tags cannot be sorted.
+ self.sortable = False
self.tag_elt_id_gen = 0
-
def get_value( self, trans, grid, history ):
self.tag_elt_id_gen += 1
elt_id="tagging-elt" + str( self.tag_elt_id_gen )
div_elt = "<div id=%s></div>" % elt_id
- return div_elt + trans.fill_template( "/tagging_common.mako", trans=trans,
- tagged_item=history, elt_id = elt_id, in_form="true", input_size="20" )
+ return div_elt + trans.fill_template( "/tagging_common.mako", trans=trans, tagged_item=history,
+ elt_id = elt_id, in_form="true", input_size="20", tag_click_fn="add_tag_to_grid_filter" )
+ def filter( self, db_session, query, column_filter ):
+ """ Modify query to include only histories with tags in column_filter. """
+ if column_filter == "All":
+ pass
+ elif column_filter:
+ # Parse filter to extract multiple tags.
+ tag_handler = TagHandler()
+ raw_tags = tag_handler.parse_tags( column_filter.encode("utf-8") )
+ for name, value in raw_tags.items():
+ tag = tag_handler.get_tag_by_name( db_session, name )
+ if tag:
+ query = query.filter( History.tags.any( tag_id=tag.id ) )
+ if value:
+ query = query.filter( History.tags.any( value=value.lower() ) )
+ else:
+ # Tag doesn't exist; unclear what to do here, but the literal thing to do is add the criterion, which
+ # will then yield a query that returns no results.
+ query = query.filter( History.tags.any( user_tname=name ) )
+ return query
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filter_labels_and_vals = { "All": "All" }
+ accepted_filters = []
+ for label, val in accepted_filter_labels_and_vals.items():
+ args = { self.key: val }
+ accepted_filters.append( grids.GridColumnFilter( label, args) )
+ return accepted_filters
+
+
+ class DeletedColumn( grids.GridColumn ):
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filter_labels_and_vals = { "Active" : "False", "Deleted" : "True", "All": "All" }
+ accepted_filters = []
+ for label, val in accepted_filter_labels_and_vals.items():
+ args = { self.key: val }
+ accepted_filters.append( grids.GridColumnFilter( label, args) )
+ return accepted_filters
# Grid definition
title = "Stored histories"
@@ -60,12 +102,12 @@
link=( lambda item: iff( item.deleted, None, dict( operation="switch", id=item.id ) ) ),
attach_popup=True ),
DatasetsByStateColumn( "Datasets (by state)", ncells=4 ),
- TagsColumn( "Tags"),
+ TagsColumn( "Tags", key="tags", filterable=True),
StatusColumn( "Status", attach_popup=False ),
grids.GridColumn( "Created", key="create_time", format=time_ago ),
grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
# Valid for filtering but invisible
- grids.GridColumn( "Deleted", key="deleted", visible=False )
+ DeletedColumn( "Status", key="deleted", visible=False, filterable=True )
]
operations = [
grids.GridOperation( "Switch", allow_multiple=False, condition=( lambda item: not item.deleted ) ),
@@ -80,9 +122,9 @@
standard_filters = [
grids.GridColumnFilter( "Active", args=dict( deleted=False ) ),
grids.GridColumnFilter( "Deleted", args=dict( deleted=True ) ),
- grids.GridColumnFilter( "All", args=dict( deleted='All' ) )
+ grids.GridColumnFilter( "All", args=dict( deleted='All' ) ),
]
- default_filter = dict( deleted=False )
+ default_filter = dict( deleted="False", tags="All" )
def get_current_item( self, trans ):
return trans.get_history()
def apply_default_filter( self, trans, query, **kwargs ):
diff -r 35dd55a7898e -r 40f8f713cbd8 lib/galaxy/web/controllers/tag.py
--- a/lib/galaxy/web/controllers/tag.py Thu Sep 24 16:52:15 2009 -0400
+++ b/lib/galaxy/web/controllers/tag.py Thu Sep 24 19:00:44 2009 -0400
@@ -34,7 +34,7 @@
self._do_security_check(trans, item)
- self.tag_handler.apply_item_tags( trans.sa_session, item, unicode(new_tag).encode('utf-8') )
+ self.tag_handler.apply_item_tags( trans.sa_session, item, new_tag.encode('utf-8') )
trans.sa_session.flush()
@web.expose
@@ -45,7 +45,7 @@
self._do_security_check(trans, item)
- self.tag_handler.remove_item_tag( item, unicode(tag_name).encode('utf-8') )
+ self.tag_handler.remove_item_tag( item, tag_name.encode('utf-8') )
#print tag_name
#print unicode(tag_name)
trans.sa_session.flush()
@@ -60,41 +60,53 @@
self._do_security_check(trans, item)
tag_handler.delete_item_tags(item)
- self.tag_handler.apply_item_tags( trans.sa_session, item, unicode(new_tags).encode('utf-8') )
+ self.tag_handler.apply_item_tags( trans.sa_session, item, new_tags.encode('utf-8') )
trans.sa_session.flush()
@web.expose
@web.require_login( "get autocomplete data for an item's tags" )
- def tag_autocomplete_data(self, trans, id=None, item_class=None, q=None, limit=None, timestamp=None):
+ def tag_autocomplete_data( self, trans, q=None, limit=None, timestamp=None, id=None, item_class=None ):
""" Get autocomplete data for an item's tags. """
-
+
#
# Get item, do security check, and get autocomplete data.
#
- item = self._get_item(trans, item_class, trans.security.decode_id(id))
+ item = None
+ if id is not None:
+ item = self._get_item(trans, item_class, trans.security.decode_id(id))
+ self._do_security_check(trans, item)
+
+ # Get item class. TODO: we should have a mapper that goes from class_name to class object.
+ if item_class == 'History':
+ item_class = History
+ elif item_class == 'HistoryDatasetAssociation':
+ item_class = HistoryDatasetAssociation
- self._do_security_check(trans, item)
-
- q = unicode(q).encode('utf-8')
+ q = q.encode('utf-8')
if q.find(":") == -1:
- return self._get_tag_autocomplete_names(trans, item, q, limit, timestamp)
+ return self._get_tag_autocomplete_names(trans, q, limit, timestamp, item, item_class)
else:
- return self._get_tag_autocomplete_values(trans, item, q, limit, timestamp)
+ return self._get_tag_autocomplete_values(trans, q, limit, timestamp, item, item_class)
- def _get_tag_autocomplete_names(self, trans, item, q, limit, timestamp):
+ def _get_tag_autocomplete_names( self, trans, q, limit, timestamp, item=None, item_class=None ):
"""Returns autocomplete data for tag names ordered from most frequently used to
least frequently used."""
#
# Get user's item tags and usage counts.
#
- # Get item-tag association class.
- item_tag_assoc_class = self.tag_handler.get_tag_assoc_class(item.__class__)
+ # Get item's class object and item-tag association class.
+ if item is None and item_class is None:
+ raise RuntimeError("Both item and item_class cannot be None")
+ elif item is not None:
+ item_class = item.__class__
+
+ item_tag_assoc_class = self.tag_handler.get_tag_assoc_class(item_class)
# Build select statement.
cols_to_select = [ item_tag_assoc_class.table.c.tag_id, func.count('*') ]
- from_obj = item_tag_assoc_class.table.join(item.table).join(Tag)
- where_clause = and_(self._get_column_for_filtering_item_by_user_id(item.__class__)==trans.get_user().id,
+ from_obj = item_tag_assoc_class.table.join(item_class.table).join(Tag)
+ where_clause = and_(self._get_column_for_filtering_item_by_user_id(item_class)==trans.get_user().id,
Tag.table.c.name.like(q + "%"))
order_by = [ func.count("*").desc() ]
group_by = item_tag_assoc_class.table.c.tag_id
@@ -109,18 +121,18 @@
for row in result_set:
tag = self.tag_handler.get_tag_by_id(trans.sa_session, row[0])
- # Exclude tags that are already applied to the history.
- if self.tag_handler.item_has_tag(item, tag):
+ # Exclude tags that are already applied to the item.
+ if ( item is not None ) and ( self.tag_handler.item_has_tag(item, tag) ):
continue
# Add tag to autocomplete data. Use the most frequent name that user
# has employed for the tag.
tag_names = self._get_usernames_for_tag(trans.sa_session, trans.get_user(),
- tag, item.__class__, item_tag_assoc_class)
+ tag, item_class, item_tag_assoc_class)
ac_data += tag_names[0] + "|" + tag_names[0] + "\n"
return ac_data
- def _get_tag_autocomplete_values(self, trans, item, q, limit, timestamp):
+ def _get_tag_autocomplete_values(self, trans, q, limit, timestamp, item=None, item_class=None):
"""Returns autocomplete data for tag values ordered from most frequently used to
least frequently used."""
@@ -132,13 +144,18 @@
if tag is None:
return ""
- # Get item-tag association class.
- item_tag_assoc_class = self.tag_handler.get_tag_assoc_class(item.__class__)
+ # Get item's class object and item-tag association class.
+ if item is None and item_class is None:
+ raise RuntimeError("Both item and item_class cannot be None")
+ elif item is not None:
+ item_class = item.__class__
+
+ item_tag_assoc_class = self.tag_handler.get_tag_assoc_class(item_class)
# Build select statement.
cols_to_select = [ item_tag_assoc_class.table.c.value, func.count('*') ]
- from_obj = item_tag_assoc_class.table.join(item.table).join(Tag)
- where_clause = and_(self._get_column_for_filtering_item_by_user_id(item.__class__)==trans.get_user().id,
+ from_obj = item_tag_assoc_class.table.join(item_class.table).join(Tag)
+ where_clause = and_(self._get_column_for_filtering_item_by_user_id(item_class)==trans.get_user().id,
Tag.table.c.id==tag.id,
item_tag_assoc_class.table.c.value.like(tag_value + "%"))
order_by = [ func.count("*").desc(), item_tag_assoc_class.table.c.value ]
diff -r 35dd55a7898e -r 40f8f713cbd8 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py Thu Sep 24 16:52:15 2009 -0400
+++ b/lib/galaxy/web/framework/helpers/grids.py Thu Sep 24 19:00:44 2009 -0400
@@ -38,19 +38,27 @@
query = self.apply_default_filter( trans, query, **kwargs )
# Maintain sort state in generated urls
extra_url_args = {}
- # Process filtering arguments
- filter_args = {}
- if self.default_filter:
- filter_args.update( self.default_filter )
+ # Process filtering arguments to (a) build a query that actuates the filter and (b) builds a
+ # dictionary that denotes the current filter.
+ cur_filter_dict = {}
for column in self.columns:
if column.key:
+ # Look for filter criterion in kwargs; if not found, look in default filter.
+ column_filter = None
if "f-" + column.key in kwargs:
column_filter = kwargs.get( "f-" + column.key )
- query = column.filter( query, column_filter, filter_args )
- # Carry filter along to newly generated urls
- extra_url_args[ "f-" + column.key ] = column_filter
- if filter_args:
- query = query.filter_by( **filter_args )
+ elif ( self.default_filter ) and ( column.key in self.default_filter ):
+ column_filter = self.default_filter.get( column.key )
+
+ # If column filter found, apply it.
+ if column_filter is not None:
+ # Update query.
+ query = column.filter( trans.sa_session, query, column_filter )
+ # Upate current filter dict.
+ cur_filter_dict[ column.key ] = column_filter
+ # Carry filter along to newly generated urls.
+ extra_url_args[ "f-" + column.key ] = column_filter.encode("utf-8")
+
# Process sort arguments
sort_key = sort_order = None
if 'sort' in kwargs:
@@ -92,6 +100,7 @@
return trans.fill_template( self.template,
grid=self,
query=query,
+ cur_filter_dict=cur_filter_dict,
sort_key=sort_key,
encoded_sort_key=encoded_sort_key,
sort_order=sort_order,
@@ -125,7 +134,7 @@
return query
class GridColumn( object ):
- def __init__( self, label, key=None, method=None, format=None, link=None, attach_popup=False, visible=True, ncells=1 ):
+ def __init__( self, label, key=None, method=None, format=None, link=None, attach_popup=False, visible=True, ncells=1, filterable=False ):
self.label = label
self.key = key
self.method = method
@@ -134,6 +143,7 @@
self.attach_popup = attach_popup
self.visible = visible
self.ncells = ncells
+ self.filterable = filterable
# Currently can only sort of columns that have a database
# representation, not purely derived.
if self.key:
@@ -154,20 +164,23 @@
if self.link and self.link( item ):
return self.link( item )
return None
- def filter( self, query, column_filter, filter_args ):
- """
- Must modify filter_args for carrying forward, and return query
- (possibly filtered).
- """
+ def filter( self, db_session, query, column_filter ):
+ """ Modify query to reflect the column filter. """
+ if column_filter == "All":
+ pass
if column_filter == "True":
- filter_args[self.key] = True
query = query.filter_by( **{ self.key: True } )
elif column_filter == "False":
- filter_args[self.key] = False
query = query.filter_by( **{ self.key: False } )
- elif column_filter == "All":
- del filter_args[self.key]
return query
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filters_vals = [ "False", "True", "All" ]
+ accepted_filters = []
+ for val in accepted_filters_vals:
+ args = { self.key: val }
+ accepted_filters.append( GridColumnFilter( val, args) )
+ return accepted_filters
class GridOperation( object ):
def __init__( self, label, key=None, condition=None, allow_multiple=True, target=None, url_args=None ):
diff -r 35dd55a7898e -r 40f8f713cbd8 static/june_2007_style/autocomplete_tagging.css.tmpl
--- a/static/june_2007_style/autocomplete_tagging.css.tmpl Thu Sep 24 16:52:15 2009 -0400
+++ b/static/june_2007_style/autocomplete_tagging.css.tmpl Thu Sep 24 19:00:44 2009 -0400
@@ -76,7 +76,7 @@
.toggle-link
{
- font-weight: bold;
+ font-weight: normal;
padding: 0.3em;
margin-bottom: 1em;
width: 100%;
diff -r 35dd55a7898e -r 40f8f713cbd8 static/june_2007_style/blue/autocomplete_tagging.css
--- a/static/june_2007_style/blue/autocomplete_tagging.css Thu Sep 24 16:52:15 2009 -0400
+++ b/static/june_2007_style/blue/autocomplete_tagging.css Thu Sep 24 19:00:44 2009 -0400
@@ -76,7 +76,7 @@
.toggle-link
{
- font-weight: bold;
+ font-weight: normal;
padding: 0.3em;
margin-bottom: 1em;
width: 100%;
diff -r 35dd55a7898e -r 40f8f713cbd8 templates/history/grid.mako
--- a/templates/history/grid.mako Thu Sep 24 16:52:15 2009 -0400
+++ b/templates/history/grid.mako Thu Sep 24 19:00:44 2009 -0400
@@ -1,3 +1,5 @@
+<%! from galaxy.web.framework.helpers.grids import GridColumnFilter %>
+
<%inherit file="/base.mako"/>
<%def name="title()">${grid.title}</%def>
@@ -25,6 +27,74 @@
});
})
});
+
+ // Set up autocomplete for tag filter input.
+ var t = $("#input-tag-filter");
+ t.keyup( function( e )
+ {
+ if ( e.keyCode == 27 )
+ {
+ // Escape key
+ $(this).trigger( "blur" );
+ } else if (
+ ( e.keyCode == 13 ) || // Return Key
+ ( e.keyCode == 188 ) || // Comma
+ ( e.keyCode == 32 ) // Space
+ )
+ {
+ //
+ // Check input.
+ //
+
+ new_value = this.value;
+
+ // Do nothing if return key was used to autocomplete.
+ if (return_key_pressed_for_autocomplete == true)
+ {
+ return_key_pressed_for_autocomplete = false;
+ return false;
+ }
+
+ // Suppress space after a ":"
+ if ( new_value.indexOf(": ", new_value.length - 2) != -1)
+ {
+ this.value = new_value.substring(0, new_value.length-1);
+ return false;
+ }
+
+ // Remove trigger keys from input.
+ if ( (e.keyCode == 188) || (e.keyCode == 32) )
+ new_value = new_value.substring( 0 , new_value.length - 1 );
+
+ // Trim whitespace.
+ new_value = new_value.replace(/^\s+|\s+$/g,"");
+
+ // Too short?
+ if (new_value.length < 3)
+ return false;
+
+ //
+ // New tag OK.
+ //
+ }
+ });
+
+ // Add autocomplete to input.
+ var format_item_func = function(key, row_position, num_rows, value, search_term)
+ {
+ tag_name_and_value = value.split(":");
+ return (tag_name_and_value.length == 1 ? tag_name_and_value[0] :tag_name_and_value[1]);
+ //var array = new Array(key, value, row_position, num_rows,
+ //search_term ); return "\"" + array.join("*") + "\"";
+ }
+ var autocomplete_options =
+ { selectFirst: false, formatItem : format_item_func, autoFill: false, highlight: false, mustMatch: true };
+
+ t.autocomplete("${h.url_for( controller='tag', action='tag_autocomplete_data', item_class='History' )}", autocomplete_options);
+
+ //t.addClass("tag-input");
+
+ return t;
});
## Can this be moved into base.mako?
%if refresh_frames:
@@ -55,6 +125,25 @@
}
%endif
%endif
+
+ //
+ // Add a tag to the current grid filter; this adds the tag to the filter and then issues a request to refresh the grid.
+ //
+ function add_tag_to_grid_filter(tag_name, tag_value)
+ {
+ // Use tag as a filter: replace TAGNAME with tag_name and issue query.
+ <%
+ url_args = {}
+ if "tags" in cur_filter_dict and cur_filter_dict["tags"] != "All":
+ url_args["f-tags"] = cur_filter_dict["tags"].encode("utf-8") + ", TAGNAME"
+ else:
+ url_args["f-tags"] = "TAGNAME"
+ %>
+ var url_base = "${url( url_args )}";
+ var url = url_base.replace("TAGNAME", tag_name);
+ self.location = url;
+ }
+
</script>
</%def>
@@ -73,19 +162,50 @@
</style>
</%def>
-%if grid.standard_filters:
- <div class="grid-header">
- <h2>${grid.title}</h2>
- <span class="title">Filter:</span>
- %for i, filter in enumerate( grid.standard_filters ):
- %if i > 0:
- <span>|</span>
+<div class="grid-header">
+ <h2>${grid.title}</h2>
+
+ ## Print grid filter.
+ <form name="history_actions" action="javascript:add_tag_to_grid_filter($('#input-tag-filter').attr('value'))" method="get" >
+ <strong>Filter: </strong>
+ %for column in grid.columns:
+ %if column.filterable:
+ <span> by ${column.label.lower()}:</span>
+ ## For now, include special case to handle tags.
+ %if column.key == "tags":
+ %if cur_filter_dict[column.key] != "All":
+ <span class="filter" "style='font-style: italic'">
+ ${cur_filter_dict[column.key]}
+ </span>
+ <span>|</span>
+ %endif
+ <input id="input-tag-filter" name="f-tags" type="text" value="" size="15"/>
+ <span>|</span>
+ %endif
+
+ ## Handle other columns.
+ %for i, filter in enumerate( column.get_accepted_filters() ):
+ %if i > 0:
+ <span>|</span>
+ %endif
+ %if cur_filter_dict[column.key] == filter.args[column.key]:
+ <span class="filter" "style='font-style: italic'">${filter.label}</span>
+ %else:
+ <span class="filter"><a href="${url( filter.get_url_args() )}">${filter.label}</a></span>
+ %endif
+ %endfor
+ <span> </span>
%endif
- <span class="filter"><a href="${url( filter.get_url_args() )}">${filter.label}</a></span>
%endfor
- </div>
-%endif
-
+
+ ## Link to clear all filters.
+ <%
+ args = { "deleted" : "False", "tags" : "All" }
+ no_filter = GridColumnFilter("Clear", args)
+ %>
+ <span><a href="${url( no_filter.get_url_args() )}">${no_filter.label}</a></span>
+ </form>
+</div>
<form name="history_actions" action="${url()}" method="post" >
<table class="grid">
<thead>
diff -r 35dd55a7898e -r 40f8f713cbd8 templates/root/history.mako
--- a/templates/root/history.mako Thu Sep 24 16:52:15 2009 -0400
+++ b/templates/root/history.mako Thu Sep 24 19:00:44 2009 -0400
@@ -242,6 +242,25 @@
}
});
};
+
+ //
+ // Function provides text for tagging toggle link.
+ //
+ var get_toggle_link_text = function(tags)
+ {
+ var text = "";
+ var num_tags = array_length(tags);
+ if (num_tags != 0)
+ {
+ text = num_tags + (num_tags != 1 ? " Tags" : " Tag");
+ }
+ else
+ {
+ // No tags.
+ text = "Add tags to history";
+ }
+ return text;
+ };
</script>
<style>
@@ -289,7 +308,7 @@
%if trans.get_user() is not None:
<div id='history-tag-area' class="tag-element"></div>
- ${render_tagging_element(history, "history-tag-area")}
+ ${render_tagging_element(history, "history-tag-area", get_toggle_link_text_fn='get_toggle_link_text')}
%endif
%if not datasets:
diff -r 35dd55a7898e -r 40f8f713cbd8 templates/tagging_common.mako
--- a/templates/tagging_common.mako Thu Sep 24 16:52:15 2009 -0400
+++ b/templates/tagging_common.mako Thu Sep 24 19:00:44 2009 -0400
@@ -1,12 +1,11 @@
## Render a tagging element if there is a tagged_item.
%if tagged_item is not None and elt_id is not None:
- ${render_tagging_element(tagged_item, elt_id=elt_id, in_form=in_form, input_size=input_size)}
+ ${render_tagging_element(tagged_item, elt_id=elt_id, in_form=in_form, input_size=input_size, tag_click_fn=tag_click_fn)}
%endif
## Render the tags 'tags' as an autocomplete element.
-<%def name="render_tagging_element(tagged_item, elt_id, use_toggle_link='true', in_form='false', input_size='15')">
+<%def name="render_tagging_element(tagged_item, elt_id, use_toggle_link='true', in_form='false', input_size='15', tag_click_fn='default_tag_click_fn', get_toggle_link_text_fn='default_get_toggle_link_text_fn')">
<script type="text/javascript">
-
//
// Set up autocomplete tagger.
//
@@ -39,9 +38,9 @@
};
//
- // Function get text to display on the toggle link.
+ // Default function get text to display on the toggle link.
//
- var get_toggle_link_text = function(tags)
+ var default_get_toggle_link_text_fn = function(tags)
{
var text = "";
var num_tags = array_length(tags);
@@ -73,30 +72,19 @@
else
{
// No tags.
- text = "Add tags to history";
+ text = "Add tags";
}
return text;
};
- //
- // Function to handle a tag click.
- //
- var tag_click_fn = function(tag_name, tag_value)
- {
- /*
- alert(tag_name);
-
- // Do URL request to get histories tag.
- self.location = "http://www.yahoo.com";
- */
- };
+ // Default function to handle a tag click.
+ var default_tag_click_fn = function(tag_name, tag_value) {};
var options =
{
tags : ${h.to_json_string(tag_names_and_values)},
- get_toggle_link_text_fn: get_toggle_link_text,
- tag_click_fn: tag_click_fn,
- ##tag_click_fn: function(name, value) { /* Do nothing. */ },
+ get_toggle_link_text_fn: ${get_toggle_link_text_fn},
+ tag_click_fn: ${tag_click_fn},
<% tagged_item_id = trans.security.encode_id(tagged_item.id) %>
ajax_autocomplete_tag_url: "${h.url_for( controller='tag', action='tag_autocomplete_data', id=tagged_item_id, item_class=tagged_item.__class__.__name__ )}",
ajax_add_tag_url: "${h.url_for( controller='tag', action='add_tag_async', id=tagged_item_id, item_class=tagged_item.__class__.__name__ )}",
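
Editor's note: the tag filtering introduced in this changeset boils down to adding one EXISTS-style criterion per parsed tag. Below is a minimal sketch of that pattern, condensed from TagsColumn.filter above; it is not part of the changeset, and the session, query, and tags_str arguments stand in for what Galaxy supplies at runtime.

# Minimal sketch (not from the changeset) of the query-building pattern used by
# TagsColumn.filter. "session", "query", and "tags_str" are stand-ins for the
# objects Galaxy passes in.
from galaxy.model import History
from galaxy.tags.tag_handler import TagHandler

def filter_query_by_tags( session, query, tags_str ):
    tag_handler = TagHandler()
    for name, value in tag_handler.parse_tags( tags_str ).items():
        tag = tag_handler.get_tag_by_name( session, name )
        if tag:
            # EXISTS-style criterion against the history-tag association table.
            query = query.filter( History.tags.any( tag_id=tag.id ) )
            if value:
                query = query.filter( History.tags.any( value=value.lower() ) )
        else:
            # Unknown tag: keep the literal criterion so the query returns no rows.
            query = query.filter( History.tags.any( user_tname=name ) )
    return query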
details: http://www.bx.psu.edu/hg/galaxy/rev/aff67d57fd38
changeset: 2771:aff67d57fd38
user: Greg Von Kuster <greg at bx.psu.edu>
date: Thu Sep 24 21:17:32 2009 -0400
description:
Improve logging for cleanup_datasets.py script.
1 file(s) affected in this change:
scripts/cleanup_datasets/cleanup_datasets.py
diffs (376 lines):
diff -r 80735c6b6932 -r aff67d57fd38 scripts/cleanup_datasets/cleanup_datasets.py
--- a/scripts/cleanup_datasets/cleanup_datasets.py Thu Sep 24 19:58:35 2009 -0400
+++ b/scripts/cleanup_datasets/cleanup_datasets.py Thu Sep 24 21:17:32 2009 -0400
@@ -16,6 +16,7 @@
from optparse import OptionParser
import galaxy.model.mapping
+import sqlalchemy as sa
from galaxy.model.orm import and_, eagerload
assert sys.version_info[:2] >= ( 2, 4 )
@@ -26,20 +27,13 @@
parser.add_option( "-r", "--remove_from_disk", action="store_true", dest="remove_from_disk", help="remove datasets from disk when purged", default=False )
parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
parser.add_option( "-f", "--force_retry", action="store_true", dest="force_retry", help="performs the requested actions, but ignores whether it might have been done before. Useful when -r wasn't used, but should have been", default=False )
-
parser.add_option( "-1", "--delete_userless_histories", action="store_true", dest="delete_userless_histories", default=False, help="delete userless histories and datasets" )
-
parser.add_option( "-2", "--purge_histories", action="store_true", dest="purge_histories", default=False, help="purge deleted histories" )
-
parser.add_option( "-3", "--purge_datasets", action="store_true", dest="purge_datasets", default=False, help="purge deleted datasets" )
-
parser.add_option( "-4", "--purge_libraries", action="store_true", dest="purge_libraries", default=False, help="purge deleted libraries" )
-
parser.add_option( "-5", "--purge_folders", action="store_true", dest="purge_folders", default=False, help="purge deleted library folders" )
-
parser.add_option( "-6", "--delete_datasets", action="store_true", dest="delete_datasets", default=False, help="mark deletable datasets as deleted and purge associated dataset instances" )
-
-
+
( options, args ) = parser.parse_args()
ini_file = args[0]
@@ -67,14 +61,15 @@
cutoff_time = datetime.utcnow() - timedelta( days=options.days )
now = strftime( "%Y-%m-%d %H:%M:%S" )
- print "\n# %s - Handling stuff older than %i days\n" % ( now, options.days )
+ print "##########################################"
+ print "\n# %s - Handling stuff older than %i days" % ( now, options.days )
if options.info_only:
print "# Displaying info only ( --info_only )\n"
elif options.remove_from_disk:
- print "# Datasets will be removed from disk.\n"
+ print "Datasets will be removed from disk.\n"
else:
- print "# Datasets will NOT be removed from disk.\n"
+ print "Datasets will NOT be removed from disk.\n"
if options.delete_userless_histories:
delete_userless_histories( app, cutoff_time, info_only = options.info_only, force_retry = options.force_retry )
@@ -96,8 +91,7 @@
# The purge history script will handle marking DatasetInstances as deleted.
# Nothing is removed from disk yet.
history_count = 0
- print '# The following datasets and associated userless histories have been deleted'
- start = time.clock()
+ start = time.time()
if force_retry:
histories = app.model.History.filter( and_( app.model.History.table.c.user_id==None,
app.model.History.table.c.update_time < cutoff_time ) ).all()
@@ -107,14 +101,14 @@
app.model.History.table.c.update_time < cutoff_time ) ).all()
for history in histories:
if not info_only:
+ print "Deleting history id ", history.id
history.deleted = True
- print "%d" % history.id
history_count += 1
app.model.flush()
- stop = time.clock()
- print "# Deleted %d histories.\n" % ( history_count )
- print "Elapsed time: ", stop - start, "\n"
-
+ stop = time.time()
+ print "Deleted %d histories" % history_count
+ print "Elapsed time: ", stop - start
+ print "##########################################"
def purge_histories( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted histories whose update_time is older than the cutoff_time.
@@ -123,17 +117,16 @@
# history.purged == True simply means that it can no longer be undeleted
# i.e. all associated datasets are marked as deleted
history_count = 0
- print '# The following datasets and associated deleted histories have been purged'
- start = time.clock()
+ start = time.time()
if force_retry:
histories = app.model.History.filter( and_( app.model.History.table.c.deleted==True,
- app.model.History.table.c.update_time < cutoff_time ) ) \
- .options( eagerload( 'datasets' ) ).all()
+ app.model.History.table.c.update_time < cutoff_time ) ) \
+ .options( eagerload( 'datasets' ) ).all()
else:
histories = app.model.History.filter( and_( app.model.History.table.c.deleted==True,
- app.model.History.table.c.purged==False,
- app.model.History.table.c.update_time < cutoff_time ) ) \
- .options( eagerload( 'datasets' ) ).all()
+ app.model.History.table.c.purged==False,
+ app.model.History.table.c.update_time < cutoff_time ) ) \
+ .options( eagerload( 'datasets' ) ).all()
for history in histories:
for dataset_assoc in history.datasets:
_purge_dataset_instance( dataset_assoc, app, remove_from_disk, info_only = info_only ) #mark a DatasetInstance as deleted, clear associated files, and mark the Dataset as deleted if it is deletable
@@ -143,13 +136,14 @@
# if we should ever delete info like this from the db though, so commented out for now...
#for dhp in history.default_permissions:
# dhp.delete()
+ print "Purging history id ", history.id
history.purged = True
- print "%d" % history.id
history_count += 1
app.model.flush()
- stop = time.clock()
- print '# Purged %d histories.' % ( history_count ), '\n'
- print "Elapsed time: ", stop - start, "\n"
+ stop = time.time()
+ print 'Purged %d histories.' % history_count
+ print "Elapsed time: ", stop - start
+ print "##########################################"
def purge_libraries( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted libraries whose update_time is older than the cutoff_time.
@@ -158,8 +152,7 @@
# library.purged == True simply means that it can no longer be undeleted
# i.e. all associated LibraryDatasets/folders are marked as deleted
library_count = 0
- print '# The following libraries and associated folders have been purged'
- start = time.clock()
+ start = time.time()
if force_retry:
libraries = app.model.Library.filter( and_( app.model.Library.table.c.deleted==True,
app.model.Library.table.c.update_time < cutoff_time ) ).all()
@@ -170,13 +163,14 @@
for library in libraries:
_purge_folder( library.root_folder, app, remove_from_disk, info_only = info_only )
if not info_only:
+ print "Purging library id ", library.id
library.purged = True
- print "%d" % library.id
library_count += 1
app.model.flush()
- stop = time.clock()
- print '# Purged %d libraries .' % ( library_count ), '\n'
- print "Elapsed time: ", stop - start, "\n"
+ stop = time.time()
+ print '# Purged %d libraries .' % library_count
+ print "Elapsed time: ", stop - start
+ print "##########################################"
def purge_folders( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted folders whose update_time is older than the cutoff_time.
@@ -185,8 +179,7 @@
# libraryFolder.purged == True simply means that it can no longer be undeleted
# i.e. all associated LibraryDatasets/folders are marked as deleted
folder_count = 0
- print '# The following folders have been purged'
- start = time.clock()
+ start = time.time()
if force_retry:
folders = app.model.LibraryFolder.filter( and_( app.model.LibraryFolder.table.c.deleted==True,
app.model.LibraryFolder.table.c.update_time < cutoff_time ) ).all()
@@ -196,22 +189,26 @@
app.model.LibraryFolder.table.c.update_time < cutoff_time ) ).all()
for folder in folders:
_purge_folder( folder, app, remove_from_disk, info_only = info_only )
- print "%d" % folder.id
folder_count += 1
- stop = time.clock()
- print '# Purged %d folders.' % ( folder_count ), '\n'
- print "Elapsed time: ", stop - start, "\n"
+ stop = time.time()
+ print '# Purged %d folders.' % folder_count
+ print "Elapsed time: ", stop - start
+ print "##########################################"
def delete_datasets( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
- import sqlalchemy as sa
# Marks datasets as deleted if associated items are all deleted.
- print "######### Starting delete_datasets #########\n"
- start = time.clock()
+ start = time.time()
if force_retry:
- history_datasets = app.model.Dataset.options( eagerload( "history_associations" ) ) \
- .filter( app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time ).all()
- library_datasets = app.model.Dataset.options( eagerload( "library_associations" ) ) \
- .filter( app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time ).all()
+ history_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
+ app.model.Dataset.table.c.state ),
+ whereclause = app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time,
+ from_obj = [ sa.outerjoin( app.model.Dataset.table,
+ app.model.HistoryDatasetAssociation.table ) ] )
+ library_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
+ app.model.Dataset.table.c.state ),
+ whereclause = app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time,
+ from_obj = [ sa.outerjoin( app.model.Dataset.table,
+ app.model.LibraryDatasetDatasetAssociation.table ) ] )
else:
# We really only need the id column here, but sqlalchemy barfs when trying to select only 1 column
history_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
@@ -221,8 +218,6 @@
app.model.HistoryDatasetAssociation.table.c.deleted == True ),
from_obj = [ sa.outerjoin( app.model.Dataset.table,
app.model.HistoryDatasetAssociation.table ) ] )
- history_dataset_ids = [ row.id for row in history_dataset_ids_query.execute() ]
- print "Time to retrieve ", len( history_dataset_ids ), " history dataset ids: ", time.clock() - start
library_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
app.model.Dataset.table.c.state ),
whereclause = sa.and_( app.model.Dataset.table.c.deleted == False,
@@ -230,35 +225,33 @@
app.model.LibraryDatasetDatasetAssociation.table.c.deleted == True ),
from_obj = [ sa.outerjoin( app.model.Dataset.table,
app.model.LibraryDatasetDatasetAssociation.table ) ] )
- library_dataset_ids = [ row.id for row in library_dataset_ids_query.execute() ]
- print "Time to retrieve ", len( library_dataset_ids ), " library dataset ids: ", time.clock() - start
+ history_dataset_ids = [ row.id for row in history_dataset_ids_query.execute() ]
+ library_dataset_ids = [ row.id for row in library_dataset_ids_query.execute() ]
dataset_ids = history_dataset_ids + library_dataset_ids
skip = []
deleted_dataset_count = 0
deleted_instance_count = 0
for dataset_id in dataset_ids:
- print "Processing dataset id:", dataset_id, "\n"
- dataset = app.model.Dataset.get( id )
+ print "######### Processing dataset id:", dataset_id
+ dataset = app.model.Dataset.get( dataset_id )
if dataset.id not in skip and _dataset_is_deletable( dataset ):
deleted_dataset_count += 1
for dataset_instance in dataset.history_associations + dataset.library_associations:
- print "Associated Dataset instance: ", dataset_instance.__class__.__name__, dataset_instance.id, "\n"
+ print "Associated Dataset instance: ", dataset_instance.__class__.__name__, dataset_instance.id
_purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children=True, info_only=info_only, is_deletable=True )
deleted_instance_count += 1
skip.append( dataset.id )
- print "Time to process dataset id: ", dataset.id, " - ", time.clock() - start, "\n\n"
- print "Time to mark datasets deleted: ", time.clock() - start, "\n\n"
- print "Examined %d datasets, marked %d as deleted and purged %d dataset instances\n" % ( len( skip ), deleted_dataset_count, deleted_instance_count )
- print "Total elapsed time: ", time.clock() - start, "\n"
- print "######### Finished delete_datasets #########\n"
+ stop = time.time()
+ print "Examined %d datasets, marked %d as deleted and purged %d dataset instances" % ( len( skip ), deleted_dataset_count, deleted_instance_count )
+ print "Total elapsed time: ", stop - start
+ print "##########################################"
def purge_datasets( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted datasets whose update_time is older than cutoff_time. Files may or may
# not be removed from disk.
dataset_count = 0
disk_space = 0
- print '# The following deleted datasets have been purged'
- start = time.clock()
+ start = time.time()
if force_retry:
datasets = app.model.Dataset.filter( and_( app.model.Dataset.table.c.deleted==True,
app.model.Dataset.table.c.purgable==True,
@@ -276,24 +269,25 @@
disk_space += file_size
except:
pass
- stop = time.clock()
- print '# %d datasets purged\n' % dataset_count
+ stop = time.time()
+ print 'Purged %d datasets' % dataset_count
if remove_from_disk:
- print '# Freed disk space: ', disk_space, '\n'
- print "Elapsed time: ", stop - start, "\n"
-
+ print 'Freed disk space: ', disk_space
+ print "Elapsed time: ", stop - start
+ print "##########################################"
def _purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children=True, info_only=False, is_deletable=False ):
# A dataset_instance is either a HDA or an LDDA. Purging a dataset instance marks the instance as deleted,
# and marks the associated dataset as deleted if it is not associated with another active DatsetInstance.
if not info_only:
+ print "Deleting dataset_instance ", str( dataset_instance ), " id ", dataset_instance.id
dataset_instance.mark_deleted( include_children = include_children )
dataset_instance.clear_associated_files()
dataset_instance.flush()
dataset_instance.dataset.refresh()
if is_deletable or _dataset_is_deletable( dataset_instance.dataset ):
# Calling methods may have already checked _dataset_is_deletable, if so, is_deletable should be True
- _delete_dataset( dataset_instance.dataset, app, remove_from_disk, info_only = info_only )
+ _delete_dataset( dataset_instance.dataset, app, remove_from_disk, info_only=info_only, is_deletable=is_deletable )
#need to purge children here
if include_children:
for child in dataset_instance.children:
@@ -303,11 +297,11 @@
#a dataset is deletable when it no longer has any non-deleted associations
return not bool( dataset.active_history_associations or dataset.active_library_associations )
-def _delete_dataset( dataset, app, remove_from_disk, info_only = False ):
+def _delete_dataset( dataset, app, remove_from_disk, info_only=False, is_deletable=False ):
#marks a base dataset as deleted, hdas/ldas associated with dataset can no longer be undeleted
#metadata files attached to associated dataset Instances is removed now
- if not _dataset_is_deletable( dataset ):
- print "# This Dataset (%i) is not deletable, associated Metadata Files will not be removed.\n" % ( dataset.id )
+ if not is_deletable and not _dataset_is_deletable( dataset ):
+ print "This Dataset (%i) is not deletable, associated Metadata Files will not be removed.\n" % ( dataset.id )
else:
# Mark all associated MetadataFiles as deleted and purged and remove them from disk
metadata_files = []
@@ -319,18 +313,19 @@
for metadata_file in app.model.MetadataFile.filter( app.model.MetadataFile.table.c.lda_id==lda.id ).all():
metadata_files.append( metadata_file )
for metadata_file in metadata_files:
- print "# The following metadata files attached to associations of Dataset '%s' have been purged:" % dataset.id
+ print "The following metadata files attached to associations of Dataset '%s' have been purged:" % dataset.id
if not info_only:
if remove_from_disk:
try:
+ print "Removing disk file ", metadata_file.file_name
os.unlink( metadata_file.file_name )
except Exception, e:
- print "# Error, exception: %s caught attempting to purge metadata file %s\n" %( str( e ), metadata_file.file_name )
+ print "Error, exception: %s caught attempting to purge metadata file %s\n" %( str( e ), metadata_file.file_name )
metadata_file.purged = True
metadata_file.deleted = True
#metadata_file.flush()
print "%s" % metadata_file.file_name
- print
+ print "Deleting dataset id", dataset.id
dataset.deleted = True
app.model.flush()
@@ -338,32 +333,34 @@
if dataset.deleted:
try:
if dataset.purgable and _dataset_is_deletable( dataset ):
- print "%s" % dataset.file_name
if not info_only:
# Remove files from disk and update the database
if remove_from_disk:
# TODO: should permissions on the dataset be deleted here?
+ print "Removing disk, file ", dataset.file_name
os.unlink( dataset.file_name )
# Remove associated extra files from disk if they exist
if dataset.extra_files_path and os.path.exists( dataset.extra_files_path ):
shutil.rmtree( dataset.extra_files_path ) #we need to delete the directory and its contents; os.unlink would always fail on a directory
+ print "Purging dataset id", dataset.id
dataset.purged = True
dataset.flush()
else:
- print "# This dataset (%i) is not purgable, the file (%s) will not be removed.\n" % ( dataset.id, dataset.file_name )
+ print "This dataset (%i) is not purgable, the file (%s) will not be removed.\n" % ( dataset.id, dataset.file_name )
except OSError, exc:
- print "# Error, file has already been removed: %s" % str( exc )
+ print "Error, dataset file has already been removed: %s" % str( exc )
+ print "Purging dataset id", dataset.id
dataset.purged = True
dataset.flush()
except Exception, exc:
- print "# Error, exception: %s caught attempting to purge %s\n" %( str( exc ), dataset.file_name )
+ print "Error attempting to purge data file: ", dataset.file_name, " error: ", str( exc )
else:
- print "# Error: '%s' has not previously been deleted, so it cannot be purged\n" % dataset.file_name
- print ""
+ print "Error: '%s' has not previously been deleted, so it cannot be purged\n" % dataset.file_name
def _purge_folder( folder, app, remove_from_disk, info_only = False ):
"""Purges a folder and its contents, recursively"""
for ld in folder.datasets:
+ print "Deleting library dataset id ", ld.id
ld.deleted = True
for ldda in [ld.library_dataset_dataset_association] + ld.expired_datasets:
_purge_dataset_instance( ldda, app, remove_from_disk, info_only = info_only ) #mark a DatasetInstance as deleted, clear associated files, and mark the Dataset as deleted if it is deletable
@@ -371,6 +368,7 @@
_purge_folder( sub_folder, app, remove_from_disk, info_only = info_only )
if not info_only:
# TODO: should the folder permissions be deleted here?
+ print "Purging folder id ", folder.id
folder.purged = True
folder.flush()
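
Editor's note: the recurring pattern in this changeset is wall-clock timing via time.time() plus a per-item progress line, presumably because time.clock() reports processor time on Unix and so under-counts these I/O-heavy cleanup passes. A condensed sketch of that pattern follows; it is not from the changeset, and "items" and "purge_item" are hypothetical stand-ins for the real queries and actions.

# Condensed sketch (not from the changeset) of the logging pattern the script settles on.
import time
from time import strftime

def timed_pass( label, items, purge_item ):
    print "##########################################"
    print "# %s - %s" % ( strftime( "%Y-%m-%d %H:%M:%S" ), label )
    start = time.time()
    count = 0
    for item in items:
        # Per-item progress line, as the changeset adds for histories, libraries, etc.
        print "Purging %s id " % label, item.id
        purge_item( item )
        count += 1
    stop = time.time()
    print "Purged %d %s" % ( count, label )
    print "Elapsed time: ", stop - start
    print "##########################################"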
details: http://www.bx.psu.edu/hg/galaxy/rev/93b8b571a9fa
changeset: 2767:93b8b571a9fa
user: Kanwei Li <kanwei at gmail.com>
date: Thu Sep 24 16:16:56 2009 -0400
description:
merge
0 file(s) affected in this change:
diffs (595 lines):
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py Thu Sep 24 16:15:57 2009 -0400
+++ b/lib/galaxy/web/controllers/tracks.py Thu Sep 24 16:16:56 2009 -0400
@@ -48,9 +48,6 @@
# FIXME: hardcoding this for now, but it should be derived from the available
# converters
browsable_types = set( ["wig" ] )
-
-# For natural sort
-NUM_RE = re.compile('([0-9]+)')
class TracksController( BaseController ):
"""
@@ -122,8 +119,14 @@
"""
Returns a naturally sorted list of chroms/contigs for the given dbkey
"""
+ def check_int(s):
+ if s.isdigit():
+ return int(s)
+ else:
+ return s
+
def split_by_number(s):
- return [ int(c) if c.isdigit() else c for c in NUM_RE.split(s) ]
+ return [ check_int(c) for c in re.split('([0-9]+)', s) ]
chroms = self._chroms( trans, dbkey )
to_sort = [{ 'chrom': chrom, 'len': length } for chrom, length in chroms.iteritems()]
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa scripts/cleanup_datasets/cleanup_datasets.py
--- a/scripts/cleanup_datasets/cleanup_datasets.py Thu Sep 24 16:15:57 2009 -0400
+++ b/scripts/cleanup_datasets/cleanup_datasets.py Thu Sep 24 16:16:56 2009 -0400
@@ -204,32 +204,42 @@
def delete_datasets( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Marks datasets as deleted if associated items are all deleted.
- print '# The following datasets have been marked deleted'
+ print "######### Starting delete_datasets #########\n"
start = time.clock()
if force_retry:
- datasets = app.model.Dataset.filter( app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time ).all() + app.model.Dataset.filter( app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time ).all()
- else:
- datasets = app.model.Dataset.filter( and_( app.model.HistoryDatasetAssociation.table.c.deleted==True,
- app.model.Dataset.table.c.deleted == False,
- app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time ) ).all()
- datasets = datasets + app.model.Dataset.filter( and_( app.model.LibraryDatasetDatasetAssociation.table.c.deleted==True,
- app.model.Dataset.table.c.deleted == False,
- app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time ) ).all()
+ history_datasets = app.model.Dataset.options( eagerload( "history_associations" ) ) \
+ .filter( app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time ).all()
+ library_datasets = app.model.Dataset.options( eagerload( "library_associations" ) ) \
+ .filter( app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time ).all()
+ else:
+ history_datasets = app.model.Dataset.filter_by( deleted = False ) \
+ .options( eagerload( "history_associations" ) ) \
+ .filter( and_( app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time,
+ app.model.HistoryDatasetAssociation.table.c.deleted==True ) ).all()
+ library_datasets = app.model.Dataset.filter_by( deleted = False ) \
+ .options( eagerload( "library_associations" ) ) \
+ .filter( and_( app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time,
+ app.model.LibraryDatasetDatasetAssociation.table.c.deleted==True ) ).all()
+ print "Time to query history and library datasets: ", time.clock() - start, "\n"
+ print "Processing ", len( history_datasets ), " history datasets and ", len( library_datasets ), " library datasets...\n\n"
+ datasets = history_datasets + library_datasets
skip = []
deleted_dataset_count = 0
deleted_instance_count = 0
for dataset in datasets:
+ print "Processing dataset id:", dataset.id, "\n"
if dataset.id not in skip and _dataset_is_deletable( dataset ):
deleted_dataset_count += 1
- print "Dataset:", dataset.id
for dataset_instance in dataset.history_associations + dataset.library_associations:
- print "\tAssociated Dataset instance:", dataset_instance.__class__.__name__, dataset_instance.id
- _purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children = True, info_only = info_only )
+ print "Associated Dataset instance: ", dataset_instance.__class__.__name__, dataset_instance.id, "\n"
+ _purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children=True, info_only=info_only, is_deletable=True )
deleted_instance_count += 1
skip.append( dataset.id )
- print
- print '# Examined %d datasets, marked %d as deleted and purged %d dataset instances\n' % ( len( skip ), deleted_dataset_count, deleted_instance_count )
- print "Elapsed time: ", time.clock() - start, "\n"
+ print "Time to process dataset id: ", dataset.id, " - ", time.clock() - start, "\n\n"
+ print "Time to mark datasets deleted: ", time.clock() - start, "\n\n"
+ print "Examined %d datasets, marked %d as deleted and purged %d dataset instances\n" % ( len( skip ), deleted_dataset_count, deleted_instance_count )
+ print "Total elapsed time: ", time.clock() - start, "\n"
+ print "######### Finished delete_datasets #########\n"
def purge_datasets( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted datasets whose update_time is older than cutoff_time. Files may or may
@@ -262,15 +272,16 @@
print "Elapsed time: ", stop - start, "\n"
-def _purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children = True, info_only = False ):
- #purging a dataset instance marks the instance as deleted,
- #and marks the dataset as deleted if it is not associated with another DatsetInstance that is not deleted
+def _purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children=True, info_only=False, is_deletable=False ):
+ # A dataset_instance is either a HDA or an LDDA. Purging a dataset instance marks the instance as deleted,
+ # and marks the associated dataset as deleted if it is not associated with another active DatsetInstance.
if not info_only:
dataset_instance.mark_deleted( include_children = include_children )
dataset_instance.clear_associated_files()
dataset_instance.flush()
dataset_instance.dataset.refresh()
- if _dataset_is_deletable( dataset_instance.dataset ):
+ if is_deletable or _dataset_is_deletable( dataset_instance.dataset ):
+ # Calling methods may have already checked _dataset_is_deletable, if so, is_deletable should be True
_delete_dataset( dataset_instance.dataset, app, remove_from_disk, info_only = info_only )
#need to purge children here
if include_children:
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa templates/root/history.mako
--- a/templates/root/history.mako Thu Sep 24 16:15:57 2009 -0400
+++ b/templates/root/history.mako Thu Sep 24 16:16:56 2009 -0400
@@ -81,7 +81,7 @@
// Functionized so AJAX'd datasets can call them
function initShowHide() {
- // Load saved state and show as neccesary
+ // Load saved state and show as necessary
try {
var stored = $.jStore.store("history_expand_state");
if (stored) {
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa test-data/cat_wrapper_out1.bed
--- a/test-data/cat_wrapper_out1.bed Thu Sep 24 16:15:57 2009 -0400
+++ b/test-data/cat_wrapper_out1.bed Thu Sep 24 16:16:56 2009 -0400
@@ -131,28 +131,3 @@
chrX 152648964 152649196 NM_000425_cds_0_0_chrX_152648965_r 0 -
chrX 152691446 152691471 AF101728_cds_0_0_chrX_152691447_f 0 +
chrX 152694029 152694263 BC052303_cds_0_0_chrX_152694030_r 0 -
-chr1 147962006 147975713 NM_005997 0 - 147962192 147975670 0 6 574,145,177,115,153,160, 0,1543,7859,9048,9340,13547,
-chr1 147984101 148035079 BC007833 0 + 147984545 148033414 0 14 529,32,81,131,118,153,300,206,84,49,85,130,46,1668, 0,25695,28767,33118,33695,33998,35644,38005,39629,40577,41402,43885,48367,49310,
-chr1 148077485 148111797 NM_002651 0 - 148078400 148111728 0 12 1097,121,133,266,124,105,110,228,228,45,937,77, 0,2081,2472,6871,9907,10257,11604,14199,15637,18274,23636,34235,
-chr1 148185113 148187485 NM_002796 0 + 148185136 148187378 0 7 163,207,147,82,117,89,120, 0,416,877,1199,1674,1977,2252,
-chr2 118288484 118306183 NM_006773 0 + 118288583 118304530 0 14 184,285,144,136,101,200,115,140,162,153,114,57,178,1796, 0,2765,4970,6482,6971,7183,7468,9890,10261,10768,11590,14270,14610,15903,
-chr2 118389378 118390700 BC005078 0 - 118390395 118390500 0 1 1322, 0,
-chr2 220108603 220116964 NM_001927 0 + 220108689 220116217 0 9 664,61,96,162,126,221,44,83,789, 0,1718,1874,2118,2451,2963,5400,7286,7572,
-chr2 220229182 220233943 NM_024536 0 - 220229609 220233765 0 4 1687,180,574,492, 0,1990,2660,4269,
-chr5 131170738 131357870 AF099740 0 - 131311206 131357817 0 31 112,124,120,81,65,40,120,129,61,88,94,79,72,102,144,117,89,73,96,135,135,78,74,52,33,179,100,102,65,115,248, 0,11593,44117,47607,104668,109739,114675,126366,135488,137518,138009,140437,152389,153373,155388,159269,160793,162981,164403,165577,166119,167611,169501,178260,179675,180901,181658,182260,182953,183706,186884,
-chr5 131424245 131426795 NM_000588 0 + 131424298 131426383 0 5 215,42,90,42,535, 0,313,1658,1872,2015,
-chr5 131556201 131590458 NM_004199 0 - 131556601 131582218 0 15 471,97,69,66,54,100,71,177,194,240,138,152,97,100,170, 0,2316,2802,5596,6269,11138,11472,15098,16528,17674,21306,24587,25142,25935,34087,
-chr5 131621285 131637046 NM_003687 0 + 131621326 131635821 0 7 134,152,82,179,164,118,1430, 0,4915,8770,13221,13609,14097,14331,
-chr6 108298214 108386086 NM_007214 0 - 108299600 108385906 0 21 1530,105,99,102,159,174,60,83,148,155,93,133,95,109,51,59,62,113,115,100,304, 0,2490,6246,10831,12670,23164,23520,27331,31052,32526,34311,36130,36365,38609,41028,42398,43048,51479,54500,59097,87568,
-chr6 108593954 108616704 NM_003269 0 + 108594662 108615360 0 9 733,146,88,236,147,97,150,106,1507, 0,5400,8778,10445,12037,14265,14749,15488,21243,
-chr6 108639410 108689143 NM_152827 0 - 108640045 108688818 0 3 741,125,487, 0,2984,49246,
-chr6 108722790 108950942 NM_145315 0 + 108722976 108950321 0 13 325,224,52,102,131,100,59,83,71,101,141,114,750, 0,28931,52094,60760,61796,71339,107102,152319,181970,182297,215317,224802,227402,
-chr7 113320332 113924911 AK131266 0 + 113862563 113893433 0 20 285,91,178,90,58,75,138,51,201,178,214,105,88,84,77,102,122,70,164,1124, 0,201692,340175,448290,451999,484480,542213,543265,543478,545201,556083,558358,565876,567599,573029,573245,575738,577123,577946,603455,
-chr7 116511232 116557294 NM_003391 0 - 116512159 116556994 0 5 1157,265,278,227,383, 0,20384,37843,43339,45679,
-chr7 116713967 116902666 NM_000492 0 + 116714099 116901113 0 27 185,111,109,216,90,164,126,247,93,183,192,95,87,724,129,38,251,80,151,228,101,249,156,90,173,106,1754, 0,24290,29071,50936,54313,55285,56585,60137,62053,68678,79501,107776,110390,111971,114967,122863,123569,126711,130556,131618,134650,147559,162475,172879,184725,185496,186945,
-chr7 116944658 117107512 AF377960 0 - 116945541 116979926 0 23 1129,102,133,64,186,206,179,188,153,100,87,80,96,276,118,255,151,100,204,1654,225,108,173, 0,7364,8850,10413,13893,14398,17435,24259,24615,35177,35359,45901,47221,49781,56405,66857,69787,72208,73597,80474,100111,150555,162681,
-chr8 118880786 119193239 NM_000127 0 - 118881131 119192466 0 11 531,172,161,90,96,119,133,120,108,94,1735, 0,5355,7850,13505,19068,20309,23098,30863,36077,37741,310718,
-chr9 128763240 128783870 NM_174933 0 + 128764156 128783586 0 12 261,118,74,159,76,48,56,63,129,117,127,370, 0,522,875,5630,12374,12603,15040,15175,18961,19191,20037,20260,
-chr9 128787362 128789566 NM_014908 0 - 128787519 128789136 0 1 2204, 0,
-chr9 128789530 128848928 NM_015354 0 + 128789552 128848511 0 44 54,55,74,85,81,45,93,120,212,115,201,90,66,120,127,153,127,88,77,115,121,67,129,140,107,207,170,70,68,196,78,86,146,182,201,93,159,138,75,228,132,74,130,594, 0,1491,5075,8652,9254,10312,11104,11317,20808,21702,23060,25462,31564,32908,33566,34851,35204,35595,35776,37202,38860,39111,39891,40349,42422,45499,45827,46675,47158,47621,50453,50840,51474,51926,53831,54186,55119,55619,57449,57605,57947,58352,58541,58804,
-chr9 128849867 128870133 NM_020145 0 - 128850516 128869987 0 11 757,241,101,90,24,63,93,134,129,142,209, 0,1071,1736,2085,2635,4201,6376,6736,13056,14247,20057,
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa tools/filters/catWrapper.py
--- a/tools/filters/catWrapper.py Thu Sep 24 16:15:57 2009 -0400
+++ b/tools/filters/catWrapper.py Thu Sep 24 16:16:56 2009 -0400
@@ -11,19 +11,28 @@
def main():
outfile = sys.argv[1]
infile = sys.argv[2]
- catfiles = sys.argv[3:]
+
try:
fout = open(sys.argv[1],'w')
- except Exxception, ex:
- stop_err("Output file cannot be opened for writing\n" + str(ex))
+ except:
+ stop_err("Output file cannot be opened for writing.")
+
try:
fin = open(sys.argv[2],'r')
- except Exception, ex:
- stop_err("Input file cannot be opened for reading\n" + str(ex))
- cmdline = "cat %s %s > %s" % (infile, ' '.join(catfiles), outfile)
+ except:
+ stop_err("Input file cannot be opened for reading.")
+
+ if len(sys.argv) < 4:
+ os.system("cp %s %s" %(infile,outfile))
+ sys.exit()
+
+ cmdline = "cat %s " %(infile)
+ for inp in sys.argv[3:]:
+ cmdline = cmdline + inp + " "
+ cmdline = cmdline + ">" + outfile
try:
os.system(cmdline)
- except Exception, ex:
- stop_err("Error encountered with cat\n" + str(ex))
+ except:
+ stop_err("Error encountered with cat.")
if __name__ == "__main__": main()
\ No newline at end of file
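
For orientation, the concatenation logic introduced above reduces to the following standalone sketch. It is not the Galaxy wrapper itself: the script and function names are hypothetical, and subprocess is used instead of os.system purely to sidestep shell-quoting issues with the joined cat command.

    import shutil
    import subprocess
    import sys

    def concatenate(outfile, infiles):
        # A single input reduces to a plain copy, mirroring the cp branch above.
        if len(infiles) == 1:
            shutil.copyfile(infiles[0], outfile)
            return
        # Otherwise stream all inputs, in order, into the output (tail-to-head).
        with open(outfile, 'wb') as out:
            subprocess.check_call(['cat'] + list(infiles), stdout=out)

    if __name__ == '__main__':
        # usage: concat_sketch.py OUTFILE INFILE [INFILE ...]
        concatenate(sys.argv[1], sys.argv[2:])
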
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa tools/filters/catWrapper.xml
--- a/tools/filters/catWrapper.xml Thu Sep 24 16:15:57 2009 -0400
+++ b/tools/filters/catWrapper.xml Thu Sep 24 16:16:56 2009 -0400
@@ -1,19 +1,17 @@
-<tool id="cat1" name="Concatenate queries" version="1.0.1">
+<tool id="cat1" name="Concatenate queries">
<description>tail-to-head</description>
- <command interpreter="python">
- catWrapper.py
- $out_file1
+ <command interpreter="python">
+ catWrapper.py
+ $out_file1
$input1
- $input2
- #for $q in $queries
- ${q.input3}
- #end for
+ #for $q in $queries
+ ${q.input2}
+ #end for
</command>
<inputs>
- <param name="input1" type="data" label="First query to concatenate:"/>
- <param name="input2" type="data" label="Second query to concatenate:"/>
- <repeat name="queries" title="Additional query">
- <param name="input3" type="data" label="Select" />
+ <param name="input1" type="data" label="Concatenate Query"/>
+ <repeat name="queries" title="Query">
+ <param name="input2" type="data" label="Select" />
</repeat>
</inputs>
<outputs>
@@ -23,16 +21,14 @@
<test>
<param name="input1" value="1.bed"/>
<param name="input2" value="2.bed"/>
- <param name="input3" value="3.bed"/>
<output name="out_file1" file="cat_wrapper_out1.bed"/>
</test>
<!--TODO: if possible, enhance the underlying test code to handle this test
- the problem is multiple params with the same name "input3"
+ the problem is multiple params with the same name "input2"
<test>
<param name="input1" value="1.bed"/>
<param name="input2" value="2.bed"/>
- <param name="input3" value="3.bed"/>
- <param name="input3" value="4.bed"/>
+ <param name="input2" value="3.bed"/>
<output name="out_file1" file="cat_wrapper_out2.bed"/>
</test>
-->
@@ -62,12 +58,12 @@
chr1 151242630 151242955 X 0 +
chr1 151271715 151271999 Y 0 +
- chr1 151278832 151279227 Z 0 -
-
-and with Query2::
-
- chr2 100000030 200000955 P 0 +
- chr2 100000015 200000999 Q 0 +
+ chr1 151278832 151279227 Z 0 -
+
+and with Query2::
+
+ chr2 100000030 200000955 P 0 +
+ chr2 100000015 200000999 Q 0 +
will result in the following::
@@ -76,8 +72,8 @@
chr1 151242630 151242955 X 0 +
chr1 151271715 151271999 Y 0 +
chr1 151278832 151279227 Z 0 -
- chr2 100000030 200000955 P 0 +
- chr2 100000015 200000999 Q 0 +
+ chr2 100000030 200000955 P 0 +
+ chr2 100000015 200000999 Q 0 +
</help>
</tool>
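
To make the revised Cheetah template concrete: with the single fixed input plus two datasets added through the repeat, the command block above renders to a call of roughly this shape (all file names hypothetical):

    catWrapper.py out_file1.dat first.bed extra_1.bed extra_2.bed

That is, the output path, the fixed $input1, and then one path per ${q.input2} in the repeat.
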
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa tools/filters/sorter.py
--- a/tools/filters/sorter.py Thu Sep 24 16:15:57 2009 -0400
+++ b/tools/filters/sorter.py Thu Sep 24 16:16:56 2009 -0400
@@ -1,28 +1,44 @@
-# This script sorts a file based on the inputs:
-# -cols - column to sort on
-# -order - ASC- or DESCending order
-# -i - input filename
-# -o - output filename
+"""
+Sorts tabular data on one or more columns.
+
+usage: %prog [options]
+ -i, --input=i: Tabular file to be sorted
+ -o, --out_file1=o: Sorted output file
+ -c, --column=c: First column to sort on
+ -s, --style=s: Sort style (numerical or alphabetical)
+ -r, --order=r: Order (ASC or DESC)
+
+usage: %prog input out_file1 column style order [column style ...]
+"""
import os, re, string, sys
+from galaxy import eggs
+import pkg_resources; pkg_resources.require( "bx-python" )
+from bx.cookbook import doc_optparse
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def main():
+ #Parse Command Line
+ options, args = doc_optparse.parse( __doc__ )
try:
- inputfile = sys.argv[1]
- outputfile = '-o %s' % sys.argv[2]
- order = ('', '-r')[sys.argv[3] == 'DESC']
- sort_type = ('','-n')[sys.argv[4] == 'num']
- columns = sys.argv[5:]
- cols = [ '-k%s,%s'%(n, n) for n in columns ]
+ inputfile = options.input
+ outputfile = '-o %s' % options.out_file1
+ order = ('', '-r')[options.order == 'DESC']
+ columns = [options.column]
+ styles = [('','n')[options.style == 'num']]
+ col_styles = sys.argv[6:]
+ if len(col_styles) > 1:
+ columns.extend([col_styles[i] for i in range(0,len(col_styles),2)])
+ styles.extend([('','n')[col_styles[i] == 'num'] for i in range(1,len(col_styles),2)])
+ cols = [ '-k%s,%s%s'%(columns[i], columns[i], styles[i]) for i in range(len(columns)) ]
except Exception, ex:
stop_err('Error parsing input parameters\n' + str(ex))
# Launch sort.
- cmd = "sort -f -t $'\t' %s %s %s %s %s" % (sort_type, ' '.join(cols), order, outputfile, inputfile)
+ cmd = "sort -f -t $'\t' %s %s %s %s" % (order, ' '.join(cols), outputfile, inputfile)
try:
os.system(cmd)
except Exception, ex:
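
The core of the sorter rewrite above is turning each (column, style) pair into a GNU sort key definition. A minimal sketch of that mapping, with a hypothetical helper name and the same 'num' convention as the tool:

    def build_sort_keys(pairs):
        # pairs is a list of (column_number, style) tuples; style 'num' yields a
        # numeric key (trailing 'n'), anything else a plain alphabetical key.
        keys = []
        for column, style in pairs:
            suffix = 'n' if style == 'num' else ''
            keys.append('-k%s,%s%s' % (column, column, suffix))
        return keys

    # build_sort_keys([(1, 'alpha'), (3, 'num'), (6, 'num')])
    # -> ['-k1,1', '-k3,3n', '-k6,6n']
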
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa tools/filters/sorter.xml
--- a/tools/filters/sorter.xml Thu Sep 24 16:15:57 2009 -0400
+++ b/tools/filters/sorter.xml Thu Sep 24 16:16:56 2009 -0400
@@ -2,28 +2,33 @@
<description>data in ascending or descending order</description>
<command interpreter="python">
sorter.py
- $input
- $out_file1
- $order
- $style
- $firstcol
+ --input=$input
+ --out_file1=$out_file1
+ --column=$column
+ --style=$style
+ --order=$order
#for $col in $column_set:
- ${col.column}
+ ${col.other_column}
+ ${col.other_style}
#end for
</command>
<inputs>
<param format="tabular" name="input" type="data" label="Sort Query" />
- <param name="firstcol" label="on column" type="data_column" data_ref="input" accept_default="true" />
- <repeat name="column_set" title="Column selection">
- <param name="column" label="on column" type="data_column" data_ref="input" accept_default="true" />
- </repeat>
- <param name="order" type="select" label="in">
- <option value="DESC">Descending order</option>
- <option value="ASC">Ascending order</option>
- </param>
+ <param name="column" label="on column" type="data_column" data_ref="input" accept_default="true" />
<param name="style" type="select" label="with flavor">
<option value="num">Numerical sort</option>
<option value="alpha">Alphabetical sort</option>
+ </param>
+ <repeat name="column_set" title="Column selection">
+ <param name="other_column" label="on column" type="data_column" data_ref="input" accept_default="true" />
+ <param name="other_style" type="select" label="with flavor">
+ <option value="num">Numerical sort</option>
+ <option value="alpha">Alphabetical sort</option>
+ </param>
+ </repeat>
+ <param name="order" type="select" label="everything in">
+ <option value="DESC">Descending order</option>
+ <option value="ASC">Ascending order</option>
</param>
</inputs>
<outputs>
@@ -32,18 +37,20 @@
<tests>
<test>
<param name="input" value="sort_in1.bed"/>
- <param name="firstcol" value="1"/>
- <param name="column" value="3" />
+ <param name="column" value="1"/>
+ <param name="style" value="num"/>
+ <param name="other_column" value="3"/>
+ <param name="other_style" value="num"/>
<param name="order" value="ASC"/>
- <param name="style" value="num"/>
<output name="out_file1" file="sort_out1.bed"/>
</test>
<test>
<param name="input" value="sort_in1.bed"/>
- <param name="firstcol" value="3" />
- <param name="column" value="1"/>
+ <param name="column" value="3"/>
+ <param name="style" value="alpha"/>
+ <param name="other_column" value="1"/>
+ <param name="other_style" value="alpha"/>
<param name="order" value="ASC"/>
- <param name="style" value="alpha"/>
<output name="out_file1" file="sort_out2.bed"/>
</test>
</tests>
@@ -64,9 +71,53 @@
-----
-**Example**
+**Examples**
The list of numbers 4,17,3,5 collates to 3,4,5,17 by numerical sorting, while it collates to 17,3,4,5 by alphabetical sorting.
+Sorting the following::
+
+ Q d 7 II jhu 45
+ A kk 4 I h 111
+ Pd p 1 ktY WS 113
+ A g 10 H ZZ 856
+ A edf 4 tw b 234
+ BBB rt 10 H ZZ 100
+ A rew 10 d b 1111
+ C sd 19 YH aa 10
+ Hah c 23 ver bb 467
+ MN gtr 1 a X 32
+ N j 9 a T 205
+ BBB rrf 10 b Z 134
+ odfr ws 6 Weg dew 201
+ C f 3 WW SW 34
+ A jhg 4 I b 345
+ Pd gf 7 Gthe de 567
+ rS hty 90 YY LOp 89
+ A g 10 H h 43
+ A g 4 I h 500
+
+on columns 1 (alpha), 3 (num), and 6 (num) in ascending order will yield::
+
+ A kk 4 I h 111
+ A edf 4 tw b 234
+ A jhg 4 I b 345
+ A g 4 I h 500
+ A g 10 H h 43
+ A g 10 H ZZ 856
+ A rew 10 d b 1111
+ BBB rt 10 H ZZ 100
+ BBB rrf 10 b Z 134
+ C f 3 WW SW 34
+ C sd 19 YH aa 10
+ Hah c 23 ver bb 467
+ MN gtr 1 a X 32
+ N j 9 a T 205
+ odfr ws 6 Weg dew 201
+ Pd p 1 ktY WS 113
+ Pd gf 7 Gthe de 567
+ Q d 7 II jhu 45
+ rS hty 90 YY LOp 89
+
</help>
</tool>
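
Tying the help example back to the format string in sorter.py above, sorting on columns 1 (alpha), 3 (num), and 6 (num) in ascending order builds a command of roughly this shape (dataset names hypothetical; the order flag is empty for ASC):

    sort -f -t $'\t' -k1,1 -k3,3n -k6,6n -o sorted_out.tabular sort_in.tabular
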
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa tools/next_gen_conversion/solid_to_fastq.py
--- a/tools/next_gen_conversion/solid_to_fastq.py Thu Sep 24 16:15:57 2009 -0400
+++ b/tools/next_gen_conversion/solid_to_fastq.py Thu Sep 24 16:16:56 2009 -0400
@@ -30,7 +30,7 @@
tmpf = tempfile.NamedTemporaryFile() #forward reads
if options.input3 != "None" and options.input4 != "None":
tmpr = tempfile.NamedTemporaryFile() #reverse reads
- cmd1 = "bwa_solid2fastq_modified.pl 'yes' %s %s %s %s %s %s %s 2>&1" %(tmpf.name,tmpr.name,None,options.input1,options.input2,options.input3,options.input4)
+ cmd1 = "%s/bwa_solid2fastq_modified.pl 'yes' %s %s %s %s %s %s %s 2>&1" %(os.path.split(sys.argv[0])[0], tmpf.name,tmpr.name,None,options.input1,options.input2,options.input3,options.input4)
try:
os.system(cmd1)
os.system('gunzip -c %s >> %s' %(tmpf.name,options.output1))
@@ -40,7 +40,7 @@
tmpr.close()
# if single-end data
else:
- cmd1 = "bwa_solid2fastq_modified.pl 'no' %s %s %s %s %s %s %s 2>&1" % (tmpf.name, None, None, options.input1, options.input2, None, None)
+ cmd1 = "%s/bwa_solid2fastq_modified.pl 'no' %s %s %s %s %s %s %s 2>&1" % (os.path.split(sys.argv[0])[0], tmpf.name, None, None, options.input1, options.input2, None, None)
try:
os.system(cmd1)
os.system('gunzip -c %s >> %s' % (tmpf.name, options.output1))
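
The fix above amounts to resolving the helper Perl script relative to the wrapper's own location rather than relying on $PATH. A minimal sketch of that idiom (os.path.join is used here for readability; the diff concatenates with '/'):

    import os, sys

    # Directory containing the running wrapper script.
    script_dir = os.path.split(sys.argv[0])[0]
    # Companion script shipped alongside the wrapper.
    helper = os.path.join(script_dir, 'bwa_solid2fastq_modified.pl')
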
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa tools/samtools/sam_pileup.xml
--- a/tools/samtools/sam_pileup.xml Thu Sep 24 16:15:57 2009 -0400
+++ b/tools/samtools/sam_pileup.xml Thu Sep 24 16:16:56 2009 -0400
@@ -56,13 +56,13 @@
</param>
<param name="mapCap" type="integer" value="60" label="Where to cap mapping quality" />
<conditional name="c">
- <param name="consensus" type="select" label="Whether or not to call the consensus sequence using the MAQ consensus model">
- <option value="no">Don't use MAQ consensus model</option>
- <option value="yes">Use the MAQ consensus model</option>
+ <param name="consensus" type="select" label="Call consensus according to MAQ model?">
+ <option selected="true" value="no">No</option>
+ <option value="yes">Yes</option>
</param>
<when value="no" />
<when value="yes">
- <param name="theta" type="float" value="0.85" label="Theta paramter (error dependency coefficient) in the MAQ consensus calling model" />
+ <param name="theta" type="float" value="0.85" label="Theta parameter (error dependency coefficient) in the MAQ consensus calling model" />
<param name="hapNum" type="integer" value="2" label="Number of haplotypes in the sample" help="Greater than or equal to 2" />
<param name="fraction" type="float" value="0.001" label="Expected fraction of differences between a pair of haplotypes" />
<param name="phredProb" type="integer" value="40" label="Phred probability of an indel in sequencing/prep" />
@@ -77,9 +77,68 @@
**What it does**
-Uses SAMTools_' pileup command to produce a file in the pileup format based on the provided BAM file.
+Uses SAMTools_' pileup command to produce a pileup dataset from a provided BAM dataset. It generates two types of pileup datasets depending on the chosen options. If the *Call consensus according to MAQ model?* option is set to **No**, the tool produces a simple six-column pileup. If the option is set to **Yes**, a ten-column pileup dataset with consensus information is generated. Both types of datasets are briefly summarized below.
.. _SAMTools: http://samtools.sourceforge.net/samtools.shtml
+
+------
+
+**Types of pileup datasets**
+
+The description of the pileup format below is largely based on information from the SAMTools_ documentation page. The 6- and 10-column variants are described below.
+
+.. _SAMTools: http://samtools.sourceforge.net/pileup.shtml
+
+**Six column pileup**::
+
+ 1 2 3 4 5 6
+ ---------------------------------
+ chrM 412 A 2 ., II
+ chrM 413 G 4 ..t, IIIH
+ chrM 414 C 4 ...a III2
+ chrM 415 C 4 TTTt III7
+
+where::
+
+ Column Definition
+ ------- ----------------------------
+ 1 Chromosome
+ 2 Position (1-based)
+ 3 Reference base at that position
+ 4 Coverage (# reads aligning over that position)
+ 5 Bases within reads covering that position (see Galaxy wiki for more info)
+ 6 Quality values (phred33 scale, see Galaxy wiki for more)
+
+**Ten column pileup**
+
+The `ten-column`__ pileup incorporates additional consensus information generated with the *-c* option of the *samtools pileup* command::
+
+
+ 1 2 3 4 5 6 7 8 9 10
+ ------------------------------------------------
+ chrM 412 A A 75 0 25 2 ., II
+ chrM 413 G G 72 0 25 4 ..t, IIIH
+ chrM 414 C C 75 0 25 4 ...a III2
+ chrM 415 C T 75 75 25 4 TTTt III7
+
+where::
+
+ Column Definition
+ ------- ----------------------------
+ 1 Chromosome
+ 2 Position (1-based)
+ 3 Reference base at that position
+ 4 Consensus bases
+ 5 Consensus quality
+ 6 SNP quality
+ 7 Maximum mapping quality
+ 8 Coverage (# reads aligning over that position)
+ 9 Bases within reads covering that position (see Galaxy wiki for more info)
+ 10 Quality values (phred33 scale, see Galaxy wiki for more)
+
+
+.. __: http://samtools.sourceforge.net/cns0.shtml
+
</help>
</tool>
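
As an illustration of the two layouts documented above (not part of the tool; the function and field names are made up for the example), a pileup line can be split into named fields like this:

    SIX_COL = ('chrom', 'pos', 'ref_base', 'coverage', 'read_bases', 'base_quals')
    TEN_COL = ('chrom', 'pos', 'ref_base', 'consensus', 'consensus_qual',
               'snp_qual', 'max_map_qual', 'coverage', 'read_bases', 'base_quals')

    def parse_pileup_line(line):
        # Columns are tab-separated; six fields means simple pileup, ten means
        # the consensus variant produced with samtools pileup -c.
        fields = line.rstrip('\n').split('\t')
        names = SIX_COL if len(fields) == 6 else TEN_COL
        return dict(zip(names, fields))

    # parse_pileup_line('chrM\t415\tC\tT\t75\t75\t25\t4\tTTTt\tIII7')['consensus'] -> 'T'
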
diff -r 9e0c6e9b9dbb -r 93b8b571a9fa tools/samtools/sam_to_bam.xml
--- a/tools/samtools/sam_to_bam.xml Thu Sep 24 16:15:57 2009 -0400
+++ b/tools/samtools/sam_to_bam.xml Thu Sep 24 16:16:56 2009 -0400
@@ -51,7 +51,7 @@
**What it does**
-This tool uses the SAMTools_ toolkit to produce a BAM file based on a sorted input SAM file.
+This tool uses the SAMTools_ toolkit to produce an indexed BAM file based on a sorted input SAM file.
.. _SAMTools: http://samtools.sourceforge.net/samtools.shtml
details: http://www.bx.psu.edu/hg/galaxy/rev/9e0c6e9b9dbb
changeset: 2766:9e0c6e9b9dbb
user: Kanwei Li <kanwei(a)gmail.com>
date: Thu Sep 24 16:15:57 2009 -0400
description:
Changed page import statement. Fixes #169
1 file(s) affected in this change:
lib/galaxy/web/controllers/page.py
diffs (10 lines):
diff -r cbb7103283fe -r 9e0c6e9b9dbb lib/galaxy/web/controllers/page.py
--- a/lib/galaxy/web/controllers/page.py Wed Sep 23 19:11:12 2009 -0400
+++ b/lib/galaxy/web/controllers/page.py Thu Sep 24 16:15:57 2009 -0400
@@ -1,5 +1,5 @@
from galaxy.web.base.controller import *
-from galaxy.web.framework.helpers import *
+from galaxy.web.framework.helpers import time_ago, grids
from galaxy.util.sanitize_html import sanitize_html
import re
details: http://www.bx.psu.edu/hg/galaxy/rev/35dd55a7898e
changeset: 2768:35dd55a7898e
user: Greg Von Kuster <greg(a)bx.psu.edu>
date: Thu Sep 24 16:52:15 2009 -0400
description:
More improvement in cleanup_datasets script - will take significantly less disk, and may be faster.
1 file(s) affected in this change:
scripts/cleanup_datasets/cleanup_datasets.py
diffs (75 lines):
diff -r 93b8b571a9fa -r 35dd55a7898e scripts/cleanup_datasets/cleanup_datasets.py
--- a/scripts/cleanup_datasets/cleanup_datasets.py Thu Sep 24 16:16:56 2009 -0400
+++ b/scripts/cleanup_datasets/cleanup_datasets.py Thu Sep 24 16:52:15 2009 -0400
@@ -203,6 +203,7 @@
print "Elapsed time: ", stop - start, "\n"
def delete_datasets( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
+ import sqlalchemy as sa
# Marks datasets as deleted if associated items are all deleted.
print "######### Starting delete_datasets #########\n"
start = time.clock()
@@ -211,23 +212,33 @@
.filter( app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time ).all()
library_datasets = app.model.Dataset.options( eagerload( "library_associations" ) ) \
.filter( app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time ).all()
- else:
- history_datasets = app.model.Dataset.filter_by( deleted = False ) \
- .options( eagerload( "history_associations" ) ) \
- .filter( and_( app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time,
- app.model.HistoryDatasetAssociation.table.c.deleted==True ) ).all()
- library_datasets = app.model.Dataset.filter_by( deleted = False ) \
- .options( eagerload( "library_associations" ) ) \
- .filter( and_( app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time,
- app.model.LibraryDatasetDatasetAssociation.table.c.deleted==True ) ).all()
- print "Time to query history and library datasets: ", time.clock() - start, "\n"
- print "Processing ", len( history_datasets ), " history datasets and ", len( library_datasets ), " library datasets...\n\n"
- datasets = history_datasets + library_datasets
+ else:
+ # We really only need the id column here, but sqlalchemy barfs when trying to select only 1 column
+ history_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
+ app.model.Dataset.table.c.state ),
+ whereclause = sa.and_( app.model.Dataset.table.c.deleted == False,
+ app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time,
+ app.model.HistoryDatasetAssociation.table.c.deleted == True ),
+ from_obj = [ sa.outerjoin( app.model.Dataset.table,
+ app.model.HistoryDatasetAssociation.table ) ] )
+ history_dataset_ids = [ row.id for row in history_dataset_ids_query.execute() ]
+ print "Time to retrieve ", len( history_dataset_ids ), " history dataset ids: ", time.clock() - start
+ library_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
+ app.model.Dataset.table.c.state ),
+ whereclause = sa.and_( app.model.Dataset.table.c.deleted == False,
+ app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time,
+ app.model.LibraryDatasetDatasetAssociation.table.c.deleted == True ),
+ from_obj = [ sa.outerjoin( app.model.Dataset.table,
+ app.model.LibraryDatasetDatasetAssociation.table ) ] )
+ library_dataset_ids = [ row.id for row in library_dataset_ids_query.execute() ]
+ print "Time to retrieve ", len( library_dataset_ids ), " library dataset ids: ", time.clock() - start
+ dataset_ids = history_dataset_ids + library_dataset_ids
skip = []
deleted_dataset_count = 0
deleted_instance_count = 0
- for dataset in datasets:
- print "Processing dataset id:", dataset.id, "\n"
+ for dataset_id in dataset_ids:
+ print "Processing dataset id:", dataset_id, "\n"
+ dataset = app.model.Dataset.get( id )
if dataset.id not in skip and _dataset_is_deletable( dataset ):
deleted_dataset_count += 1
for dataset_instance in dataset.history_associations + dataset.library_associations:
@@ -375,6 +386,13 @@
self.file_path = file_path
# Setup the database engine and ORM
self.model = galaxy.model.mapping.init( self.file_path, self.database_connection, engine_options={}, create_tables=False )
+ @property
+ def sa_session( self ):
+ """
+ Returns a SQLAlchemy session -- currently just gets the current
+ session from the threadlocal session context, but this is provided
+ to allow migration toward a more SQLAlchemy 0.4 style of use.
+ """
+ return self.model.context.current
-if __name__ == "__main__":
- main()
+if __name__ == "__main__": main()
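
The pattern above, selecting only ids over an outer join instead of eager-loading full ORM objects, generalizes as follows. This is a sketch against the old-style SQLAlchemy API the script relies on, with dataset_table and assoc_table as hypothetical stand-ins for the mapped tables:

    import sqlalchemy as sa

    def deletable_dataset_ids(dataset_table, assoc_table, cutoff_time):
        # Select just (id, state) for non-deleted datasets whose associations
        # were deleted before the cutoff, joining out to the association table.
        query = sa.select(
            (dataset_table.c.id, dataset_table.c.state),
            whereclause=sa.and_(dataset_table.c.deleted == False,
                                assoc_table.c.update_time < cutoff_time,
                                assoc_table.c.deleted == True),
            from_obj=[sa.outerjoin(dataset_table, assoc_table)])
        # .execute() on a bound select is the old-style API used in the script.
        return [row.id for row in query.execute()]
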