galaxy-dev
details: http://www.bx.psu.edu/hg/galaxy/rev/4733c3b69226
changeset: 2790:4733c3b69226
user: jeremy goecks <jeremy.goecks at emory.edu>
date: Mon Sep 28 10:29:23 2009 -0400
description:
Paging for grids.
2 file(s) affected in this change:
lib/galaxy/web/framework/helpers/grids.py
templates/history/grid.mako
diffs (128 lines):
diff -r d669408018a1 -r 4733c3b69226 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py Sun Sep 27 23:11:43 2009 -0400
+++ b/lib/galaxy/web/framework/helpers/grids.py Mon Sep 28 10:29:23 2009 -0400
@@ -4,7 +4,7 @@
from galaxy.web import url_for
from galaxy.util.json import from_json_string, to_json_string
-import sys, logging
+import sys, logging, math
log = logging.getLogger( __name__ )
@@ -23,6 +23,10 @@
default_filter = None
default_sort_key = None
preserve_state = False
+
+ use_paging = False
+ num_rows_per_page = 5
+
# Set preference names.
cur_filter_pref_name = ".filter"
cur_sort_key_pref_name = ".sort_key"
@@ -90,7 +94,7 @@
column_filter = unicode(column_filter)
extra_url_args[ "f-" + column.key ] = column_filter.encode("utf-8")
- # Process sort arguments
+ # Process sort arguments.
sort_key = sort_order = None
if 'sort' in kwargs:
sort_key = kwargs['sort']
@@ -110,7 +114,26 @@
# There might be a current row
current_item = self.get_current_item( trans )
- # Save current filter and sort key.
+ # Process page number.
+ if self.use_paging:
+ if 'page' in kwargs:
+ page_num = int( kwargs['page'] )
+ else:
+ page_num = 1
+
+ # Before modifying query, get the total number of rows that query returns so that the total number of pages can
+ # be computed.
+ total_num_rows = query.count()
+ query = query.limit( self.num_rows_per_page ).offset( ( page_num-1 ) * self.num_rows_per_page )
+
+ num_pages = int ( math.ceil( float( total_num_rows ) / self.num_rows_per_page ) )
+ else:
+ # Defaults.
+ page_num = 1
+ num_pages = 1
+
+
+ # Preserve grid state: save current filter and sort key.
if self.preserve_state:
pref_name = unicode( self.__class__.__name__ + self.cur_filter_pref_name )
if not saved_filter_pref:
@@ -146,9 +169,13 @@
else:
new_kwargs[ 'id' ] = trans.security.encode_id( id )
return url_for( **new_kwargs )
+
+
return trans.fill_template( self.template,
grid=self,
query=query,
+ cur_page_num = page_num,
+ num_pages = num_pages,
cur_filter_dict=cur_filter_dict,
sort_key=sort_key,
encoded_sort_key=encoded_sort_key,
diff -r d669408018a1 -r 4733c3b69226 templates/history/grid.mako
--- a/templates/history/grid.mako Sun Sep 27 23:11:43 2009 -0400
+++ b/templates/history/grid.mako Mon Sep 28 10:29:23 2009 -0400
@@ -299,20 +299,36 @@
</tr>
%endfor
</tbody>
- %if grid.operations:
- <tfoot>
- <tr>
- <td></td>
- <td colspan="100">
- For <span class="grid-selected-count"></span> selected histories:
- %for operation in grid.operations:
- %if operation.allow_multiple:
- <input type="submit" name="operation" value="${operation.label}" class="action-button">
- %endif
- %endfor
- </td>
- </tr>
- </tfoot>
+ <tfoot>
+ %if num_pages > 1:
+ <tr>
+ <td></td>
+ <td colspan="100" style="font-size: 90%; text-align: right">
+ Page:
+ %for page_index in range(1, num_pages + 1):
+ %if page_index == cur_page_num:
+ <span style="font-style: italic">${page_index}</span>
+ %else:
+ <% args = { "page" : page_index } %>
+ <span><a href="${url( args )}">${page_index}</a></span>
+ %endif
+ %endfor
+ </td>
+ </tr>
+ %endif
+ %if grid.operations:
+ <tr>
+ <td></td>
+ <td colspan="100">
+ For <span class="grid-selected-count"></span> selected histories:
+ %for operation in grid.operations:
+ %if operation.allow_multiple:
+ <input type="submit" name="operation" value="${operation.label}" class="action-button">
+ %endif
+ %endfor
+ </td>
+ </tr>
+ </tfoot>
%endif
</table>
</form>
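The paging scheme above reduces to one count plus a limit/offset slice. A minimal standalone sketch of the same arithmetic, assuming a SQLAlchemy-style query object (the names here are illustrative, not part of the changeset):

    import math

    def paginate( query, page_num, num_rows_per_page=5 ):
        # Count the full result set first, so the total page count is known
        # before the query is narrowed to a single page.
        total_num_rows = query.count()
        query = query.limit( num_rows_per_page ).offset( ( page_num - 1 ) * num_rows_per_page )
        num_pages = int( math.ceil( float( total_num_rows ) / num_rows_per_page ) )
        return query, num_pages

    # e.g. 23 rows at 5 per page gives 5 pages; page 2 is offset 5, rows 6-10.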
details: http://www.bx.psu.edu/hg/galaxy/rev/40c3c7798b64
changeset: 2791:40c3c7798b64
user: Kanwei Li <kanwei at gmail.com>
date: Mon Sep 28 12:39:53 2009 -0400
description:
fix history grids
2 file(s) affected in this change:
templates/grid.mako
templates/history/grid.mako
diffs (100 lines):
diff -r 4733c3b69226 -r 40c3c7798b64 templates/grid.mako
--- a/templates/grid.mako Mon Sep 28 10:29:23 2009 -0400
+++ b/templates/grid.mako Mon Sep 28 12:39:53 2009 -0400
@@ -214,19 +214,19 @@
%endfor
</tbody>
%if grid.has_multiple_item_operations:
- <tfoot>
- <tr>
- <td></td>
- <td colspan="100">
- For <span class="grid-selected-count"></span> selected items:
- %for operation in grid.operations:
- %if operation.allow_multiple:
- <input type="submit" name="operation" value="${operation.label}" class="action-button">
- %endif
- %endfor
- </td>
- </tr>
- </tfoot>
+ <tfoot>
+ <tr>
+ <td></td>
+ <td colspan="100">
+ For <span class="grid-selected-count"></span> selected items:
+ %for operation in grid.operations:
+ %if operation.allow_multiple:
+ <input type="submit" name="operation" value="${operation.label}" class="action-button">
+ %endif
+ %endfor
+ </td>
+ </tr>
+ </tfoot>
%endif
</table>
</form>
diff -r 4733c3b69226 -r 40c3c7798b64 templates/history/grid.mako
--- a/templates/history/grid.mako Mon Sep 28 10:29:23 2009 -0400
+++ b/templates/history/grid.mako Mon Sep 28 12:39:53 2009 -0400
@@ -301,34 +301,34 @@
</tbody>
<tfoot>
%if num_pages > 1:
- <tr>
- <td></td>
- <td colspan="100" style="font-size: 90%; text-align: right">
- Page:
- %for page_index in range(1, num_pages + 1):
- %if page_index == cur_page_num:
- <span style="font-style: italic">${page_index}</span>
- %else:
- <% args = { "page" : page_index } %>
- <span><a href="${url( args )}">${page_index}</a></span>
- %endif
- %endfor
- </td>
- </tr>
+ <tr>
+ <td></td>
+ <td colspan="100" style="font-size: 90%; text-align: right">
+ Page:
+ %for page_index in range(1, num_pages + 1):
+ %if page_index == cur_page_num:
+ <span style="font-style: italic">${page_index}</span>
+ %else:
+ <% args = { "page" : page_index } %>
+ <span><a href="${url( args )}">${page_index}</a></span>
+ %endif
+ %endfor
+ </td>
+ </tr>
%endif
%if grid.operations:
- <tr>
- <td></td>
- <td colspan="100">
- For <span class="grid-selected-count"></span> selected histories:
- %for operation in grid.operations:
- %if operation.allow_multiple:
- <input type="submit" name="operation" value="${operation.label}" class="action-button">
- %endif
- %endfor
- </td>
- </tr>
+ <tr>
+ <td></td>
+ <td colspan="100">
+ For <span class="grid-selected-count"></span> selected histories:
+ %for operation in grid.operations:
+ %if operation.allow_multiple:
+ <input type="submit" name="operation" value="${operation.label}" class="action-button">
+ %endif
+ %endfor
+ </td>
+ </tr>
+ %endif
</tfoot>
- %endif
</table>
</form>
details: http://www.bx.psu.edu/hg/galaxy/rev/d669408018a1
changeset: 2789:d669408018a1
user: Kanwei Li <kanwei at gmail.com>
date: Sun Sep 27 23:11:43 2009 -0400
description:
typo fixes for tools in folders A-M
38 file(s) affected in this change:
templates/base_panels.mako
tools/annotation_profiler/annotation_profiler.xml
tools/data_source/encode_import_all_latest_datasets.xml
tools/data_source/upload.xml
tools/extract/extract_genomic_dna.xml
tools/fastx_toolkit/fasta_formatter.xml
tools/fastx_toolkit/fastq_quality_converter.xml
tools/fastx_toolkit/fastx_barcode_splitter.xml
tools/fastx_toolkit/fastx_clipper.xml
tools/fastx_toolkit/fastx_collapser.xml
tools/fastx_toolkit/fastx_quality_statistics.xml
tools/fastx_toolkit/fastx_renamer.xml
tools/filters/axt_to_concat_fasta.xml
tools/filters/axt_to_fasta.xml
tools/filters/axt_to_lav.xml
tools/filters/compare.xml
tools/filters/cutWrapper.xml
tools/filters/grep.xml
tools/filters/joiner.xml
tools/filters/lav_to_bed.xml
tools/filters/pasteWrapper.xml
tools/filters/remove_beginning.xml
tools/hyphy/hyphy_dnds_wrapper.xml
tools/hyphy/hyphy_nj_tree_wrapper.xml
tools/maf/genebed_maf_to_fasta.xml
tools/maf/interval_maf_to_merged_fasta.xml
tools/maf/maf_to_bed.xml
tools/maf/maf_to_fasta.xml
tools/maf/maf_to_interval.xml
tools/metag_tools/blat_wrapper.xml
tools/metag_tools/convert_SOLiD_color2nuc.xml
tools/metag_tools/mapping_to_ucsc.xml
tools/metag_tools/megablast_xml_parser.xml
tools/metag_tools/short_reads_figure_high_quality_length.xml
tools/metag_tools/short_reads_figure_score.xml
tools/metag_tools/short_reads_trim_seq.xml
tools/metag_tools/shrimp_color_wrapper.xml
tools/metag_tools/shrimp_wrapper.xml
diffs (698 lines):
diff -r f7459ad62be9 -r d669408018a1 templates/base_panels.mako
--- a/templates/base_panels.mako Sat Sep 26 18:05:36 2009 -0400
+++ b/templates/base_panels.mako Sun Sep 27 23:11:43 2009 -0400
@@ -283,7 +283,7 @@
</head>
<body scroll="no" class="${self.body_class}">
- <div id="everything" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; min-width: 960px;">
+ <div id="everything" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; min-width: 600px;">
## Background displays first
<div id="background"></div>
## Layer iframes over backgrounds
diff -r f7459ad62be9 -r d669408018a1 tools/annotation_profiler/annotation_profiler.xml
--- a/tools/annotation_profiler/annotation_profiler.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/annotation_profiler/annotation_profiler.xml Sun Sep 27 23:11:43 2009 -0400
@@ -43,9 +43,9 @@
Takes an input set of intervals and for each interval determines the base coverage of the interval by a set of features (tables) available from UCSC.
-By default, this tool will check the coverage of your intervals against all available features; you may, however, choose to select only those tables that you want to include. Selecting a section heading will effectively cause all of it's children to be selected.
+By default, this tool will check the coverage of your intervals against all available features; you may, however, choose to select only those tables that you want to include. Selecting a section heading will effectively cause all of its children to be selected.
-You may alternatively choose to recieve a summary across all of the intervals that you provide.
+You may alternatively choose to receive a summary across all of the intervals that you provide.
-----
@@ -118,14 +118,14 @@
allIntervalCount is the number of provided intervals
allIntervalSize is the sum of the lengths of the provided interval file
allCoverage is the sum of the coverage for each provided interval
- allTableRegionsOverlaped is the sum of the number of regions of the table (non-unique) that were overlaped for each interval
- allIntervalsOverlapingTable is the number of provided intervals which overlap the table
+ allTableRegionsOverlapped is the sum of the number of regions of the table (non-unique) that were overlapped for each interval
+ allIntervalsOverlappingTable is the number of provided intervals which overlap the table
nrIntervalCount is the number of non-redundant intervals
nrIntervalSize is the sum of the lengths of non-redundant intervals
nrCoverage is the sum of the coverage of non-redundant intervals
- nrTableRegionsOverlaped is the number of regions of the table (unique) that were overlaped by the non-redundant intervals
- nrIntervalsOverlapingTable is the number of non-redundant intervals which overlap the table
+ nrTableRegionsOverlapped is the number of regions of the table (unique) that were overlapped by the non-redundant intervals
+ nrIntervalsOverlappingTable is the number of non-redundant intervals which overlap the table
.. class:: infomark
diff -r f7459ad62be9 -r d669408018a1 tools/data_source/encode_import_all_latest_datasets.xml
--- a/tools/data_source/encode_import_all_latest_datasets.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/data_source/encode_import_all_latest_datasets.xml Sun Sep 27 23:11:43 2009 -0400
@@ -46,7 +46,7 @@
*[gencode_partitioned]* means that the dataset was partitioned according to the protocol below:
-A partition scheme has been defined that is similar to what has previously been done with TARs/TRANSFRAGs such that any feature can be cla ssified as falling into one of the following 6 categories:
+A partition scheme has been defined that is similar to what has previously been done with TARs/TRANSFRAGs such that any feature can be classified as falling into one of the following 6 categories:
1. **Coding** -- coding exons defined from the GENCODE experimentally verified coding set (coding in any transcript)
2. **5UTR** -- 5' UTR exons defined from the GENCODE experimentally verified coding set (5' UTR in some transcript but never coding in any other)
3. **3UTR** -- 3' UTR exons defined from the GENCODE experimentally verified coding set (3' UTR in some transcript but never coding in any other)
@@ -63,4 +63,4 @@
</help>
-</tool>
+</tool>
diff -r f7459ad62be9 -r d669408018a1 tools/data_source/upload.xml
--- a/tools/data_source/upload.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/data_source/upload.xml Sun Sep 27 23:11:43 2009 -0400
@@ -94,7 +94,7 @@
**Fasta**
-A sequence in FASTA format consists of a single-line description, followed by lines of sequence data. The first character of the description line is a greater-than (">") symbol in the first column. All lines should be shorter than 80 charcters::
+A sequence in FASTA format consists of a single-line description, followed by lines of sequence data. The first character of the description line is a greater-than (">") symbol in the first column. All lines should be shorter than 80 characters::
>sequence1
atgcgtttgcgtgc
@@ -195,7 +195,7 @@
**Wig**
-The wiggle format is line-oriented. Wiggle data is preceeded by a track definition line, which adds a number of options for controlling the default display of this track.
+The wiggle format is line-oriented. Wiggle data is preceded by a track definition line, which adds a number of options for controlling the default display of this track.
-----
diff -r f7459ad62be9 -r d669408018a1 tools/extract/extract_genomic_dna.xml
--- a/tools/extract/extract_genomic_dna.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/extract/extract_genomic_dna.xml Sun Sep 27 23:11:43 2009 -0400
@@ -55,7 +55,7 @@
.. class:: infomark
- **Extract genomic DNA using coordinates from ASSEMBLED genomes and UNassembled genomes** previously were achieved by two seperate tools.
+ **Extract genomic DNA using coordinates from ASSEMBLED genomes and UNassembled genomes** previously were achieved by two separate tools.
-----
diff -r f7459ad62be9 -r d669408018a1 tools/fastx_toolkit/fasta_formatter.xml
--- a/tools/fastx_toolkit/fasta_formatter.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/fastx_toolkit/fasta_formatter.xml Sun Sep 27 23:11:43 2009 -0400
@@ -13,7 +13,7 @@
<inputs>
<param format="fasta" name="input" type="data" label="Library to re-format" />
- <param name="width" type="integer" value="0" label="New width for nucleotides strings" help="Use 0 for single line outout." />
+ <param name="width" type="integer" value="0" label="New width for nucleotides strings" help="Use 0 for single line out." />
</inputs>
<tests>
diff -r f7459ad62be9 -r d669408018a1 tools/fastx_toolkit/fastq_quality_converter.xml
--- a/tools/fastx_toolkit/fastq_quality_converter.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/fastx_toolkit/fastq_quality_converter.xml Sun Sep 27 23:11:43 2009 -0400
@@ -53,7 +53,7 @@
**What it does**
-Converts a solexa FASTQ file to/from numeric or ASCII quality format.
+Converts a Solexa FASTQ file to/from numeric or ASCII quality format.
.. class:: warningmark
diff -r f7459ad62be9 -r d669408018a1 tools/fastx_toolkit/fastx_barcode_splitter.xml
--- a/tools/fastx_toolkit/fastx_barcode_splitter.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/fastx_toolkit/fastx_barcode_splitter.xml Sun Sep 27 23:11:43 2009 -0400
@@ -36,7 +36,7 @@
**What it does**
-This tool splits a solexa library (FASTQ file) or a regular FASTA file to several files, using barcodes as the split criteria.
+This tool splits a Solexa library (FASTQ file) or a regular FASTA file into several files, using barcodes as the split criteria.
--------
diff -r f7459ad62be9 -r d669408018a1 tools/fastx_toolkit/fastx_clipper.xml
--- a/tools/fastx_toolkit/fastx_clipper.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/fastx_toolkit/fastx_clipper.xml Sun Sep 27 23:11:43 2009 -0400
@@ -42,8 +42,8 @@
</param>
<param name="DISCARD_OPTIONS" type="select" label="Output options">
- <option value="-c">Output only clipped seqeunces (i.e. sequences which contained the adapter)</option>
- <option value="-C">Output only non-clipped seqeunces (i.e. sequences which did not contained the adapter)</option>
+ <option value="-c">Output only clipped sequences (i.e. sequences which contained the adapter)</option>
+ <option value="-C">Output only non-clipped sequences (i.e. sequences which did not contained the adapter)</option>
<option value="">Output both clipped and non-clipped sequences</option>
</param>
diff -r f7459ad62be9 -r d669408018a1 tools/fastx_toolkit/fastx_collapser.xml
--- a/tools/fastx_toolkit/fastx_collapser.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/fastx_toolkit/fastx_collapser.xml Sun Sep 27 23:11:43 2009 -0400
@@ -63,7 +63,7 @@
Original Sequence Names / Lane descriptions (e.g. "CSHL_2_FC0042AGLLOO_1_1_742_502") are discarded.
-The output seqeunce name is composed of two numbers: the first is the sequence's number, the second is the multiplicity value.
+The output sequence name is composed of two numbers: the first is the sequence's number, the second is the multiplicity value.
The following output::
diff -r f7459ad62be9 -r d669408018a1 tools/fastx_toolkit/fastx_quality_statistics.xml
--- a/tools/fastx_toolkit/fastx_quality_statistics.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/fastx_toolkit/fastx_quality_statistics.xml Sun Sep 27 23:11:43 2009 -0400
@@ -36,7 +36,7 @@
**The output file will contain the following fields:**
-* column = column number (1 to 36 for a 36-cycles read solexa file)
+* column = column number (1 to 36 for a 36-cycles read Solexa file)
* count = number of bases found in this column.
* min = Lowest quality score value found in this column.
* max = Highest quality score value found in this column.
diff -r f7459ad62be9 -r d669408018a1 tools/fastx_toolkit/fastx_renamer.xml
--- a/tools/fastx_toolkit/fastx_renamer.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/fastx_toolkit/fastx_renamer.xml Sun Sep 27 23:11:43 2009 -0400
@@ -23,7 +23,7 @@
.. class:: infomark
-Use this tool at the beginning of your workflow, as a way to keep the original sequence (before trimming,clipping,barcode-removal, etc).
+Use this tool at the beginning of your workflow, as a way to keep the original sequence (before trimming, clipping, barcode-removal, etc).
--------
diff -r f7459ad62be9 -r d669408018a1 tools/filters/axt_to_concat_fasta.xml
--- a/tools/filters/axt_to_concat_fasta.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/filters/axt_to_concat_fasta.xml Sun Sep 27 23:11:43 2009 -0400
@@ -1,5 +1,5 @@
<tool id="axt_to_concat_fasta" name="AXT to concatenated FASTA">
- <description>Converts an AXT formated file to a concatenated FASTA alignment</description>
+ <description>Converts an AXT formatted file to a concatenated FASTA alignment</description>
<command interpreter="python">axt_to_concat_fasta.py $dbkey_1 $dbkey_2 < $axt_input > $out_file1</command>
<inputs>
<param format="axt" name="axt_input" type="data" label="AXT file"/>
@@ -27,7 +27,7 @@
**Syntax**
-This tool converts an AXT formated file to the FASTA format, and concatenates the results in the same build.
+This tool converts an AXT formatted file to the FASTA format, and concatenates the results in the same build.
- **AXT format** The alignments are produced from Blastz, an alignment tool available from Webb Miller's lab at Penn State University. The lav format Blastz output, which does not include the sequence, was converted to AXT format with lavToAxt. Each alignment block in an AXT file contains three lines: a summary line and 2 sequence lines. Blocks are separated from one another by blank lines.
diff -r f7459ad62be9 -r d669408018a1 tools/filters/axt_to_fasta.xml
--- a/tools/filters/axt_to_fasta.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/filters/axt_to_fasta.xml Sun Sep 27 23:11:43 2009 -0400
@@ -1,5 +1,5 @@
<tool id="axt_to_fasta" name="AXT to FASTA">
- <description>Converts an AXT formated file to FASTA format</description>
+ <description>Converts an AXT formatted file to FASTA format</description>
<command interpreter="python">axt_to_fasta.py $dbkey_1 $dbkey_2 < $axt_input > $out_file1</command>
<inputs>
<param format="axt" name="axt_input" type="data" label="AXT file"/>
@@ -28,7 +28,7 @@
**Syntax**
-This tool converts an AXT formated file to the FASTA format.
+This tool converts an AXT formatted file to the FASTA format.
- **AXT format** The alignments are produced from Blastz, an alignment tool available from Webb Miller's lab at Penn State University. The lav format Blastz output, which does not include the sequence, was converted to AXT format with lavToAxt. Each alignment block in an AXT file contains three lines: a summary line and 2 sequence lines. Blocks are separated from one another by blank lines.
diff -r f7459ad62be9 -r d669408018a1 tools/filters/axt_to_lav.xml
--- a/tools/filters/axt_to_lav.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/filters/axt_to_lav.xml Sun Sep 27 23:11:43 2009 -0400
@@ -1,5 +1,5 @@
<tool id="axt_to_lav_1" name="AXT to LAV">
- <description>Converts an AXT formated file to LAV format</description>
+ <description>Converts an AXT formatted file to LAV format</description>
<command interpreter="python">axt_to_lav.py /depot/data2/galaxy/$dbkey_1/seq/%s.nib:$dbkey_1:${GALAXY_DATA_INDEX_DIR}/shared/ucsc/chrom/${dbkey_1}.len /depot/data2/galaxy/$dbkey_2/seq/%s.nib:$dbkey_2:${GALAXY_DATA_INDEX_DIR}/shared/ucsc/chrom/${dbkey_2}.len $align_input $lav_file $seq_file1 $seq_file2</command>
<inputs>
<param name="align_input" type="data" format="axt" label="Alignment File" optional="False"/>
@@ -22,7 +22,7 @@
**Syntax**
-This tool converts an AXT formated file to the LAV format.
+This tool converts an AXT formatted file to the LAV format.
- **AXT format** The alignments are produced from Blastz, an alignment tool available from Webb Miller's lab at Penn State University. The lav format Blastz output, which does not include the sequence, was converted to AXT format with lavToAxt. Each alignment block in an AXT file contains three lines: a summary line and 2 sequence lines. Blocks are separated from one another by blank lines.
diff -r f7459ad62be9 -r d669408018a1 tools/filters/compare.xml
--- a/tools/filters/compare.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/filters/compare.xml Sun Sep 27 23:11:43 2009 -0400
@@ -52,7 +52,7 @@
and this is **Second query**::
- geneA tumor-supressor
+ geneA tumor-suppressor
geneB Foxp2
geneC Gnas1
geneE INK4a
diff -r f7459ad62be9 -r d669408018a1 tools/filters/cutWrapper.xml
--- a/tools/filters/cutWrapper.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/filters/cutWrapper.xml Sun Sep 27 23:11:43 2009 -0400
@@ -33,7 +33,7 @@
.. class:: infomark
-The output of this tool is always in tabular format (e.g., if your original delimeter was comma, it will be replaced with tab). For example:
+The output of this tool is always in tabular format (e.g., if your original delimiters are commas, they will be replaced with tabs). For example:
Cutting columns 1 and 3 from::
diff -r f7459ad62be9 -r d669408018a1 tools/filters/grep.xml
--- a/tools/filters/grep.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/filters/grep.xml Sun Sep 27 23:11:43 2009 -0400
@@ -30,7 +30,7 @@
**Syntax**
-The select tool searches the data for lines containing or not containing a match to the given pattern. Regular Expression is introduced in this tool. A Regular Expression is a pattern descibing a certain amount of text.
+The select tool searches the data for lines containing or not containing a match to the given pattern. Regular Expression is introduced in this tool. A Regular Expression is a pattern describing a certain amount of text.
- **( ) { } [ ] . * ? + \ ^ $** are all special characters. **\\** can be used to "escape" a special character, allowing that special character to be searched for.
- **\\A** matches the beginning of a string(but not an internal line).
@@ -46,7 +46,7 @@
- **{** n or n, or n,m **}** specifies an expected number of repetitions of the preceding pattern.
- **{n}** The preceding item is matched exactly n times.
- - **{n,}** The preceding item ismatched n or more times.
+ - **{n,}** The preceding item is matched n or more times.
- **{n,m}** The preceding item is matched at least n times but not more than m times.
- **[** ... **]** creates a character class. Within the brackets, single characters can be placed. A dash (-) may be used to indicate a range such as **a-z**.
@@ -64,9 +64,9 @@
**Example**
-- **^chr([0-9A-Za-z])+** would match lines that begin with chromsomes, such as lines in a BED format file.
+- **^chr([0-9A-Za-z])+** would match lines that begin with chromosomes, such as lines in a BED format file.
- **(ACGT){1,5}** would match at least 1 "ACGT" and at most 5 "ACGT" consecutively.
-- **([^,][0-9]{1,3})(,[0-9]{3})\*** would match a large integer that is properly seperated with commas such as 23,078,651.
+- **([^,][0-9]{1,3})(,[0-9]{3})\*** would match a large integer that is properly separated with commas such as 23,078,651.
- **(abc)|(def)** would match either "abc" or "def".
- **^\\W+#** would match any line that is a comment.
</help>
diff -r f7459ad62be9 -r d669408018a1 tools/filters/joiner.xml
--- a/tools/filters/joiner.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/filters/joiner.xml Sun Sep 27 23:11:43 2009 -0400
@@ -166,12 +166,12 @@
Joining the 4th column of Query1 with the 1st column of Query2 will yield::
- chr1 10 20 geneA geneA tumor-supressor
+ chr1 10 20 geneA geneA tumor-suppressor
chr1 50 80 geneB geneB Foxp2
Joining the 4th column of Query1 with the 1st column of Query2, while keeping all lines from Query1, will yield::
- chr1 10 20 geneA geneA tumor-supressor
+ chr1 10 20 geneA geneA tumor-suppressor
chr1 50 80 geneB geneB Foxp2
chr5 10 40 geneL
diff -r f7459ad62be9 -r d669408018a1 tools/filters/lav_to_bed.xml
--- a/tools/filters/lav_to_bed.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/filters/lav_to_bed.xml Sun Sep 27 23:11:43 2009 -0400
@@ -1,5 +1,5 @@
<tool id="lav_to_bed1" name="LAV to BED">
- <description>Converts a LAV formated file to BED format</description>
+ <description>Converts a LAV formatted file to BED format</description>
<command interpreter="python">lav_to_bed.py $lav_file $bed_file1 $bed_file2</command>
<inputs>
<param name="lav_file" type="data" format="lav" label="LAV File" optional="False"/>
@@ -19,7 +19,7 @@
**Syntax**
-This tool converts a LAV formated file to the BED format.
+This tool converts a LAV formatted file to the BED format.
- **LAV format** LAV is an alignment format developed by Webb Miller's group at Penn State University. It is the primary output format for BLASTZ.
@@ -54,7 +54,7 @@
}
#:eof
-- To two BED formated files::
+- To two BED formatted files::
chr19 3001011 3001075 hg16_0 0 +
chr19 3008278 3008357 hg16_1 0 +
diff -r f7459ad62be9 -r d669408018a1 tools/filters/pasteWrapper.xml
--- a/tools/filters/pasteWrapper.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/filters/pasteWrapper.xml Sun Sep 27 23:11:43 2009 -0400
@@ -33,7 +33,7 @@
.. class:: infomark
-Paste preserves column assignments of the first dataset
+Paste preserves column assignments of the first dataset.
-----
diff -r f7459ad62be9 -r d669408018a1 tools/filters/remove_beginning.xml
--- a/tools/filters/remove_beginning.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/filters/remove_beginning.xml Sun Sep 27 23:11:43 2009 -0400
@@ -19,7 +19,7 @@
**What it does**
-This tool removes specified number of lines from the beginning of a dataset
+This tool removes a specified number of lines from the beginning of a dataset.
-----
diff -r f7459ad62be9 -r d669408018a1 tools/hyphy/hyphy_dnds_wrapper.xml
--- a/tools/hyphy/hyphy_dnds_wrapper.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/hyphy/hyphy_dnds_wrapper.xml Sun Sep 27 23:11:43 2009 -0400
@@ -47,7 +47,7 @@
-----
-For the tree definition, you only need to specify the species build names. For example, you could use the tree *((hg17,panTro1),(mm5,rn3),canFam1)*, if your FASTA file looks like the example below. You may also use **Neighbor Joining Tree Builder** tool to obtain the tree definition::
+For the tree definition, you only need to specify the species build names. For example, you could use the tree *(hg17,panTro1),(mm5,rn3),canFam1)*, if your FASTA file looks like the example below. You may also use **Neighbor Joining Tree Builder** tool to obtain the tree definition::
>hg17.chr7(+):26907301-26907310|hg17_0
GTGGGAGGT
diff -r f7459ad62be9 -r d669408018a1 tools/hyphy/hyphy_nj_tree_wrapper.xml
--- a/tools/hyphy/hyphy_nj_tree_wrapper.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/hyphy/hyphy_nj_tree_wrapper.xml Sun Sep 27 23:11:43 2009 -0400
@@ -16,7 +16,7 @@
<option value="K2P">Kimura 2 parameter</option>
<option value="JC69">Jukes-Cantor</option>
<!-- <option value="T3P">Tamura 3-parameter (correction for GC content bias and transition/trasversion bias)</option> -->
- <!-- <option value="p_Distance">Number of observed substituions per site</option> -->
+ <!-- <option value="p_Distance">Number of observed substitutions per site</option> -->
<!-- <option value="Unaligned_LZ">Distance measure for unaligned sequences based on Lempel Ziv measure of information content</option> -->
<!-- <option value="Unaligned_LZ_FR">Distance measure for unaligned sequences based on Lempel Ziv measure of information content using the best choice forward and reverse string orientations</option> -->
</param>
diff -r f7459ad62be9 -r d669408018a1 tools/maf/genebed_maf_to_fasta.xml
--- a/tools/maf/genebed_maf_to_fasta.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/maf/genebed_maf_to_fasta.xml Sun Sep 27 23:11:43 2009 -0400
@@ -48,10 +48,10 @@
</param>
</when>
</conditional>
- <param name="overwrite_with_gaps" type="select" label="Split into Gapless MAF blocks" help="When set to Yes, blocks are divided around gaps appearing in any species. This will prevent gaps occuring in the interior of the sequence for an aligning species from overwriting a nucleotide found for the same position in a lower-scoring block.">
+ <param name="overwrite_with_gaps" type="select" label="Split into Gapless MAF blocks" help="When set to Yes, blocks are divided around gaps appearing in any species. This will prevent gaps occurring in the interior of the sequence for an aligning species from overwriting a nucleotide found for the same position in a lower-scoring block.">
<option value="True" selected="true">No</option>
<option value="False">Yes</option>
- </param>
+ </param>
</inputs>
<outputs>
<data format="fasta" name="out_file1" />
@@ -61,7 +61,7 @@
<param name="input1" value="8.bed"/>
<param name="maf_source" value="cached"/>in aligning species
<param name="maf_identifier" value="8_WAY_MULTIZ_hg17"/>
- <param name="species" value="canFam1,hg17,mm5,panTro1,rn3"/>
+ <param name="species" value="canFam1,hg17,mm5,panTro1,rn3"/>
<param name="overwrite_with_gaps" value="True"/>
<output name="out_file1" file="gene_bed_maf_to_fasta_out.fasta" />
</test>
@@ -69,7 +69,7 @@
<param name="input1" value="8.bed"/>
<param name="maf_source" value="user"/>
<param name="maf_file" value="4.maf"/>
- <param name="species" value="hg17,panTro1"/>
+ <param name="species" value="hg17,panTro1"/>
<param name="overwrite_with_gaps" value="True"/>
<output name="out_file1" file="gene_bed_maf_to_fasta_user_out.fasta" />
</test>
diff -r f7459ad62be9 -r d669408018a1 tools/maf/interval_maf_to_merged_fasta.xml
--- a/tools/maf/interval_maf_to_merged_fasta.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/maf/interval_maf_to_merged_fasta.xml Sun Sep 27 23:11:43 2009 -0400
@@ -49,10 +49,10 @@
</param>
</when>
</conditional>
- <param name="overwrite_with_gaps" type="select" label="Split into Gapless MAF blocks" help="When set to Yes, blocks are divided around gaps appearing in any species. This will prevent gaps occuring in the interior of the sequence for an aligning species from overwriting a nucleotide found for the same position in a lower-scoring block.">
+ <param name="overwrite_with_gaps" type="select" label="Split into Gapless MAF blocks" help="When set to Yes, blocks are divided around gaps appearing in any species. This will prevent gaps occurring in the interior of the sequence for an aligning species from overwriting a nucleotide found for the same position in a lower-scoring block.">
<option value="True" selected="true">No</option>
<option value="False">Yes</option>
- </param>
+ </param>
</page>
</inputs>
<outputs>
@@ -63,7 +63,7 @@
<param name="input1" value="13.bed" dbkey="hg18" ftype="bed"/>
<param name="maf_source" value="cached"/>
<param name="maf_identifier" value="17_WAY_MULTIZ_hg18"/>
- <param name="species" value="hg18,mm8"/>
+ <param name="species" value="hg18,mm8"/>
<param name="overwrite_with_gaps" value="True"/>
<output name="out_file1" file="interval_maf_to_merged_fasta_out3.fasta" />
</test>
@@ -71,7 +71,7 @@
<param name="input1" value="1.bed" dbkey="hg17" ftype="bed"/>
<param name="maf_source" value="cached"/>
<param name="maf_identifier" value="8_WAY_MULTIZ_hg17"/>
- <param name="species" value="canFam1,hg17,mm5,panTro1,rn3"/>
+ <param name="species" value="canFam1,hg17,mm5,panTro1,rn3"/>
<param name="overwrite_with_gaps" value="True"/>
<output name="out_file1" file="interval_maf_to_merged_fasta_out.dat" />
</test>
@@ -79,7 +79,7 @@
<param name="input1" value="1.bed" dbkey="hg17" ftype="bed"/>
<param name="maf_source" value="user"/>
<param name="maf_file" value="5.maf"/>
- <param name="species" value="canFam1,hg17,mm5,panTro1,rn3"/>
+ <param name="species" value="canFam1,hg17,mm5,panTro1,rn3"/>
<param name="overwrite_with_gaps" value="True"/>
<output name="out_file1" file="interval_maf_to_merged_fasta_user_out.dat" />
</test>
diff -r f7459ad62be9 -r d669408018a1 tools/maf/maf_to_bed.xml
--- a/tools/maf/maf_to_bed.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/maf/maf_to_bed.xml Sun Sep 27 23:11:43 2009 -0400
@@ -36,12 +36,12 @@
* **Step 2 of 2**. Choose species from the alignment to be included in the output and specify how to deal with alignment blocks that lack one or more species:
* **Choose species** - the tool reads the alignment provided during Step 1 and generates a list of species contained within that alignment. Using checkboxes you can specify taxa to be included in the output (only reference genome, shown in **bold**, is selected by default). If you select more than one species, then more than one history item will be created.
- * **Choose to include/exclude blocks with missing species** - if an alignment block does not contain any one of the species you selected within **Choose species** menu and this option is set to **exclude blocks with missing species**, then coordiantes of such a block **will not** be included in the output (see **Example 2** below).
+ * **Choose to include/exclude blocks with missing species** - if an alignment block does not contain any one of the species you selected within **Choose species** menu and this option is set to **exclude blocks with missing species**, then coordinates of such a block **will not** be included in the output (see **Example 2** below).
-----
-**Example 1**: **Include only refernce genome** (hg18 in this case) and **include blocks with missing species**:
+**Example 1**: **Include only reference genome** (hg18 in this case) and **include blocks with missing species**:
For the following alignment::
diff -r f7459ad62be9 -r d669408018a1 tools/maf/maf_to_fasta.xml
--- a/tools/maf/maf_to_fasta.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/maf/maf_to_fasta.xml Sun Sep 27 23:11:43 2009 -0400
@@ -71,7 +71,7 @@
Multiple Block output has additional options:
* **Choose species** - the tool reads the alignment provided during Step 1 and generates a list of species contained within that alignment. Using checkboxes you can specify taxa to be included in the output (all species are selected by default).
- * **Choose to include/exclude blocks with missing species** - if an alignment block does not contain any one of the species you selected within **Choose species** menu and this option is set to **exclude blocks with missing species**, then such a block **will not** be included in the output (see **Example 2** below). For example, if you want to extact human, mouse, and rat from a series of alignments and one of the blocks does not contain mouse sequence, then this block will not be converted to FASTA and will not be returned.
+ * **Choose to include/exclude blocks with missing species** - if an alignment block does not contain any one of the species you selected within **Choose species** menu and this option is set to **exclude blocks with missing species**, then such a block **will not** be included in the output (see **Example 2** below). For example, if you want to extract human, mouse, and rat from a series of alignments and one of the blocks does not contain mouse sequence, then this block will not be converted to FASTA and will not be returned.
-----
diff -r f7459ad62be9 -r d669408018a1 tools/maf/maf_to_interval.xml
--- a/tools/maf/maf_to_interval.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/maf/maf_to_interval.xml Sun Sep 27 23:11:43 2009 -0400
@@ -1,5 +1,5 @@
<tool id="MAF_To_Interval1" name="MAF to Interval" force_history_refresh="True">
- <description>Converts a MAF formated file to the Interval format</description>
+ <description>Converts a MAF formatted file to the Interval format</description>
<command interpreter="python">maf_to_interval.py $input1 $out_file1 $out_file1.id $__new_file_path__ $input1.dbkey $species $input1.metadata.species $complete_blocks $remove_gaps</command>
<inputs>
<param format="maf" name="input1" type="data" label="MAF file to convert"/>
diff -r f7459ad62be9 -r d669408018a1 tools/metag_tools/blat_wrapper.xml
--- a/tools/metag_tools/blat_wrapper.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/metag_tools/blat_wrapper.xml Sun Sep 27 23:11:43 2009 -0400
@@ -45,17 +45,17 @@
.. class:: warningmark
- Use a smaller word size (*Minimal Size of Exact Match*) will increase the computational time.
+Using a smaller word size (*Minimal Size of Exact Match*) will increase the computational time.
.. class:: warningmark
-Use a larger mismatch number (*Number of Mismatch in the Word*) will increase the computational time.
+Using a larger mismatch number (*Number of Mismatch in the Word*) will increase the computational time.
-----
**What it does**
-This tool currently uses alignment program **BLAT**. Your short reads file is searched against a genome build or another uploaded file.
+This tool currently uses the **BLAT** alignment program. Your short reads file is searched against a genome build or another uploaded file.
-----
@@ -66,13 +66,13 @@
>seq1
TGGTAATGGTGGTTTTTTTTTTTTTTTTTTATTTTT
-- Use default settings:
+- Use the default settings:
- alignment identity must be higher than or equal to 90%.
- minimal size of exact match to trigger an alignment is 11.
- - allow 0 mismatch in the above exact match size.
+ - allow 0 mismatches in the above exact match size.
- Search against ce2 (C. elegans March 2004), partial result::
diff -r f7459ad62be9 -r d669408018a1 tools/metag_tools/convert_SOLiD_color2nuc.xml
--- a/tools/metag_tools/convert_SOLiD_color2nuc.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/metag_tools/convert_SOLiD_color2nuc.xml Sun Sep 27 23:11:43 2009 -0400
@@ -25,13 +25,13 @@
.. class:: warningmark
- The tool was designed for color space files generated from ABI SOLiD sequencer. The file format must be fasta-like: the title starts with a ">" sign, and each color space sequence starts with a leading nucleotide.
+The tool was designed for color space files generated from an ABI SOLiD sequencer. The file format must be fasta-like: the title starts with a ">" character, and each color space sequence starts with a leading nucleotide.
-----
**What it does**
- This tool convert a color space sequence to nucleotides. The leading character must be one of the nucleotides: A, C, G, T.
+This tool converts a color space sequence to nucleotides. The leading character must be a nucleotide: A, C, G, or T.
-----
diff -r f7459ad62be9 -r d669408018a1 tools/metag_tools/mapping_to_ucsc.xml
--- a/tools/metag_tools/mapping_to_ucsc.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/metag_tools/mapping_to_ucsc.xml Sun Sep 27 23:11:43 2009 -0400
@@ -145,7 +145,7 @@
**What it does**
-This tool formats mapping data generated by short read mappers, as a custom track that can be displayed at UCSC genome browser.
+This tool turns mapping data generated by short read mappers into a format that can be displayed in the UCSC genome browser as a custom track.
-----
diff -r f7459ad62be9 -r d669408018a1 tools/metag_tools/megablast_xml_parser.xml
--- a/tools/metag_tools/megablast_xml_parser.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/metag_tools/megablast_xml_parser.xml Sun Sep 27 23:11:43 2009 -0400
@@ -26,13 +26,13 @@
**What it does**
-This tool will process XML output of any NCBI blast tool (if you run your own blast jobs, the XML output can be generated with **-m 7** option).
+This tool processes the XML output of any NCBI blast tool (if you run your own blast jobs, the XML output can be generated with **-m 7** option).
-----
**Output fields**
-This tools returns tab-delimted output with the following fields::
+This tools returns tab-delimited output with the following fields::
Description Example
----------------------------------------- -----------------
diff -r f7459ad62be9 -r d669408018a1 tools/metag_tools/short_reads_figure_high_quality_length.xml
--- a/tools/metag_tools/short_reads_figure_high_quality_length.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/metag_tools/short_reads_figure_high_quality_length.xml Sun Sep 27 23:11:43 2009 -0400
@@ -32,7 +32,7 @@
.. class:: warningmark
- To use this tool your dataset needs to be in *Quality Score* format. Click pencil icon next to your dataset to set datatype to *Quality Score* (see below for examples of quality scores).
+To use this tool, your dataset needs to be in the *Quality Score* format. Click the pencil icon next to your dataset to set the datatype to *Quality Score* (see below for examples).
-----
@@ -62,7 +62,7 @@
>seq1
23 33 34 25 28 28 28 32 23 34 27 4 28 28 31 21 28
-- If the threshold was set to 20:
+- If the threshold is set to 20:
- a low quality score 4 in the middle separated two segments of lengths 11 and 5.
diff -r f7459ad62be9 -r d669408018a1 tools/metag_tools/short_reads_figure_score.xml
--- a/tools/metag_tools/short_reads_figure_score.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/metag_tools/short_reads_figure_score.xml Sun Sep 27 23:11:43 2009 -0400
@@ -29,7 +29,7 @@
.. class:: warningmark
- To use this tool your dataset needs to be in *Quality Score* format. Click pencil icon next to your dataset to set datatype to *Quality Score* (see below for examples of quality scores).
+To use this tool, your dataset needs to be in the *Quality Score* format. Click the pencil icon next to your dataset to set the datatype to *Quality Score* (see below for examples).
-----
@@ -58,7 +58,7 @@
.. image:: ../static/images/short_reads_boxplot.png
-where the **X-axis** is coordiante along the read and the **Y-axis** is quality score adjusted to comply with the Phred score metric. Units on the X-axis depend on whether your data comes from Roche (454) or Illumina (Solexa) and ABI SOLiD machines:
+where the **X-axis** is coordinate along the read and the **Y-axis** is quality score adjusted to comply with the Phred score metric. Units on the X-axis depend on whether your data comes from Roche (454) or Illumina (Solexa) and ABI SOLiD machines:
- For Roche (454) X-axis (shown above) indicates **relative** position (in %) within reads as this technology produces reads of different lengths;
- For Illumina (Solexa) and ABI SOLiD X-axis shows **absolute** position in nucleotides within reads.
diff -r f7459ad62be9 -r d669408018a1 tools/metag_tools/short_reads_trim_seq.xml
--- a/tools/metag_tools/short_reads_trim_seq.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/metag_tools/short_reads_trim_seq.xml Sun Sep 27 23:11:43 2009 -0400
@@ -57,7 +57,7 @@
.. class:: warningmark
- To use this tool your quality score dataset needs to be in *Quality Score* format. Click pencil icon next to your dataset to set datatype to *Quality Score*.
+To use this tool, your dataset needs to be in the *Quality Score* format. Click the pencil icon next to your dataset to set the datatype to *Quality Score* (see below for examples).
-----
diff -r f7459ad62be9 -r d669408018a1 tools/metag_tools/shrimp_color_wrapper.xml
--- a/tools/metag_tools/shrimp_color_wrapper.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/metag_tools/shrimp_color_wrapper.xml Sun Sep 27 23:11:43 2009 -0400
@@ -55,7 +55,7 @@
.. class:: warningmark
-To use this tool your dataset needs to be in *csfasta* (as ABI SOLiD color-space sequences) format. Click pencil icon next to your dataset to set datatype to *csfasta*.
+To use this tool your dataset needs to be in the *csfasta* (as ABI SOLiD color-space sequences) format. Click pencil icon next to your dataset to set the datatype to *csfasta*.
-----
@@ -166,8 +166,8 @@
-h S-W Full Hit Threshold (default: 68.00%)
In letter-space, this parameter determines the threshold
score for both vectored and full Smith-Waterman alignments.
- Any values less than this quanitity will be thrown away.
- *Note* This option differs slightly in meaning between letter-space and colour-space.
+ Any values less than this quantity will be thrown away.
+ *Note* This option differs slightly in meaning between letter-space and color-space.
-v
diff -r f7459ad62be9 -r d669408018a1 tools/metag_tools/shrimp_wrapper.xml
--- a/tools/metag_tools/shrimp_wrapper.xml Sat Sep 26 18:05:36 2009 -0400
+++ b/tools/metag_tools/shrimp_wrapper.xml Sun Sep 27 23:11:43 2009 -0400
@@ -219,7 +219,7 @@
running time. Higher values will have the opposite effect.
-t Seed Hit Taboo Length (default: 4)
The seed taboo length specifies how many target genome bases
- or colours must exist prior to a previous seed match in order
+ or colors must exist prior to a previous seed match in order
to count another seed match as a hit.
-9 Seed Generation Taboo Length (default: 0)
@@ -265,8 +265,8 @@
-h S-W Hit Threshold (default: 68.00%)
In letter-space, this parameter determines the threshold
score for both vectored and full Smith-Waterman alignments.
- Any values less than this quanitity will be thrown away.
- *Note* This option differs slightly in meaning between letter-space and colour-space.
+ Any values less than this quantity will be thrown away.
+ *Note* This option differs slightly in meaning between letter-space and color-space.
-----
details: http://www.bx.psu.edu/hg/galaxy/rev/93dc1855f0d6
changeset: 2786:93dc1855f0d6
user: Kanwei Li <kanwei at gmail.com>
date: Fri Sep 25 18:47:40 2009 -0400
description:
trackster now supports BED files
10 file(s) affected in this change:
datatypes_conf.xml.sample
lib/galaxy/datatypes/converters/bed_to_interval_index_converter.py
lib/galaxy/datatypes/converters/bed_to_interval_index_converter.xml
lib/galaxy/visualization/__init__.py
lib/galaxy/visualization/tracks/__init__.py
lib/galaxy/visualization/tracks/data/array_tree.py
lib/galaxy/visualization/tracks/data/interval_index.py
lib/galaxy/web/controllers/tracks.py
static/scripts/packed/trackster.js
static/scripts/trackster.js
diffs (238 lines):
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 datatypes_conf.xml.sample
--- a/datatypes_conf.xml.sample Fri Sep 25 17:21:26 2009 -0400
+++ b/datatypes_conf.xml.sample Fri Sep 25 18:47:40 2009 -0400
@@ -7,6 +7,7 @@
<datatype extension="bed" type="galaxy.datatypes.interval:Bed" display_in_upload="true">
<converter file="bed_to_gff_converter.xml" target_datatype="gff"/>
<converter file="interval_to_coverage.xml" target_datatype="coverage"/>
+ <converter file="bed_to_interval_index_converter.xml" target_datatype="interval_index"/>
</datatype>
<datatype extension="binseq.zip" type="galaxy.datatypes.images:Binseq" mimetype="application/zip" display_in_upload="true"/>
<datatype extension="len" type="galaxy.datatypes.chrominfo:ChromInfo" display_in_upload="true">
@@ -61,6 +62,7 @@
<converter file="wiggle_to_array_tree_converter.xml" target_datatype="array_tree"/>
</datatype>
<datatype extension="array_tree" type="galaxy.datatypes.data:Data" />
+ <datatype extension="interval_index" type="galaxy.datatypes.data:Data" />
<!-- EMBOSS TOOLS -->
<datatype extension="acedb" type="galaxy.datatypes.data:Text"/>
<datatype extension="asn1" type="galaxy.datatypes.data:Text"/>
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/datatypes/converters/bed_to_interval_index_converter.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/datatypes/converters/bed_to_interval_index_converter.py Fri Sep 25 18:47:40 2009 -0400
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+from __future__ import division
+
+import sys
+from galaxy import eggs
+import pkg_resources; pkg_resources.require( "bx-python" )
+from bx.interval_index_file import Indexes
+
+def main():
+
+ input_fname = sys.argv[1]
+ out_fname = sys.argv[2]
+ index = Indexes()
+ offset = 0
+
+ for line in open(input_fname, "r"):
+ feature = line.split()
+ if feature[0] == "track":
+ offset += len(line)
+ continue
+ chrom = feature[0]
+ chrom_start = int(feature[1])
+ chrom_end = int(feature[2])
+ index.add( chrom, chrom_start, chrom_end, offset )
+ offset += len(line)
+
+ index.write( open(out_fname, "w") )
+
+if __name__ == "__main__":
+ main()
+
\ No newline at end of file
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/datatypes/converters/bed_to_interval_index_converter.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/datatypes/converters/bed_to_interval_index_converter.xml Fri Sep 25 18:47:40 2009 -0400
@@ -0,0 +1,14 @@
+<tool id="CONVERTER_bed_to_interval_index_0" name="Convert BED to Interval Index" version="1.0.0">
+<!-- <description>__NOT_USED_CURRENTLY_FOR_CONVERTERS__</description> -->
+ <command interpreter="python">bed_to_interval_index_converter.py $input1 $output1</command>
+ <inputs>
+ <page>
+ <param format="bed" name="input1" type="data" label="Choose BED file"/>
+ </page>
+ </inputs>
+ <outputs>
+ <data format="interval_index" name="output1"/>
+ </outputs>
+ <help>
+ </help>
+</tool>
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/visualization/__init__.py
--- a/lib/galaxy/visualization/__init__.py Fri Sep 25 17:21:26 2009 -0400
+++ b/lib/galaxy/visualization/__init__.py Fri Sep 25 18:47:40 2009 -0400
@@ -1,3 +1,3 @@
"""
-Package for Galaxy visulization plugins.
+Package for Galaxy visualization plugins.
"""
\ No newline at end of file
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/visualization/tracks/__init__.py
--- a/lib/galaxy/visualization/tracks/__init__.py Fri Sep 25 17:21:26 2009 -0400
+++ b/lib/galaxy/visualization/tracks/__init__.py Fri Sep 25 18:47:40 2009 -0400
@@ -1,3 +1,3 @@
"""
-Package for track style visulization using the trackster UI.
+Package for track style visualization using the trackster UI.
"""
\ No newline at end of file
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/visualization/tracks/data/array_tree.py
--- a/lib/galaxy/visualization/tracks/data/array_tree.py Fri Sep 25 17:21:26 2009 -0400
+++ b/lib/galaxy/visualization/tracks/data/array_tree.py Fri Sep 25 18:47:40 2009 -0400
@@ -1,5 +1,5 @@
"""
-Array tree data provider for Galaxy track browser.
+Array tree data provider for the Galaxy track browser.
"""
import pkg_resources; pkg_resources.require( "bx-python" )
@@ -16,7 +16,7 @@
BLOCK_SIZE = 1000
class ArrayTreeDataProvider( object ):
- def __init__( self, dataset ):
+ def __init__( self, dataset, original_dataset ):
self.dataset = dataset
def get_stats( self, chrom ):
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/visualization/tracks/data/interval_index.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/visualization/tracks/data/interval_index.py Fri Sep 25 18:47:40 2009 -0400
@@ -0,0 +1,41 @@
+"""
+Interval index data provider for the Galaxy track browser.
+Kanwei Li, 2009
+"""
+
+import pkg_resources; pkg_resources.require( "bx-python" )
+from bx.interval_index_file import Indexes
+
+class IntervalIndexDataProvider( object ):
+ def __init__( self, converted_dataset, original_dataset ):
+ self.original_dataset = original_dataset
+ self.converted_dataset = converted_dataset
+
+ def get_data( self, chrom, start, end ):
+ start, end = int(start), int(end)
+ chrom = str(chrom)
+ source = open( self.original_dataset.file_name )
+ index = Indexes( self.converted_dataset.file_name )
+ results = []
+
+ for start, end, offset in index.find(chrom, start, end):
+ source.seek(offset)
+ feature = source.readline().split()
+ payload = { 'start': start, 'end': end, 'name': feature[3] }
+ try:
+ block_sizes = [ int(n) for n in feature[10].split(',') if n != '']
+ block_starts = [ int(n) for n in feature[11].split(',') if n != '' ]
+ blocks = zip(block_sizes, block_starts)
+ payload['block_start_end'] = [ (chrom_start + block[1], chrom_start + block[1] + block[0]) for block in blocks]
+ except:
+ pass
+
+ try:
+ payload['exon_start'] = int(feature[6])
+ payload['exon_end'] = int(feature[7])
+ except:
+ pass
+
+ results.append(payload)
+
+ return results
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py Fri Sep 25 17:21:26 2009 -0400
+++ b/lib/galaxy/web/controllers/tracks.py Fri Sep 25 18:47:40 2009 -0400
@@ -24,6 +24,7 @@
from galaxy.util.bunch import Bunch
from galaxy.visualization.tracks.data.array_tree import ArrayTreeDataProvider
+from galaxy.visualization.tracks.data.interval_index import IntervalIndexDataProvider
# Message strings returned to browser
messages = Bunch(
@@ -36,18 +37,20 @@
# Dataset type required for each track type. This needs to be more flexible,
# there might be multiple types of indexes that suffice for a given track type.
track_type_to_dataset_type = {
- "line": "array_tree"
+ "line": "array_tree",
+ "feature": "interval_index"
}
# Mapping from dataset type to a class that can fetch data from a file of that
# type. This also needs to be more flexible.
dataset_type_to_data_provider = {
- "array_tree": ArrayTreeDataProvider
+ "array_tree": ArrayTreeDataProvider,
+ "interval_index": IntervalIndexDataProvider
}
# FIXME: hardcoding this for now, but it should be derived from the available
# converters
-browsable_types = set( ["wig" ] )
+browsable_types = set( ["wig", "bed" ] )
class TracksController( BaseController ):
"""
@@ -66,7 +69,7 @@
to 'index' once datasets to browse have been selected.
"""
session = trans.sa_session
- # If the user clicked the submit button explicately, try to build the browser
+ # If the user clicked the submit button explicitly, try to build the browser
if browse and dataset_ids:
if not isinstance( dataset_ids, list ):
dataset_ids = [ dataset_ids ]
@@ -183,7 +186,7 @@
return messages.PENDING
# We have a dataset in the right format that is ready to use, wrap in
# a data provider that knows how to access it
- data_provider = dataset_type_to_data_provider[ converted_dataset_type ]( converted_dataset )
+ data_provider = dataset_type_to_data_provider[ converted_dataset_type ]( converted_dataset, dataset )
# Return stats if we need them
if stats: return data_provider.get_stats( chrom )
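Taken together, the two dictionaries give the controller a small two-step dispatch: track type -> required converted dataset type -> provider class. A hedged sketch of that lookup (provider_for is an illustrative helper, not controller code):

    from galaxy.visualization.tracks.data.array_tree import ArrayTreeDataProvider
    from galaxy.visualization.tracks.data.interval_index import IntervalIndexDataProvider

    # Illustrative helper: resolve the provider for a track type and wrap
    # the ( converted, original ) dataset pair in it.
    track_type_to_dataset_type = { "line": "array_tree", "feature": "interval_index" }
    dataset_type_to_data_provider = { "array_tree": ArrayTreeDataProvider,
                                      "interval_index": IntervalIndexDataProvider }

    def provider_for( track_type, converted_dataset, original_dataset ):
        converted_type = track_type_to_dataset_type[ track_type ]
        provider_class = dataset_type_to_data_provider[ converted_type ]
        # Both providers now take ( converted, original ) -- hence the
        # two-argument constructor change in this changeset.
        return provider_class( converted_dataset, original_dataset )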
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 static/scripts/packed/trackster.js
--- a/static/scripts/packed/trackster.js Fri Sep 25 17:21:26 2009 -0400
+++ b/static/scripts/packed/trackster.js Fri Sep 25 18:47:40 2009 -0400
@@ -1,1 +1,1 @@
-var DENSITY=1000;var DataCache=function(b,a){this.type=b;this.track=a;this.cache=Object()};$.extend(DataCache.prototype,{get:function(d,b){var c=this.cache;if(!(c[d]&&c[d][b])){if(!c[d]){c[d]=Object()}var a=b*DENSITY*d;var e=(b+1)*DENSITY*d;c[d][b]={state:"loading"};$.getJSON(data_url,{track_type:this.track.track_type,chrom:this.track.view.chrom,low:a,high:e,dataset_id:this.track.dataset_id},function(f){if(f=="pending"){setTimeout(fetcher,5000)}else{c[d][b]={state:"loaded",values:f}}$(document).trigger("redraw")})}return c[d][b]}});var View=function(a,b){this.chrom=a;this.tracks=[];this.max_low=0;this.max_high=b;this.low=this.max_low;this.high=this.max_high;this.length=this.max_high-this.max_low};$.extend(View.prototype,{add_track:function(a){a.view=this;this.tracks.push(a);if(a.init){a.init()}},redraw:function(){$("#overview-box").css({left:(this.low/this.length)*$("#overview-viewport").width(),width:Math.max(4,((this.high-this.low)/this.length)*$("#overview-viewport").widt
h())}).show();$("#low").text(this.low);$("#high").text(this.high);for(var a in this.tracks){this.tracks[a].draw()}$("#bottom-spacer").remove();$("#viewport").append('<div id="bottom-spacer" style="height: 200px;"></div>')},move:function(b,a){this.low=Math.max(this.max_low,Math.floor(b));this.high=Math.min(this.length,Math.ceil(a))},zoom_in:function(d,b){var c=this.high-this.low;var e=c/d/2;if(b==undefined){var a=(this.low+this.high)/2}else{var a=this.low+c*b/$(document).width()}this.low=Math.floor(a-e);this.high=Math.ceil(a+e);if(this.low<this.max_low){this.low=this.max_low;this.high=c/d}else{if(this.high>this.max_high){this.high=this.max_high;this.low=this.max_high-c/d}}if(this.high-this.low<1){this.high=this.low+1}},zoom_out:function(c){var a=(this.low+this.high)/2;var b=this.high-this.low;var d=b*c/2;this.low=Math.floor(Math.max(0,a-d));this.high=Math.ceil(Math.min(this.length,a+d))},left:function(b){var a=this.high-this.low;var c=Math.floor(a/b);if(this.low-c<0){this.low
=0;this.high=this.low+a}else{this.low-=c;this.high-=c}},right:function(b){var a=this.high-this.low;var c=Math.floor(a/b);if(this.high+c>this.length){this.high=this.length;this.low=this.high-a}else{this.low+=c;this.high+=c}}});var Track=function(a,b){this.name=a;this.parent_element=b;this.make_container()};$.extend(Track.prototype,{make_container:function(){this.header_div=$("<div class='track-header'>").text(this.name);this.content_div=$("<div class='track-content'>");this.container_div=$("<div class='track'></div>").append(this.header_div).append(this.content_div);this.parent_element.append(this.container_div)}});var TiledTrack=function(){this.last_resolution=null;this.last_w_scale=null;this.tile_cache={}};$.extend(TiledTrack.prototype,Track.prototype,{draw:function(){var k=this.view.low,c=this.view.high,e=c-k;var b=Math.pow(10,Math.ceil(Math.log(e/DENSITY)/Math.log(10)));b=Math.max(b,1);b=Math.min(b,100000);var o=$("<div style='position: relative;'></div>");this.content_di
v.children(":first").remove();this.content_div.append(o);var m=this.content_div.width(),d=this.content_div.height(),p=m/e,l={},n={};if(this.last_resolution==b&&this.last_w_scale==p){l=this.tile_cache}var g;var a=Math.floor(k/b/DENSITY);var i=0;while((a*1000*b)<c){if(a in l){g=l[a];var f=a*DENSITY*b;g.css({left:(f-this.view.low)*p});o.append(g)}else{g=this.draw_tile(b,a,o,p,d)}if(g){n[a]=g;i=Math.max(i,g.height())}a+=1}o.css("height",i);this.last_resolution=b;this.last_w_scale=p;this.tile_cache=n}});var LineTrack=function(c,b,a){Track.call(this,c,$("#viewport"));this.track_type="line";this.height_px=(a?a:100);this.container_div.addClass("line-track");this.dataset_id=b;this.cache=new DataCache("",this)};$.extend(LineTrack.prototype,TiledTrack.prototype,{make_container:function(){Track.prototype.make_container.call(this);this.content_div.css("height",this.height_px)},init:function(){track=this;$.getJSON(data_url,{stats:true,track_type:track.track_type,chrom:this.view.chrom,low:
null,high:null,dataset_id:this.dataset_id},function(a){if(a){track.min_value=a.min;track.max_value=a.max;track.vertical_range=track.max_value-track.min_value;track.view.redraw()}})},draw_tile:function(d,a,o,s,p){if(!this.vertical_range){return}var k=a*DENSITY*d,r=(a+1)*DENSITY*d,c=DENSITY*d;var n=this.cache.get(d,a);var h;if(n.state=="loading"){h=$("<div class='loading tile'></div>")}else{h=$("<canvas class='tile'></canvas>")}h.css({position:"absolute",top:0,left:(k-this.view.low)*s,});o.append(h);if(n.state=="loading"){e=false;return null}var b=h;b.get(0).width=Math.ceil(c*s);b.get(0).height=this.height_px;var q=b.get(0).getContext("2d");var e=false;q.beginPath();var g=n.values;if(!g){return}for(var f=0;f<g.length-1;f++){var m=g[f][0]-k;var l=g[f][1];if(isNaN(l)){e=false}else{m=m*s;y_above_min=l-this.min_value;l=y_above_min/this.vertical_range*this.height_px;if(e){q.lineTo(m,l)}else{q.moveTo(m,l);e=true}}}q.stroke();return h}});var LabelTrack=function(a){Track.call(this,nul
l,a);this.container_div.addClass("label-track")};$.extend(LabelTrack.prototype,Track.prototype,{draw:function(){var c=this.view,d=c.high-c.low,g=Math.floor(Math.pow(10,Math.floor(Math.log(d)/Math.log(10)))),a=Math.floor(c.low/g)*g,e=this.content_div.width(),b=$("<div style='position: relative; height: 1.3em;'></div>");while(a<c.high){var f=(a-c.low)/d*e;b.append($("<div class='label'>"+a+"</div>").css({position:"absolute",left:f-1}));a+=g}this.content_div.children(":first").remove();this.content_div.append(b)}});var itemHeight=13,itemPad=3,thinHeight=7,thinOffset=3;var FeatureTrack=function(b,a){Track.call(this,b,$("#viewport"));this.track_type="feature";this.container_div.addClass("feature-track");this.dataset_id=a;this.zo_slots=new Object();this.show_labels_scale=0.01;this.showing_labels=false};$.extend(FeatureTrack.prototype,TiledTrack.prototype,{calc_slots:function(d){end_ary=new Array();var c=this.container_div.width()/(this.view.high-this.view.low);if(d){this.zi_slots=
new Object()}var b=$("<canvas></canvas>").get(0).getContext("2d");for(var a in this.values){feature=this.values[a];f_start=Math.floor(Math.max(this.view.max_low,(feature.start-this.view.max_low)*c));if(d){f_start-=b.measureText(feature.name).width}f_end=Math.ceil(Math.min(this.view.max_high,(feature.end-this.view.max_low)*c));j=0;while(true){if(end_ary[j]==undefined||end_ary[j]<f_start){end_ary[j]=f_end;if(d){this.zi_slots[feature.name]=j}else{this.zo_slots[feature.name]=j}break}j++}}},init:function(){var a=this;$.getJSON("getfeature",{start:this.view.max_low,end:this.view.max_high,dataset_id:this.dataset_id,chrom:this.view.chrom},function(b){a.values=b;a.calc_slots();a.slots=a.zo_slots;a.draw()})},draw_tile:function(q,t,e,g,f){if(!this.values){return null}if(g>this.show_labels_scale&&!this.showing_labels){this.showing_labels=true;if(!this.zi_slots){this.calc_slots(true)}this.slots=this.zi_slots}else{if(g<=this.show_labels_scale&&this.showing_labels){this.showing_labels=fals
e;this.slots=this.zo_slots}}var u=t*DENSITY*q,c=(t+1)*DENSITY*q,b=DENSITY*q;var k=this.view,m=k.high-k.low,o=Math.ceil(b*g),h=new Array(),n=200,l=$("<canvas class='tile'></canvas>");l.css({position:"absolute",top:0,left:(u-this.view.low)*g,"border-right":"1px solid #ddd"});l.get(0).width=o;l.get(0).height=n;var p=l.get(0).getContext("2d");var r=0;for(var s in this.values){feature=this.values[s];if(feature.start<=c&&feature.end>=u){f_start=Math.floor(Math.max(0,(feature.start-u)*g));f_end=Math.ceil(Math.min(o,(feature.end-u)*g));p.fillStyle="#000";p.fillRect(f_start,this.slots[feature.name]*10+5,f_end-f_start,1);if(this.showing_labels&&p.fillText){p.font="10px monospace";p.textAlign="right";p.fillText(feature.name,f_start,this.slots[feature.name]*10+8)}if(feature.exon_start&&feature.exon_end){var d=Math.floor(Math.max(0,(feature.exon_start-u)*g));var w=Math.ceil(Math.min(o,(feature.exon_end-u)*g))}for(var s in feature.blocks){block=feature.blocks[s];block_start=Math.floor(Mat
h.max(0,(block[0]-u)*g));block_end=Math.ceil(Math.min(o,(block[1]-u)*g));var a=3,v=4;if(d&&block_start>=d&&block_end<=w){a=5,v=3}p.fillRect(d,this.slots[feature.name]*10+v,block_end-block_start,a)}r++}}e.append(l);return l},});
\ No newline at end of file
+var DENSITY=1000;var DataCache=function(b,a){this.type=b;this.track=a;this.cache=Object()};$.extend(DataCache.prototype,{get:function(d,b){var c=this.cache;if(!(c[d]&&c[d][b])){if(!c[d]){c[d]=Object()}var a=b*DENSITY*d;var e=(b+1)*DENSITY*d;c[d][b]={state:"loading"};$.getJSON(data_url,{track_type:this.track.track_type,chrom:this.track.view.chrom,low:a,high:e,dataset_id:this.track.dataset_id},function(f){if(f=="pending"){setTimeout(fetcher,5000)}else{c[d][b]={state:"loaded",values:f}}$(document).trigger("redraw")})}return c[d][b]}});var View=function(a,b){this.chrom=a;this.tracks=[];this.max_low=0;this.max_high=b;this.low=this.max_low;this.high=this.max_high;this.length=this.max_high-this.max_low};$.extend(View.prototype,{add_track:function(a){a.view=this;this.tracks.push(a);if(a.init){a.init()}},redraw:function(){$("#overview-box").css({left:(this.low/this.length)*$("#overview-viewport").width(),width:Math.max(4,((this.high-this.low)/this.length)*$("#overview-viewport").widt
h())}).show();$("#low").text(this.low);$("#high").text(this.high);for(var a in this.tracks){this.tracks[a].draw()}$("#bottom-spacer").remove();$("#viewport").append('<div id="bottom-spacer" style="height: 200px;"></div>')},move:function(b,a){this.low=Math.max(this.max_low,Math.floor(b));this.high=Math.min(this.length,Math.ceil(a))},zoom_in:function(d,b){if(this.max_high==0){return}var c=this.high-this.low;var e=c/d/2;if(b==undefined){var a=(this.low+this.high)/2}else{var a=this.low+c*b/$(document).width()}this.low=Math.floor(a-e);this.high=Math.ceil(a+e);if(this.low<this.max_low){this.low=this.max_low;this.high=c/d}else{if(this.high>this.max_high){this.high=this.max_high;this.low=this.max_high-c/d}}if(this.high-this.low<1){this.high=this.low+1}},zoom_out:function(c){if(this.max_high==0){return}var a=(this.low+this.high)/2;var b=this.high-this.low;var d=b*c/2;this.low=Math.floor(Math.max(0,a-d));this.high=Math.ceil(Math.min(this.length,a+d))},left:function(b){var a=this.high-
this.low;var c=Math.floor(a/b);if(this.low-c<0){this.low=0;this.high=this.low+a}else{this.low-=c;this.high-=c}},right:function(b){var a=this.high-this.low;var c=Math.floor(a/b);if(this.high+c>this.length){this.high=this.length;this.low=this.high-a}else{this.low+=c;this.high+=c}}});var Track=function(a,b){this.name=a;this.parent_element=b;this.make_container()};$.extend(Track.prototype,{make_container:function(){this.header_div=$("<div class='track-header'>").text(this.name);this.content_div=$("<div class='track-content'>");this.container_div=$("<div class='track'></div>").append(this.header_div).append(this.content_div);this.parent_element.append(this.container_div)}});var TiledTrack=function(){this.last_resolution=null;this.last_w_scale=null;this.tile_cache={}};$.extend(TiledTrack.prototype,Track.prototype,{draw:function(){var k=this.view.low,c=this.view.high,e=c-k;var b=Math.pow(10,Math.ceil(Math.log(e/DENSITY)/Math.log(10)));b=Math.max(b,1);b=Math.min(b,100000);var o=$("<
div style='position: relative;'></div>");this.content_div.children(":first").remove();this.content_div.append(o);var m=this.content_div.width(),d=this.content_div.height(),p=m/e,l={},n={};if(this.last_resolution==b&&this.last_w_scale==p){l=this.tile_cache}var g;var a=Math.floor(k/b/DENSITY);var i=0;while((a*1000*b)<c){if(a in l){g=l[a];var f=a*DENSITY*b;g.css({left:(f-this.view.low)*p});o.append(g)}else{g=this.draw_tile(b,a,o,p,d)}if(g){n[a]=g;i=Math.max(i,g.height())}a+=1}o.css("height",i);this.last_resolution=b;this.last_w_scale=p;this.tile_cache=n}});var LineTrack=function(c,b,a){Track.call(this,c,$("#viewport"));this.track_type="line";this.height_px=(a?a:100);this.container_div.addClass("line-track");this.dataset_id=b;this.cache=new DataCache("",this)};$.extend(LineTrack.prototype,TiledTrack.prototype,{make_container:function(){Track.prototype.make_container.call(this);this.content_div.css("height",this.height_px)},init:function(){track=this;$.getJSON(data_url,{stats:tru
e,track_type:track.track_type,chrom:this.view.chrom,low:null,high:null,dataset_id:this.dataset_id},function(a){if(a){track.min_value=a.min;track.max_value=a.max;track.vertical_range=track.max_value-track.min_value;track.view.redraw()}})},draw_tile:function(d,a,o,s,p){if(!this.vertical_range){return}var k=a*DENSITY*d,r=(a+1)*DENSITY*d,c=DENSITY*d;var n=this.cache.get(d,a);var h;if(n.state=="loading"){h=$("<div class='loading tile'></div>")}else{h=$("<canvas class='tile'></canvas>")}h.css({position:"absolute",top:0,left:(k-this.view.low)*s,});o.append(h);if(n.state=="loading"){e=false;return null}var b=h;b.get(0).width=Math.ceil(c*s);b.get(0).height=this.height_px;var q=b.get(0).getContext("2d");var e=false;q.beginPath();var g=n.values;if(!g){return}for(var f=0;f<g.length-1;f++){var m=g[f][0]-k;var l=g[f][1];if(isNaN(l)){e=false}else{m=m*s;y_above_min=l-this.min_value;l=y_above_min/this.vertical_range*this.height_px;if(e){q.lineTo(m,l)}else{q.moveTo(m,l);e=true}}}q.stroke();re
turn h}});var LabelTrack=function(a){Track.call(this,null,a);this.container_div.addClass("label-track")};$.extend(LabelTrack.prototype,Track.prototype,{draw:function(){var c=this.view,d=c.high-c.low,g=Math.floor(Math.pow(10,Math.floor(Math.log(d)/Math.log(10)))),a=Math.floor(c.low/g)*g,e=this.content_div.width(),b=$("<div style='position: relative; height: 1.3em;'></div>");while(a<c.high){var f=(a-c.low)/d*e;b.append($("<div class='label'>"+a+"</div>").css({position:"absolute",left:f-1}));a+=g}this.content_div.children(":first").remove();this.content_div.append(b)}});var itemHeight=13,itemPad=3,thinHeight=7,thinOffset=3;var FeatureTrack=function(b,a){Track.call(this,b,$("#viewport"));this.track_type="feature";this.container_div.addClass("feature-track");this.dataset_id=a;this.zo_slots=new Object();this.show_labels_scale=0.01;this.showing_labels=false};$.extend(FeatureTrack.prototype,TiledTrack.prototype,{calc_slots:function(d){end_ary=new Array();var c=this.container_div.wid
th()/(this.view.high-this.view.low);if(d){this.zi_slots=new Object()}var b=$("<canvas></canvas>").get(0).getContext("2d");for(var a in this.values){feature=this.values[a];f_start=Math.floor(Math.max(this.view.max_low,(feature.start-this.view.max_low)*c));if(d){f_start-=b.measureText(feature.name).width}f_end=Math.ceil(Math.min(this.view.max_high,(feature.end-this.view.max_low)*c));j=0;while(true){if(end_ary[j]==undefined||end_ary[j]<f_start){end_ary[j]=f_end;if(d){this.zi_slots[feature.name]=j}else{this.zo_slots[feature.name]=j}break}j++}}},init:function(){var a=this;$.getJSON(data_url,{track_type:a.track_type,low:a.view.max_low,high:a.view.max_high,dataset_id:a.dataset_id,chrom:a.view.chrom},function(b){a.values=b;a.calc_slots();a.slots=a.zo_slots;a.draw()})},draw_tile:function(q,t,e,g,f){if(!this.values){return null}if(g>this.show_labels_scale&&!this.showing_labels){this.showing_labels=true;if(!this.zi_slots){this.calc_slots(true)}this.slots=this.zi_slots}else{if(g<=this.s
how_labels_scale&&this.showing_labels){this.showing_labels=false;this.slots=this.zo_slots}}var u=t*DENSITY*q,c=(t+1)*DENSITY*q,b=DENSITY*q;var k=this.view,m=k.high-k.low,o=Math.ceil(b*g),h=new Array(),n=200,l=$("<canvas class='tile'></canvas>");l.css({position:"absolute",top:0,left:(u-this.view.low)*g,});l.get(0).width=o;l.get(0).height=n;var p=l.get(0).getContext("2d");var r=0;for(var s in this.values){feature=this.values[s];if(feature.start<=c&&feature.end>=u){f_start=Math.floor(Math.max(0,(feature.start-u)*g));f_end=Math.ceil(Math.min(o,(feature.end-u)*g));p.fillStyle="#000";p.fillRect(f_start,this.slots[feature.name]*10+5,f_end-f_start,1);if(this.showing_labels&&p.fillText){p.font="10px monospace";p.textAlign="right";p.fillText(feature.name,f_start,this.slots[feature.name]*10+8)}if(feature.exon_start&&feature.exon_end){var d=Math.floor(Math.max(0,(feature.exon_start-u)*g));var w=Math.ceil(Math.min(o,(feature.exon_end-u)*g))}for(var s in feature.blocks){block=feature.bloc
ks[s];block_start=Math.floor(Math.max(0,(block[0]-u)*g));block_end=Math.ceil(Math.min(o,(block[1]-u)*g));var a=3,v=4;if(d&&block_start>=d&&block_end<=w){a=5,v=3}p.fillRect(d,this.slots[feature.name]*10+v,block_end-block_start,a)}r++}}e.append(l);return l},});
\ No newline at end of file
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 static/scripts/trackster.js
--- a/static/scripts/trackster.js Fri Sep 25 17:21:26 2009 -0400
+++ b/static/scripts/trackster.js Fri Sep 25 18:47:40 2009 -0400
@@ -364,7 +364,8 @@
init: function() {
var track = this;
- $.getJSON( "getfeature", { 'start': this.view.max_low, 'end': this.view.max_high, 'dataset_id': this.dataset_id, 'chrom': this.view.chrom }, function ( data ) {
+ $.getJSON( data_url, { track_type: track.track_type, low: track.view.max_low, high: track.view.max_high,
+ dataset_id: track.dataset_id, chrom: track.view.chrom }, function ( data ) {
track.values = data;
track.calc_slots();
track.slots = track.zo_slots;
@@ -402,7 +403,6 @@
position: "absolute",
top: 0,
left: ( tile_low - this.view.low ) * w_scale,
- "border-right": "1px solid #ddd"
});
new_canvas.get(0).width = width;
new_canvas.get(0).height = height;
details: http://www.bx.psu.edu/hg/galaxy/rev/f7459ad62be9
changeset: 2788:f7459ad62be9
user: jeremy goecks <jeremy.goecks at emory.edu>
date: Sat Sep 26 18:05:36 2009 -0400
description:
Turn off grid state preservation.
1 file(s) affected in this change:
lib/galaxy/web/framework/helpers/grids.py
diffs (12 lines):
diff -r 659713ba1d92 -r f7459ad62be9 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py Sat Sep 26 17:33:21 2009 -0400
+++ b/lib/galaxy/web/framework/helpers/grids.py Sat Sep 26 18:05:36 2009 -0400
@@ -22,7 +22,7 @@
standard_filters = []
default_filter = None
default_sort_key = None
- preserve_state = True
+ preserve_state = False
# Set preference names.
cur_filter_pref_name = ".filter"
cur_sort_key_pref_name = ".sort_key"
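With the default flipped, a grid that still wants sticky per-user filters and sort keys must opt back in. A minimal sketch, assuming a hypothetical subclass of the Grid base class in grids.py:

    from galaxy.web.framework.helpers.grids import Grid

    # Hypothetical grid subclass: everything now inherits the new default
    # ( preserve_state = False ) unless it explicitly opts back in.
    class SavedHistoryGrid( Grid ):
        default_sort_key = "name"
        preserve_state = True   # opt back in to per-user filter/sort persistence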
details: http://www.bx.psu.edu/hg/galaxy/rev/659713ba1d92
changeset: 2787:659713ba1d92
user: jeremy goecks <jeremy.goecks at emory.edu>
date: Sat Sep 26 17:33:21 2009 -0400
description:
Ensure that all user preferences are in unicode.
1 file(s) affected in this change:
lib/galaxy/web/framework/helpers/grids.py
diffs (58 lines):
diff -r 93dc1855f0d6 -r 659713ba1d92 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py Fri Sep 25 18:47:40 2009 -0400
+++ b/lib/galaxy/web/framework/helpers/grids.py Sat Sep 26 17:33:21 2009 -0400
@@ -44,19 +44,21 @@
base_filter = {}
if self.default_filter:
base_filter = self.default_filter.copy()
- base_sort_key = self.default_sort_key
+ base_sort_key = self.default_sort_key
if self.preserve_state:
+ pref_name = unicode( self.__class__.__name__ + self.cur_filter_pref_name )
saved_filter_pref = trans.sa_session.query( UserPreference ).\
- filter_by( name=self.__class__.__name__ + self.cur_filter_pref_name, user_id=trans.get_user().id ).first()
+ filter_by( name=pref_name, user_id=trans.get_user().id ).first()
if saved_filter_pref:
saved_filter = from_json_string( saved_filter_pref.value )
base_filter.update( saved_filter )
+ pref_name = unicode( self.__class__.__name__ + self.cur_sort_key_pref_name )
saved_sort_key_pref = trans.sa_session.query( UserPreference ).\
- filter_by( name=self.__class__.__name__ + self.cur_sort_key_pref_name, user_id=trans.get_user().id ).first()
+ filter_by( name=pref_name, user_id=trans.get_user().id ).first()
if saved_sort_key_pref:
base_sort_key = from_json_string( saved_sort_key_pref.value )
-
+
# Build initial query
query = self.build_initial_query( session )
query = self.apply_default_filter( trans, query, **kwargs )
@@ -110,19 +112,20 @@
# Save current filter and sort key.
if self.preserve_state:
- pref_name = self.__class__.__name__ + self.cur_filter_pref_name
+ pref_name = unicode( self.__class__.__name__ + self.cur_filter_pref_name )
if not saved_filter_pref:
saved_filter_pref = UserPreference( name=pref_name )
trans.get_user().preferences.append( saved_filter_pref )
- saved_filter_pref.value = to_json_string( cur_filter_dict )
- if not saved_sort_key_pref:
- pref_name = self.__class__.__name__ + self.cur_sort_key_pref_name
+ saved_filter_pref.value = unicode( to_json_string( cur_filter_dict ) )
+ if sort_key:
if not saved_sort_key_pref:
- saved_sort_key_pref = UserPreference( name=pref_name )
- trans.get_user().preferences.append( saved_sort_key_pref )
- saved_sort_key_pref.value = to_json_string( sort_key )
+ pref_name = unicode( self.__class__.__name__ + self.cur_sort_key_pref_name )
+ if not saved_sort_key_pref:
+ saved_sort_key_pref = UserPreference( name=pref_name )
+ trans.get_user().preferences.append( saved_sort_key_pref )
+ saved_sort_key_pref.value = unicode( to_json_string( sort_key ) )
trans.sa_session.flush()
-
+
# Render grid.
def url( *args, **kwargs ):
# Only include sort/filter arguments if not linking to another
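The substance of the change: the user_preference table's name and value columns are Unicode (see the migration script below), so keys built from byte strings are now wrapped in unicode() before they are queried or flushed. A standalone sketch of the pitfall:

    # Hedged, standalone sketch (Python 2) of the str/unicode mismatch this
    # changeset removes. __class__.__name__ and the pref-name constants are
    # byte strings; concatenating them yields a str, while the column that
    # stores them is Unicode(255).
    class_name = "HistoryGrid"     # stands in for self.__class__.__name__
    suffix = ".sort_key"           # stands in for cur_sort_key_pref_name

    pref_name = class_name + suffix             # str: risky for a Unicode column
    pref_name = unicode( class_name + suffix )  # what the grid code does now

    assert isinstance( pref_name, unicode )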
details: http://www.bx.psu.edu/hg/galaxy/rev/d8e3ad46bfa3
changeset: 2785:d8e3ad46bfa3
user: jeremy goecks <jeremy.goecks at emory.edu>
date: Fri Sep 25 17:21:26 2009 -0400
description:
Fix migration script naming collision.
2 file(s) affected in this change:
lib/galaxy/model/migrate/versions/0020_user_prefs.py
lib/galaxy/model/migrate/versions/0021_user_prefs.py
diffs (100 lines):
diff -r 6f8b5f1e8ec9 -r d8e3ad46bfa3 lib/galaxy/model/migrate/versions/0020_user_prefs.py
--- a/lib/galaxy/model/migrate/versions/0020_user_prefs.py Fri Sep 25 17:07:13 2009 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-"""
-This migration script adds a user preferences table to Galaxy.
-"""
-
-from sqlalchemy import *
-from migrate import *
-
-import datetime
-now = datetime.datetime.utcnow
-
-import logging
-log = logging.getLogger( __name__ )
-
-metadata = MetaData( migrate_engine )
-
-def display_migration_details():
- print ""
- print "This migration script adds a user preferences table to Galaxy."
- print ""
-
-
-# New table to support user preferences.
-
-UserPreference_table = Table( "user_preference", metadata,
- Column( "id", Integer, primary_key=True ),
- Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
- Column( "name", Unicode( 255 ), index=True),
- Column( "value", Unicode( 1024 ) ) )
-
-def upgrade():
- display_migration_details()
- metadata.reflect()
- try:
- UserPreference_table.create()
- except Exception, e:
- print str(e)
- log.debug( "Creating user_preference table failed: %s" % str( e ) )
-
-def downgrade():
- metadata.reflect()
- try:
- UserPreference_table.drop()
- except Exception, e:
- print str(e)
- log.debug( "Dropping user_preference table failed: %s" % str( e ) )
\ No newline at end of file
diff -r 6f8b5f1e8ec9 -r d8e3ad46bfa3 lib/galaxy/model/migrate/versions/0021_user_prefs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0021_user_prefs.py Fri Sep 25 17:21:26 2009 -0400
@@ -0,0 +1,45 @@
+"""
+This migration script adds a user preferences table to Galaxy.
+"""
+
+from sqlalchemy import *
+from migrate import *
+
+import datetime
+now = datetime.datetime.utcnow
+
+import logging
+log = logging.getLogger( __name__ )
+
+metadata = MetaData( migrate_engine )
+
+def display_migration_details():
+ print ""
+ print "This migration script adds a user preferences table to Galaxy."
+ print ""
+
+
+# New table to support user preferences.
+
+UserPreference_table = Table( "user_preference", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
+ Column( "name", Unicode( 255 ), index=True),
+ Column( "value", Unicode( 1024 ) ) )
+
+def upgrade():
+ display_migration_details()
+ metadata.reflect()
+ try:
+ UserPreference_table.create()
+ except Exception, e:
+ print str(e)
+ log.debug( "Creating user_preference table failed: %s" % str( e ) )
+
+def downgrade():
+ metadata.reflect()
+ try:
+ UserPreference_table.drop()
+ except Exception, e:
+ print str(e)
+ log.debug( "Dropping user_preference table failed: %s" % str( e ) )
\ No newline at end of file
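The collision: sqlalchemy-migrate orders scripts by the numeric prefix of the files in the versions/ directory, so two scripts both numbered 0020 (user_prefs here, and library_upload_job added in the merge below) are ambiguous; the fix renames one to 0021. A toy check of the invariant, not migrate's own code:

    import os, re

    # Toy sketch: assert that no two migration scripts share a numeric
    # version prefix. The directory path matches the tree above.
    def check_version_prefixes( versions_dir ):
        seen = {}
        for name in sorted( os.listdir( versions_dir ) ):
            match = re.match( r"(\d+)_.*\.py$", name )
            if not match:
                continue
            version = int( match.group( 1 ) )
            if version in seen:
                raise Exception( "version %d claimed by both %s and %s"
                                 % ( version, seen[ version ], name ) )
            seen[ version ] = name

    check_version_prefixes( "lib/galaxy/model/migrate/versions" )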
details: http://www.bx.psu.edu/hg/galaxy/rev/e5d57c9e2824
changeset: 2782:e5d57c9e2824
user: Kelly Vincent <kpvincent at bx.psu.edu>
date: Fri Sep 25 16:49:30 2009 -0400
description:
Fixed an error in the help section of the Sam Pileup tool config.
1 file(s) affected in this change:
tools/samtools/sam_pileup.xml
diffs (33 lines):
diff -r dd50d8d45177 -r e5d57c9e2824 tools/samtools/sam_pileup.xml
--- a/tools/samtools/sam_pileup.xml Fri Sep 25 14:50:34 2009 -0400
+++ b/tools/samtools/sam_pileup.xml Fri Sep 25 16:49:30 2009 -0400
@@ -85,9 +85,9 @@
**Types of pileup datasets**
-The description of pileup format below is largely based on information that can be found on SAMTools_ documentation page. The 6- and 10-column variants are described below.
+The description of pileup format below is largely based on information that can be found on SAMTools Pileup_ documentation page. The 6- and 10-column variants are described below.
-.. _SAMTools: http://samtools.sourceforge.net/pileup.shtml
+.. _Pileup: http://samtools.sourceforge.net/pileup.shtml
**Six column pileup**::
@@ -111,7 +111,7 @@
**Ten column pileup**
-The `ten-column`__ pileup incorporates additional consensus information generated with *-c* option of *samtools pileup* command::
+The `ten-column` (consensus_) pileup incorporates additional consensus information generated with *-c* option of *samtools pileup* command::
1 2 3 4 5 6 7 8 9 10
@@ -137,7 +137,7 @@
10 Quality values (phred33 scale, see Galaxy wiki for more)
-.. __: http://samtools.sourceforge.net/cns0.shtml
+.. _consensus: http://samtools.sourceforge.net/cns0.shtml
</help>
details: http://www.bx.psu.edu/hg/galaxy/rev/6f8b5f1e8ec9
changeset: 2784:6f8b5f1e8ec9
user: jeremy goecks <jeremy.goecks at emory.edu>
date: Fri Sep 25 17:07:13 2009 -0400
description:
Merge
2 file(s) affected in this change:
lib/galaxy/model/__init__.py
lib/galaxy/model/mapping.py
diffs (2075 lines):
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/jobs/__init__.py Fri Sep 25 17:07:13 2009 -0400
@@ -357,13 +357,14 @@
# Restore input / output data lists
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
+ out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
# These can be passed on the command line if wanted as $userId $userEmail
- if job.history.user: # check for anonymous user!
- userId = '%d' % job.history.user.id
- userEmail = str(job.history.user.email)
+ if job.history and job.history.user: # check for anonymous user!
+ userId = '%d' % job.history.user.id
+ userEmail = str(job.history.user.email)
else:
- userId = 'Anonymous'
- userEmail = 'Anonymous'
+ userId = 'Anonymous'
+ userEmail = 'Anonymous'
incoming['userId'] = userId
incoming['userEmail'] = userEmail
# Build params, done before hook so hook can use
@@ -424,7 +425,7 @@
log.debug( "fail(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
except ( IOError, OSError ), e:
log.error( "fail(): Missing output file in working directory: %s" % e )
- for dataset_assoc in job.output_datasets:
+ for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset = dataset_assoc.dataset
dataset.refresh()
dataset.state = dataset.states.ERROR
@@ -444,7 +445,7 @@
def change_state( self, state, info = False ):
job = model.Job.get( self.job_id )
job.refresh()
- for dataset_assoc in job.output_datasets:
+ for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset = dataset_assoc.dataset
dataset.refresh()
dataset.state = state
@@ -504,10 +505,10 @@
self.fail( "Job %s's output dataset(s) could not be read" % job.id )
return
job_context = ExpressionContext( dict( stdout = stdout, stderr = stderr ) )
- for dataset_assoc in job.output_datasets:
+ for dataset_assoc in job.output_datasets + job.output_library_datasets:
context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
#should this also be checking library associations? - can a library item be added from a history before the job has ended? - lets not allow this to occur
- for dataset in dataset_assoc.dataset.dataset.history_associations: #need to update all associated output hdas, i.e. history was shared with job running
+ for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations: #need to update all associated output hdas, i.e. history was shared with job running
dataset.blurb = 'done'
dataset.peek = 'no peek'
dataset.info = context['stdout'] + context['stderr']
@@ -576,6 +577,7 @@
# custom post process setup
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
+ out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows
param_dict = self.tool.params_from_strings( param_dict, self.app )
# Check for and move associated_files
@@ -647,11 +649,11 @@
job = model.Job.get( self.job_id )
if self.app.config.outputs_to_working_directory:
self.output_paths = []
- for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets ]:
+ for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets + job.output_library_datasets ]:
false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % data.id ) )
self.output_paths.append( DatasetPath( data.id, data.file_name, false_path ) )
else:
- self.output_paths = [ DatasetPath( da.dataset.dataset.id, da.dataset.file_name ) for da in job.output_datasets ]
+ self.output_paths = [ DatasetPath( da.dataset.dataset.id, da.dataset.file_name ) for da in job.output_datasets + job.output_library_datasets ]
return self.output_paths
def get_output_file_id( self, file ):
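Every hunk in this file is the same mechanical edit: wherever the job wrapper walked job.output_datasets, it now walks the concatenation with job.output_library_datasets, so library-bound outputs pick up state changes, failure handling and finish processing too. Schematically, as a hedged helper (not actual Galaxy code):

    # Illustrative helper capturing the pattern applied throughout this
    # file: treat history-bound and library-bound outputs uniformly.
    def each_output_dataset( job, update ):
        for dataset_assoc in job.output_datasets + job.output_library_datasets:
            dataset = dataset_assoc.dataset
            dataset.refresh()
            update( dataset )

    # e.g. the body of change_state() reduces to:
    #   each_output_dataset( job, lambda dataset: setattr( dataset, "state", state ) )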
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/model/__init__.py Fri Sep 25 17:07:13 2009 -0400
@@ -74,6 +74,7 @@
self.parameters = []
self.input_datasets = []
self.output_datasets = []
+ self.output_library_datasets = []
self.state = Job.states.NEW
self.info = None
self.job_runner_name = None
@@ -84,6 +85,8 @@
self.input_datasets.append( JobToInputDatasetAssociation( name, dataset ) )
def add_output_dataset( self, name, dataset ):
self.output_datasets.append( JobToOutputDatasetAssociation( name, dataset ) )
+ def add_output_library_dataset( self, name, dataset ):
+ self.output_library_datasets.append( JobToOutputLibraryDatasetAssociation( name, dataset ) )
def set_state( self, state ):
self.state = state
# For historical reasons state propogates down to datasets
@@ -138,6 +141,11 @@
self.dataset = dataset
class JobToOutputDatasetAssociation( object ):
+ def __init__( self, name, dataset ):
+ self.name = name
+ self.dataset = dataset
+
+class JobToOutputLibraryDatasetAssociation( object ):
def __init__( self, name, dataset ):
self.name = name
self.dataset = dataset
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/model/mapping.py Fri Sep 25 17:07:13 2009 -0400
@@ -107,7 +107,7 @@
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
- Column( "state", TrimmedString( 64 ) ),
+ Column( "state", TrimmedString( 64 ), index=True ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "purged", Boolean, index=True, default=False ),
Column( "purgable", Boolean, default=True ),
@@ -307,6 +307,7 @@
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ),
+ Column( "library_folder_id", Integer, ForeignKey( "library_folder.id" ), index=True ),
Column( "tool_id", String( 255 ) ),
Column( "tool_version", TEXT, default="1.0.0" ),
Column( "state", String( 64 ), index=True ),
@@ -337,6 +338,12 @@
Column( "id", Integer, primary_key=True ),
Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
Column( "dataset_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ),
+ Column( "name", String(255) ) )
+
+JobToOutputLibraryDatasetAssociation.table = Table( "job_to_output_library_dataset", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
+ Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ),
Column( "name", String(255) ) )
JobExternalOutputMetadata.table = Table( "job_external_output_metadata", metadata,
@@ -914,6 +921,9 @@
assign_mapper( context, JobToOutputDatasetAssociation, JobToOutputDatasetAssociation.table,
properties=dict( job=relation( Job ), dataset=relation( HistoryDatasetAssociation, lazy=False ) ) )
+assign_mapper( context, JobToOutputLibraryDatasetAssociation, JobToOutputLibraryDatasetAssociation.table,
+ properties=dict( job=relation( Job ), dataset=relation( LibraryDatasetDatasetAssociation, lazy=False ) ) )
+
assign_mapper( context, JobParameter, JobParameter.table )
assign_mapper( context, JobExternalOutputMetadata, JobExternalOutputMetadata.table,
@@ -924,9 +934,11 @@
assign_mapper( context, Job, Job.table,
properties=dict( galaxy_session=relation( GalaxySession ),
history=relation( History ),
+ library_folder=relation( LibraryFolder ),
parameters=relation( JobParameter, lazy=False ),
input_datasets=relation( JobToInputDatasetAssociation, lazy=False ),
output_datasets=relation( JobToOutputDatasetAssociation, lazy=False ),
+ output_library_datasets=relation( JobToOutputLibraryDatasetAssociation, lazy=False ),
external_output_metadata = relation( JobExternalOutputMetadata, lazy = False ) ) )
assign_mapper( context, Event, Event.table,
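With the mapping in place, a job accumulates library outputs exactly the way it accumulates history outputs; each association row ties job.id to an ldda via the ldda_id column. A hedged sketch, where job, hda and ldda stand in for a mapped Job, HistoryDatasetAssociation and LibraryDatasetDatasetAssociation:

    # Hedged sketch of the model-level flow the new mapping enables;
    # `job`, `hda` and `ldda` are placeholders for mapped objects.
    job.add_output_dataset( 'output0', hda )           # history-bound (existing)
    job.add_output_library_dataset( 'output1', ldda )  # library-bound (new)

    for assoc in job.output_library_datasets:
        # assoc is a JobToOutputLibraryDatasetAssociation; assoc.dataset is
        # the ldda, loaded eagerly ( lazy=False ) like the history case.
        print assoc.name, assoc.dataset.name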
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/model/migrate/versions/0020_library_upload_job.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0020_library_upload_job.py Fri Sep 25 17:07:13 2009 -0400
@@ -0,0 +1,121 @@
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from sqlalchemy.exceptions import *
+from migrate import *
+from migrate.changeset import *
+import datetime
+now = datetime.datetime.utcnow
+import sys, logging
+# Need our custom types, but don't import anything else from model
+from galaxy.model.custom_types import *
+
+log = logging.getLogger( __name__ )
+log.setLevel(logging.DEBUG)
+handler = logging.StreamHandler( sys.stdout )
+format = "%(name)s %(levelname)s %(asctime)s %(message)s"
+formatter = logging.Formatter( format )
+handler.setFormatter( formatter )
+log.addHandler( handler )
+
+metadata = MetaData( migrate_engine )
+db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, transactional=False ) )
+
+def display_migration_details():
+ print ""
+ print "========================================"
+ print """This script creates a job_to_output_library_dataset table for allowing library
+uploads to run as regular jobs. To support this, a library_folder_id column is
+added to the job table, and library_folder/output_library_datasets relations
+are added to the Job object. An index is also added to the dataset.state
+column."""
+ print "========================================"
+
+JobToOutputLibraryDatasetAssociation_table = Table( "job_to_output_library_dataset", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
+ Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ),
+ Column( "name", String(255) ) )
+
+def upgrade():
+ display_migration_details()
+ # Load existing tables
+ metadata.reflect()
+ # Create the job_to_output_library_dataset table
+ try:
+ JobToOutputLibraryDatasetAssociation_table.create()
+ except Exception, e:
+ print "Creating job_to_output_library_dataset table failed: %s" % str( e )
+ log.debug( "Creating job_to_output_library_dataset table failed: %s" % str( e ) )
+ # Create the library_folder_id column
+ try:
+ Job_table = Table( "job", metadata, autoload=True )
+ except NoSuchTableError:
+ Job_table = None
+ log.debug( "Failed loading table job" )
+ if Job_table:
+ try:
+ col = Column( "library_folder_id", Integer, index=True )
+ col.create( Job_table )
+ assert col is Job_table.c.library_folder_id
+ except Exception, e:
+ log.debug( "Adding column 'library_folder_id' to job table failed: %s" % ( str( e ) ) )
+ try:
+ LibraryFolder_table = Table( "library_folder", metadata, autoload=True )
+ except NoSuchTableError:
+ LibraryFolder_table = None
+ log.debug( "Failed loading table library_folder" )
+ # Add 1 foreign key constraint to the job table
+ if Job_table and LibraryFolder_table:
+ try:
+ cons = ForeignKeyConstraint( [Job_table.c.library_folder_id],
+ [LibraryFolder_table.c.id],
+ name='job_library_folder_id_fk' )
+ # Create the constraint
+ cons.create()
+ except Exception, e:
+ log.debug( "Adding foreign key constraint 'job_library_folder_id_fk' to table 'library_folder' failed: %s" % ( str( e ) ) )
+ # Create the ix_dataset_state index
+ try:
+ Dataset_table = Table( "dataset", metadata, autoload=True )
+ except NoSuchTableError:
+ Dataset_table = None
+ log.debug( "Failed loading table dataset" )
+ i = Index( "ix_dataset_state", Dataset_table.c.state )
+ try:
+ i.create()
+ except Exception, e:
+ print str(e)
+ log.debug( "Adding index 'ix_dataset_state' to dataset table failed: %s" % str( e ) )
+
+def downgrade():
+ metadata.reflect()
+ # Drop the library_folder_id column
+ try:
+ Job_table = Table( "job", metadata, autoload=True )
+ except NoSuchTableError:
+ Job_table = None
+ log.debug( "Failed loading table job" )
+ if Job_table:
+ try:
+ col = Job_table.c.library_folder_id
+ col.drop()
+ except Exception, e:
+ log.debug( "Dropping column 'library_folder_id' from job table failed: %s" % ( str( e ) ) )
+ # Drop the job_to_output_library_dataset table
+ try:
+ JobToOutputLibraryDatasetAssociation_table.drop()
+ except Exception, e:
+ print str(e)
+ log.debug( "Dropping job_to_output_library_dataset table failed: %s" % str( e ) )
+ # Drop the ix_dataset_state index
+ try:
+ Dataset_table = Table( "dataset", metadata, autoload=True )
+ except NoSuchTableError:
+ Dataset_table = None
+ log.debug( "Failed loading table dataset" )
+ i = Index( "ix_dataset_state", Dataset_table.c.state )
+ try:
+ i.drop()
+ except Exception, e:
+ print str(e)
+ log.debug( "Dropping index 'ix_dataset_state' from dataset table failed: %s" % str( e ) )
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/tools/actions/upload.py
--- a/lib/galaxy/tools/actions/upload.py Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/tools/actions/upload.py Fri Sep 25 17:07:13 2009 -0400
@@ -1,126 +1,22 @@
-import os, shutil, urllib, StringIO, re, gzip, tempfile, shutil, zipfile
-from cgi import FieldStorage
+import os
from __init__ import ToolAction
-from galaxy import datatypes, jobs
-from galaxy.datatypes import sniff
-from galaxy import model, util
-from galaxy.util.json import to_json_string
-
-import sys, traceback
+from galaxy.tools.actions import upload_common
import logging
log = logging.getLogger( __name__ )
class UploadToolAction( ToolAction ):
- # Action for uploading files
- def persist_uploads( self, incoming ):
- if 'files' in incoming:
- new_files = []
- temp_files = []
- for upload_dataset in incoming['files']:
- f = upload_dataset['file_data']
- if isinstance( f, FieldStorage ):
- assert not isinstance( f.file, StringIO.StringIO )
- assert f.file.name != '<fdopen>'
- local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
- f.file.close()
- upload_dataset['file_data'] = dict( filename = f.filename,
- local_filename = local_filename )
- if upload_dataset['url_paste'].strip() != '':
- upload_dataset['url_paste'] = datatypes.sniff.stream_to_file( StringIO.StringIO( upload_dataset['url_paste'] ), prefix="strio_url_paste_" )[0]
- else:
- upload_dataset['url_paste'] = None
- new_files.append( upload_dataset )
- incoming['files'] = new_files
- return incoming
def execute( self, tool, trans, incoming={}, set_output_hid = True ):
dataset_upload_inputs = []
for input_name, input in tool.inputs.iteritems():
if input.type == "upload_dataset":
dataset_upload_inputs.append( input )
assert dataset_upload_inputs, Exception( "No dataset upload groups were found." )
- # Get any precreated datasets (when using asynchronous uploads)
- async_datasets = []
- self.precreated_datasets = []
- if incoming.get( 'async_datasets', None ) not in ["None", "", None]:
- async_datasets = incoming['async_datasets'].split(',')
- for id in async_datasets:
- try:
- data = trans.app.model.HistoryDatasetAssociation.get( int( id ) )
- except:
- log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id )
- continue
- if trans.user is None and trans.galaxy_session.current_history != data.history:
- log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)' % ( data.id, trans.galaxy_session.id ) )
- elif data.history.user != trans.user:
- log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)' % ( data.id, trans.user.id ) )
- else:
- self.precreated_datasets.append( data )
- data_list = []
-
- incoming = self.persist_uploads( incoming )
-
- json_file = tempfile.mkstemp()
- json_file_path = json_file[1]
- json_file = os.fdopen( json_file[0], 'w' )
- for dataset_upload_input in dataset_upload_inputs:
- uploaded_datasets = dataset_upload_input.get_uploaded_datasets( trans, incoming )
- for uploaded_dataset in uploaded_datasets:
- data = self.get_precreated_dataset( uploaded_dataset.name )
- if not data:
- data = trans.app.model.HistoryDatasetAssociation( history = trans.history, create_dataset = True )
- data.name = uploaded_dataset.name
- data.state = data.states.QUEUED
- data.extension = uploaded_dataset.file_type
- data.dbkey = uploaded_dataset.dbkey
- data.flush()
- trans.history.add_dataset( data, genome_build = uploaded_dataset.dbkey )
- permissions = trans.app.security_agent.history_get_default_permissions( trans.history )
- trans.app.security_agent.set_all_dataset_permissions( data.dataset, permissions )
- else:
- data.extension = uploaded_dataset.file_type
- data.dbkey = uploaded_dataset.dbkey
- data.flush()
- trans.history.genome_build = uploaded_dataset.dbkey
- if uploaded_dataset.type == 'composite':
- # we need to init metadata before the job is dispatched
- data.init_meta()
- for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
- setattr( data.metadata, meta_name, meta_value )
- data.flush()
- json = dict( file_type = uploaded_dataset.file_type,
- dataset_id = data.dataset.id,
- dbkey = uploaded_dataset.dbkey,
- type = uploaded_dataset.type,
- metadata = uploaded_dataset.metadata,
- primary_file = uploaded_dataset.primary_file,
- extra_files_path = data.extra_files_path,
- composite_file_paths = uploaded_dataset.composite_files,
- composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
- else:
- try:
- is_binary = uploaded_dataset.datatype.is_binary
- except:
- is_binary = None
- json = dict( file_type = uploaded_dataset.file_type,
- ext = uploaded_dataset.ext,
- name = uploaded_dataset.name,
- dataset_id = data.dataset.id,
- dbkey = uploaded_dataset.dbkey,
- type = uploaded_dataset.type,
- is_binary = is_binary,
- space_to_tab = uploaded_dataset.space_to_tab,
- path = uploaded_dataset.path )
- json_file.write( to_json_string( json ) + '\n' )
- data_list.append( data )
- json_file.close()
-
- #cleanup unclaimed precreated datasets:
- for data in self.precreated_datasets:
- log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( data.id ) )
- data.state = data.states.ERROR
- data.info = 'No file contents were available.'
+ precreated_datasets = upload_common.get_precreated_datasets( trans, incoming, trans.app.model.HistoryDatasetAssociation )
+ incoming = upload_common.persist_uploads( incoming )
+ json_file_path, data_list = upload_common.create_paramfile( trans, incoming, precreated_datasets, dataset_upload_inputs )
+ upload_common.cleanup_unused_precreated_datasets( precreated_datasets )
if not data_list:
try:
@@ -129,38 +25,4 @@
pass
return 'No data was entered in the upload form, please go back and choose data to upload.'
- # Create the job object
- job = trans.app.model.Job()
- job.session_id = trans.get_galaxy_session().id
- job.history_id = trans.history.id
- job.tool_id = tool.id
- job.tool_version = tool.version
- job.state = trans.app.model.Job.states.UPLOAD
- job.flush()
- log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
- trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )
-
- for name, value in tool.params_to_strings( incoming, trans.app ).iteritems():
- job.add_parameter( name, value )
- job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
- for i, dataset in enumerate( data_list ):
- job.add_output_dataset( 'output%i' % i, dataset )
- job.state = trans.app.model.Job.states.NEW
- trans.app.model.flush()
-
- # Queue the job for execution
- trans.app.job_queue.put( job.id, tool )
- trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
- return dict( [ ( i, v ) for i, v in enumerate( data_list ) ] )
-
- def get_precreated_dataset( self, name ):
- """
- Return a dataset matching a name from the list of precreated (via async
- upload) datasets. If there's more than one upload with the exact same
- name, we need to pop one (the first) so it isn't chosen next time.
- """
- names = [ d.name for d in self.precreated_datasets ]
- if names.count( name ) > 0:
- return self.precreated_datasets.pop( names.index( name ) )
- else:
- return None
+ return upload_common.create_job( trans, incoming, tool, json_file_path, data_list )
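The same helpers also serve the library upload path, which is the point of factoring them out. A hedged sketch of that variant, where trans, incoming, tool, dataset_upload_inputs and folder are placeholders for the live objects:

    from galaxy.tools.actions import upload_common

    # Hedged sketch: the upload pipeline aimed at a library folder instead
    # of a history. Signatures match upload_common.py below.
    precreated = upload_common.get_precreated_datasets(
        trans, incoming, trans.app.model.LibraryDatasetDatasetAssociation )
    incoming = upload_common.persist_uploads( incoming )
    json_file_path, data_list = upload_common.create_paramfile(
        trans, incoming, precreated, dataset_upload_inputs, folder=folder )
    upload_common.cleanup_unused_precreated_datasets( precreated )
    if data_list:
        outputs = upload_common.create_job( trans, incoming, tool,
                                            json_file_path, data_list, folder=folder )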
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/tools/actions/upload_common.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/tools/actions/upload_common.py Fri Sep 25 17:07:13 2009 -0400
@@ -0,0 +1,235 @@
+import os, tempfile, StringIO
+from cgi import FieldStorage
+from galaxy import datatypes, util
+from galaxy.datatypes import sniff
+from galaxy.util.json import to_json_string
+
+import logging
+log = logging.getLogger( __name__ )
+
+def persist_uploads( params ):
+ """
+ Turn any uploads in the submitted form to persisted files.
+ """
+ if 'files' in params:
+ new_files = []
+ temp_files = []
+ for upload_dataset in params['files']:
+ f = upload_dataset['file_data']
+ if isinstance( f, FieldStorage ):
+ assert not isinstance( f.file, StringIO.StringIO )
+ assert f.file.name != '<fdopen>'
+ local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
+ f.file.close()
+ upload_dataset['file_data'] = dict( filename = f.filename,
+ local_filename = local_filename )
+ if upload_dataset['url_paste'].strip() != '':
+ upload_dataset['url_paste'] = datatypes.sniff.stream_to_file( StringIO.StringIO( upload_dataset['url_paste'] ), prefix="strio_url_paste_" )[0]
+ else:
+ upload_dataset['url_paste'] = None
+ new_files.append( upload_dataset )
+ params['files'] = new_files
+ return params
+
+def get_precreated_datasets( trans, params, data_obj ):
+ """
+ Get any precreated datasets (when using asynchronous uploads).
+ """
+ rval = []
+ async_datasets = []
+ if params.get( 'async_datasets', None ) not in ["None", "", None]:
+ async_datasets = params['async_datasets'].split(',')
+ user, roles = trans.get_user_and_roles()
+ for id in async_datasets:
+ try:
+ data = data_obj.get( int( id ) )
+ except:
+ log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id )
+ continue
+ if data_obj is trans.app.model.HistoryDatasetAssociation:
+ if user is None and trans.galaxy_session.current_history != data.history:
+ log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)' % ( data.id, trans.galaxy_session.id ) )
+ elif data.history.user != user:
+ log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)' % ( data.id, user.id ) )
+ else:
+ rval.append( data )
+ elif data_obj is trans.app.model.LibraryDatasetDatasetAssociation:
+ if not trans.app.security_agent.can_add_library_item( user, roles, data.library_dataset.folder ):
+ log.error( 'Got a precreated dataset (%s) but this user (%s) is not allowed to write to it' % ( data.id, user.id ) )
+ else:
+ rval.append( data )
+ return rval
+
+def get_precreated_dataset( precreated_datasets, name ):
+ """
+ Return a dataset matching a name from the list of precreated (via async
+ upload) datasets. If there's more than one upload with the exact same
+ name, we need to pop one (the first) so it isn't chosen next time.
+ """
+ names = [ d.name for d in precreated_datasets ]
+ if names.count( name ) > 0:
+ return precreated_datasets.pop( names.index( name ) )
+ else:
+ return None
+
+def cleanup_unused_precreated_datasets( precreated_datasets ):
+ for data in precreated_datasets:
+ log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( data.id ) )
+ data.state = data.states.ERROR
+ data.info = 'No file contents were available.'
+
+def new_history_upload( trans, uploaded_dataset ):
+ hda = trans.app.model.HistoryDatasetAssociation( name = uploaded_dataset.name,
+ extension = uploaded_dataset.file_type,
+ dbkey = uploaded_dataset.dbkey,
+ history = trans.history,
+ create_dataset = True )
+ hda.state = hda.states.QUEUED
+ hda.flush()
+ trans.history.add_dataset( hda, genome_build = uploaded_dataset.dbkey )
+ permissions = trans.app.security_agent.history_get_default_permissions( trans.history )
+ trans.app.security_agent.set_all_dataset_permissions( hda.dataset, permissions )
+ return hda
+
+def new_library_upload( trans, uploaded_dataset, replace_dataset, folder,
+ template, template_field_contents, roles, message ):
+ if replace_dataset:
+ ld = replace_dataset
+ else:
+ ld = trans.app.model.LibraryDataset( folder=folder, name=uploaded_dataset.name )
+ ld.flush()
+ trans.app.security_agent.copy_library_permissions( folder, ld )
+ ldda = trans.app.model.LibraryDatasetDatasetAssociation( name = uploaded_dataset.name,
+ extension = uploaded_dataset.file_type,
+ dbkey = uploaded_dataset.dbkey,
+ library_dataset = ld,
+ user = trans.user,
+ create_dataset = True )
+ ldda.state = ldda.states.QUEUED
+ ldda.message = message
+ ldda.flush()
+ # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
+ trans.app.security_agent.copy_library_permissions( ld, ldda )
+ if replace_dataset:
+ # Copy the Dataset level permissions from replace_dataset to the new LibraryDatasetDatasetAssociation.dataset
+ trans.app.security_agent.copy_dataset_permissions( replace_dataset.library_dataset_dataset_association.dataset, ldda.dataset )
+ else:
+ # Copy the current user's DefaultUserPermissions to the new LibraryDatasetDatasetAssociation.dataset
+ trans.app.security_agent.set_all_dataset_permissions( ldda.dataset, trans.app.security_agent.user_get_default_permissions( trans.user ) )
+ folder.add_library_dataset( ld, genome_build=uploaded_dataset.dbkey )
+ folder.flush()
+ ld.library_dataset_dataset_association_id = ldda.id
+ ld.flush()
+ # Handle template included in the upload form, if any
+ if template and template_field_contents:
+ # Since information templates are inherited, the template fields can be displayed on the upload form.
+ # If the user has added field contents, we'll need to create a new form_values and info_association
+ # for the new library_dataset_dataset_association object.
+ # Create a new FormValues object, using the template we previously retrieved
+ form_values = trans.app.model.FormValues( template, template_field_contents )
+ form_values.flush()
+ # Create a new info_association between the current ldda and form_values
+ info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( ldda, template, form_values )
+ info_association.flush()
+ # If roles were selected upon upload, restrict access to the Dataset to those roles
+ if roles:
+ for role in roles:
+ dp = trans.app.model.DatasetPermissions( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action, ldda.dataset, role )
+ dp.flush()
+ return ldda
+
+def create_paramfile( trans, params, precreated_datasets, dataset_upload_inputs,
+ replace_dataset=None, folder=None, template=None,
+ template_field_contents=None, roles=None, message=None ):
+ """
+ Create the upload tool's JSON "param" file.
+ """
+ data_list = []
+ json_file = tempfile.mkstemp()
+ json_file_path = json_file[1]
+ json_file = os.fdopen( json_file[0], 'w' )
+ for dataset_upload_input in dataset_upload_inputs:
+ uploaded_datasets = dataset_upload_input.get_uploaded_datasets( trans, params )
+ for uploaded_dataset in uploaded_datasets:
+ data = get_precreated_dataset( precreated_datasets, uploaded_dataset.name )
+ if not data:
+ if folder:
+ data = new_library_upload( trans, uploaded_dataset, replace_dataset, folder, template, template_field_contents, roles, message )
+ else:
+ data = new_history_upload( trans, uploaded_dataset )
+ else:
+ data.extension = uploaded_dataset.file_type
+ data.dbkey = uploaded_dataset.dbkey
+ data.flush()
+ if folder:
+ folder.genome_build = uploaded_dataset.dbkey
+ folder.flush()
+ else:
+ trans.history.genome_build = uploaded_dataset.dbkey
+ if uploaded_dataset.type == 'composite':
+ # we need to init metadata before the job is dispatched
+ data.init_meta()
+ for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
+ setattr( data.metadata, meta_name, meta_value )
+ data.flush()
+ json = dict( file_type = uploaded_dataset.file_type,
+ dataset_id = data.dataset.id,
+ dbkey = uploaded_dataset.dbkey,
+ type = uploaded_dataset.type,
+ metadata = uploaded_dataset.metadata,
+ primary_file = uploaded_dataset.primary_file,
+ extra_files_path = data.extra_files_path,
+ composite_file_paths = uploaded_dataset.composite_files,
+ composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
+ else:
+ try:
+ is_binary = uploaded_dataset.datatype.is_binary
+ except:
+ is_binary = None
+ json = dict( file_type = uploaded_dataset.file_type,
+ ext = uploaded_dataset.ext,
+ name = uploaded_dataset.name,
+ dataset_id = data.dataset.id,
+ dbkey = uploaded_dataset.dbkey,
+ type = uploaded_dataset.type,
+ is_binary = is_binary,
+ space_to_tab = uploaded_dataset.space_to_tab,
+ path = uploaded_dataset.path )
+ json_file.write( to_json_string( json ) + '\n' )
+ data_list.append( data )
+ json_file.close()
+ return ( json_file_path, data_list )
+
+def create_job( trans, params, tool, json_file_path, data_list, folder=None ):
+ """
+ Create the upload job.
+ """
+ job = trans.app.model.Job()
+ job.session_id = trans.get_galaxy_session().id
+ if folder:
+ job.library_folder_id = folder.id
+ else:
+ job.history_id = trans.history.id
+ job.tool_id = tool.id
+ job.tool_version = tool.version
+ job.state = job.states.UPLOAD
+ job.flush()
+ log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
+ trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )
+
+ for name, value in tool.params_to_strings( params, trans.app ).iteritems():
+ job.add_parameter( name, value )
+ job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
+ if folder:
+ for i, dataset in enumerate( data_list ):
+ job.add_output_library_dataset( 'output%i' % i, dataset )
+ else:
+ for i, dataset in enumerate( data_list ):
+ job.add_output_dataset( 'output%i' % i, dataset )
+ job.state = job.states.NEW
+ trans.app.model.flush()
+
+ # Queue the job for execution
+ trans.app.job_queue.put( job.id, tool )
+ trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
+ return dict( [ ( 'output%i' % i, v ) for i, v in enumerate( data_list ) ] )
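
For orientation, the JSON "param" file these helpers build is newline-delimited: one record per queued dataset, using the field names visible in create_paramfile() above. A minimal standalone sketch of the format (write_paramfile() and the sample record are illustrative only, not Galaxy API):

import json, os, tempfile

def write_paramfile( records ):
    # One JSON object per line, as create_paramfile() writes them.
    fd, path = tempfile.mkstemp()
    json_file = os.fdopen( fd, 'w' )
    for record in records:
        json_file.write( json.dumps( record ) + '\n' )
    json_file.close()
    return path

# Hypothetical single-file upload record (field values assumed):
path = write_paramfile( [ dict( file_type='auto', ext=None, name='1.bed',
                                dataset_id=42, dbkey='hg18', type='file',
                                is_binary=None, space_to_tab=False,
                                path='/tmp/tmpXXXXXX' ) ] )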
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/web/controllers/library.py
--- a/lib/galaxy/web/controllers/library.py Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/web/controllers/library.py Fri Sep 25 17:07:13 2009 -0400
@@ -726,17 +726,17 @@
template_id = 'None'
widgets = []
upload_option = params.get( 'upload_option', 'upload_file' )
- created_ldda_ids = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
- controller='library',
- library_id=library_id,
- folder_id=folder_id,
- template_id=template_id,
- widgets=widgets,
- replace_dataset=replace_dataset,
- **kwd )
- if created_ldda_ids:
- ldda_id_list = created_ldda_ids.split( ',' )
- total_added = len( ldda_id_list )
+ created_outputs = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
+ controller='library',
+ library_id=library_id,
+ folder_id=folder_id,
+ template_id=template_id,
+ widgets=widgets,
+ replace_dataset=replace_dataset,
+ **kwd )
+ if created_outputs:
+ ldda_id_list = [ str( v.id ) for v in created_outputs.values() ]
+ total_added = len( created_outputs.values() )
if replace_dataset:
msg = "Added %d dataset versions to the library dataset '%s' in the folder '%s'." % ( total_added, replace_dataset.name, folder.name )
else:
@@ -760,7 +760,7 @@
action='browse_library',
id=library_id,
default_action=default_action,
- created_ldda_ids=created_ldda_ids,
+ created_ldda_ids=",".join( ldda_id_list ),
msg=util.sanitize_text( msg ),
messagetype='done' ) )
@@ -769,7 +769,7 @@
trans.response.send_redirect( web.url_for( controller='library',
action='browse_library',
id=library_id,
- created_ldda_ids=created_ldda_ids,
+ created_ldda_ids=",".join( ldda_id_list ),
msg=util.sanitize_text( msg ),
messagetype='error' ) )
if not id or replace_dataset:
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/web/controllers/library_admin.py
--- a/lib/galaxy/web/controllers/library_admin.py Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/web/controllers/library_admin.py Fri Sep 25 17:07:13 2009 -0400
@@ -438,16 +438,16 @@
template_id = 'None'
widgets = []
upload_option = params.get( 'upload_option', 'upload_file' )
- created_ldda_ids = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
- controller='library_admin',
- library_id=library_id,
- folder_id=folder_id,
- template_id=template_id,
- widgets=widgets,
- replace_dataset=replace_dataset,
- **kwd )
- if created_ldda_ids:
- total_added = len( created_ldda_ids.split( ',' ) )
+ created_outputs = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
+ controller='library_admin',
+ library_id=library_id,
+ folder_id=folder_id,
+ template_id=template_id,
+ widgets=widgets,
+ replace_dataset=replace_dataset,
+ **kwd )
+ if created_outputs:
+ total_added = len( created_outputs.values() )
if replace_dataset:
msg = "Added %d dataset versions to the library dataset '%s' in the folder '%s'." % ( total_added, replace_dataset.name, folder.name )
else:
@@ -464,7 +464,7 @@
trans.response.send_redirect( web.url_for( controller='library_admin',
action='browse_library',
id=library_id,
- created_ldda_ids=created_ldda_ids,
+ created_ldda_ids=",".join( [ str( v.id ) for v in created_outputs.values() ] ),
msg=util.sanitize_text( msg ),
messagetype=messagetype ) )
elif not id or replace_dataset:
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/web/controllers/library_dataset.py
--- a/lib/galaxy/web/controllers/library_dataset.py Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/web/controllers/library_dataset.py Fri Sep 25 17:07:13 2009 -0400
@@ -3,196 +3,51 @@
from galaxy import util, jobs
from galaxy.datatypes import sniff
from galaxy.security import RBACAgent
+from galaxy.util.json import to_json_string
+from galaxy.tools.actions import upload_common
log = logging.getLogger( __name__ )
class UploadLibraryDataset( BaseController ):
- def remove_tempfile( self, filename ):
- try:
- os.unlink( filename )
- except:
- log.exception( 'failure removing temporary file: %s' % filename )
- def add_file( self, trans, folder, file_obj, name, file_type, dbkey, roles,
- info='no info', space_to_tab=False, replace_dataset=None,
- template=None, template_field_contents=[], message=None ):
- data_type = None
- line_count = 0
- temp_name, is_multi_byte = sniff.stream_to_file( file_obj )
- # See if we have an empty file
- if not os.path.getsize( temp_name ) > 0:
- raise BadFileException( "you attempted to upload an empty file." )
- if is_multi_byte:
- ext = sniff.guess_ext( temp_name, is_multi_byte=True )
- else:
- if not data_type:
- # See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress on the fly.
- is_gzipped, is_valid = self.check_gzip( temp_name )
- if is_gzipped and not is_valid:
- raise BadFileException( "you attempted to upload an inappropriate file." )
- elif is_gzipped and is_valid:
- # We need to uncompress the temp_name file
- CHUNK_SIZE = 2**20 # 1Mb
- fd, uncompressed = tempfile.mkstemp()
- gzipped_file = gzip.GzipFile( temp_name )
- while 1:
- try:
- chunk = gzipped_file.read( CHUNK_SIZE )
- except IOError:
- os.close( fd )
- os.remove( uncompressed )
- raise BadFileException( 'problem uncompressing gzipped data.' )
- if not chunk:
- break
- os.write( fd, chunk )
- os.close( fd )
- gzipped_file.close()
- # Replace the gzipped file with the decompressed file
- shutil.move( uncompressed, temp_name )
- name = name.rstrip( '.gz' )
- data_type = 'gzip'
- ext = ''
- if not data_type:
- # See if we have a zip archive
- is_zipped, is_valid, test_ext = self.check_zip( temp_name )
- if is_zipped and not is_valid:
- raise BadFileException( "you attempted to upload an inappropriate file." )
- elif is_zipped and is_valid:
- # Currently, we force specific tools to handle this case. We also require the user
- # to manually set the incoming file_type
- if ( test_ext == 'ab1' or test_ext == 'scf' ) and file_type != 'binseq.zip':
- raise BadFileException( "Invalid 'File Format' for archive consisting of binary files - use 'Binseq.zip'." )
- elif test_ext == 'txt' and file_type != 'txtseq.zip':
- raise BadFileException( "Invalid 'File Format' for archive consisting of text files - use 'Txtseq.zip'." )
- if not ( file_type == 'binseq.zip' or file_type == 'txtseq.zip' ):
- raise BadFileException( "you must manually set the 'File Format' to either 'Binseq.zip' or 'Txtseq.zip' when uploading zip files." )
- data_type = 'zip'
- ext = file_type
- if not data_type:
- if self.check_binary( temp_name ):
- try:
- ext = name.split( "." )[1].strip().lower()
- except:
- ext = ''
- try:
- is_pdf = open( temp_name ).read( len( '%PDF' ) ) == '%PDF'
- except:
- is_pdf = False #file failed to open or contents are smaller than pdf header
- if is_pdf:
- file_type = 'pdf' #allow the upload of PDFs to library via the admin interface.
- else:
- if not( ext == 'ab1' or ext == 'scf' ):
- raise BadFileException( "you attempted to upload an inappropriate file." )
- if ext == 'ab1' and file_type != 'ab1':
- raise BadFileException( "you must manually set the 'File Format' to 'Ab1' when uploading ab1 files." )
- elif ext == 'scf' and file_type != 'scf':
- raise BadFileException( "you must manually set the 'File Format' to 'Scf' when uploading scf files." )
- data_type = 'binary'
- if not data_type:
- # We must have a text file
- if self.check_html( temp_name ):
- raise BadFileException( "you attempted to upload an inappropriate file." )
- if data_type != 'binary' and data_type != 'zip':
- if space_to_tab:
- line_count = sniff.convert_newlines_sep2tabs( temp_name )
- elif os.stat( temp_name ).st_size < 262144000: # 250MB
- line_count = sniff.convert_newlines( temp_name )
- else:
- if sniff.check_newlines( temp_name ):
- line_count = sniff.convert_newlines( temp_name )
- else:
- line_count = None
- if file_type == 'auto':
- ext = sniff.guess_ext( temp_name, sniff_order=trans.app.datatypes_registry.sniff_order )
- else:
- ext = file_type
- data_type = ext
- if info is None:
- info = 'uploaded %s file' % data_type
- if file_type == 'auto':
- data_type = sniff.guess_ext( temp_name, sniff_order=trans.app.datatypes_registry.sniff_order )
- else:
- data_type = file_type
- if replace_dataset:
- # The replace_dataset param ( when not None ) refers to a LibraryDataset that is being replaced with a new version.
- library_dataset = replace_dataset
- else:
- # If replace_dataset is None, the Library level permissions will be taken from the folder and applied to the new
- # LibraryDataset, and the current user's DefaultUserPermissions will be applied to the associated Dataset.
- library_dataset = trans.app.model.LibraryDataset( folder=folder, name=name, info=info )
- library_dataset.flush()
- trans.app.security_agent.copy_library_permissions( folder, library_dataset )
- ldda = trans.app.model.LibraryDatasetDatasetAssociation( name=name,
- info=info,
- extension=data_type,
- dbkey=dbkey,
- library_dataset=library_dataset,
- user=trans.get_user(),
- create_dataset=True )
- ldda.message = message
- ldda.flush()
- # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
- trans.app.security_agent.copy_library_permissions( library_dataset, ldda )
- if replace_dataset:
- # Copy the Dataset level permissions from replace_dataset to the new LibraryDatasetDatasetAssociation.dataset
- trans.app.security_agent.copy_dataset_permissions( replace_dataset.library_dataset_dataset_association.dataset, ldda.dataset )
- else:
- # Copy the current user's DefaultUserPermissions to the new LibraryDatasetDatasetAssociation.dataset
- trans.app.security_agent.set_all_dataset_permissions( ldda.dataset, trans.app.security_agent.user_get_default_permissions( trans.get_user() ) )
- folder.add_library_dataset( library_dataset, genome_build=dbkey )
- folder.flush()
- library_dataset.library_dataset_dataset_association_id = ldda.id
- library_dataset.flush()
- # Handle template included in the upload form, if any
- if template and template_field_contents:
- # Since information templates are inherited, the template fields can be displayed on the upload form.
- # If the user has added field contents, we'll need to create a new form_values and info_association
- # for the new library_dataset_dataset_association object.
- # Create a new FormValues object, using the template we previously retrieved
- form_values = trans.app.model.FormValues( template, template_field_contents )
- form_values.flush()
- # Create a new info_association between the current ldda and form_values
- info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( ldda, template, form_values )
- info_association.flush()
- # If roles were selected upon upload, restrict access to the Dataset to those roles
- if roles:
- for role in roles:
- dp = trans.app.model.DatasetPermissions( RBACAgent.permitted_actions.DATASET_ACCESS.action, ldda.dataset, role )
- dp.flush()
- shutil.move( temp_name, ldda.dataset.file_name )
- ldda.state = ldda.states.OK
- ldda.init_meta()
- if line_count:
- try:
- if is_multi_byte:
- ldda.set_multi_byte_peek( line_count=line_count )
- else:
- ldda.set_peek( line_count=line_count )
- except:
- if is_multi_byte:
- ldda.set_multi_byte_peek()
- else:
- ldda.set_peek()
- else:
- if is_multi_byte:
- ldda.set_multi_byte_peek()
- else:
- ldda.set_peek()
- ldda.set_size()
- if ldda.missing_meta():
- ldda.datatype.set_meta( ldda )
- ldda.flush()
- return ldda
+ @web.json
+ def library_item_updates( self, trans, ids=None, states=None ):
+ # Avoid caching
+ trans.response.headers['Pragma'] = 'no-cache'
+ trans.response.headers['Expires'] = '0'
+ # Create new HTML for any that have changed
+ rval = {}
+ if ids is not None and states is not None:
+ ids = map( int, ids.split( "," ) )
+ states = states.split( "," )
+ for id, state in zip( ids, states ):
+ data = self.app.model.LibraryDatasetDatasetAssociation.get( id )
+ if data.state != state:
+ job_ldda = data
+ while job_ldda.copied_from_library_dataset_dataset_association:
+ job_ldda = job_ldda.copied_from_library_dataset_dataset_association
+ force_history_refresh = False
+ rval[id] = {
+ "state": data.state,
+ "html": unicode( trans.fill_template( "library/library_item_info.mako", ldda=data ), 'utf-8' )
+ #"force_history_refresh": force_history_refresh
+ }
+ return rval
@web.expose
def upload_dataset( self, trans, controller, library_id, folder_id, replace_dataset=None, **kwd ):
- # This method is called from both the admin and library controllers. The replace_dataset param ( when
- # not None ) refers to a LibraryDataset that is being replaced with a new version.
- params = util.Params( kwd )
+ # Set up the traditional tool state/params
+ tool_id = 'upload1'
+ tool = trans.app.toolbox.tools_by_id[ tool_id ]
+ state = tool.new_state( trans )
+ errors = tool.update_state( trans, tool.inputs_by_page[0], state.inputs, kwd, changed_dependencies={} )
+ tool_params = state.inputs
+ dataset_upload_inputs = []
+ for input_name, input in tool.inputs.iteritems():
+ if input.type == "upload_dataset":
+ dataset_upload_inputs.append( input )
+ # Library-specific params
+ params = util.Params( kwd ) # is this filetoolparam safe?
msg = util.restore_text( params.get( 'msg', '' ) )
messagetype = params.get( 'messagetype', 'done' )
- dbkey = params.get( 'dbkey', '?' )
- file_type = params.get( 'file_type', 'auto' )
- data_file = params.get( 'files_0|file_data', '' )
- url_paste = params.get( 'files_0|url_paste', '' )
server_dir = util.restore_text( params.get( 'server_dir', '' ) )
if replace_dataset not in [ None, 'None' ]:
replace_id = replace_dataset.id
@@ -217,24 +72,43 @@
template_field_contents.append( field_value )
else:
template = None
- if upload_option == 'upload_file' and data_file == '' and url_paste == '':
- msg = 'Select a file, enter a URL or enter text'
- err_redirect = True
- elif upload_option == 'upload_directory':
+ if upload_option == 'upload_directory':
if server_dir in [ None, 'None', '' ]:
err_redirect = True
- # See if our request is from the Admin view or the Libraries view
- if trans.request.browser_url.find( 'admin' ) >= 0:
+ if controller == 'library_admin':
import_dir = trans.app.config.library_import_dir
import_dir_desc = 'library_import_dir'
+ full_dir = os.path.join( import_dir, server_dir )
else:
import_dir = trans.app.config.user_library_import_dir
import_dir_desc = 'user_library_import_dir'
+ if server_dir == trans.user.email:
+ full_dir = os.path.join( import_dir, server_dir )
+ else:
+ full_dir = os.path.join( import_dir, trans.user.email, server_dir )
if import_dir:
msg = 'Select a directory'
else:
msg = '"%s" is not defined in the Galaxy configuration file' % import_dir_desc
+ roles = []
+ for role_id in util.listify( params.get( 'roles', [] ) ):
+ roles.append( trans.app.model.Role.get( role_id ) )
+ # Proceed with (mostly) regular upload processing
+ precreated_datasets = upload_common.get_precreated_datasets( trans, tool_params, trans.app.model.HistoryDatasetAssociation )
+ if upload_option == 'upload_file':
+ tool_params = upload_common.persist_uploads( tool_params )
+ json_file_path, data_list = upload_common.create_paramfile( trans, tool_params, precreated_datasets, dataset_upload_inputs, replace_dataset, folder, template, template_field_contents, roles, message )
+ elif upload_option == 'upload_directory':
+ json_file_path, data_list = self.create_server_dir_paramfile( trans, params, full_dir, import_dir_desc, folder, template, template_field_contents, roles, message, err_redirect, msg )
+ upload_common.cleanup_unused_precreated_datasets( precreated_datasets )
+ if upload_option == 'upload_file' and not data_list:
+ msg = 'Select a file, enter a URL or enter text'
+ err_redirect = True
if err_redirect:
+ try:
+ os.remove( json_file_path )
+ except:
+ pass
trans.response.send_redirect( web.url_for( controller=controller,
action='library_dataset_dataset_association',
library_id=library_id,
@@ -243,226 +117,49 @@
upload_option=upload_option,
msg=util.sanitize_text( msg ),
messagetype='error' ) )
- space_to_tab = params.get( 'files_0|space_to_tab', False )
- if space_to_tab and space_to_tab not in [ "None", None ]:
- space_to_tab = True
- roles = []
- for role_id in util.listify( params.get( 'roles', [] ) ):
- roles.append( trans.app.model.Role.get( role_id ) )
+ return upload_common.create_job( trans, tool_params, tool, json_file_path, data_list, folder=folder )
+ def create_server_dir_paramfile( self, trans, params, full_dir, import_dir_desc, folder, template,
+ template_field_contents, roles, message, err_redirect, msg ):
+ """
+ Create JSON param file for the upload tool when using the server_dir upload.
+ """
+ files = []
+ try:
+ for entry in os.listdir( full_dir ):
+ # Only import regular files
+ if os.path.isfile( os.path.join( full_dir, entry ) ):
+ files.append( entry )
+ except Exception, e:
+ msg = "Unable to get file list for configured %s, error: %s" % ( import_dir_desc, str( e ) )
+ err_redirect = True
+ return ( None, None )
+ if not files:
+ msg = "The directory '%s' contains no valid files" % full_dir
+ err_redirect = True
+ return ( None, None )
data_list = []
- created_ldda_ids = ''
- if 'filename' in dir( data_file ):
- file_name = data_file.filename
- file_name = file_name.split( '\\' )[-1]
- file_name = file_name.split( '/' )[-1]
- try:
- created_ldda = self.add_file( trans,
- folder,
- data_file.file,
- file_name,
- file_type,
- dbkey,
- roles,
- info="uploaded file",
- space_to_tab=space_to_tab,
- replace_dataset=replace_dataset,
- template=template,
- template_field_contents=template_field_contents,
- message=message )
- created_ldda_ids = str( created_ldda.id )
- except Exception, e:
- log.exception( 'exception in upload_dataset using file_name %s: %s' % ( str( file_name ), str( e ) ) )
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
- elif url_paste not in [ None, "" ]:
- if url_paste.lower().find( 'http://' ) >= 0 or url_paste.lower().find( 'ftp://' ) >= 0:
- url_paste = url_paste.replace( '\r', '' ).split( '\n' )
- # If we are setting the name from the line, it needs to be the line that creates that dataset
- name_set_from_line = False
- for line in url_paste:
- line = line.rstrip( '\r\n' )
- if line:
- if not line or name_set_from_line:
- name_set_from_line = True
- try:
- created_ldda = self.add_file( trans,
- folder,
- urllib.urlopen( line ),
- line,
- file_type,
- dbkey,
- roles,
- info="uploaded url",
- space_to_tab=space_to_tab,
- replace_dataset=replace_dataset,
- template=template,
- template_field_contents=template_field_contents,
- message=message )
- created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
- except Exception, e:
- log.exception( 'exception in upload_dataset using url_paste %s' % str( e ) )
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
- else:
- is_valid = False
- for line in url_paste:
- line = line.rstrip( '\r\n' )
- if line:
- is_valid = True
- break
- if is_valid:
- try:
- created_ldda = self.add_file( trans,
- folder,
- StringIO.StringIO( url_paste ),
- 'Pasted Entry',
- file_type,
- dbkey,
- roles,
- info="pasted entry",
- space_to_tab=space_to_tab,
- replace_dataset=replace_dataset,
- template=template,
- template_field_contents=template_field_contents,
- message=message )
- created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
- except Exception, e:
- log.exception( 'exception in add_file using StringIO.StringIO( url_paste ) %s' % str( e ) )
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
- elif server_dir not in [ None, "", "None" ]:
- # See if our request is from the Admin view or the Libraries view
- if trans.request.browser_url.find( 'admin' ) >= 0:
- import_dir = trans.app.config.library_import_dir
- import_dir_desc = 'library_import_dir'
- full_dir = os.path.join( import_dir, server_dir )
- else:
- import_dir = trans.app.config.user_library_import_dir
- import_dir_desc = 'user_library_import_dir'
- # From the Libraries view, users are restricted to the directory named the same as
- # their email within the configured user_library_import_dir. If this directory contains
- # sub-directories, server_dir will be the name of the selected sub-directory. Otherwise
- # server_dir will be the user's email address.
- if server_dir == trans.user.email:
- full_dir = os.path.join( import_dir, server_dir )
- else:
- full_dir = os.path.join( import_dir, trans.user.email, server_dir )
- files = []
- try:
- for entry in os.listdir( full_dir ):
- # Only import regular files
- if os.path.isfile( os.path.join( full_dir, entry ) ):
- files.append( entry )
- except Exception, e:
- msg = "Unable to get file list for configured %s, error: %s" % ( import_dir_desc, str( e ) )
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", msg )
- if not files:
- msg = "The directory '%s' contains no valid files" % full_dir
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", msg )
- for file in files:
- full_file = os.path.join( full_dir, file )
- if not os.path.isfile( full_file ):
- continue
- try:
- created_ldda = self.add_file( trans,
- folder,
- open( full_file, 'rb' ),
- file,
- file_type,
- dbkey,
- roles,
- info="imported file",
- space_to_tab=space_to_tab,
- replace_dataset=replace_dataset,
- template=template,
- template_field_contents=template_field_contents,
- message=message )
- created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
- except Exception, e:
- log.exception( 'exception in add_file using server_dir %s' % str( e ) )
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
- if created_ldda_ids:
- created_ldda_ids = created_ldda_ids.lstrip( ',' )
- return created_ldda_ids
- else:
- return ''
- def check_gzip( self, temp_name ):
- temp = open( temp_name, "U" )
- magic_check = temp.read( 2 )
- temp.close()
- if magic_check != util.gzip_magic:
- return ( False, False )
- CHUNK_SIZE = 2**15 # 32Kb
- gzipped_file = gzip.GzipFile( temp_name )
- chunk = gzipped_file.read( CHUNK_SIZE )
- gzipped_file.close()
- if self.check_html( temp_name, chunk=chunk ) or self.check_binary( temp_name, chunk=chunk ):
- return( True, False )
- return ( True, True )
- def check_zip( self, temp_name ):
- if not zipfile.is_zipfile( temp_name ):
- return ( False, False, None )
- zip_file = zipfile.ZipFile( temp_name, "r" )
- # Make sure the archive consists of valid files. The current rules are:
- # 1. Archives can only include .ab1, .scf or .txt files
- # 2. All file_types within an archive must be the same
- name = zip_file.namelist()[0]
- test_ext = name.split( "." )[1].strip().lower()
- if not ( test_ext == 'scf' or test_ext == 'ab1' or test_ext == 'txt' ):
- return ( True, False, test_ext )
- for name in zip_file.namelist():
- ext = name.split( "." )[1].strip().lower()
- if ext != test_ext:
- return ( True, False, test_ext )
- return ( True, True, test_ext )
- def check_html( self, temp_name, chunk=None ):
- if chunk is None:
- temp = open(temp_name, "U")
- else:
- temp = chunk
- regexp1 = re.compile( "<A\s+[^>]*HREF[^>]+>", re.I )
- regexp2 = re.compile( "<IFRAME[^>]*>", re.I )
- regexp3 = re.compile( "<FRAMESET[^>]*>", re.I )
- regexp4 = re.compile( "<META[^>]*>", re.I )
- lineno = 0
- for line in temp:
- lineno += 1
- matches = regexp1.search( line ) or regexp2.search( line ) or regexp3.search( line ) or regexp4.search( line )
- if matches:
- if chunk is None:
- temp.close()
- return True
- if lineno > 100:
- break
- if chunk is None:
- temp.close()
- return False
- def check_binary( self, temp_name, chunk=None ):
- if chunk is None:
- temp = open( temp_name, "U" )
- else:
- temp = chunk
- lineno = 0
- for line in temp:
- lineno += 1
- line = line.strip()
- if line:
- if util.is_multi_byte( line ):
- return False
- for char in line:
- if ord( char ) > 128:
- if chunk is None:
- temp.close()
- return True
- if lineno > 10:
- break
- if chunk is None:
- temp.close()
- return False
- def upload_empty( self, trans, controller, library_id, folder_id, err_code, err_msg ):
- msg = err_code + err_msg
- return trans.response.send_redirect( web.url_for( controller=controller,
- action='library_dataset_dataset_association',
- library_id=library_id,
- folder_id=folder_id,
- msg=util.sanitize_text( msg ),
- messagetype='error' ) )
-class BadFileException( Exception ):
- pass
+ json_file = tempfile.mkstemp()
+ json_file_path = json_file[1]
+ json_file = os.fdopen( json_file[0], 'w' )
+ for file in files:
+ full_file = os.path.join( full_dir, file )
+ if not os.path.isfile( full_file ):
+ continue
+ uploaded_dataset = util.bunch.Bunch()
+ uploaded_dataset.name = file
+ uploaded_dataset.file_type = params.file_type
+ uploaded_dataset.dbkey = params.dbkey
+ data = upload_common.new_library_upload( trans, uploaded_dataset, None, folder, template, template_field_contents, roles, message )
+ json = dict( file_type = uploaded_dataset.file_type,
+ ext = None,
+ name = uploaded_dataset.name,
+ dataset_id = data.dataset.id,
+ dbkey = uploaded_dataset.dbkey,
+ type = 'server_dir',
+ is_binary = None,
+ space_to_tab = params.space_to_tab,
+ path = full_file )
+ json_file.write( to_json_string( json ) + '\n' )
+ data_list.append( data )
+ json_file.close()
+ return ( json_file_path, data_list )
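
The library_item_updates() method added above defines the polling contract the browse templates rely on: the client posts comma-joined ids and their last-known states, and the server answers only for datasets whose state has changed. A minimal sketch of that contract with the model lookup and template rendering faked out (get_state and render_info are hypothetical stand-ins):

def library_item_updates( ids, states, get_state, render_info ):
    # Compare each dataset's current state to the client's last-known state.
    rval = {}
    for id, state in zip( map( int, ids.split( ',' ) ), states.split( ',' ) ):
        current = get_state( id )
        if current != state:
            rval[id] = dict( state=current, html=render_info( id ) )
    return rval

# Example round trip (both datasets finished since the last poll):
updates = library_item_updates( '12,15', 'queued,running',
                                get_state=lambda id: 'ok',
                                render_info=lambda id: '<div>ok</div>' )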
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 static/june_2007_style/blue/library.css
--- a/static/june_2007_style/blue/library.css Fri Sep 25 17:06:45 2009 -0400
+++ b/static/june_2007_style/blue/library.css Fri Sep 25 17:07:13 2009 -0400
@@ -1,7 +1,7 @@
.libraryRow{background-color:#ebd9b2;}
.datasetHighlighted{background-color:#C1C9E5;}
.libraryItemDeleted-True{font-style:italic;}
-div.historyItemBody{padding:4px 4px 2px 4px;}
+div.libraryItemBody{padding:4px 4px 2px 4px;}
li.folderRow,li.datasetRow{border-top:solid 1px #ddd;}
li.folderRow:hover,li.datasetRow:hover{background-color:#C1C9E5;}
img.expanderIcon{padding-right:4px;}
@@ -15,3 +15,6 @@
span.expandLink{width:16px;height:16px;display:inline-block;vertical-align:middle;background:url(../images/silk/resultset_next.png);}
.folderRow.expanded span.expandLink{background:url(../images/silk/resultset_bottom.png);}
.folderRow span.rowIcon{width:16px;height:16px;display:inline-block;vertical-align:middle;background:url(../images/silk/folder.png);}
+.libraryItem-error{margin-right:2px;padding:0 2px 0 2px;border:1px solid #AA6666;background:#FFCCCC;}
+.libraryItem-queued{margin-right:2px;padding:0 2px 0 2px;border:1px solid #888888;background:#EEEEEE;}
+.libraryItem-running{margin-right:2px;padding:0 2px 0 2px;border:1px solid #AAAA66;background:#FFFFCC;}
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 static/june_2007_style/library.css.tmpl
--- a/static/june_2007_style/library.css.tmpl Fri Sep 25 17:06:45 2009 -0400
+++ b/static/june_2007_style/library.css.tmpl Fri Sep 25 17:07:13 2009 -0400
@@ -10,7 +10,7 @@
font-style: italic;
}
-div.historyItemBody {
+div.libraryItemBody {
padding: 4px 4px 2px 4px;
}
@@ -88,3 +88,24 @@
background: url(../images/silk/folder.png);
}
+.libraryItem-error {
+ margin-right: 2px;
+ padding: 0 2px 0 2px;
+ border: 1px solid $history_error_border;
+ background: $history_error_bg;
+}
+
+.libraryItem-queued {
+ margin-right: 2px;
+ padding: 0 2px 0 2px;
+ border: 1px solid $history_queued_border;
+ background: $history_queued_bg;
+}
+
+.libraryItem-running {
+ margin-right: 2px;
+ padding: 0 2px 0 2px;
+ border: 1px solid $history_running_border;
+ background: $history_running_bg;
+}
+
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/admin/library/browse_library.mako
--- a/templates/admin/library/browse_library.mako Fri Sep 25 17:06:45 2009 -0400
+++ b/templates/admin/library/browse_library.mako Fri Sep 25 17:07:13 2009 -0400
@@ -1,5 +1,6 @@
<%inherit file="/base.mako"/>
<%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/library/library_item_info.mako" import="render_library_item_info" />
<%
from time import strftime
from galaxy import util
@@ -10,6 +11,8 @@
<link href="${h.url_for('/static/style/base.css')}" rel="stylesheet" type="text/css" />
<link href="${h.url_for('/static/style/library.css')}" rel="stylesheet" type="text/css" />
</%def>
+
+<% tracked_datasets = {} %>
<script type="text/javascript">
$( document ).ready( function () {
@@ -35,29 +38,6 @@
$(this).children().find("img.rowIcon").each( function() { this.src = icon_open; });
}
});
- // Hide all dataset bodies
- $("div.historyItemBody").hide();
- // Handle the dataset body hide/show link.
- $("div.historyItemWrapper").each( function() {
- var id = this.id;
- var li = $(this).parent();
- var body = $(this).children( "div.historyItemBody" );
- var peek = body.find( "pre.peek" )
- $(this).children( ".historyItemTitleBar" ).find( ".historyItemTitle" ).wrap( "<a href='#'></a>" ).click( function() {
- if ( body.is(":visible") ) {
- if ( $.browser.mozilla ) { peek.css( "overflow", "hidden" ) }
- body.slideUp( "fast" );
- li.removeClass( "datasetHighlighted" );
- }
- else {
- body.slideDown( "fast", function() {
- if ( $.browser.mozilla ) { peek.css( "overflow", "auto" ); }
- });
- li.addClass( "datasetHighlighted" );
- }
- return false;
- });
- });
});
function checkForm() {
if ( $("select#action_on_datasets_select option:selected").text() == "delete" ) {
@@ -68,6 +48,54 @@
}
}
}
+ // Looks for changes in dataset state using an async request. Keeps
+ // calling itself (via setTimeout) until all datasets are in a terminal
+ // state.
+ var updater = function ( tracked_datasets ) {
+ // Check if there are any items left to track
+ var empty = true;
+ for ( i in tracked_datasets ) {
+ empty = false;
+ break;
+ }
+ if ( ! empty ) {
+ setTimeout( function() { updater_callback( tracked_datasets ) }, 3000 );
+ }
+ };
+ var updater_callback = function ( tracked_datasets ) {
+ // Build request data
+ var ids = []
+ var states = []
+ $.each( tracked_datasets, function ( id, state ) {
+ ids.push( id );
+ states.push( state );
+ });
+ // Make ajax call
+ $.ajax( {
+ type: "POST",
+ url: "${h.url_for( controller='library_dataset', action='library_item_updates' )}",
+ dataType: "json",
+ data: { ids: ids.join( "," ), states: states.join( "," ) },
+ success : function ( data ) {
+ $.each( data, function( id, val ) {
+ // Replace HTML
+ var cell = $("#libraryItem-" + id).find("#libraryItemInfo");
+ cell.html( val.html );
+ // If new state was terminal, stop tracking
+ if (( val.state == "ok") || ( val.state == "error") || ( val.state == "empty") || ( val.state == "deleted" ) || ( val.state == "discarded" )) {
+ delete tracked_datasets[ parseInt(id) ];
+ } else {
+ tracked_datasets[ parseInt(id) ] = val.state;
+ }
+ });
+ updater( tracked_datasets );
+ },
+ error: function() {
+ // Just retry, like the old method, should try to be smarter
+ updater( tracked_datasets );
+ }
+ });
+ };
</script>
<%def name="render_dataset( ldda, library_dataset, selected, library, folder, deleted, show_deleted )">
@@ -84,11 +112,13 @@
current_version = True
else:
current_version = False
+ if current_version and ldda.state not in ( 'ok', 'error', 'empty', 'deleted', 'discarded' ):
+ tracked_datasets[ldda.id] = ldda.state
%>
%if current_version:
- <div class="historyItemWrapper historyItem historyItem-${ldda.state}" id="libraryItem-${ldda.id}">
+ <div class="libraryItemWrapper libraryItem" id="libraryItem-${ldda.id}">
## Header row for library items (name, state, action buttons)
- <div class="historyItemTitleBar">
+ <div class="libraryItemTitleBar">
<table cellspacing="0" cellpadding="0" border="0" width="100%">
<tr>
<td width="*">
@@ -119,7 +149,7 @@
</div>
%endif
</td>
- <td width="300">${ldda.message}</td>
+ <td width="300" id="libraryItemInfo">${render_library_item_info( ldda )}</td>
<td width="150">${uploaded_by}</td>
<td width="60">${ldda.create_time.strftime( "%Y-%m-%d" )}</td>
</tr>
@@ -287,3 +317,11 @@
</p>
%endif
</form>
+
+%if tracked_datasets:
+ <script type="text/javascript">
+ // Updater
+ updater({${ ",".join( [ '"%s" : "%s"' % ( k, v ) for k, v in tracked_datasets.iteritems() ] ) }});
+ </script>
+ <!-- running: do not change this comment, used by TwillTestCase.library_wait -->
+%endif
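
The updater({...}) call emitted at the bottom of the template is seeded server-side: datasets still in a non-terminal state are collected into tracked_datasets while the page renders, then serialized into a JavaScript object literal. Schematically, in plain Python (ids and states assumed):

tracked_datasets = { 12 : 'queued', 15 : 'running' }
js = ",".join( [ '"%s" : "%s"' % ( k, v ) for k, v in tracked_datasets.items() ] )
print( 'updater({%s});' % js )
# -> updater({"12" : "queued","15" : "running"});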
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/admin/library/new_library.mako
--- a/templates/admin/library/new_library.mako Fri Sep 25 17:06:45 2009 -0400
+++ b/templates/admin/library/new_library.mako Fri Sep 25 17:07:13 2009 -0400
@@ -29,7 +29,9 @@
</div>
<div style="clear: both"></div>
</div>
- <input type="submit" name="create_library_button" value="Create"/>
+ <div class="form-row">
+ <input type="submit" name="create_library_button" value="Create"/>
+ </div>
</form>
</div>
</div>
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/base_panels.mako
--- a/templates/base_panels.mako Fri Sep 25 17:06:45 2009 -0400
+++ b/templates/base_panels.mako Fri Sep 25 17:07:13 2009 -0400
@@ -167,8 +167,9 @@
<div class="submenu">
<ul>
<li><a href="${app.config.get( "bugs_email", "mailto:galaxy-bugs@bx.psu.edu" )}">Email comments, bug reports, or suggestions</a></li>
- <li><a target="_blank" href="${app.config.get( "wiki_url", "http://g2.trac.bx.psu.edu/" )}">Galaxy Wiki</a></li>
- <li><a target="_blank" href="${app.config.get( "screencasts_url", "http://g2.trac.bx.psu.edu/wiki/ScreenCasts" )}">Video tutorials (screencasts)</a></li>
+ <li><a target="_blank" href="${app.config.get( "wiki_url", "http://bitbucket.org/galaxy/galaxy-central/wiki" )}">Galaxy Wiki</a></li>
+ <li><a target="_blank" href="${app.config.get( "screencasts_url", "http://galaxycast.org" )}">Video tutorials (screencasts)</a></li>
+ <li><a target="_blank" href="${app.config.get( "citation_url", "http://bitbucket.org/galaxy/galaxy-central/wiki/Citations" )}">How to Cite Galaxy</a></li>
</ul>
</div>
</td>
@@ -282,7 +283,7 @@
</head>
<body scroll="no" class="${self.body_class}">
- <div id="everything" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; min-width: 600px;">
+ <div id="everything" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; min-width: 960px;">
## Background displays first
<div id="background"></div>
## Layer iframes over backgrounds
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/library/browse_library.mako
--- a/templates/library/browse_library.mako Fri Sep 25 17:06:45 2009 -0400
+++ b/templates/library/browse_library.mako Fri Sep 25 17:07:13 2009 -0400
@@ -1,5 +1,6 @@
<%inherit file="/base.mako"/>
<%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/library/library_item_info.mako" import="render_library_item_info" />
<%
from galaxy import util
from galaxy.web.controllers.library import active_folders
@@ -12,6 +13,8 @@
<link href="${h.url_for('/static/style/base.css')}" rel="stylesheet" type="text/css" />
<link href="${h.url_for('/static/style/library.css')}" rel="stylesheet" type="text/css" />
</%def>
+
+<% tracked_datasets = {} %>
<%
class RowCounter( object ):
@@ -77,6 +80,54 @@
});
});
});
+ // Looks for changes in dataset state using an async request. Keeps
+ // calling itself (via setTimeout) until all datasets are in a terminal
+ // state.
+ var updater = function ( tracked_datasets ) {
+ // Check if there are any items left to track
+ var empty = true;
+ for ( i in tracked_datasets ) {
+ empty = false;
+ break;
+ }
+ if ( ! empty ) {
+ setTimeout( function() { updater_callback( tracked_datasets ) }, 3000 );
+ }
+ };
+ var updater_callback = function ( tracked_datasets ) {
+ // Build request data
+ var ids = []
+ var states = []
+ $.each( tracked_datasets, function ( id, state ) {
+ ids.push( id );
+ states.push( state );
+ });
+ // Make ajax call
+ $.ajax( {
+ type: "POST",
+ url: "${h.url_for( controller='library_dataset', action='library_item_updates' )}",
+ dataType: "json",
+ data: { ids: ids.join( "," ), states: states.join( "," ) },
+ success : function ( data ) {
+ $.each( data, function( id, val ) {
+ // Replace HTML
+ var cell = $("#libraryItem-" + id).find("#libraryItemInfo");
+ cell.html( val.html );
+ // If new state was terminal, stop tracking
+ if (( val.state == "ok") || ( val.state == "error") || ( val.state == "empty") || ( val.state == "deleted" ) || ( val.state == "discarded" )) {
+ delete tracked_datasets[ parseInt(id) ];
+ } else {
+ tracked_datasets[ parseInt(id) ] = val.state;
+ }
+ });
+ updater( tracked_datasets );
+ },
+ error: function() {
+ // Just retry, like the old method, should try to be smarter
+ updater( tracked_datasets );
+ }
+ });
+ };
</script>
<%def name="render_dataset( ldda, library_dataset, selected, library, folder, pad, parent, row_conter )">
@@ -95,6 +146,8 @@
can_manage_library_dataset = trans.app.security_agent.can_manage_library_item( user, roles, library_dataset )
else:
current_version = False
+ if current_version and ldda.state not in ( 'ok', 'error', 'empty', 'deleted', 'discarded' ):
+ tracked_datasets[ldda.id] = ldda.state
%>
%if current_version:
<tr class="datasetRow"
@@ -102,7 +155,7 @@
parent="${parent}"
style="display: none;"
%endif
- >
+ id="libraryItem-${ldda.id}">
<td style="padding-left: ${pad+20}px;">
%if selected:
<input type="checkbox" name="ldda_ids" value="${ldda.id}" checked/>
@@ -129,7 +182,7 @@
%endif
</div>
</td>
- <td>${ldda.message}</td>
+ <td id="libraryItemInfo">${render_library_item_info( ldda )}</td>
<td>${uploaded_by}</td>
<td>${ldda.create_time.strftime( "%Y-%m-%d" )}</td>
</tr>
@@ -305,6 +358,14 @@
</table>
</form>
+%if tracked_datasets:
+ <script type="text/javascript">
+ // Updater
+ updater({${ ",".join( [ '"%s" : "%s"' % ( k, v ) for k, v in tracked_datasets.iteritems() ] ) }});
+ </script>
+ <!-- running: do not change this comment, used by TwillTestCase.library_wait -->
+%endif
+
## Help about compression types
%if len( comptypes ) > 1:
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/library/library_dataset_common.mako
--- a/templates/library/library_dataset_common.mako Fri Sep 25 17:06:45 2009 -0400
+++ b/templates/library/library_dataset_common.mako Fri Sep 25 17:07:13 2009 -0400
@@ -40,7 +40,8 @@
<div class="form-row">
<label>File:</label>
<div class="form-row-input">
- <input type="file" name="files_0|file_data" galaxy-ajax-upload="true"/>
+ ##<input type="file" name="files_0|file_data" galaxy-ajax-upload="true"/>
+ <input type="file" name="files_0|file_data"/>
</div>
<div style="clear: both"></div>
</div>
@@ -109,11 +110,16 @@
Convert spaces to tabs:
</label>
<div class="form-row-input">
- <input type="checkbox" name="files_0|space_to_tab" value="Yes"/>Yes
+ ## The files grouping only makes sense in the upload_file context.
+ %if upload_option == 'upload_file':
+ <input type="checkbox" name="files_0|space_to_tab" value="Yes"/>Yes
+ %else:
+ <input type="checkbox" name="space_to_tab" value="Yes"/>Yes
+ %endif
</div>
- </div>
- <div class="toolParamHelp" style="clear: both;">
- Use this option if you are entering intervals by hand.
+ <div class="toolParamHelp" style="clear: both;">
+ Use this option if you are entering intervals by hand.
+ </div>
</div>
<div style="clear: both"></div>
<div class="form-row">
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/library/library_item_info.mako
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/templates/library/library_item_info.mako Fri Sep 25 17:07:13 2009 -0400
@@ -0,0 +1,13 @@
+<%def name="render_library_item_info( ldda )">
+ %if ldda.state == 'error':
+ <div class="libraryItem-${ldda.state}">Job error <i>(click name for more info)</i></div>
+ %elif ldda.state == 'queued':
+ <div class="libraryItem-${ldda.state}">This job is queued</div>
+ %elif ldda.state == 'running':
+ <div class="libraryItem-${ldda.state}">This job is running</div>
+ %else:
+ ${ldda.message}
+ %endif
+</%def>
+
+${render_library_item_info( ldda )}
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 test-data/groupby_out1.dat
--- a/test-data/groupby_out1.dat Fri Sep 25 17:06:45 2009 -0400
+++ b/test-data/groupby_out1.dat Fri Sep 25 17:07:13 2009 -0400
@@ -17,4 +17,4 @@
chr7 1.15958e+08
chr8 1.18881e+08
chr9 1.28843e+08
-chrX 1.45195e+08
+chrx 1.45195e+08
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 test-data/users/test3@bx.psu.edu/run1/2.fasta
--- a/test-data/users/test3@bx.psu.edu/run1/2.fasta Fri Sep 25 17:06:45 2009 -0400
+++ b/test-data/users/test3@bx.psu.edu/run1/2.fasta Fri Sep 25 17:07:13 2009 -0400
@@ -8,4 +8,4 @@
ctcaatgttc atgttcttag gttgttttgg ataatatgcg gtcagtttaa tcttcgttgt
ttcttcttaa aatatttatt catggtttaa tttttggttt gtacttgttc aggggccagt
tcattattta ctctgtttgt atacagcagt tcttttattt ttagtatgat tttaatttaa
-aacaattcta atggtcaaaa a
\ No newline at end of file
+aacaattcta atggtcaaaa a
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 test/base/twilltestcase.py
--- a/test/base/twilltestcase.py Fri Sep 25 17:06:45 2009 -0400
+++ b/test/base/twilltestcase.py Fri Sep 25 17:07:13 2009 -0400
@@ -1274,6 +1274,7 @@
else:
check_str = "Added 1 datasets to the folder '%s' ( each is selected )." % folder_name
self.check_page_for_string( check_str )
+ self.library_wait( library_id )
self.home()
def set_library_dataset_permissions( self, library_id, folder_id, ldda_id, ldda_name, role_id, permissions_in, permissions_out ):
url = "library_admin/library_dataset_dataset_association?library_id=%s&folder_id=%s&&id=%s&permissions=True&update_roles_button=Save" % \
@@ -1359,25 +1360,7 @@
tc.submit( "runtool_btn" )
check_str = "Added 1 dataset versions to the library dataset '%s' in the folder '%s'." % ( ldda_name, folder_name )
self.check_page_for_string( check_str )
- self.home()
- def upload_new_dataset_versions( self, library_id, folder_id, folder_name, library_dataset_id, ldda_name, file_type='auto',
- dbkey='hg18', message='', template_field_name1='', template_field_contents1='' ):
- """Upload new version(s) of a dataset using a directory of files"""
- self.home()
- self.visit_url( "%s/library_admin/library_dataset_dataset_association?upload_option=upload_directory&library_id=%s&folder_id=%s&replace_id=%s" \
- % ( self.url, library_id, folder_id, library_dataset_id ) )
- self.check_page_for_string( 'Upload a directory of files' )
- self.check_page_for_string( 'You are currently selecting a new file to replace' )
- tc.fv( "1", "file_type", file_type )
- tc.fv( "1", "dbkey", dbkey )
- tc.fv( "1", "message", message.replace( '+', ' ' ) )
- tc.fv( "1", "server_dir", "library" )
- # Add template field contents, if any...
- if template_field_name1:
- tc.fv( "1", template_field_name1, template_field_contents1 )
- tc.submit( "runtool_btn" )
- check_str = "Added 3 dataset versions to the library dataset '%s' in the folder '%s'." % ( ldda_name, folder_name )
- self.check_page_for_string( check_str )
+ self.library_wait( library_id )
self.home()
def add_history_datasets_to_library( self, library_id, folder_id, folder_name, hda_id, root=False ):
"""Copy a dataset from the current history to a library folder"""
@@ -1410,6 +1393,7 @@
tc.submit( "runtool_btn" )
if check_str_after_submit:
self.check_page_for_string( check_str_after_submit )
+ self.library_wait( library_id )
self.home()
def add_dir_of_files_from_libraries_view( self, library_id, folder_id, selected_dir, file_type='auto', dbkey='hg18', roles_tuple=[],
message='', check_str_after_submit='', template_field_name1='', template_field_contents1='' ):
@@ -1432,6 +1416,7 @@
tc.submit( "runtool_btn" )
if check_str_after_submit:
self.check_page_for_string( check_str_after_submit )
+ self.library_wait( library_id, controller='library' )
self.home()
def delete_library_item( self, library_id, library_item_id, library_item_name, library_item_type='library_dataset' ):
"""Mark a library item as deleted"""
@@ -1464,3 +1449,18 @@
check_str = "Library '%s' and all of its contents have been purged" % library_name
self.check_page_for_string( check_str )
self.home()
+ def library_wait( self, library_id, controller='library_admin', maxiter=20 ):
+ """Waits for the tools to finish"""
+ count = 0
+ sleep_amount = 1
+ self.home()
+ while count < maxiter:
+ count += 1
+ self.visit_url( "%s/%s/browse_library?id=%s" % ( self.url, controller, library_id ) )
+ page = tc.browser.get_html()
+ if page.find( '<!-- running: do not change this comment, used by TwillTestCase.library_wait -->' ) > -1:
+ time.sleep( sleep_amount )
+ sleep_amount += 1
+ else:
+ break
+ self.assertNotEqual(count, maxiter)
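
library_wait() is a poll-with-linear-backoff loop keyed on the sentinel comment the templates emit while uploads are still running. The same pattern in isolation (a sketch; wait_until() is not part of the test framework):

import time

def wait_until( done, maxiter=20 ):
    # Sleep 1s, then 2s, then 3s, ... between polls, like library_wait().
    sleep_amount = 1
    for count in range( maxiter ):
        if done():
            return True
        time.sleep( sleep_amount )
        sleep_amount += 1
    return False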
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 test/functional/__init__.py
--- a/test/functional/__init__.py Fri Sep 25 17:06:45 2009 -0400
+++ b/test/functional/__init__.py Fri Sep 25 17:07:13 2009 -0400
@@ -79,8 +79,8 @@
allow_user_creation = True,
allow_user_deletion = True,
admin_users = 'test@bx.psu.edu',
- library_import_dir = galaxy_test_file_dir,
- user_library_import_dir = os.path.join( galaxy_test_file_dir, 'users' ),
+ library_import_dir = os.path.join( os.getcwd(), galaxy_test_file_dir ),
+ user_library_import_dir = os.path.join( os.getcwd(), galaxy_test_file_dir, 'users' ),
global_conf = { "__file__": "universe_wsgi.ini.sample" } )
log.info( "Embedded Universe application started" )
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/data_source/upload.py
--- a/tools/data_source/upload.py Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/data_source/upload.py Fri Sep 25 17:07:13 2009 -0400
@@ -137,7 +137,7 @@
# See if we have an empty file
if not os.path.exists( dataset.path ):
- file_err( 'Uploaded temporary file (%s) does not exist. Please' % dataset.path, dataset, json_file )
+ file_err( 'Uploaded temporary file (%s) does not exist.' % dataset.path, dataset, json_file )
return
if not os.path.getsize( dataset.path ) > 0:
file_err( 'The uploaded file is empty', dataset, json_file )
@@ -237,7 +237,10 @@
if ext == 'auto':
ext = 'data'
# Move the dataset to its "real" path
- shutil.move( dataset.path, output_path )
+ if dataset.type == 'server_dir':
+ shutil.copy( dataset.path, output_path )
+ else:
+ shutil.move( dataset.path, output_path )
# Write the job info
info = dict( type = 'dataset',
dataset_id = dataset.dataset_id,
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/fastx_toolkit/fastq_to_fasta.xml
--- a/tools/fastx_toolkit/fastq_to_fasta.xml Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/fastx_toolkit/fastq_to_fasta.xml Fri Sep 25 17:07:13 2009 -0400
@@ -3,7 +3,7 @@
<command>gunzip -cf $input | fastq_to_fasta $SKIPN $RENAMESEQ -o $output -v </command>
<inputs>
- <param format="fastqsolexa" name="input" type="data" label="FASTQ Library to convert" />
+ <param format="fastqsolexa,fastqsanger" name="input" type="data" label="FASTQ Library to convert" />
<param name="SKIPN" type="select" label="Discard sequences with unknown (N) bases ">
<option value="">yes</option>
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/samtools/pileup_parser.xml
--- a/tools/samtools/pileup_parser.xml Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/samtools/pileup_parser.xml Fri Sep 25 17:07:13 2009 -0400
@@ -1,5 +1,5 @@
-<tool id="pileup_parser" name="Parse pileup">
- <description>to find variants</description>
+<tool id="pileup_parser" name="Filter pileup">
+ <description>on coverage and SNPs</description>
<command interpreter="perl">
#if $pileup_type.type_select == "six": #pileup_parser.pl $input "3" "5" "6" "4" $qv_cutoff $cvrg_cutoff $snps_only $interval "2" $out_file1
#elif $pileup_type.type_select == "ten": #pileup_parser.pl $input "3" "9" "10" "8" $qv_cutoff $cvrg_cutoff $snps_only $interval "2" $out_file1
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/samtools/sam_pileup.xml
--- a/tools/samtools/sam_pileup.xml Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/samtools/sam_pileup.xml Fri Sep 25 17:07:13 2009 -0400
@@ -1,5 +1,5 @@
-<tool id="sam_pileup" name="SAM Pileup Format" version="1.0.0">
- <description>generates the pileup format for a provided BAM file</description>
+<tool id="sam_pileup" name="Generate pileup" version="1.0.0">
+ <description>from BAM dataset</description>
<command interpreter="python">
sam_pileup.py
--input1=$input1
@@ -85,9 +85,9 @@
**Types of pileup datasets**
-The description of pileup format below is largely based on information that can be found on SAMTools_ documentation page. The 6- and 10-column variants are described below.
+The description of pileup format below is largely based on information that can be found on SAMTools Pileup_ documentation page. The 6- and 10-column variants are described below.
-.. _SAMTools: http://samtools.sourceforge.net/pileup.shtml
+.. _Pileup: http://samtools.sourceforge.net/pileup.shtml
**Six column pileup**::
@@ -111,7 +111,7 @@
**Ten column pileup**
-The `ten-column`__ pileup incorporates additional consensus information generated with *-c* option of *samtools pileup* command::
+The `ten-column` (consensus_) pileup incorporates additional consensus information generated with *-c* option of *samtools pileup* command::
1 2 3 4 5 6 7 8 9 10
@@ -137,7 +137,7 @@
10 Quality values (phred33 scale, see Galaxy wiki for more)
-.. __: http://samtools.sourceforge.net/cns0.shtml
+.. _consensus: http://samtools.sourceforge.net/cns0.shtml
</help>
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/stats/grouping.py
--- a/tools/stats/grouping.py Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/stats/grouping.py Fri Sep 25 17:07:13 2009 -0400
@@ -12,13 +12,13 @@
def main():
inputfile = sys.argv[2]
-
+ ignorecase = int(sys.argv[4])
ops = []
cols = []
rounds = []
elems = []
- for var in sys.argv[4:]:
+ for var in sys.argv[5:]:
ops.append(var.split()[0])
cols.append(var.split()[1])
rounds.append(var.split()[2])
@@ -71,7 +71,10 @@
we need to add 1 to group_col.
if POS2 is not specified, the newer versions of sort will consider the entire line for sorting. To prevent this, we set POS2=POS1.
"""
- command_line = "sort -f -k " + str(group_col+1) +"," + str(group_col+1) + " -o " + tmpfile.name + " " + inputfile
+ case = ''
+ if ignorecase == 1:
+ case = '-f'
+ command_line = "sort -t $'\t' " + case + " -k" + str(group_col+1) +"," + str(group_col+1) + " -o " + tmpfile.name + " " + inputfile
except Exception, exc:
stop_err( 'Initialization error -> %s' %str(exc) )
@@ -95,6 +98,8 @@
try:
fields = line.split("\t")
item = fields[group_col]
+ if ignorecase == 1:
+ item = item.lower()
if prev_item != "":
# At this level, we're grouping on values (item and prev_item) in group_col
if item == prev_item:
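
The net effect of the new ignorecase flag: rows are sorted case-insensitively (sort -f) and the group key is lower-cased before comparison, as in the hunk above. An equivalent standalone sketch using itertools.groupby, with the sample rows from the tool's help below (the printed key is lower-cased here, so only the counts match the help text exactly):

from itertools import groupby

rows = [ ( 'chr22', 'TTT' ), ( 'chr22', 'aaa' ), ( 'chr10', 'TTT' ),
         ( 'chr10', 'ttt' ), ( 'chr22', 'AAA' ) ]
key = lambda r: r[1].lower()  # ignorecase == 1
for item, group in groupby( sorted( rows, key=key ), key=key ):
    print( '%s\t%d' % ( item, len( list( group ) ) ) )
# -> aaa 2 / ttt 3, i.e. the "AAA 2 / TTT 3" example in grouping.xml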
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/stats/grouping.xml
--- a/tools/stats/grouping.xml Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/stats/grouping.xml Fri Sep 25 17:07:13 2009 -0400
@@ -1,10 +1,11 @@
-<tool id="Grouping1" name="Group" version="1.7.0">
+<tool id="Grouping1" name="Group" version="1.8.0">
<description>data by a column and perform aggregate operation on other columns.</description>
<command interpreter="python">
grouping.py
$out_file1
$input1
$groupcol
+ $ignorecase
#for $op in $operations
'${op.optype}
${op.opcol}
@@ -14,6 +15,9 @@
<inputs>
<param format="tabular" name="input1" type="data" label="Select data" help="Query missing? See TIP below."/>
<param name="groupcol" label="Group by column" type="data_column" data_ref="input1" />
+ <param name="ignorecase" type="boolean" truevalue="1" falsevalue="0">
+ <label>Ignore case while grouping?</label>
+ </param>
<repeat name="operations" title="Operation">
<param name="optype" type="select" label="Type">
<option value="mean">Mean</option>
@@ -44,6 +48,7 @@
<test>
<param name="input1" value="1.bed"/>
<param name="groupcol" value="1"/>
+ <param name="ignorecase" value="true"/>
<param name="optype" value="mean"/>
<param name="opcol" value="2"/>
<param name="opround" value="no"/>
@@ -54,6 +59,7 @@
<test>
<param name="input1" value="1.tabular"/>
<param name="groupcol" value="1"/>
+ <param name="ignorecase" value="true"/>
<param name="optype" value="mean"/>
<param name="opcol" value="2"/>
<param name="opround" value="no"/>
@@ -80,15 +86,22 @@
- For the following input::
- chr22 1000 NM_17
- chr22 2000 NM_18
- chr10 2200 NM_10
- chr10 1200 NM_11
- chr22 1600 NM_19
+ chr22 1000 1003 TTT
+ chr22 2000 2003 aaa
+ chr10 2200 2203 TTT
+ chr10 1200 1203 ttt
+ chr22 1600 1603 AAA
-- running this tool with **Group by column 1**, Operations **Mean on column 2** and **Concatenate on column 3** will return::
+- **Grouping on column 4** while ignoring case, and performing operation **Count on column 1** will return::
- chr10 1700.00 NM_11,NM_10
- chr22 1533.33 NM_17,NM_19,NM_18
+ AAA 2
+ TTT 3
+
+- **Grouping on column 4** while not ignoring case, and performing operation **Count on column 1** will return::
+
+ aaa 1
+ AAA 1
+ ttt 1
+ TTT 2
</help>
</tool>
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample Fri Sep 25 17:06:45 2009 -0400
+++ b/universe_wsgi.ini.sample Fri Sep 25 17:07:13 2009 -0400
@@ -102,9 +102,11 @@
## Brand: appends "/[brand]" to the "Galaxy" text in the masthead
## wiki_url: replaces the default galaxy main wiki
## bugs_email: replaces the default galaxy bugs email list
+##citation_url: point to a URL listing citations
#brand = Private local mirror
#wiki_url = /path/to/my/local/wiki
#bugs_email = mailto:galaxy-bugs@example.org
+#citation_url = /path/to/my/citations
# ---- Logging and Debugging ------------------------------------------------
details: http://www.bx.psu.edu/hg/galaxy/rev/83dc9981e3c4
changeset: 2783:83dc9981e3c4
user: jeremy goecks <jeremy.goecks at emory.edu>
date: Fri Sep 25 17:06:45 2009 -0400
description:
Grid states (filter, sorting) can be preserved.
5 file(s) affected in this change:
lib/galaxy/model/__init__.py
lib/galaxy/model/mapping.py
lib/galaxy/model/migrate/versions/0020_user_prefs.py
lib/galaxy/web/framework/helpers/grids.py
templates/history/grid.mako
diffs (252 lines):
diff -r 6a86a558f405 -r 83dc9981e3c4 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py Fri Sep 25 11:32:47 2009 -0400
+++ b/lib/galaxy/model/__init__.py Fri Sep 25 17:06:45 2009 -0400
@@ -1306,6 +1306,12 @@
class PageTagAssociation ( ItemTagAssociation ):
pass
+
+class UserPreference ( object ):
+ def __init__( self, user_id=None, name=None, value=None ):
+ self.user_id = user_id
+ self.name = name
+ self.value = value
## ---- Utility methods -------------------------------------------------------
diff -r 6a86a558f405 -r 83dc9981e3c4 lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py Fri Sep 25 11:32:47 2009 -0400
+++ b/lib/galaxy/model/mapping.py Fri Sep 25 17:06:45 2009 -0400
@@ -585,7 +585,13 @@
Column( "user_tname", TrimmedString(255), index=True),
Column( "value", TrimmedString(255), index=True),
Column( "user_value", TrimmedString(255), index=True) )
-
+
+UserPreference.table = Table( "user_preference", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
+ Column( "name", Unicode( 255 ), index=True),
+ Column( "value", Unicode( 1024 ) ) )
+
# With the tables defined we can define the mappers and setup the
# relationships between the model objects.
@@ -741,6 +747,7 @@
stored_workflow_menu_entries=relation( StoredWorkflowMenuEntry, backref="user",
cascade="all, delete-orphan",
collection_class=ordering_list( 'order_index' ) ),
+ preferences=relation( UserPreference, backref="user", order_by=UserPreference.table.c.id),
# addresses=relation( UserAddress,
# primaryjoin=( User.table.c.id == UserAddress.table.c.user_id ) )
) )
@@ -1010,6 +1017,10 @@
properties=dict( tag=relation(Tag, backref="tagged_pages") ),
primary_key=[PageTagAssociation.table.c.page_id, PageTagAssociation.table.c.tag_id]
)
+
+assign_mapper( context, UserPreference, UserPreference.table,
+ properties = {}
+ )
def db_next_hid( self ):
"""
diff -r 6a86a558f405 -r 83dc9981e3c4 lib/galaxy/model/migrate/versions/0020_user_prefs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0020_user_prefs.py Fri Sep 25 17:06:45 2009 -0400
@@ -0,0 +1,45 @@
+"""
+This migration script adds a user preferences table to Galaxy.
+"""
+
+from sqlalchemy import *
+from migrate import *
+
+import datetime
+now = datetime.datetime.utcnow
+
+import logging
+log = logging.getLogger( __name__ )
+
+metadata = MetaData( migrate_engine )
+
+def display_migration_details():
+ print ""
+ print "This migration script adds a user preferences table to Galaxy."
+ print ""
+
+
+# New table to support user preferences.
+
+UserPreference_table = Table( "user_preference", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
+ Column( "name", Unicode( 255 ), index=True),
+ Column( "value", Unicode( 1024 ) ) )
+
+def upgrade():
+ display_migration_details()
+ metadata.reflect()
+ try:
+ UserPreference_table.create()
+ except Exception, e:
+ print str(e)
+ log.debug( "Creating user_preference table failed: %s" % str( e ) )
+
+def downgrade():
+ metadata.reflect()
+ try:
+ UserPreference_table.drop()
+ except Exception, e:
+ print str(e)
+ log.debug( "Dropping user_preference table failed: %s" % str( e ) )
\ No newline at end of file
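For readers unfamiliar with sqlalchemy-migrate: `migrate_engine` is injected into the script's namespace by the framework, and `upgrade()` / `downgrade()` are invoked as the schema version crosses 0020 in either direction. A self-contained sketch of the same create/drop cycle against a throwaway SQLite engine (the engine URL and the stub `galaxy_user` table are assumptions for illustration only):

    from sqlalchemy import ( MetaData, Table, Column, Integer, Unicode,
                             ForeignKey, create_engine )

    engine = create_engine( "sqlite:///:memory:" )
    metadata = MetaData( bind=engine )

    # Stub parent table so the ForeignKey can resolve in this sketch.
    galaxy_user = Table( "galaxy_user", metadata,
        Column( "id", Integer, primary_key=True ) )

    user_preference = Table( "user_preference", metadata,
        Column( "id", Integer, primary_key=True ),
        Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
        Column( "name", Unicode( 255 ), index=True ),
        Column( "value", Unicode( 1024 ) ) )

    metadata.create_all()      # what upgrade() does for the new table
    user_preference.drop()     # what downgrade() undoes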
diff -r 6a86a558f405 -r 83dc9981e3c4 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py Fri Sep 25 11:32:47 2009 -0400
+++ b/lib/galaxy/web/framework/helpers/grids.py Fri Sep 25 17:06:45 2009 -0400
@@ -2,6 +2,7 @@
from galaxy.model.orm import *
from galaxy.web import url_for
+from galaxy.util.json import from_json_string, to_json_string
import sys, logging
@@ -21,6 +22,10 @@
standard_filters = []
default_filter = None
default_sort_key = None
+ preserve_state = True
+ # Set preference names.
+ cur_filter_pref_name = ".filter"
+ cur_sort_key_pref_name = ".sort_key"
pass_through_operations = {}
def __init__( self ):
# Determine if any multiple row operations are defined
@@ -29,26 +34,47 @@
if operation.allow_multiple:
self.has_multiple_item_operations = True
break
+
def __call__( self, trans, **kwargs ):
status = kwargs.get( 'status', None )
message = kwargs.get( 'message', None )
session = trans.sa_session
+
+        # Build a base filter and sort key by combining the saved state with the defaults; saved state takes precedence over the defaults.
+ base_filter = {}
+ if self.default_filter:
+ base_filter = self.default_filter.copy()
+ base_sort_key = self.default_sort_key
+ if self.preserve_state:
+ saved_filter_pref = trans.sa_session.query( UserPreference ).\
+ filter_by( name=self.__class__.__name__ + self.cur_filter_pref_name, user_id=trans.get_user().id ).first()
+ if saved_filter_pref:
+ saved_filter = from_json_string( saved_filter_pref.value )
+ base_filter.update( saved_filter )
+
+ saved_sort_key_pref = trans.sa_session.query( UserPreference ).\
+ filter_by( name=self.__class__.__name__ + self.cur_sort_key_pref_name, user_id=trans.get_user().id ).first()
+ if saved_sort_key_pref:
+ base_sort_key = from_json_string( saved_sort_key_pref.value )
+
# Build initial query
query = self.build_initial_query( session )
query = self.apply_default_filter( trans, query, **kwargs )
+
# Maintain sort state in generated urls
extra_url_args = {}
- # Process filtering arguments to (a) build a query that actuates the filter and (b) builds a
+
+        # Process filtering arguments to (a) build a query that represents the filter and (b) build a
# dictionary that denotes the current filter.
cur_filter_dict = {}
for column in self.columns:
if column.key:
- # Look for filter criterion in kwargs; if not found, look in default filter.
+ # Look for filter criterion in kwargs; if not found, look in base filter.
column_filter = None
if "f-" + column.key in kwargs:
column_filter = kwargs.get( "f-" + column.key )
- elif ( self.default_filter ) and ( column.key in self.default_filter ):
- column_filter = self.default_filter.get( column.key )
+ elif column.key in base_filter:
+ column_filter = base_filter.get( column.key )
# If column filter found, apply it.
if column_filter is not None:
@@ -61,13 +87,13 @@
if not isinstance( column_filter, basestring ):
column_filter = unicode(column_filter)
extra_url_args[ "f-" + column.key ] = column_filter.encode("utf-8")
-
+
# Process sort arguments
sort_key = sort_order = None
if 'sort' in kwargs:
sort_key = kwargs['sort']
- elif self.default_sort_key:
- sort_key = self.default_sort_key
+ elif base_sort_key:
+ sort_key = base_sort_key
encoded_sort_key = sort_key
if sort_key:
if sort_key.startswith( "-" ):
@@ -78,9 +104,26 @@
sort_order = 'asc'
query = query.order_by( self.model_class.c.get( sort_key ).asc() )
extra_url_args['sort'] = encoded_sort_key
+
# There might be a current row
current_item = self.get_current_item( trans )
- # Render
+
+ # Save current filter and sort key.
+ if self.preserve_state:
+ pref_name = self.__class__.__name__ + self.cur_filter_pref_name
+ if not saved_filter_pref:
+ saved_filter_pref = UserPreference( name=pref_name )
+ trans.get_user().preferences.append( saved_filter_pref )
+ saved_filter_pref.value = to_json_string( cur_filter_dict )
+            if not saved_sort_key_pref:
+                pref_name = self.__class__.__name__ + self.cur_sort_key_pref_name
+                saved_sort_key_pref = UserPreference( name=pref_name )
+                trans.get_user().preferences.append( saved_sort_key_pref )
+ saved_sort_key_pref.value = to_json_string( sort_key )
+ trans.sa_session.flush()
+
+ # Render grid.
def url( *args, **kwargs ):
# Only include sort/filter arguments if not linking to another
# page. This is a bit of a hack.
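The preservation scheme above reduces to: one user_preference row per (user, grid), keyed by the grid class name plus a suffix, holding JSON-encoded filter or sort state. A condensed sketch of the load/save round trip for the filter half (the session and `user` objects are assumed, and `HistoryListGrid` is an illustrative grid name):

    from galaxy.util.json import from_json_string, to_json_string

    FILTER_PREF = "HistoryListGrid" + ".filter"

    def load_filter( sa_session, user, default_filter ):
        # Saved state takes precedence over the grid's default filter.
        base_filter = dict( default_filter or {} )
        pref = sa_session.query( UserPreference ).\
            filter_by( name=FILTER_PREF, user_id=user.id ).first()
        if pref:
            base_filter.update( from_json_string( pref.value ) )
        return base_filter

    def save_filter( sa_session, user, cur_filter_dict ):
        # Reuse the existing row if there is one; otherwise create it.
        pref = sa_session.query( UserPreference ).\
            filter_by( name=FILTER_PREF, user_id=user.id ).first()
        if not pref:
            pref = UserPreference( name=FILTER_PREF )
            user.preferences.append( pref )
        pref.value = to_json_string( cur_filter_dict )
        sa_session.flush()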
diff -r 6a86a558f405 -r 83dc9981e3c4 templates/history/grid.mako
--- a/templates/history/grid.mako Fri Sep 25 11:32:47 2009 -0400
+++ b/templates/history/grid.mako Fri Sep 25 17:06:45 2009 -0400
@@ -167,7 +167,7 @@
## Print grid filter.
<form name="history_actions" action="javascript:add_tag_to_grid_filter($('#input-tag-filter').attr('value'))" method="get" >
- <strong>Filter: </strong>
+ <strong>Filter: </strong>
%for column in grid.columns:
%if column.filterable:
<span> by ${column.label.lower()}:</span>
@@ -194,14 +194,14 @@
<span class="filter"><a href="${url( filter.get_url_args() )}">${filter.label}</a></span>
%endif
%endfor
- <span> </span>
+ <span> </span>
%endif
%endfor
- ## Link to clear all filters.
+ ## Link to clear all filters. TODO: this should be the default filter or an empty filter.
<%
args = { "deleted" : "False", "tags" : "All" }
- no_filter = GridColumnFilter("Clear", args)
+ no_filter = GridColumnFilter("Clear Filter", args)
%>
<span><a href="${url( no_filter.get_url_args() )}">${no_filter.label}</a></span>
</form>
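Since preserve_state defaults to True in this changeset, every Grid subclass picks up the saved filter/sort behavior unless it opts out by overriding the flag. An illustrative subclass (the name and field values are hypothetical):

    class PublicItemsGrid( Grid ):
        # Hypothetical grid that opts out of per-user state preservation,
        # e.g. for listings where there may be no logged-in user to save
        # preferences against.
        preserve_state = False
        default_sort_key = "-update_time"
        default_filter = dict( deleted="False" )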