galaxy-dev
October 2009
- 18 participants
- 172 discussions
                    
details:   http://www.bx.psu.edu/hg/galaxy/rev/93dc1855f0d6
changeset: 2786:93dc1855f0d6
user:      Kanwei Li <kanwei(a)gmail.com>
date:      Fri Sep 25 18:47:40 2009 -0400
description:
trackster now supports BED files
10 file(s) affected in this change:
datatypes_conf.xml.sample
lib/galaxy/datatypes/converters/bed_to_interval_index_converter.py
lib/galaxy/datatypes/converters/bed_to_interval_index_converter.xml
lib/galaxy/visualization/__init__.py
lib/galaxy/visualization/tracks/__init__.py
lib/galaxy/visualization/tracks/data/array_tree.py
lib/galaxy/visualization/tracks/data/interval_index.py
lib/galaxy/web/controllers/tracks.py
static/scripts/packed/trackster.js
static/scripts/trackster.js
diffs (238 lines):
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 datatypes_conf.xml.sample
--- a/datatypes_conf.xml.sample	Fri Sep 25 17:21:26 2009 -0400
+++ b/datatypes_conf.xml.sample	Fri Sep 25 18:47:40 2009 -0400
@@ -7,6 +7,7 @@
     <datatype extension="bed" type="galaxy.datatypes.interval:Bed" display_in_upload="true">
       <converter file="bed_to_gff_converter.xml" target_datatype="gff"/>
       <converter file="interval_to_coverage.xml" target_datatype="coverage"/>
+      <converter file="bed_to_interval_index_converter.xml" target_datatype="interval_index"/>
     </datatype>
     <datatype extension="binseq.zip" type="galaxy.datatypes.images:Binseq" mimetype="application/zip" display_in_upload="true"/>
     <datatype extension="len" type="galaxy.datatypes.chrominfo:ChromInfo" display_in_upload="true">
@@ -61,6 +62,7 @@
       <converter file="wiggle_to_array_tree_converter.xml" target_datatype="array_tree"/>
     </datatype>
     <datatype extension="array_tree" type="galaxy.datatypes.data:Data" />
+    <datatype extension="interval_index" type="galaxy.datatypes.data:Data" />
     <!-- EMBOSS TOOLS -->
     <datatype extension="acedb" type="galaxy.datatypes.data:Text"/>
     <datatype extension="asn1" type="galaxy.datatypes.data:Text"/>
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/datatypes/converters/bed_to_interval_index_converter.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/datatypes/converters/bed_to_interval_index_converter.py	Fri Sep 25 18:47:40 2009 -0400
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+from __future__ import division
+
+import sys
+from galaxy import eggs
+import pkg_resources; pkg_resources.require( "bx-python" )
+from bx.interval_index_file import Indexes
+
+def main():
+    
+    input_fname = sys.argv[1]
+    out_fname = sys.argv[2]
+    index = Indexes()
+    offset = 0
+    
+    for line in open(input_fname, "r"):
+        feature = line.split()
+        if feature[0] == "track":
+            offset += len(line)
+            continue
+        chrom = feature[0]
+        chrom_start = int(feature[1])
+        chrom_end = int(feature[2])
+        index.add( chrom, chrom_start, chrom_end, offset )
+        offset += len(line)
+    
+    index.write( open(out_fname, "w") )
+
+if __name__ == "__main__": 
+    main()
+    
\ No newline at end of file
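
The converter above writes a bx-python interval index whose entries map (chrom, start, end) to the byte offset of the matching line in the source BED file. Below is a minimal sketch, not part of the changeset, of how such an index can be queried once it exists; it assumes bx-python is installed, and the file names and region are hypothetical.

from bx.interval_index_file import Indexes

def features_overlapping(bed_fname, index_fname, chrom, start, end):
    # Load the index written by the converter, then seek straight to each hit.
    index = Indexes(index_fname)
    hits = []
    with open(bed_fname) as bed:
        for hit_start, hit_end, offset in index.find(chrom, start, end):
            bed.seek(offset)
            hits.append(bed.readline().split())
    return hits

# e.g. features_overlapping("genes.bed", "genes.interval_index", "chr1", 0, 100000)
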
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/datatypes/converters/bed_to_interval_index_converter.xml
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/datatypes/converters/bed_to_interval_index_converter.xml	Fri Sep 25 18:47:40 2009 -0400
@@ -0,0 +1,14 @@
+<tool id="CONVERTER_bed_to_interval_index_0" name="Convert BED to Interval Index" version="1.0.0">
+<!--  <description>__NOT_USED_CURRENTLY_FOR_CONVERTERS__</description> -->
+  <command interpreter="python">bed_to_interval_index_converter.py $input1 $output1</command>
+  <inputs>
+    <page>
+        <param format="bed" name="input1" type="data" label="Choose BED file"/>
+    </page>
+   </inputs>
+  <outputs>
+    <data format="interval_index" name="output1"/>
+  </outputs>
+  <help>
+  </help>
+</tool>
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/visualization/__init__.py
--- a/lib/galaxy/visualization/__init__.py	Fri Sep 25 17:21:26 2009 -0400
+++ b/lib/galaxy/visualization/__init__.py	Fri Sep 25 18:47:40 2009 -0400
@@ -1,3 +1,3 @@
 """
-Package for Galaxy visulization plugins.
+Package for Galaxy visualization plugins.
 """
\ No newline at end of file
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/visualization/tracks/__init__.py
--- a/lib/galaxy/visualization/tracks/__init__.py	Fri Sep 25 17:21:26 2009 -0400
+++ b/lib/galaxy/visualization/tracks/__init__.py	Fri Sep 25 18:47:40 2009 -0400
@@ -1,3 +1,3 @@
 """
-Package for track style visulization using the trackster UI.
+Package for track style visualization using the trackster UI.
 """
\ No newline at end of file
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/visualization/tracks/data/array_tree.py
--- a/lib/galaxy/visualization/tracks/data/array_tree.py	Fri Sep 25 17:21:26 2009 -0400
+++ b/lib/galaxy/visualization/tracks/data/array_tree.py	Fri Sep 25 18:47:40 2009 -0400
@@ -1,5 +1,5 @@
 """
-Array tree data provider for Galaxy track browser. 
+Array tree data provider for the Galaxy track browser. 
 """
 
 import pkg_resources; pkg_resources.require( "bx-python" )
@@ -16,7 +16,7 @@
 BLOCK_SIZE = 1000
 
 class ArrayTreeDataProvider( object ):
-    def __init__( self, dataset ):
+    def __init__( self, dataset, original_dataset ):
         self.dataset = dataset
     
     def get_stats( self, chrom ):
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/visualization/tracks/data/interval_index.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/visualization/tracks/data/interval_index.py	Fri Sep 25 18:47:40 2009 -0400
@@ -0,0 +1,41 @@
+"""
+Interval index data provider for the Galaxy track browser.
+Kanwei Li, 2009
+"""
+
+import pkg_resources; pkg_resources.require( "bx-python" )
+from bx.interval_index_file import Indexes
+
+class IntervalIndexDataProvider( object ):
+    def __init__( self, converted_dataset, original_dataset ):
+        self.original_dataset = original_dataset
+        self.converted_dataset = converted_dataset
+    
+    def get_data( self, chrom, start, end ):
+        start, end = int(start), int(end)
+        chrom = str(chrom)
+        source = open( self.original_dataset.file_name )
+        index = Indexes( self.converted_dataset.file_name )
+        results = []
+        
+        for start, end, offset in index.find(chrom, start, end):
+            source.seek(offset)
+            feature = source.readline().split()
+            payload = { 'start': start, 'end': end, 'name': feature[3] }
+            try:
+                block_sizes = [ int(n) for n in feature[10].split(',') if n != '']
+                block_starts = [ int(n) for n in feature[11].split(',') if n != '' ]
+                blocks = zip(block_sizes, block_starts)
+                payload['block_start_end'] = [ (chrom_start + block[1], chrom_start + block[1] + block[0]) for block in blocks]
+            except:
+                pass
+    
+            try:
+                payload['exon_start'] = int(feature[6])
+                payload['exon_end'] = int(feature[7])
+            except:
+                pass
+
+            results.append(payload)
+        
+        return results
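
The 'block_start_end' values assembled above follow standard BED12 semantics: column 11 (blockSizes) and column 12 (blockStarts) are comma-separated lists, with block starts given relative to chromStart. A small standalone illustration of that arithmetic, not part of the changeset, using a made-up BED12 line:

line = "chr1\t1000\t5000\tmy_gene\t0\t+\t1200\t4800\t0\t3\t100,200,300,\t0,1500,3700,"
feature = line.split("\t")
chrom_start = int(feature[1])
block_sizes = [int(n) for n in feature[10].split(",") if n != ""]
block_starts = [int(n) for n in feature[11].split(",") if n != ""]
# Absolute (start, end) of each block on the chromosome.
block_start_end = [(chrom_start + s, chrom_start + s + size)
                   for size, s in zip(block_sizes, block_starts)]
# block_start_end == [(1000, 1100), (2500, 2700), (4700, 5000)]
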
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py	Fri Sep 25 17:21:26 2009 -0400
+++ b/lib/galaxy/web/controllers/tracks.py	Fri Sep 25 18:47:40 2009 -0400
@@ -24,6 +24,7 @@
 from galaxy.util.bunch import Bunch
 
 from galaxy.visualization.tracks.data.array_tree import ArrayTreeDataProvider
+from galaxy.visualization.tracks.data.interval_index import IntervalIndexDataProvider
 
 # Message strings returned to browser
 messages = Bunch(
@@ -36,18 +37,20 @@
 # Dataset type required for each track type. This needs to be more flexible,
 # there might be multiple types of indexes that suffice for a given track type.
 track_type_to_dataset_type = {
-    "line": "array_tree"
+    "line": "array_tree",
+    "feature": "interval_index"
 }
 
 # Mapping from dataset type to a class that can fetch data from a file of that
 # type. This also needs to be more flexible.
 dataset_type_to_data_provider = {
-    "array_tree": ArrayTreeDataProvider
+    "array_tree": ArrayTreeDataProvider,
+    "interval_index": IntervalIndexDataProvider
 }
 
 # FIXME: hardcoding this for now, but it should be derived from the available
 #        converters
-browsable_types = set( ["wig" ] )
+browsable_types = set( ["wig", "bed" ] )
 
 class TracksController( BaseController ):
     """
@@ -66,7 +69,7 @@
         to 'index' once datasets to browse have been selected.
         """
         session = trans.sa_session
-        # If the user clicked the submit button explicately, try to build the browser
+        # If the user clicked the submit button explicitly, try to build the browser
         if browse and dataset_ids:
             if not isinstance( dataset_ids, list ):
                 dataset_ids = [ dataset_ids ]    
@@ -183,7 +186,7 @@
             return messages.PENDING
         # We have a dataset in the right format that is ready to use, wrap in
         # a data provider that knows how to access it
-        data_provider = dataset_type_to_data_provider[ converted_dataset_type ]( converted_dataset )
+        data_provider = dataset_type_to_data_provider[ converted_dataset_type ]( converted_dataset, dataset )
         
         # Return stats if we need them
         if stats: return data_provider.get_stats( chrom )
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 static/scripts/packed/trackster.js
--- a/static/scripts/packed/trackster.js	Fri Sep 25 17:21:26 2009 -0400
+++ b/static/scripts/packed/trackster.js	Fri Sep 25 18:47:40 2009 -0400
@@ -1,1 +1,1 @@
-var DENSITY=1000;var DataCache=function(b,a){this.type=b;this.track=a;this.cache=Object()};$.extend(DataCache.prototype,{get:function(d,b){var c=this.cache;if(!(c[d]&&c[d][b])){if(!c[d]){c[d]=Object()}var a=b*DENSITY*d;var e=(b+1)*DENSITY*d;c[d][b]={state:"loading"};$.getJSON(data_url,{track_type:this.track.track_type,chrom:this.track.view.chrom,low:a,high:e,dataset_id:this.track.dataset_id},function(f){if(f=="pending"){setTimeout(fetcher,5000)}else{c[d][b]={state:"loaded",values:f}}$(document).trigger("redraw")})}return c[d][b]}});var View=function(a,b){this.chrom=a;this.tracks=[];this.max_low=0;this.max_high=b;this.low=this.max_low;this.high=this.max_high;this.length=this.max_high-this.max_low};$.extend(View.prototype,{add_track:function(a){a.view=this;this.tracks.push(a);if(a.init){a.init()}},redraw:function(){$("#overview-box").css({left:(this.low/this.length)*$("#overview-viewport").width(),width:Math.max(4,((this.high-this.low)/this.length)*$("#overview-viewport").widt
 h())}).show();$("#low").text(this.low);$("#high").text(this.high);for(var a in this.tracks){this.tracks[a].draw()}$("#bottom-spacer").remove();$("#viewport").append('<div id="bottom-spacer" style="height: 200px;"></div>')},move:function(b,a){this.low=Math.max(this.max_low,Math.floor(b));this.high=Math.min(this.length,Math.ceil(a))},zoom_in:function(d,b){var c=this.high-this.low;var e=c/d/2;if(b==undefined){var a=(this.low+this.high)/2}else{var a=this.low+c*b/$(document).width()}this.low=Math.floor(a-e);this.high=Math.ceil(a+e);if(this.low<this.max_low){this.low=this.max_low;this.high=c/d}else{if(this.high>this.max_high){this.high=this.max_high;this.low=this.max_high-c/d}}if(this.high-this.low<1){this.high=this.low+1}},zoom_out:function(c){var a=(this.low+this.high)/2;var b=this.high-this.low;var d=b*c/2;this.low=Math.floor(Math.max(0,a-d));this.high=Math.ceil(Math.min(this.length,a+d))},left:function(b){var a=this.high-this.low;var c=Math.floor(a/b);if(this.low-c<0){this.low
 =0;this.high=this.low+a}else{this.low-=c;this.high-=c}},right:function(b){var a=this.high-this.low;var c=Math.floor(a/b);if(this.high+c>this.length){this.high=this.length;this.low=this.high-a}else{this.low+=c;this.high+=c}}});var Track=function(a,b){this.name=a;this.parent_element=b;this.make_container()};$.extend(Track.prototype,{make_container:function(){this.header_div=$("<div class='track-header'>").text(this.name);this.content_div=$("<div class='track-content'>");this.container_div=$("<div class='track'></div>").append(this.header_div).append(this.content_div);this.parent_element.append(this.container_div)}});var TiledTrack=function(){this.last_resolution=null;this.last_w_scale=null;this.tile_cache={}};$.extend(TiledTrack.prototype,Track.prototype,{draw:function(){var k=this.view.low,c=this.view.high,e=c-k;var b=Math.pow(10,Math.ceil(Math.log(e/DENSITY)/Math.log(10)));b=Math.max(b,1);b=Math.min(b,100000);var o=$("<div style='position: relative;'></div>");this.content_di
 v.children(":first").remove();this.content_div.append(o);var m=this.content_div.width(),d=this.content_div.height(),p=m/e,l={},n={};if(this.last_resolution==b&&this.last_w_scale==p){l=this.tile_cache}var g;var a=Math.floor(k/b/DENSITY);var i=0;while((a*1000*b)<c){if(a in l){g=l[a];var f=a*DENSITY*b;g.css({left:(f-this.view.low)*p});o.append(g)}else{g=this.draw_tile(b,a,o,p,d)}if(g){n[a]=g;i=Math.max(i,g.height())}a+=1}o.css("height",i);this.last_resolution=b;this.last_w_scale=p;this.tile_cache=n}});var LineTrack=function(c,b,a){Track.call(this,c,$("#viewport"));this.track_type="line";this.height_px=(a?a:100);this.container_div.addClass("line-track");this.dataset_id=b;this.cache=new DataCache("",this)};$.extend(LineTrack.prototype,TiledTrack.prototype,{make_container:function(){Track.prototype.make_container.call(this);this.content_div.css("height",this.height_px)},init:function(){track=this;$.getJSON(data_url,{stats:true,track_type:track.track_type,chrom:this.view.chrom,low:
 null,high:null,dataset_id:this.dataset_id},function(a){if(a){track.min_value=a.min;track.max_value=a.max;track.vertical_range=track.max_value-track.min_value;track.view.redraw()}})},draw_tile:function(d,a,o,s,p){if(!this.vertical_range){return}var k=a*DENSITY*d,r=(a+1)*DENSITY*d,c=DENSITY*d;var n=this.cache.get(d,a);var h;if(n.state=="loading"){h=$("<div class='loading tile'></div>")}else{h=$("<canvas class='tile'></canvas>")}h.css({position:"absolute",top:0,left:(k-this.view.low)*s,});o.append(h);if(n.state=="loading"){e=false;return null}var b=h;b.get(0).width=Math.ceil(c*s);b.get(0).height=this.height_px;var q=b.get(0).getContext("2d");var e=false;q.beginPath();var g=n.values;if(!g){return}for(var f=0;f<g.length-1;f++){var m=g[f][0]-k;var l=g[f][1];if(isNaN(l)){e=false}else{m=m*s;y_above_min=l-this.min_value;l=y_above_min/this.vertical_range*this.height_px;if(e){q.lineTo(m,l)}else{q.moveTo(m,l);e=true}}}q.stroke();return h}});var LabelTrack=function(a){Track.call(this,nul
 l,a);this.container_div.addClass("label-track")};$.extend(LabelTrack.prototype,Track.prototype,{draw:function(){var c=this.view,d=c.high-c.low,g=Math.floor(Math.pow(10,Math.floor(Math.log(d)/Math.log(10)))),a=Math.floor(c.low/g)*g,e=this.content_div.width(),b=$("<div style='position: relative; height: 1.3em;'></div>");while(a<c.high){var f=(a-c.low)/d*e;b.append($("<div class='label'>"+a+"</div>").css({position:"absolute",left:f-1}));a+=g}this.content_div.children(":first").remove();this.content_div.append(b)}});var itemHeight=13,itemPad=3,thinHeight=7,thinOffset=3;var FeatureTrack=function(b,a){Track.call(this,b,$("#viewport"));this.track_type="feature";this.container_div.addClass("feature-track");this.dataset_id=a;this.zo_slots=new Object();this.show_labels_scale=0.01;this.showing_labels=false};$.extend(FeatureTrack.prototype,TiledTrack.prototype,{calc_slots:function(d){end_ary=new Array();var c=this.container_div.width()/(this.view.high-this.view.low);if(d){this.zi_slots=
 new Object()}var b=$("<canvas></canvas>").get(0).getContext("2d");for(var a in this.values){feature=this.values[a];f_start=Math.floor(Math.max(this.view.max_low,(feature.start-this.view.max_low)*c));if(d){f_start-=b.measureText(feature.name).width}f_end=Math.ceil(Math.min(this.view.max_high,(feature.end-this.view.max_low)*c));j=0;while(true){if(end_ary[j]==undefined||end_ary[j]<f_start){end_ary[j]=f_end;if(d){this.zi_slots[feature.name]=j}else{this.zo_slots[feature.name]=j}break}j++}}},init:function(){var a=this;$.getJSON("getfeature",{start:this.view.max_low,end:this.view.max_high,dataset_id:this.dataset_id,chrom:this.view.chrom},function(b){a.values=b;a.calc_slots();a.slots=a.zo_slots;a.draw()})},draw_tile:function(q,t,e,g,f){if(!this.values){return null}if(g>this.show_labels_scale&&!this.showing_labels){this.showing_labels=true;if(!this.zi_slots){this.calc_slots(true)}this.slots=this.zi_slots}else{if(g<=this.show_labels_scale&&this.showing_labels){this.showing_labels=fals
 e;this.slots=this.zo_slots}}var u=t*DENSITY*q,c=(t+1)*DENSITY*q,b=DENSITY*q;var k=this.view,m=k.high-k.low,o=Math.ceil(b*g),h=new Array(),n=200,l=$("<canvas class='tile'></canvas>");l.css({position:"absolute",top:0,left:(u-this.view.low)*g,"border-right":"1px solid #ddd"});l.get(0).width=o;l.get(0).height=n;var p=l.get(0).getContext("2d");var r=0;for(var s in this.values){feature=this.values[s];if(feature.start<=c&&feature.end>=u){f_start=Math.floor(Math.max(0,(feature.start-u)*g));f_end=Math.ceil(Math.min(o,(feature.end-u)*g));p.fillStyle="#000";p.fillRect(f_start,this.slots[feature.name]*10+5,f_end-f_start,1);if(this.showing_labels&&p.fillText){p.font="10px monospace";p.textAlign="right";p.fillText(feature.name,f_start,this.slots[feature.name]*10+8)}if(feature.exon_start&&feature.exon_end){var d=Math.floor(Math.max(0,(feature.exon_start-u)*g));var w=Math.ceil(Math.min(o,(feature.exon_end-u)*g))}for(var s in feature.blocks){block=feature.blocks[s];block_start=Math.floor(Mat
 h.max(0,(block[0]-u)*g));block_end=Math.ceil(Math.min(o,(block[1]-u)*g));var a=3,v=4;if(d&&block_start>=d&&block_end<=w){a=5,v=3}p.fillRect(d,this.slots[feature.name]*10+v,block_end-block_start,a)}r++}}e.append(l);return l},});
\ No newline at end of file
+var DENSITY=1000;var DataCache=function(b,a){this.type=b;this.track=a;this.cache=Object()};$.extend(DataCache.prototype,{get:function(d,b){var c=this.cache;if(!(c[d]&&c[d][b])){if(!c[d]){c[d]=Object()}var a=b*DENSITY*d;var e=(b+1)*DENSITY*d;c[d][b]={state:"loading"};$.getJSON(data_url,{track_type:this.track.track_type,chrom:this.track.view.chrom,low:a,high:e,dataset_id:this.track.dataset_id},function(f){if(f=="pending"){setTimeout(fetcher,5000)}else{c[d][b]={state:"loaded",values:f}}$(document).trigger("redraw")})}return c[d][b]}});var View=function(a,b){this.chrom=a;this.tracks=[];this.max_low=0;this.max_high=b;this.low=this.max_low;this.high=this.max_high;this.length=this.max_high-this.max_low};$.extend(View.prototype,{add_track:function(a){a.view=this;this.tracks.push(a);if(a.init){a.init()}},redraw:function(){$("#overview-box").css({left:(this.low/this.length)*$("#overview-viewport").width(),width:Math.max(4,((this.high-this.low)/this.length)*$("#overview-viewport").widt
 h())}).show();$("#low").text(this.low);$("#high").text(this.high);for(var a in this.tracks){this.tracks[a].draw()}$("#bottom-spacer").remove();$("#viewport").append('<div id="bottom-spacer" style="height: 200px;"></div>')},move:function(b,a){this.low=Math.max(this.max_low,Math.floor(b));this.high=Math.min(this.length,Math.ceil(a))},zoom_in:function(d,b){if(this.max_high==0){return}var c=this.high-this.low;var e=c/d/2;if(b==undefined){var a=(this.low+this.high)/2}else{var a=this.low+c*b/$(document).width()}this.low=Math.floor(a-e);this.high=Math.ceil(a+e);if(this.low<this.max_low){this.low=this.max_low;this.high=c/d}else{if(this.high>this.max_high){this.high=this.max_high;this.low=this.max_high-c/d}}if(this.high-this.low<1){this.high=this.low+1}},zoom_out:function(c){if(this.max_high==0){return}var a=(this.low+this.high)/2;var b=this.high-this.low;var d=b*c/2;this.low=Math.floor(Math.max(0,a-d));this.high=Math.ceil(Math.min(this.length,a+d))},left:function(b){var a=this.high-
 this.low;var c=Math.floor(a/b);if(this.low-c<0){this.low=0;this.high=this.low+a}else{this.low-=c;this.high-=c}},right:function(b){var a=this.high-this.low;var c=Math.floor(a/b);if(this.high+c>this.length){this.high=this.length;this.low=this.high-a}else{this.low+=c;this.high+=c}}});var Track=function(a,b){this.name=a;this.parent_element=b;this.make_container()};$.extend(Track.prototype,{make_container:function(){this.header_div=$("<div class='track-header'>").text(this.name);this.content_div=$("<div class='track-content'>");this.container_div=$("<div class='track'></div>").append(this.header_div).append(this.content_div);this.parent_element.append(this.container_div)}});var TiledTrack=function(){this.last_resolution=null;this.last_w_scale=null;this.tile_cache={}};$.extend(TiledTrack.prototype,Track.prototype,{draw:function(){var k=this.view.low,c=this.view.high,e=c-k;var b=Math.pow(10,Math.ceil(Math.log(e/DENSITY)/Math.log(10)));b=Math.max(b,1);b=Math.min(b,100000);var o=$("<
 div style='position: relative;'></div>");this.content_div.children(":first").remove();this.content_div.append(o);var m=this.content_div.width(),d=this.content_div.height(),p=m/e,l={},n={};if(this.last_resolution==b&&this.last_w_scale==p){l=this.tile_cache}var g;var a=Math.floor(k/b/DENSITY);var i=0;while((a*1000*b)<c){if(a in l){g=l[a];var f=a*DENSITY*b;g.css({left:(f-this.view.low)*p});o.append(g)}else{g=this.draw_tile(b,a,o,p,d)}if(g){n[a]=g;i=Math.max(i,g.height())}a+=1}o.css("height",i);this.last_resolution=b;this.last_w_scale=p;this.tile_cache=n}});var LineTrack=function(c,b,a){Track.call(this,c,$("#viewport"));this.track_type="line";this.height_px=(a?a:100);this.container_div.addClass("line-track");this.dataset_id=b;this.cache=new DataCache("",this)};$.extend(LineTrack.prototype,TiledTrack.prototype,{make_container:function(){Track.prototype.make_container.call(this);this.content_div.css("height",this.height_px)},init:function(){track=this;$.getJSON(data_url,{stats:tru
 e,track_type:track.track_type,chrom:this.view.chrom,low:null,high:null,dataset_id:this.dataset_id},function(a){if(a){track.min_value=a.min;track.max_value=a.max;track.vertical_range=track.max_value-track.min_value;track.view.redraw()}})},draw_tile:function(d,a,o,s,p){if(!this.vertical_range){return}var k=a*DENSITY*d,r=(a+1)*DENSITY*d,c=DENSITY*d;var n=this.cache.get(d,a);var h;if(n.state=="loading"){h=$("<div class='loading tile'></div>")}else{h=$("<canvas class='tile'></canvas>")}h.css({position:"absolute",top:0,left:(k-this.view.low)*s,});o.append(h);if(n.state=="loading"){e=false;return null}var b=h;b.get(0).width=Math.ceil(c*s);b.get(0).height=this.height_px;var q=b.get(0).getContext("2d");var e=false;q.beginPath();var g=n.values;if(!g){return}for(var f=0;f<g.length-1;f++){var m=g[f][0]-k;var l=g[f][1];if(isNaN(l)){e=false}else{m=m*s;y_above_min=l-this.min_value;l=y_above_min/this.vertical_range*this.height_px;if(e){q.lineTo(m,l)}else{q.moveTo(m,l);e=true}}}q.stroke();re
 turn h}});var LabelTrack=function(a){Track.call(this,null,a);this.container_div.addClass("label-track")};$.extend(LabelTrack.prototype,Track.prototype,{draw:function(){var c=this.view,d=c.high-c.low,g=Math.floor(Math.pow(10,Math.floor(Math.log(d)/Math.log(10)))),a=Math.floor(c.low/g)*g,e=this.content_div.width(),b=$("<div style='position: relative; height: 1.3em;'></div>");while(a<c.high){var f=(a-c.low)/d*e;b.append($("<div class='label'>"+a+"</div>").css({position:"absolute",left:f-1}));a+=g}this.content_div.children(":first").remove();this.content_div.append(b)}});var itemHeight=13,itemPad=3,thinHeight=7,thinOffset=3;var FeatureTrack=function(b,a){Track.call(this,b,$("#viewport"));this.track_type="feature";this.container_div.addClass("feature-track");this.dataset_id=a;this.zo_slots=new Object();this.show_labels_scale=0.01;this.showing_labels=false};$.extend(FeatureTrack.prototype,TiledTrack.prototype,{calc_slots:function(d){end_ary=new Array();var c=this.container_div.wid
 th()/(this.view.high-this.view.low);if(d){this.zi_slots=new Object()}var b=$("<canvas></canvas>").get(0).getContext("2d");for(var a in this.values){feature=this.values[a];f_start=Math.floor(Math.max(this.view.max_low,(feature.start-this.view.max_low)*c));if(d){f_start-=b.measureText(feature.name).width}f_end=Math.ceil(Math.min(this.view.max_high,(feature.end-this.view.max_low)*c));j=0;while(true){if(end_ary[j]==undefined||end_ary[j]<f_start){end_ary[j]=f_end;if(d){this.zi_slots[feature.name]=j}else{this.zo_slots[feature.name]=j}break}j++}}},init:function(){var a=this;$.getJSON(data_url,{track_type:a.track_type,low:a.view.max_low,high:a.view.max_high,dataset_id:a.dataset_id,chrom:a.view.chrom},function(b){a.values=b;a.calc_slots();a.slots=a.zo_slots;a.draw()})},draw_tile:function(q,t,e,g,f){if(!this.values){return null}if(g>this.show_labels_scale&&!this.showing_labels){this.showing_labels=true;if(!this.zi_slots){this.calc_slots(true)}this.slots=this.zi_slots}else{if(g<=this.s
 how_labels_scale&&this.showing_labels){this.showing_labels=false;this.slots=this.zo_slots}}var u=t*DENSITY*q,c=(t+1)*DENSITY*q,b=DENSITY*q;var k=this.view,m=k.high-k.low,o=Math.ceil(b*g),h=new Array(),n=200,l=$("<canvas class='tile'></canvas>");l.css({position:"absolute",top:0,left:(u-this.view.low)*g,});l.get(0).width=o;l.get(0).height=n;var p=l.get(0).getContext("2d");var r=0;for(var s in this.values){feature=this.values[s];if(feature.start<=c&&feature.end>=u){f_start=Math.floor(Math.max(0,(feature.start-u)*g));f_end=Math.ceil(Math.min(o,(feature.end-u)*g));p.fillStyle="#000";p.fillRect(f_start,this.slots[feature.name]*10+5,f_end-f_start,1);if(this.showing_labels&&p.fillText){p.font="10px monospace";p.textAlign="right";p.fillText(feature.name,f_start,this.slots[feature.name]*10+8)}if(feature.exon_start&&feature.exon_end){var d=Math.floor(Math.max(0,(feature.exon_start-u)*g));var w=Math.ceil(Math.min(o,(feature.exon_end-u)*g))}for(var s in feature.blocks){block=feature.bloc
 ks[s];block_start=Math.floor(Math.max(0,(block[0]-u)*g));block_end=Math.ceil(Math.min(o,(block[1]-u)*g));var a=3,v=4;if(d&&block_start>=d&&block_end<=w){a=5,v=3}p.fillRect(d,this.slots[feature.name]*10+v,block_end-block_start,a)}r++}}e.append(l);return l},});
\ No newline at end of file
diff -r d8e3ad46bfa3 -r 93dc1855f0d6 static/scripts/trackster.js
--- a/static/scripts/trackster.js	Fri Sep 25 17:21:26 2009 -0400
+++ b/static/scripts/trackster.js	Fri Sep 25 18:47:40 2009 -0400
@@ -364,7 +364,8 @@
     
     init: function() {
         var track = this;
-        $.getJSON( "getfeature", { 'start': this.view.max_low, 'end': this.view.max_high, 'dataset_id': this.dataset_id, 'chrom': this.view.chrom }, function ( data ) {
+        $.getJSON( data_url, { track_type: track.track_type, low: track.view.max_low, high: track.view.max_high,
+                               dataset_id: track.dataset_id, chrom: track.view.chrom }, function ( data ) {
             track.values = data;
             track.calc_slots();
             track.slots = track.zo_slots;
@@ -402,7 +403,6 @@
             position: "absolute",
             top: 0,
             left: ( tile_low - this.view.low ) * w_scale,
-            "border-right": "1px solid #ddd"
         });
         new_canvas.get(0).width = width;
         new_canvas.get(0).height = height;
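
Taken together, the tracks.py changes dispatch on track type: each track type names the converted dataset type it requires, and that type names a data provider class, which is now constructed with both the converted and the original dataset (the reason ArrayTreeDataProvider gains a second constructor argument). A rough sketch of that dispatch, with stub classes standing in for the real providers:

class ArrayTreeDataProvider(object):
    def __init__(self, converted_dataset, original_dataset):
        self.converted_dataset = converted_dataset
        self.original_dataset = original_dataset

class IntervalIndexDataProvider(object):
    def __init__(self, converted_dataset, original_dataset):
        self.converted_dataset = converted_dataset
        self.original_dataset = original_dataset

# Mirrors the two mappings extended in lib/galaxy/web/controllers/tracks.py.
track_type_to_dataset_type = {"line": "array_tree", "feature": "interval_index"}
dataset_type_to_data_provider = {"array_tree": ArrayTreeDataProvider,
                                 "interval_index": IntervalIndexDataProvider}

def provider_for(track_type, converted_dataset, original_dataset):
    dataset_type = track_type_to_dataset_type[track_type]
    return dataset_type_to_data_provider[dataset_type](converted_dataset, original_dataset)
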
                    02 Oct '09
                    
details:   http://www.bx.psu.edu/hg/galaxy/rev/659713ba1d92
changeset: 2787:659713ba1d92
user:      jeremy goecks <jeremy.goecks at emory.edu>
date:      Sat Sep 26 17:33:21 2009 -0400
description:
Ensure that all user preferences are in unicode.
1 file(s) affected in this change:
lib/galaxy/web/framework/helpers/grids.py
diffs (58 lines):
diff -r 93dc1855f0d6 -r 659713ba1d92 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py	Fri Sep 25 18:47:40 2009 -0400
+++ b/lib/galaxy/web/framework/helpers/grids.py	Sat Sep 26 17:33:21 2009 -0400
@@ -44,19 +44,21 @@
         base_filter = {}
         if self.default_filter:
             base_filter = self.default_filter.copy()
-        base_sort_key = self.default_sort_key            
+        base_sort_key = self.default_sort_key
         if self.preserve_state:
+            pref_name = unicode( self.__class__.__name__ + self.cur_filter_pref_name )
             saved_filter_pref = trans.sa_session.query( UserPreference ).\
-                                    filter_by( name=self.__class__.__name__ + self.cur_filter_pref_name, user_id=trans.get_user().id ).first()
+                                    filter_by( name=pref_name, user_id=trans.get_user().id ).first()
             if saved_filter_pref:
                 saved_filter = from_json_string( saved_filter_pref.value )
                 base_filter.update( saved_filter )
             
+            pref_name = unicode( self.__class__.__name__ + self.cur_sort_key_pref_name )
             saved_sort_key_pref = trans.sa_session.query( UserPreference ).\
-                                    filter_by( name=self.__class__.__name__ + self.cur_sort_key_pref_name, user_id=trans.get_user().id ).first()
+                                    filter_by( name=pref_name, user_id=trans.get_user().id ).first()
             if saved_sort_key_pref:
                 base_sort_key = from_json_string( saved_sort_key_pref.value )
-        
+                        
         # Build initial query
         query = self.build_initial_query( session )
         query = self.apply_default_filter( trans, query, **kwargs )
@@ -110,19 +112,20 @@
         
         # Save current filter and sort key.
         if self.preserve_state:
-            pref_name = self.__class__.__name__ + self.cur_filter_pref_name
+            pref_name = unicode( self.__class__.__name__ + self.cur_filter_pref_name )
             if not saved_filter_pref:
                 saved_filter_pref = UserPreference( name=pref_name )
                 trans.get_user().preferences.append( saved_filter_pref )
-            saved_filter_pref.value = to_json_string( cur_filter_dict )
-            if not saved_sort_key_pref:
-                pref_name = self.__class__.__name__ + self.cur_sort_key_pref_name
+            saved_filter_pref.value = unicode( to_json_string( cur_filter_dict ) )
+            if sort_key:
                 if not saved_sort_key_pref:
-                    saved_sort_key_pref = UserPreference( name=pref_name )
-                    trans.get_user().preferences.append( saved_sort_key_pref )
-            saved_sort_key_pref.value = to_json_string( sort_key )
+                    pref_name = unicode( self.__class__.__name__ + self.cur_sort_key_pref_name )
+                    if not saved_sort_key_pref:
+                        saved_sort_key_pref = UserPreference( name=pref_name )
+                        trans.get_user().preferences.append( saved_sort_key_pref )
+                saved_sort_key_pref.value = unicode( to_json_string( sort_key ) )
             trans.sa_session.flush()
-        
+            
         # Render grid.
         def url( *args, **kwargs ):
             # Only include sort/filter arguments if not linking to another
details:   http://www.bx.psu.edu/hg/galaxy/rev/f7459ad62be9
changeset: 2788:f7459ad62be9
user:      jeremy goecks <jeremy.goecks at emory.edu>
date:      Sat Sep 26 18:05:36 2009 -0400
description:
Turn off grid state preservation.
1 file(s) affected in this change:
lib/galaxy/web/framework/helpers/grids.py
diffs (12 lines):
diff -r 659713ba1d92 -r f7459ad62be9 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py	Sat Sep 26 17:33:21 2009 -0400
+++ b/lib/galaxy/web/framework/helpers/grids.py	Sat Sep 26 18:05:36 2009 -0400
@@ -22,7 +22,7 @@
     standard_filters = []
     default_filter = None
     default_sort_key = None
-    preserve_state = True
+    preserve_state = False
     # Set preference names.
     cur_filter_pref_name = ".filter"
     cur_sort_key_pref_name = ".sort_key"    
details:   http://www.bx.psu.edu/hg/galaxy/rev/d8e3ad46bfa3
changeset: 2785:d8e3ad46bfa3
user:      jeremy goecks <jeremy.goecks(a)emory.edu>
date:      Fri Sep 25 17:21:26 2009 -0400
description:
Fix migration script naming collision.
2 file(s) affected in this change:
lib/galaxy/model/migrate/versions/0020_user_prefs.py
lib/galaxy/model/migrate/versions/0021_user_prefs.py
diffs (100 lines):
diff -r 6f8b5f1e8ec9 -r d8e3ad46bfa3 lib/galaxy/model/migrate/versions/0020_user_prefs.py
--- a/lib/galaxy/model/migrate/versions/0020_user_prefs.py	Fri Sep 25 17:07:13 2009 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-"""
-This migration script adds a user preferences table to Galaxy.
-"""
-
-from sqlalchemy import *
-from migrate import *
-
-import datetime
-now = datetime.datetime.utcnow
-
-import logging
-log = logging.getLogger( __name__ )
-
-metadata = MetaData( migrate_engine )
-
-def display_migration_details():
-    print ""
-    print "This migration script adds a user preferences table to Galaxy."
-    print ""
-
-
-# New table to support user preferences.
-
-UserPreference_table = Table( "user_preference", metadata,
-    Column( "id", Integer, primary_key=True ),
-    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
-    Column( "name", Unicode( 255 ), index=True),
-    Column( "value", Unicode( 1024 ) ) )
-
-def upgrade():
-    display_migration_details()
-    metadata.reflect()
-    try:
-        UserPreference_table.create()
-    except Exception, e:
-        print str(e)
-        log.debug( "Creating user_preference table failed: %s" % str( e ) )
-
-def downgrade():
-    metadata.reflect()
-    try:
-        UserPreference_table.drop()
-    except Exception, e:
-        print str(e)
-        log.debug( "Dropping user_preference table failed: %s" % str( e ) )
\ No newline at end of file
diff -r 6f8b5f1e8ec9 -r d8e3ad46bfa3 lib/galaxy/model/migrate/versions/0021_user_prefs.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0021_user_prefs.py	Fri Sep 25 17:21:26 2009 -0400
@@ -0,0 +1,45 @@
+"""
+This migration script adds a user preferences table to Galaxy.
+"""
+
+from sqlalchemy import *
+from migrate import *
+
+import datetime
+now = datetime.datetime.utcnow
+
+import logging
+log = logging.getLogger( __name__ )
+
+metadata = MetaData( migrate_engine )
+
+def display_migration_details():
+    print ""
+    print "This migration script adds a user preferences table to Galaxy."
+    print ""
+
+
+# New table to support user preferences.
+
+UserPreference_table = Table( "user_preference", metadata,
+    Column( "id", Integer, primary_key=True ),
+    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
+    Column( "name", Unicode( 255 ), index=True),
+    Column( "value", Unicode( 1024 ) ) )
+
+def upgrade():
+    display_migration_details()
+    metadata.reflect()
+    try:
+        UserPreference_table.create()
+    except Exception, e:
+        print str(e)
+        log.debug( "Creating user_preference table failed: %s" % str( e ) )
+
+def downgrade():
+    metadata.reflect()
+    try:
+        UserPreference_table.drop()
+    except Exception, e:
+        print str(e)
+        log.debug( "Dropping user_preference table failed: %s" % str( e ) )
\ No newline at end of file
                    02 Oct '09
                    
details:   http://www.bx.psu.edu/hg/galaxy/rev/e5d57c9e2824
changeset: 2782:e5d57c9e2824
user:      Kelly Vincent <kpvincent(a)bx.psu.edu>
date:      Fri Sep 25 16:49:30 2009 -0400
description:
Fixed an error in the help section of the Sam Pileup tool config
1 file(s) affected in this change:
tools/samtools/sam_pileup.xml
diffs (33 lines):
diff -r dd50d8d45177 -r e5d57c9e2824 tools/samtools/sam_pileup.xml
--- a/tools/samtools/sam_pileup.xml	Fri Sep 25 14:50:34 2009 -0400
+++ b/tools/samtools/sam_pileup.xml	Fri Sep 25 16:49:30 2009 -0400
@@ -85,9 +85,9 @@
 
 **Types of pileup datasets**
 
-The description of pileup format below is largely based on information that can be found on SAMTools_ documentation page. The 6- and 10-column variants are described below.
+The description of pileup format below is largely based on information that can be found on SAMTools Pileup_ documentation page. The 6- and 10-column variants are described below.
 
-.. _SAMTools: http://samtools.sourceforge.net/pileup.shtml
+.. _Pileup: http://samtools.sourceforge.net/pileup.shtml
 
 **Six column pileup**::
 
@@ -111,7 +111,7 @@
        
 **Ten column pileup**
 
-The `ten-column`__ pileup incorporates additional consensus information generated with *-c* option of *samtools pileup* command::
+The `ten-column` (consensus_) pileup incorporates additional consensus information generated with *-c* option of *samtools pileup* command::
 
 
     1    2  3  4   5   6   7   8       9       10
@@ -137,7 +137,7 @@
       10 Quality values (phred33 scale, see Galaxy wiki for more)
 
 
-.. __: http://samtools.sourceforge.net/cns0.shtml
+.. _consensus: http://samtools.sourceforge.net/cns0.shtml
 
 
   </help>
                    
                    02 Oct '09
                    
details:   http://www.bx.psu.edu/hg/galaxy/rev/83dc9981e3c4
changeset: 2783:83dc9981e3c4
user:      jeremy goecks <jeremy.goecks(a)emory.edu>
date:      Fri Sep 25 17:06:45 2009 -0400
description:
Grid states (filter, sorting) can be preserved.
5 file(s) affected in this change:
lib/galaxy/model/__init__.py
lib/galaxy/model/mapping.py
lib/galaxy/model/migrate/versions/0020_user_prefs.py
lib/galaxy/web/framework/helpers/grids.py
templates/history/grid.mako
diffs (252 lines):
diff -r 6a86a558f405 -r 83dc9981e3c4 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py	Fri Sep 25 11:32:47 2009 -0400
+++ b/lib/galaxy/model/__init__.py	Fri Sep 25 17:06:45 2009 -0400
@@ -1306,6 +1306,12 @@
 
 class PageTagAssociation ( ItemTagAssociation ):
     pass
+    
+class UserPreference ( object ):
+    def __init( self, user_id=None, name=None, value=None ):
+        self.user_id = user_id
+        self.name = name
+        self.value = value
 
 
 ## ---- Utility methods -------------------------------------------------------
diff -r 6a86a558f405 -r 83dc9981e3c4 lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py	Fri Sep 25 11:32:47 2009 -0400
+++ b/lib/galaxy/model/mapping.py	Fri Sep 25 17:06:45 2009 -0400
@@ -585,7 +585,13 @@
     Column( "user_tname", TrimmedString(255), index=True),
     Column( "value", TrimmedString(255), index=True),
     Column( "user_value", TrimmedString(255), index=True) )
-
+    
+UserPreference.table = Table( "user_preference", metadata,
+    Column( "id", Integer, primary_key=True ),
+    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
+    Column( "name", Unicode( 255 ), index=True),
+    Column( "value", Unicode( 1024 ) ) )
+    
 # With the tables defined we can define the mappers and setup the 
 # relationships between the model objects.
 
@@ -741,6 +747,7 @@
                      stored_workflow_menu_entries=relation( StoredWorkflowMenuEntry, backref="user",
                                                             cascade="all, delete-orphan",
                                                             collection_class=ordering_list( 'order_index' ) ),
+                     preferences=relation( UserPreference, backref="user", order_by=UserPreference.table.c.id),
 #                     addresses=relation( UserAddress,
 #                                         primaryjoin=( User.table.c.id == UserAddress.table.c.user_id ) )
                      ) )
@@ -1010,6 +1017,10 @@
     properties=dict( tag=relation(Tag, backref="tagged_pages") ),
                      primary_key=[PageTagAssociation.table.c.page_id, PageTagAssociation.table.c.tag_id]
                )
+               
+assign_mapper( context, UserPreference, UserPreference.table, 
+    properties = {}
+              )
 
 def db_next_hid( self ):
     """
diff -r 6a86a558f405 -r 83dc9981e3c4 lib/galaxy/model/migrate/versions/0020_user_prefs.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0020_user_prefs.py	Fri Sep 25 17:06:45 2009 -0400
@@ -0,0 +1,45 @@
+"""
+This migration script adds a user preferences table to Galaxy.
+"""
+
+from sqlalchemy import *
+from migrate import *
+
+import datetime
+now = datetime.datetime.utcnow
+
+import logging
+log = logging.getLogger( __name__ )
+
+metadata = MetaData( migrate_engine )
+
+def display_migration_details():
+    print ""
+    print "This migration script adds a user preferences table to Galaxy."
+    print ""
+
+
+# New table to support user preferences.
+
+UserPreference_table = Table( "user_preference", metadata,
+    Column( "id", Integer, primary_key=True ),
+    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
+    Column( "name", Unicode( 255 ), index=True),
+    Column( "value", Unicode( 1024 ) ) )
+
+def upgrade():
+    display_migration_details()
+    metadata.reflect()
+    try:
+        UserPreference_table.create()
+    except Exception, e:
+        print str(e)
+        log.debug( "Creating user_preference table failed: %s" % str( e ) )
+
+def downgrade():
+    metadata.reflect()
+    try:
+        UserPreference_table.drop()
+    except Exception, e:
+        print str(e)
+        log.debug( "Dropping user_preference table failed: %s" % str( e ) )
\ No newline at end of file
diff -r 6a86a558f405 -r 83dc9981e3c4 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py	Fri Sep 25 11:32:47 2009 -0400
+++ b/lib/galaxy/web/framework/helpers/grids.py	Fri Sep 25 17:06:45 2009 -0400
@@ -2,6 +2,7 @@
 from galaxy.model.orm import *
 
 from galaxy.web import url_for
+from galaxy.util.json import from_json_string, to_json_string
 
 import sys, logging
 
@@ -21,6 +22,10 @@
     standard_filters = []
     default_filter = None
     default_sort_key = None
+    preserve_state = True
+    # Set preference names.
+    cur_filter_pref_name = ".filter"
+    cur_sort_key_pref_name = ".sort_key"    
     pass_through_operations = {}
     def __init__( self ):
         # Determine if any multiple row operations are defined
@@ -29,26 +34,47 @@
             if operation.allow_multiple:
                 self.has_multiple_item_operations = True
                 break
+                
     def __call__( self, trans, **kwargs ):
         status = kwargs.get( 'status', None )
         message = kwargs.get( 'message', None )
         session = trans.sa_session
+        
+        # Build a base filter and sort key that is the combination of the saved state and defaults. Saved state takes preference over defaults.
+        base_filter = {}
+        if self.default_filter:
+            base_filter = self.default_filter.copy()
+        base_sort_key = self.default_sort_key            
+        if self.preserve_state:
+            saved_filter_pref = trans.sa_session.query( UserPreference ).\
+                                    filter_by( name=self.__class__.__name__ + self.cur_filter_pref_name, user_id=trans.get_user().id ).first()
+            if saved_filter_pref:
+                saved_filter = from_json_string( saved_filter_pref.value )
+                base_filter.update( saved_filter )
+            
+            saved_sort_key_pref = trans.sa_session.query( UserPreference ).\
+                                    filter_by( name=self.__class__.__name__ + self.cur_sort_key_pref_name, user_id=trans.get_user().id ).first()
+            if saved_sort_key_pref:
+                base_sort_key = from_json_string( saved_sort_key_pref.value )
+        
         # Build initial query
         query = self.build_initial_query( session )
         query = self.apply_default_filter( trans, query, **kwargs )
+        
         # Maintain sort state in generated urls
         extra_url_args = {}
-        # Process filtering arguments to (a) build a query that actuates the filter and (b) builds a
+        
+        # Process filtering arguments to (a) build a query that represents the filter and (b) builds a
         # dictionary that denotes the current filter.
         cur_filter_dict = {}
         for column in self.columns:
             if column.key:
-                # Look for filter criterion in kwargs; if not found, look in default filter.
+                # Look for filter criterion in kwargs; if not found, look in base filter.
                 column_filter = None
                 if "f-" + column.key in kwargs:
                     column_filter = kwargs.get( "f-" + column.key )
-                elif ( self.default_filter ) and ( column.key in self.default_filter ):
-                    column_filter = self.default_filter.get( column.key )
+                elif column.key in base_filter:
+                    column_filter = base_filter.get( column.key )
 
                 # If column filter found, apply it.
                 if column_filter is not None:
@@ -61,13 +87,13 @@
                     if not isinstance( column_filter, basestring ):
                         column_filter = unicode(column_filter)
                     extra_url_args[ "f-" + column.key ] = column_filter.encode("utf-8")
-                  
+                    
         # Process sort arguments
         sort_key = sort_order = None
         if 'sort' in kwargs:
             sort_key = kwargs['sort']
-        elif self.default_sort_key:
-            sort_key = self.default_sort_key
+        elif base_sort_key:
+            sort_key = base_sort_key
         encoded_sort_key = sort_key
         if sort_key:
             if sort_key.startswith( "-" ):
@@ -78,9 +104,26 @@
                 sort_order = 'asc'
                 query = query.order_by( self.model_class.c.get( sort_key ).asc() )
         extra_url_args['sort'] = encoded_sort_key
+        
         # There might be a current row
         current_item = self.get_current_item( trans )
-        # Render
+        
+        # Save current filter and sort key.
+        if self.preserve_state:
+            pref_name = self.__class__.__name__ + self.cur_filter_pref_name
+            if not saved_filter_pref:
+                saved_filter_pref = UserPreference( name=pref_name )
+                trans.get_user().preferences.append( saved_filter_pref )
+            saved_filter_pref.value = to_json_string( cur_filter_dict )
+            if not saved_sort_key_pref:
+                pref_name = self.__class__.__name__ + self.cur_sort_key_pref_name
+                if not saved_sort_key_pref:
+                    saved_sort_key_pref = UserPreference( name=pref_name )
+                    trans.get_user().preferences.append( saved_sort_key_pref )
+            saved_sort_key_pref.value = to_json_string( sort_key )
+            trans.sa_session.flush()
+        
+        # Render grid.
         def url( *args, **kwargs ):
             # Only include sort/filter arguments if not linking to another
             # page. This is a bit of a hack.
diff -r 6a86a558f405 -r 83dc9981e3c4 templates/history/grid.mako
--- a/templates/history/grid.mako	Fri Sep 25 11:32:47 2009 -0400
+++ b/templates/history/grid.mako	Fri Sep 25 17:06:45 2009 -0400
@@ -167,7 +167,7 @@
 
     ## Print grid filter.
     <form name="history_actions" action="javascript:add_tag_to_grid_filter($('#input-tag-filter').attr('value'))" method="get" >
-        <strong>Filter:      </strong>
+        <strong>Filter:   </strong>
         %for column in grid.columns:
             %if column.filterable:
                 <span> by ${column.label.lower()}:</span>
@@ -194,14 +194,14 @@
                         <span class="filter"><a href="${url( filter.get_url_args() )}">${filter.label}</a></span>
                     %endif
                 %endfor
-                <span>                </span>
+                <span>     </span>
             %endif
         %endfor
         
-        ## Link to clear all filters.
+        ## Link to clear all filters. TODO: this should be the default filter or an empty filter.
         <%
             args = { "deleted" : "False", "tags" : "All" }
-            no_filter = GridColumnFilter("Clear", args)
+            no_filter = GridColumnFilter("Clear Filter", args)
         %>
         <span><a href="${url( no_filter.get_url_args() )}">${no_filter.label}</a></span>
     </form>
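
For reference, the state this changeset persists amounts to one user_preference row per user, per grid class, per kind of state: the grid class name prefixes the preference name and the value is JSON-encoded. A minimal sketch of that round trip, using the standard json module in place of galaxy.util.json; the grid class and values below are hypothetical:

import json

class HistoryListGrid(object):          # stand-in for a Grid subclass
    cur_filter_pref_name = ".filter"
    cur_sort_key_pref_name = ".sort_key"

grid = HistoryListGrid()
cur_filter_dict = {"deleted": "False", "tags": "All"}
sort_key = "-update_time"

# Saving: preference name = grid class name + suffix, value = JSON string.
filter_pref_name = grid.__class__.__name__ + grid.cur_filter_pref_name   # "HistoryListGrid.filter"
filter_pref_value = json.dumps(cur_filter_dict)
sort_pref_name = grid.__class__.__name__ + grid.cur_sort_key_pref_name   # "HistoryListGrid.sort_key"
sort_pref_value = json.dumps(sort_key)

# Loading on a later request: the saved JSON seeds base_filter / base_sort_key
# before any f-<column> or sort arguments from the query string are applied.
base_filter = json.loads(filter_pref_value)
base_sort_key = json.loads(sort_pref_value)
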
details:   http://www.bx.psu.edu/hg/galaxy/rev/6f8b5f1e8ec9
changeset: 2784:6f8b5f1e8ec9
user:      jeremy goecks <jeremy.goecks(a)emory.edu>
date:      Fri Sep 25 17:07:13 2009 -0400
description:
Merge
2 file(s) affected in this change:
lib/galaxy/model/__init__.py
lib/galaxy/model/mapping.py
diffs (2075 lines):
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/jobs/__init__.py	Fri Sep 25 17:07:13 2009 -0400
@@ -357,13 +357,14 @@
         # Restore input / output data lists
         inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
         out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
+        out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
         # These can be passed on the command line if wanted as $userId $userEmail
-        if job.history.user: # check for anonymous user!
-             userId = '%d' % job.history.user.id
-             userEmail = str(job.history.user.email)
+        if job.history and job.history.user: # check for anonymous user!
+            userId = '%d' % job.history.user.id
+            userEmail = str(job.history.user.email)
         else:
-             userId = 'Anonymous'
-             userEmail = 'Anonymous'
+            userId = 'Anonymous'
+            userEmail = 'Anonymous'
         incoming['userId'] = userId
         incoming['userEmail'] = userEmail
         # Build params, done before hook so hook can use
@@ -424,7 +425,7 @@
                         log.debug( "fail(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
                     except ( IOError, OSError ), e:
                         log.error( "fail(): Missing output file in working directory: %s" % e )
-            for dataset_assoc in job.output_datasets:
+            for dataset_assoc in job.output_datasets + job.output_library_datasets:
                 dataset = dataset_assoc.dataset
                 dataset.refresh()
                 dataset.state = dataset.states.ERROR
@@ -444,7 +445,7 @@
     def change_state( self, state, info = False ):
         job = model.Job.get( self.job_id )
         job.refresh()
-        for dataset_assoc in job.output_datasets:
+        for dataset_assoc in job.output_datasets + job.output_library_datasets:
             dataset = dataset_assoc.dataset
             dataset.refresh()
             dataset.state = state
@@ -504,10 +505,10 @@
                         self.fail( "Job %s's output dataset(s) could not be read" % job.id )
                         return
         job_context = ExpressionContext( dict( stdout = stdout, stderr = stderr ) )
-        for dataset_assoc in job.output_datasets:
+        for dataset_assoc in job.output_datasets + job.output_library_datasets:
             context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
             #should this also be checking library associations? - can a library item be added from a history before the job has ended? - lets not allow this to occur
-            for dataset in dataset_assoc.dataset.dataset.history_associations: #need to update all associated output hdas, i.e. history was shared with job running
+            for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations: #need to update all associated output hdas, i.e. history was shared with job running
                 dataset.blurb = 'done'
                 dataset.peek  = 'no peek'
                 dataset.info  = context['stdout'] + context['stderr']
@@ -576,6 +577,7 @@
         # custom post process setup
         inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
         out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
+        out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
         param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows
         param_dict = self.tool.params_from_strings( param_dict, self.app )
         # Check for and move associated_files
@@ -647,11 +649,11 @@
         job = model.Job.get( self.job_id )
         if self.app.config.outputs_to_working_directory:
             self.output_paths = []
-            for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets ]:
+            for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets + job.output_library_datasets ]:
                 false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % data.id ) )
                 self.output_paths.append( DatasetPath( data.id, data.file_name, false_path ) )
         else:
-            self.output_paths = [ DatasetPath( da.dataset.dataset.id, da.dataset.file_name ) for da in job.output_datasets ]
+            self.output_paths = [ DatasetPath( da.dataset.dataset.id, da.dataset.file_name ) for da in job.output_datasets + job.output_library_datasets ]
         return self.output_paths
 
     def get_output_file_id( self, file ):
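
The jobs/__init__.py hunks above all follow one pattern: every place JobWrapper previously walked job.output_datasets now walks the concatenation with job.output_library_datasets, and the userId/userEmail lookup additionally guards against job.history being None, since library-upload jobs hang off a library folder rather than a history. A minimal sketch of that pattern, using hypothetical helper names (all_output_associations, mark_outputs) that are not part of Galaxy:

    def all_output_associations( job ):
        # Both association lists expose .dataset; library uploads only
        # populate the second list, history uploads only the first.
        return job.output_datasets + job.output_library_datasets

    def mark_outputs( job, state ):
        # Mirrors the change_state() hunk above: refresh each output
        # dataset and push the new state down to it.
        for dataset_assoc in all_output_associations( job ):
            dataset = dataset_assoc.dataset
            dataset.refresh()
            dataset.state = state
            dataset.flush()
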
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/model/__init__.py	Fri Sep 25 17:07:13 2009 -0400
@@ -74,6 +74,7 @@
         self.parameters = []
         self.input_datasets = []
         self.output_datasets = []
+        self.output_library_datasets = []
         self.state = Job.states.NEW
         self.info = None
         self.job_runner_name = None
@@ -84,6 +85,8 @@
         self.input_datasets.append( JobToInputDatasetAssociation( name, dataset ) )
     def add_output_dataset( self, name, dataset ):
         self.output_datasets.append( JobToOutputDatasetAssociation( name, dataset ) )
+    def add_output_library_dataset( self, name, dataset ):
+        self.output_library_datasets.append( JobToOutputLibraryDatasetAssociation( name, dataset ) )
     def set_state( self, state ):
         self.state = state
         # For historical reasons state propogates down to datasets
@@ -138,6 +141,11 @@
         self.dataset = dataset
         
 class JobToOutputDatasetAssociation( object ):
+    def __init__( self, name, dataset ):
+        self.name = name
+        self.dataset = dataset
+
+class JobToOutputLibraryDatasetAssociation( object ):
     def __init__( self, name, dataset ):
         self.name = name
         self.dataset = dataset
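
For reference, the model change above gives Job a third association list, output_library_datasets, populated through add_output_library_dataset(), with JobToOutputLibraryDatasetAssociation mirroring its history-bound counterpart. A small usage sketch, assuming the galaxy.model classes above are imported and that hda and ldda are existing HistoryDatasetAssociation and LibraryDatasetDatasetAssociation objects:

    job = Job()
    job.add_output_dataset( 'output0', hda )            # history-bound output
    job.add_output_library_dataset( 'output0', ldda )   # library-bound output
    assert isinstance( job.output_library_datasets[0],
                       JobToOutputLibraryDatasetAssociation )
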
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/model/mapping.py	Fri Sep 25 17:07:13 2009 -0400
@@ -107,7 +107,7 @@
     Column( "id", Integer, primary_key=True ),
     Column( "create_time", DateTime, default=now ),
     Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
-    Column( "state", TrimmedString( 64 ) ),
+    Column( "state", TrimmedString( 64 ), index=True ),
     Column( "deleted", Boolean, index=True, default=False ),
     Column( "purged", Boolean, index=True, default=False ),
     Column( "purgable", Boolean, default=True ),
@@ -307,6 +307,7 @@
     Column( "create_time", DateTime, default=now ),
     Column( "update_time", DateTime, default=now, onupdate=now ),
     Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ),
+    Column( "library_folder_id", Integer, ForeignKey( "library_folder.id" ), index=True ),
     Column( "tool_id", String( 255 ) ),
     Column( "tool_version", TEXT, default="1.0.0" ),
     Column( "state", String( 64 ), index=True ),
@@ -337,6 +338,12 @@
     Column( "id", Integer, primary_key=True ),
     Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
     Column( "dataset_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ),
+    Column( "name", String(255) ) )
+    
+JobToOutputLibraryDatasetAssociation.table = Table( "job_to_output_library_dataset", metadata,
+    Column( "id", Integer, primary_key=True ),
+    Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
+    Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ),
     Column( "name", String(255) ) )
     
 JobExternalOutputMetadata.table = Table( "job_external_output_metadata", metadata,
@@ -914,6 +921,9 @@
 assign_mapper( context, JobToOutputDatasetAssociation, JobToOutputDatasetAssociation.table,
     properties=dict( job=relation( Job ), dataset=relation( HistoryDatasetAssociation, lazy=False ) ) )
 
+assign_mapper( context, JobToOutputLibraryDatasetAssociation, JobToOutputLibraryDatasetAssociation.table,
+    properties=dict( job=relation( Job ), dataset=relation( LibraryDatasetDatasetAssociation, lazy=False ) ) )
+
 assign_mapper( context, JobParameter, JobParameter.table )
 
 assign_mapper( context, JobExternalOutputMetadata, JobExternalOutputMetadata.table,
@@ -924,9 +934,11 @@
 assign_mapper( context, Job, Job.table, 
     properties=dict( galaxy_session=relation( GalaxySession ),
                      history=relation( History ),
+                     library_folder=relation( LibraryFolder ),
                      parameters=relation( JobParameter, lazy=False ),
                      input_datasets=relation( JobToInputDatasetAssociation, lazy=False ),
                      output_datasets=relation( JobToOutputDatasetAssociation, lazy=False ),
+                     output_library_datasets=relation( JobToOutputLibraryDatasetAssociation, lazy=False ),
                      external_output_metadata = relation( JobExternalOutputMetadata, lazy = False ) ) )
 
 assign_mapper( context, Event, Event.table,
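
The mapping change backs the new association class with a job_to_output_library_dataset table (job_id, ldda_id, name), adds job.library_folder_id plus eager library_folder/output_library_datasets relations on Job, and indexes dataset.state. A hedged sketch of how calling code can resolve a job's library outputs once this mapper is in place, using the same assign_mapper-style Job.get() access already seen in the jobs module above:

    def lddas_for_job( job_id ):
        # With the eager relation above, library outputs hang off Job
        # exactly like history outputs do.
        job = Job.get( job_id )
        return [ assoc.dataset for assoc in job.output_library_datasets ]
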
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/model/migrate/versions/0020_library_upload_job.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0020_library_upload_job.py	Fri Sep 25 17:07:13 2009 -0400
@@ -0,0 +1,121 @@
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from sqlalchemy.exceptions import *
+from migrate import *
+from migrate.changeset import *
+import datetime
+now = datetime.datetime.utcnow
+import sys, logging
+# Need our custom types, but don't import anything else from model
+from galaxy.model.custom_types import *
+
+log = logging.getLogger( __name__ )
+log.setLevel(logging.DEBUG)
+handler = logging.StreamHandler( sys.stdout )
+format = "%(name)s %(levelname)s %(asctime)s %(message)s"
+formatter = logging.Formatter( format )
+handler.setFormatter( formatter )
+log.addHandler( handler )
+
+metadata = MetaData( migrate_engine )
+db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, transactional=False ) )
+
+def display_migration_details():
+    print ""
+    print "========================================"
+    print """This script creates a job_to_output_library_dataset table for allowing library
+uploads to run as regular jobs.  To support this, a library_folder_id column is
+added to the job table, and library_folder/output_library_datasets relations
+are added to the Job object.  An index is also added to the dataset.state
+column."""
+    print "========================================"
+
+JobToOutputLibraryDatasetAssociation_table = Table( "job_to_output_library_dataset", metadata,
+    Column( "id", Integer, primary_key=True ),
+    Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
+    Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ),
+    Column( "name", String(255) ) )
+
+def upgrade():
+    display_migration_details()
+    # Load existing tables
+    metadata.reflect()
+    # Create the job_to_output_library_dataset table
+    try:
+        JobToOutputLibraryDatasetAssociation_table.create()
+    except Exception, e:
+        print "Creating job_to_output_library_dataset table failed: %s" % str( e )
+        log.debug( "Creating job_to_output_library_dataset table failed: %s" % str( e ) )
+    # Create the library_folder_id column
+    try:
+        Job_table = Table( "job", metadata, autoload=True )
+    except NoSuchTableError:
+        Job_table = None
+        log.debug( "Failed loading table job" )
+    if Job_table:
+        try:
+            col = Column( "library_folder_id", Integer, index=True )
+            col.create( Job_table )
+            assert col is Job_table.c.library_folder_id
+        except Exception, e:
+            log.debug( "Adding column 'library_folder_id' to job table failed: %s" % ( str( e ) ) )
+        try:
+            LibraryFolder_table = Table( "library_folder", metadata, autoload=True )
+        except NoSuchTableError:
+            LibraryFolder_table = None
+            log.debug( "Failed loading table library_folder" )
+        # Add 1 foreign key constraint to the job table
+        if Job_table and LibraryFolder_table:
+            try:
+                cons = ForeignKeyConstraint( [Job_table.c.library_folder_id],
+                                             [LibraryFolder_table.c.id],
+                                             name='job_library_folder_id_fk' )
+                # Create the constraint
+                cons.create()
+            except Exception, e:
+                log.debug( "Adding foreign key constraint 'job_library_folder_id_fk' to table 'library_folder' failed: %s" % ( str( e ) ) )
+    # Create the ix_dataset_state index
+    try:
+        Dataset_table = Table( "dataset", metadata, autoload=True )
+    except NoSuchTableError:
+        Dataset_table = None
+        log.debug( "Failed loading table dataset" )
+    i = Index( "ix_dataset_state", Dataset_table.c.state )
+    try:
+        i.create()
+    except Exception, e:
+        print str(e)
+        log.debug( "Adding index 'ix_dataset_state' to dataset table failed: %s" % str( e ) )
+
+def downgrade():
+    metadata.reflect()
+    # Drop the library_folder_id column
+    try:
+        Job_table = Table( "job", metadata, autoload=True )
+    except NoSuchTableError:
+        Job_table = None
+        log.debug( "Failed loading table job" )
+    if Job_table:
+        try:
+            col = Job_table.c.library_folder_id
+            col.drop()
+        except Exception, e:
+            log.debug( "Dropping column 'library_folder_id' from job table failed: %s" % ( str( e ) ) )
+    # Drop the job_to_output_library_dataset table
+    try:
+        JobToOutputLibraryDatasetAssociation_table.drop()
+    except Exception, e:
+        print str(e)
+        log.debug( "Dropping job_to_output_library_dataset table failed: %s" % str( e ) )
+    # Drop the ix_dataset_state index
+    try:
+        Dataset_table = Table( "dataset", metadata, autoload=True )
+    except NoSuchTableError:
+        Dataset_table = None
+        log.debug( "Failed loading table dataset" )
+    i = Index( "ix_dataset_state", Dataset_table.c.state )
+    try:
+        i.drop()
+    except Exception, e:
+        print str(e)
+        log.debug( "Dropping index 'ix_dataset_state' from dataset table failed: %s" % str( e ) )
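
Migration 0020 above applies each schema change in its own try/except, so a partially applied upgrade can simply be re-run: it creates job_to_output_library_dataset, adds job.library_folder_id with its foreign key, and adds the ix_dataset_state index, while downgrade() reverses all three. A compact sketch of that defensive-DDL idiom, with a hypothetical try_ddl helper that is not part of the script itself:

    def try_ddl( description, operation ):
        # Attempt one DDL operation; log and continue on failure so the
        # remaining operations still run, as the migration above does.
        try:
            operation()
        except Exception, e:
            log.debug( "%s failed: %s" % ( description, str( e ) ) )

    # e.g. try_ddl( "create job_to_output_library_dataset",
    #               JobToOutputLibraryDatasetAssociation_table.create )
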
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/tools/actions/upload.py
--- a/lib/galaxy/tools/actions/upload.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/tools/actions/upload.py	Fri Sep 25 17:07:13 2009 -0400
@@ -1,126 +1,22 @@
-import os, shutil, urllib, StringIO, re, gzip, tempfile, shutil, zipfile
-from cgi import FieldStorage
+import os
 from __init__ import ToolAction
-from galaxy import datatypes, jobs
-from galaxy.datatypes import sniff
-from galaxy import model, util
-from galaxy.util.json import to_json_string
-
-import sys, traceback
+from galaxy.tools.actions import upload_common
 
 import logging
 log = logging.getLogger( __name__ )
 
 class UploadToolAction( ToolAction ):
-    # Action for uploading files
-    def persist_uploads( self, incoming ):
-        if 'files' in incoming:
-            new_files = []
-            temp_files = []
-            for upload_dataset in incoming['files']:
-                f = upload_dataset['file_data']
-                if isinstance( f, FieldStorage ): 
-                    assert not isinstance( f.file, StringIO.StringIO )
-                    assert f.file.name != '<fdopen>'
-                    local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
-                    f.file.close()
-                    upload_dataset['file_data'] = dict( filename = f.filename,
-                                                        local_filename = local_filename )
-                if upload_dataset['url_paste'].strip() != '':
-                    upload_dataset['url_paste'] = datatypes.sniff.stream_to_file( StringIO.StringIO( upload_dataset['url_paste'] ), prefix="strio_url_paste_" )[0]
-                else:
-                    upload_dataset['url_paste'] = None
-                new_files.append( upload_dataset )
-            incoming['files'] = new_files
-        return incoming
     def execute( self, tool, trans, incoming={}, set_output_hid = True ):
         dataset_upload_inputs = []
         for input_name, input in tool.inputs.iteritems():
             if input.type == "upload_dataset":
                 dataset_upload_inputs.append( input )
         assert dataset_upload_inputs, Exception( "No dataset upload groups were found." )
-        # Get any precreated datasets (when using asynchronous uploads)
-        async_datasets = []
-        self.precreated_datasets = []
-        if incoming.get( 'async_datasets', None ) not in ["None", "", None]:
-            async_datasets = incoming['async_datasets'].split(',')
-        for id in async_datasets:
-            try:
-                data = trans.app.model.HistoryDatasetAssociation.get( int( id ) )
-            except:
-                log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id )
-                continue
-            if trans.user is None and trans.galaxy_session.current_history != data.history:
-               log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)' % ( data.id, trans.galaxy_session.id ) )
-            elif data.history.user != trans.user:
-               log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)' % ( data.id, trans.user.id ) )
-            else:
-                self.precreated_datasets.append( data )
 
-        data_list = []
-
-        incoming = self.persist_uploads( incoming )
-
-        json_file = tempfile.mkstemp()
-        json_file_path = json_file[1]
-        json_file = os.fdopen( json_file[0], 'w' )
-        for dataset_upload_input in dataset_upload_inputs:
-            uploaded_datasets = dataset_upload_input.get_uploaded_datasets( trans, incoming )
-            for uploaded_dataset in uploaded_datasets:
-                data = self.get_precreated_dataset( uploaded_dataset.name )
-                if not data:
-                    data = trans.app.model.HistoryDatasetAssociation( history = trans.history, create_dataset = True )
-                    data.name = uploaded_dataset.name
-                    data.state = data.states.QUEUED
-                    data.extension = uploaded_dataset.file_type
-                    data.dbkey = uploaded_dataset.dbkey
-                    data.flush()
-                    trans.history.add_dataset( data, genome_build = uploaded_dataset.dbkey )
-                    permissions = trans.app.security_agent.history_get_default_permissions( trans.history )
-                    trans.app.security_agent.set_all_dataset_permissions( data.dataset, permissions )
-                else:
-                    data.extension = uploaded_dataset.file_type
-                    data.dbkey = uploaded_dataset.dbkey
-                    data.flush()
-                    trans.history.genome_build = uploaded_dataset.dbkey
-                if uploaded_dataset.type == 'composite':
-                    # we need to init metadata before the job is dispatched
-                    data.init_meta()
-                    for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
-                        setattr( data.metadata, meta_name, meta_value )
-                    data.flush()
-                    json = dict( file_type = uploaded_dataset.file_type,
-                                 dataset_id = data.dataset.id,
-                                 dbkey = uploaded_dataset.dbkey,
-                                 type = uploaded_dataset.type,
-                                 metadata = uploaded_dataset.metadata,
-                                 primary_file = uploaded_dataset.primary_file,
-                                 extra_files_path = data.extra_files_path,
-                                 composite_file_paths = uploaded_dataset.composite_files,
-                                 composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
-                else:
-                    try:
-                        is_binary = uploaded_dataset.datatype.is_binary
-                    except:
-                        is_binary = None
-                    json = dict( file_type = uploaded_dataset.file_type,
-                                 ext = uploaded_dataset.ext,
-                                 name = uploaded_dataset.name,
-                                 dataset_id = data.dataset.id,
-                                 dbkey = uploaded_dataset.dbkey,
-                                 type = uploaded_dataset.type,
-                                 is_binary = is_binary,
-                                 space_to_tab = uploaded_dataset.space_to_tab,
-                                 path = uploaded_dataset.path )
-                json_file.write( to_json_string( json ) + '\n' )
-                data_list.append( data )
-        json_file.close()
-
-        #cleanup unclaimed precreated datasets:
-        for data in self.precreated_datasets:
-            log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( data.id ) )
-            data.state = data.states.ERROR
-            data.info = 'No file contents were available.'
+        precreated_datasets = upload_common.get_precreated_datasets( trans, incoming, trans.app.model.HistoryDatasetAssociation )
+        incoming = upload_common.persist_uploads( incoming )
+        json_file_path, data_list = upload_common.create_paramfile( trans, incoming, precreated_datasets, dataset_upload_inputs )
+        upload_common.cleanup_unused_precreated_datasets( precreated_datasets )
         
         if not data_list:
             try:
@@ -129,38 +25,4 @@
                 pass
             return 'No data was entered in the upload form, please go back and choose data to upload.'
         
-        # Create the job object
-        job = trans.app.model.Job()
-        job.session_id = trans.get_galaxy_session().id
-        job.history_id = trans.history.id
-        job.tool_id = tool.id
-        job.tool_version = tool.version
-        job.state = trans.app.model.Job.states.UPLOAD
-        job.flush()
-        log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
-        trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )
-
-        for name, value in tool.params_to_strings( incoming, trans.app ).iteritems():
-            job.add_parameter( name, value )
-        job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
-        for i, dataset in enumerate( data_list ):
-            job.add_output_dataset( 'output%i' % i, dataset )
-        job.state = trans.app.model.Job.states.NEW
-        trans.app.model.flush()
-        
-        # Queue the job for execution
-        trans.app.job_queue.put( job.id, tool )
-        trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
-        return dict( [ ( i, v ) for i, v in enumerate( data_list ) ] )
-
-    def get_precreated_dataset( self, name ):
-        """
-        Return a dataset matching a name from the list of precreated (via async
-        upload) datasets. If there's more than one upload with the exact same
-        name, we need to pop one (the first) so it isn't chosen next time.
-        """
-        names = [ d.name for d in self.precreated_datasets ]
-        if names.count( name ) > 0:
-            return self.precreated_datasets.pop( names.index( name ) )
-        else:
-            return None
+        return upload_common.create_job( trans, incoming, tool, json_file_path, data_list )
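
The net effect of the upload.py rewrite is that UploadToolAction.execute() becomes a thin driver over the new upload_common module, so the library controllers further down can reuse the same pipeline. Condensed from the new execute() above (error handling omitted), the history-upload flow is roughly:

    # 1. reclaim any datasets precreated by the async upload form
    precreated = upload_common.get_precreated_datasets( trans, incoming,
                                                        trans.app.model.HistoryDatasetAssociation )
    # 2. turn in-memory uploads into persisted files on disk
    incoming = upload_common.persist_uploads( incoming )
    # 3. write one JSON line per upload into the tool's param file
    json_file_path, data_list = upload_common.create_paramfile( trans, incoming,
                                                                precreated, dataset_upload_inputs )
    # 4. mark unclaimed precreated datasets as errored, then queue the job
    upload_common.cleanup_unused_precreated_datasets( precreated )
    output_map = upload_common.create_job( trans, incoming, tool, json_file_path, data_list )
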
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/tools/actions/upload_common.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/tools/actions/upload_common.py	Fri Sep 25 17:07:13 2009 -0400
@@ -0,0 +1,235 @@
+import os, tempfile, StringIO
+from cgi import FieldStorage
+from galaxy import datatypes, util
+from galaxy.datatypes import sniff
+from galaxy.util.json import to_json_string
+
+import logging
+log = logging.getLogger( __name__ )
+
+def persist_uploads( params ):
+    """
+    Turn any uploads in the submitted form to persisted files.
+    """
+    if 'files' in params:
+        new_files = []
+        temp_files = []
+        for upload_dataset in params['files']:
+            f = upload_dataset['file_data']
+            if isinstance( f, FieldStorage ):
+                assert not isinstance( f.file, StringIO.StringIO )
+                assert f.file.name != '<fdopen>'
+                local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
+                f.file.close()
+                upload_dataset['file_data'] = dict( filename = f.filename,
+                                                    local_filename = local_filename )
+            if upload_dataset['url_paste'].strip() != '':
+                upload_dataset['url_paste'] = datatypes.sniff.stream_to_file( StringIO.StringIO( upload_dataset['url_paste'] ), prefix="strio_url_paste_" )[0]
+            else:
+                upload_dataset['url_paste'] = None
+            new_files.append( upload_dataset )
+        params['files'] = new_files
+    return params
+
+def get_precreated_datasets( trans, params, data_obj ):
+    """
+    Get any precreated datasets (when using asynchronous uploads).
+    """
+    rval = []
+    async_datasets = []
+    if params.get( 'async_datasets', None ) not in ["None", "", None]:
+        async_datasets = params['async_datasets'].split(',')
+    user, roles = trans.get_user_and_roles()
+    for id in async_datasets:
+        try:
+            data = data_obj.get( int( id ) )
+        except:
+            log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id )
+            continue
+        if data_obj is trans.app.model.HistoryDatasetAssociation:
+            if user is None and trans.galaxy_session.current_history != data.history:
+                log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)' % ( data.id, trans.galaxy_session.id ) )
+            elif data.history.user != user:
+                log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)' % ( data.id, user.id ) )
+            else:
+                rval.append( data )
+        elif data_obj is trans.app.model.LibraryDatasetDatasetAssociation:
+            if not trans.app.security_agent.can_add_library_item( user, roles, data.library_dataset.folder ):
+                log.error( 'Got a precreated dataset (%s) but this user (%s) is not allowed to write to it' % ( data.id, user.id ) )
+            else:
+                rval.append( data )
+    return rval
+
+def get_precreated_dataset( precreated_datasets, name ):
+    """
+    Return a dataset matching a name from the list of precreated (via async
+    upload) datasets. If there's more than one upload with the exact same
+    name, we need to pop one (the first) so it isn't chosen next time.
+    """
+    names = [ d.name for d in precreated_datasets ]
+    if names.count( name ) > 0:
+        return precreated_datasets.pop( names.index( name ) )
+    else:
+        return None
+
+def cleanup_unused_precreated_datasets( precreated_datasets ):
+    for data in precreated_datasets:
+        log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( data.id ) )
+        data.state = data.states.ERROR
+        data.info = 'No file contents were available.'
+
+def new_history_upload( trans, uploaded_dataset ):
+    hda = trans.app.model.HistoryDatasetAssociation( name = uploaded_dataset.name,
+                                                     extension = uploaded_dataset.file_type,
+                                                     dbkey = uploaded_dataset.dbkey, 
+                                                     history = trans.history,
+                                                     create_dataset = True )
+    hda.state = hda.states.QUEUED
+    hda.flush()
+    trans.history.add_dataset( hda, genome_build = uploaded_dataset.dbkey )
+    permissions = trans.app.security_agent.history_get_default_permissions( trans.history )
+    trans.app.security_agent.set_all_dataset_permissions( hda.dataset, permissions )
+    return hda
+
+def new_library_upload( trans, uploaded_dataset, replace_dataset, folder,
+                        template, template_field_contents, roles, message ):
+    if replace_dataset:
+        ld = replace_dataset
+    else:
+        ld = trans.app.model.LibraryDataset( folder=folder, name=uploaded_dataset.name )
+        ld.flush()
+        trans.app.security_agent.copy_library_permissions( folder, ld )
+    ldda = trans.app.model.LibraryDatasetDatasetAssociation( name = uploaded_dataset.name,
+                                                             extension = uploaded_dataset.file_type,
+                                                             dbkey = uploaded_dataset.dbkey,
+                                                             library_dataset = ld,
+                                                             user = trans.user,
+                                                             create_dataset = True )
+    ldda.state = ldda.states.QUEUED
+    ldda.message = message
+    ldda.flush()
+    # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
+    trans.app.security_agent.copy_library_permissions( ld, ldda )
+    if replace_dataset:
+        # Copy the Dataset level permissions from replace_dataset to the new LibraryDatasetDatasetAssociation.dataset
+        trans.app.security_agent.copy_dataset_permissions( replace_dataset.library_dataset_dataset_association.dataset, ldda.dataset )
+    else:
+        # Copy the current user's DefaultUserPermissions to the new LibraryDatasetDatasetAssociation.dataset
+        trans.app.security_agent.set_all_dataset_permissions( ldda.dataset, trans.app.security_agent.user_get_default_permissions( trans.user ) )
+        folder.add_library_dataset( ld, genome_build=uploaded_dataset.dbkey )
+        folder.flush()
+    ld.library_dataset_dataset_association_id = ldda.id
+    ld.flush()
+    # Handle template included in the upload form, if any
+    if template and template_field_contents:
+        # Since information templates are inherited, the template fields can be displayed on the upload form.
+        # If the user has added field contents, we'll need to create a new form_values and info_association
+        # for the new library_dataset_dataset_association object.
+        # Create a new FormValues object, using the template we previously retrieved
+        form_values = trans.app.model.FormValues( template, template_field_contents )
+        form_values.flush()
+        # Create a new info_association between the current ldda and form_values
+        info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( ldda, template, form_values )
+        info_association.flush()
+    # If roles were selected upon upload, restrict access to the Dataset to those roles
+    if roles:
+        for role in roles:
+            dp = trans.app.model.DatasetPermissions( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action, ldda.dataset, role )
+            dp.flush()
+    return ldda
+
+def create_paramfile( trans, params, precreated_datasets, dataset_upload_inputs,
+                      replace_dataset=None, folder=None, template=None,
+                      template_field_contents=None, roles=None, message=None ):
+    """
+    Create the upload tool's JSON "param" file.
+    """
+    data_list = []
+    json_file = tempfile.mkstemp()
+    json_file_path = json_file[1]
+    json_file = os.fdopen( json_file[0], 'w' )
+    for dataset_upload_input in dataset_upload_inputs:
+        uploaded_datasets = dataset_upload_input.get_uploaded_datasets( trans, params )
+        for uploaded_dataset in uploaded_datasets:
+            data = get_precreated_dataset( precreated_datasets, uploaded_dataset.name )
+            if not data:
+                if folder:
+                    data = new_library_upload( trans, uploaded_dataset, replace_dataset, folder, template, template_field_contents, roles, message )
+                else:
+                    data = new_history_upload( trans, uploaded_dataset )
+            else:
+                data.extension = uploaded_dataset.file_type
+                data.dbkey = uploaded_dataset.dbkey
+                data.flush()
+                if folder:
+                    folder.genome_build = uploaded_dataset.dbkey
+                    folder.flush()
+                else:
+                    trans.history.genome_build = uploaded_dataset.dbkey
+            if uploaded_dataset.type == 'composite':
+                # we need to init metadata before the job is dispatched
+                data.init_meta()
+                for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
+                    setattr( data.metadata, meta_name, meta_value )
+                data.flush()
+                json = dict( file_type = uploaded_dataset.file_type,
+                             dataset_id = data.dataset.id,
+                             dbkey = uploaded_dataset.dbkey,
+                             type = uploaded_dataset.type,
+                             metadata = uploaded_dataset.metadata,
+                             primary_file = uploaded_dataset.primary_file,
+                             extra_files_path = data.extra_files_path,
+                             composite_file_paths = uploaded_dataset.composite_files,
+                             composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
+            else:
+                try:
+                    is_binary = uploaded_dataset.datatype.is_binary
+                except:
+                    is_binary = None
+                json = dict( file_type = uploaded_dataset.file_type,
+                             ext = uploaded_dataset.ext,
+                             name = uploaded_dataset.name,
+                             dataset_id = data.dataset.id,
+                             dbkey = uploaded_dataset.dbkey,
+                             type = uploaded_dataset.type,
+                             is_binary = is_binary,
+                             space_to_tab = uploaded_dataset.space_to_tab,
+                             path = uploaded_dataset.path )
+            json_file.write( to_json_string( json ) + '\n' )
+            data_list.append( data )
+    json_file.close()
+    return ( json_file_path, data_list )
+
+def create_job( trans, params, tool, json_file_path, data_list, folder=None ):
+    """
+    Create the upload job.
+    """
+    job = trans.app.model.Job()
+    job.session_id = trans.get_galaxy_session().id
+    if folder:
+        job.library_folder_id = folder.id
+    else:
+        job.history_id = trans.history.id
+    job.tool_id = tool.id
+    job.tool_version = tool.version
+    job.state = job.states.UPLOAD
+    job.flush()
+    log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
+    trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )
+
+    for name, value in tool.params_to_strings( params, trans.app ).iteritems():
+        job.add_parameter( name, value )
+    job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
+    if folder:
+        for i, dataset in enumerate( data_list ):
+            job.add_output_library_dataset( 'output%i' % i, dataset )
+    else:
+        for i, dataset in enumerate( data_list ):
+            job.add_output_dataset( 'output%i' % i, dataset )
+    job.state = job.states.NEW
+    trans.app.model.flush()
+
+    # Queue the job for execution
+    trans.app.job_queue.put( job.id, tool )
+    trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
+    return dict( [ ( 'output%i' % i, v ) for i, v in enumerate( data_list ) ] )
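
Each call to create_paramfile() above writes one JSON object per uploaded dataset; the upload job later reads this file through its 'paramfile' parameter. For illustration only, a non-composite upload produces a line shaped roughly like the dict below (all values are made up; the real code serializes it with to_json_string as shown in the module above):

    json = dict( file_type = 'auto',
                 ext = 'bed',
                 name = 'regions.bed',
                 dataset_id = 42,
                 dbkey = 'hg18',
                 type = 'file',
                 is_binary = False,
                 space_to_tab = False,
                 path = '/tmp/upload_file_data_example' )
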
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/web/controllers/library.py
--- a/lib/galaxy/web/controllers/library.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/web/controllers/library.py	Fri Sep 25 17:07:13 2009 -0400
@@ -726,17 +726,17 @@
                     template_id = 'None'
                     widgets = []
                 upload_option = params.get( 'upload_option', 'upload_file' )
-                created_ldda_ids = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
-                                                                                                 controller='library', 
-                                                                                                 library_id=library_id,
-                                                                                                 folder_id=folder_id,
-                                                                                                 template_id=template_id,
-                                                                                                 widgets=widgets,
-                                                                                                 replace_dataset=replace_dataset,
-                                                                                                 **kwd )
-                if created_ldda_ids:
-                    ldda_id_list = created_ldda_ids.split( ',' )
-                    total_added = len( ldda_id_list )
+                created_outputs = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
+                                                                                                controller='library', 
+                                                                                                library_id=library_id,
+                                                                                                folder_id=folder_id,
+                                                                                                template_id=template_id,
+                                                                                                widgets=widgets,
+                                                                                                replace_dataset=replace_dataset,
+                                                                                                **kwd )
+                if created_outputs:
+                    ldda_id_list = [ str( v.id ) for v in created_outputs.values() ]
+                    total_added = len( created_outputs.values() )
                     if replace_dataset:
                         msg = "Added %d dataset versions to the library dataset '%s' in the folder '%s'." % ( total_added, replace_dataset.name, folder.name )
                     else:
@@ -760,7 +760,7 @@
                                                                action='browse_library',
                                                                id=library_id,
                                                                default_action=default_action,
-                                                               created_ldda_ids=created_ldda_ids, 
+                                                               created_ldda_ids=",".join( ldda_id_list ), 
                                                                msg=util.sanitize_text( msg ), 
                                                                messagetype='done' ) )
                     
@@ -769,7 +769,7 @@
                     trans.response.send_redirect( web.url_for( controller='library',
                                                                action='browse_library',
                                                                id=library_id,
-                                                               created_ldda_ids=created_ldda_ids, 
+                                                               created_ldda_ids=",".join( ldda_id_list ), 
                                                                msg=util.sanitize_text( msg ), 
                                                                messagetype='error' ) )
         if not id or replace_dataset:
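
The controller changes above (and the matching ones in library_admin.py below) reflect a new contract: upload_dataset() now returns the dict produced by upload_common.create_job(), mapping 'output0', 'output1', ... to the new dataset objects, instead of a comma-separated id string, so the callers derive the id list themselves. A self-contained sketch of that conversion, using placeholder objects rather than real lddas:

    class _FakeLDDA( object ):
        # Stand-in for a LibraryDatasetDatasetAssociation, for illustration only.
        def __init__( self, id ):
            self.id = id

    created_outputs = { 'output0': _FakeLDDA( 7 ), 'output1': _FakeLDDA( 8 ) }
    ldda_id_list = [ str( v.id ) for v in created_outputs.values() ]
    created_ldda_ids = ",".join( ldda_id_list )   # e.g. "7,8"
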
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/web/controllers/library_admin.py
--- a/lib/galaxy/web/controllers/library_admin.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/web/controllers/library_admin.py	Fri Sep 25 17:07:13 2009 -0400
@@ -438,16 +438,16 @@
                 template_id = 'None'
                 widgets = []
             upload_option = params.get( 'upload_option', 'upload_file' )
-            created_ldda_ids = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
-                                                                                             controller='library_admin',
-                                                                                             library_id=library_id,
-                                                                                             folder_id=folder_id,
-                                                                                             template_id=template_id,
-                                                                                             widgets=widgets,
-                                                                                             replace_dataset=replace_dataset,
-                                                                                             **kwd )
-            if created_ldda_ids:
-                total_added = len( created_ldda_ids.split( ',' ) )
+            created_outputs = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
+                                                                                            controller='library_admin',
+                                                                                            library_id=library_id,
+                                                                                            folder_id=folder_id,
+                                                                                            template_id=template_id,
+                                                                                            widgets=widgets,
+                                                                                            replace_dataset=replace_dataset,
+                                                                                            **kwd )
+            if created_outputs:
+                total_added = len( created_outputs.values() )
                 if replace_dataset:
                     msg = "Added %d dataset versions to the library dataset '%s' in the folder '%s'." % ( total_added, replace_dataset.name, folder.name )
                 else:
@@ -464,7 +464,7 @@
             trans.response.send_redirect( web.url_for( controller='library_admin',
                                                        action='browse_library',
                                                        id=library_id,
-                                                       created_ldda_ids=created_ldda_ids,
+                                                       created_ldda_ids=",".join( [ str( v.id ) for v in created_outputs.values() ] ),
                                                        msg=util.sanitize_text( msg ),
                                                        messagetype=messagetype ) )
         elif not id or replace_dataset:
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 lib/galaxy/web/controllers/library_dataset.py
--- a/lib/galaxy/web/controllers/library_dataset.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/lib/galaxy/web/controllers/library_dataset.py	Fri Sep 25 17:07:13 2009 -0400
@@ -3,196 +3,51 @@
 from galaxy import util, jobs
 from galaxy.datatypes import sniff
 from galaxy.security import RBACAgent
+from galaxy.util.json import to_json_string
+from galaxy.tools.actions import upload_common
 
 log = logging.getLogger( __name__ )
 
 class UploadLibraryDataset( BaseController ):
-    def remove_tempfile( self, filename ):
-        try:
-            os.unlink( filename )
-        except:
-            log.exception( 'failure removing temporary file: %s' % filename )
-    def add_file( self, trans, folder, file_obj, name, file_type, dbkey, roles,
-                  info='no info', space_to_tab=False, replace_dataset=None,
-                  template=None, template_field_contents=[], message=None ):
-        data_type = None
-        line_count = 0
-        temp_name, is_multi_byte = sniff.stream_to_file( file_obj )
-        # See if we have an empty file
-        if not os.path.getsize( temp_name ) > 0:
-            raise BadFileException( "you attempted to upload an empty file." )
-        if is_multi_byte:
-            ext = sniff.guess_ext( temp_name, is_multi_byte=True )
-        else:
-            if not data_type:
-                # See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress on the fly.
-                is_gzipped, is_valid = self.check_gzip( temp_name )
-                if is_gzipped and not is_valid:
-                    raise BadFileException( "you attempted to upload an inappropriate file." )
-                elif is_gzipped and is_valid:
-                    # We need to uncompress the temp_name file
-                    CHUNK_SIZE = 2**20 # 1Mb   
-                    fd, uncompressed = tempfile.mkstemp()   
-                    gzipped_file = gzip.GzipFile( temp_name )
-                    while 1:
-                        try:
-                            chunk = gzipped_file.read( CHUNK_SIZE )
-                        except IOError:
-                            os.close( fd )
-                            os.remove( uncompressed )
-                            raise BadFileException( 'problem uncompressing gzipped data.' )
-                        if not chunk:
-                            break
-                        os.write( fd, chunk )
-                    os.close( fd )
-                    gzipped_file.close()
-                    # Replace the gzipped file with the decompressed file
-                    shutil.move( uncompressed, temp_name )
-                    name = name.rstrip( '.gz' )
-                    data_type = 'gzip'
-            ext = ''
-            if not data_type:
-                # See if we have a zip archive
-                is_zipped, is_valid, test_ext = self.check_zip( temp_name )
-                if is_zipped and not is_valid:
-                    raise BadFileException( "you attempted to upload an inappropriate file." )
-                elif is_zipped and is_valid:
-                    # Currently, we force specific tools to handle this case.  We also require the user
-                    # to manually set the incoming file_type
-                    if ( test_ext == 'ab1' or test_ext == 'scf' ) and file_type != 'binseq.zip':
-                        raise BadFileException( "Invalid 'File Format' for archive consisting of binary files - use 'Binseq.zip'." )
-                    elif test_ext == 'txt' and file_type != 'txtseq.zip':
-                        raise BadFileException( "Invalid 'File Format' for archive consisting of text files - use 'Txtseq.zip'." )
-                    if not ( file_type == 'binseq.zip' or file_type == 'txtseq.zip' ):
-                        raise BadFileException( "you must manually set the 'File Format' to either 'Binseq.zip' or 'Txtseq.zip' when uploading zip files." )
-                    data_type = 'zip'
-                    ext = file_type
-            if not data_type:
-                if self.check_binary( temp_name ):
-                    try:
-                        ext = name.split( "." )[1].strip().lower()
-                    except:
-                        ext = ''
-                    try:
-                        is_pdf = open( temp_name ).read( len( '%PDF' ) ) == '%PDF'
-                    except:
-                        is_pdf = False #file failed to open or contents are smaller than pdf header
-                    if is_pdf:
-                        file_type = 'pdf' #allow the upload of PDFs to library via the admin interface.
-                    else:
-                        if not( ext == 'ab1' or ext == 'scf' ):
-                            raise BadFileException( "you attempted to upload an inappropriate file." )
-                        if ext == 'ab1' and file_type != 'ab1':
-                            raise BadFileException( "you must manually set the 'File Format' to 'Ab1' when uploading ab1 files." )
-                        elif ext == 'scf' and file_type != 'scf':
-                            raise BadFileException( "you must manually set the 'File Format' to 'Scf' when uploading scf files." )
-                    data_type = 'binary'
-            if not data_type:
-                # We must have a text file
-                if self.check_html( temp_name ):
-                    raise BadFileException( "you attempted to upload an inappropriate file." )
-            if data_type != 'binary' and data_type != 'zip':
-                if space_to_tab:
-                    line_count = sniff.convert_newlines_sep2tabs( temp_name )
-                elif os.stat( temp_name ).st_size < 262144000: # 250MB
-                    line_count = sniff.convert_newlines( temp_name )
-                else:
-                    if sniff.check_newlines( temp_name ):
-                        line_count = sniff.convert_newlines( temp_name )
-                    else:
-                        line_count = None
-                if file_type == 'auto':
-                    ext = sniff.guess_ext( temp_name, sniff_order=trans.app.datatypes_registry.sniff_order )    
-                else:
-                    ext = file_type
-                data_type = ext
-        if info is None:
-            info = 'uploaded %s file' % data_type
-        if file_type == 'auto':
-            data_type = sniff.guess_ext( temp_name, sniff_order=trans.app.datatypes_registry.sniff_order )    
-        else:
-            data_type = file_type
-        if replace_dataset:
-            # The replace_dataset param ( when not None ) refers to a LibraryDataset that is being replaced with a new version.
-            library_dataset = replace_dataset
-        else:
-            # If replace_dataset is None, the Library level permissions will be taken from the folder and applied to the new 
-            # LibraryDataset, and the current user's DefaultUserPermissions will be applied to the associated Dataset.
-            library_dataset = trans.app.model.LibraryDataset( folder=folder, name=name, info=info )
-            library_dataset.flush()
-            trans.app.security_agent.copy_library_permissions( folder, library_dataset )
-        ldda = trans.app.model.LibraryDatasetDatasetAssociation( name=name, 
-                                                                 info=info, 
-                                                                 extension=data_type, 
-                                                                 dbkey=dbkey, 
-                                                                 library_dataset=library_dataset,
-                                                                 user=trans.get_user(),
-                                                                 create_dataset=True )
-        ldda.message = message
-        ldda.flush()
-        # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
-        trans.app.security_agent.copy_library_permissions( library_dataset, ldda )
-        if replace_dataset:
-            # Copy the Dataset level permissions from replace_dataset to the new LibraryDatasetDatasetAssociation.dataset
-            trans.app.security_agent.copy_dataset_permissions( replace_dataset.library_dataset_dataset_association.dataset, ldda.dataset )
-        else:
-            # Copy the current user's DefaultUserPermissions to the new LibraryDatasetDatasetAssociation.dataset
-            trans.app.security_agent.set_all_dataset_permissions( ldda.dataset, trans.app.security_agent.user_get_default_permissions( trans.get_user() ) )
-            folder.add_library_dataset( library_dataset, genome_build=dbkey )
-            folder.flush()
-        library_dataset.library_dataset_dataset_association_id = ldda.id
-        library_dataset.flush()
-        # Handle template included in the upload form, if any
-        if template and template_field_contents:
-            # Since information templates are inherited, the template fields can be displayed on the upload form.
-            # If the user has added field contents, we'll need to create a new form_values and info_association
-            # for the new library_dataset_dataset_association object.
-            # Create a new FormValues object, using the template we previously retrieved
-            form_values = trans.app.model.FormValues( template, template_field_contents )
-            form_values.flush()
-            # Create a new info_association between the current ldda and form_values
-            info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( ldda, template, form_values )
-            info_association.flush()
-        # If roles were selected upon upload, restrict access to the Dataset to those roles
-        if roles:
-            for role in roles:
-                dp = trans.app.model.DatasetPermissions( RBACAgent.permitted_actions.DATASET_ACCESS.action, ldda.dataset, role )
-                dp.flush()
-        shutil.move( temp_name, ldda.dataset.file_name )
-        ldda.state = ldda.states.OK
-        ldda.init_meta()
-        if line_count:
-            try:
-                if is_multi_byte:
-                    ldda.set_multi_byte_peek( line_count=line_count )
-                else:
-                    ldda.set_peek( line_count=line_count )
-            except:
-                if is_multi_byte:
-                    ldda.set_multi_byte_peek()
-                else:
-                    ldda.set_peek()
-        else:
-            if is_multi_byte:
-                ldda.set_multi_byte_peek()
-            else:
-                ldda.set_peek()
-        ldda.set_size()
-        if ldda.missing_meta():
-            ldda.datatype.set_meta( ldda )
-        ldda.flush()
-        return ldda
+    @web.json
+    def library_item_updates( self, trans, ids=None, states=None ):
+        # Avoid caching
+        trans.response.headers['Pragma'] = 'no-cache'
+        trans.response.headers['Expires'] = '0'
+        # Create new HTML for any that have changed
+        rval = {}
+        if ids is not None and states is not None:
+            ids = map( int, ids.split( "," ) )
+            states = states.split( "," )
+            for id, state in zip( ids, states ):
+                data = self.app.model.LibraryDatasetDatasetAssociation.get( id )
+                if data.state != state:
+                    job_ldda = data
+                    while job_ldda.copied_from_library_dataset_dataset_association:
+                        job_ldda = job_ldda.copied_from_library_dataset_dataset_association
+                    force_history_refresh = False
+                    rval[id] = {
+                        "state": data.state,
+                        "html": unicode( trans.fill_template( "library/library_item_info.mako", ldda=data ), 'utf-8' )
+                        #"force_history_refresh": force_history_refresh
+                    }
+        return rval
     @web.expose
     def upload_dataset( self, trans, controller, library_id, folder_id, replace_dataset=None, **kwd ):
-        # This method is called from both the admin and library controllers.  The replace_dataset param ( when
-        # not None ) refers to a LibraryDataset that is being replaced with a new version.
-        params = util.Params( kwd )
+        # Set up the traditional tool state/params
+        tool_id = 'upload1'
+        tool = trans.app.toolbox.tools_by_id[ tool_id ]
+        state = tool.new_state( trans )
+        errors = tool.update_state( trans, tool.inputs_by_page[0], state.inputs, kwd, changed_dependencies={} )
+        tool_params = state.inputs
+        dataset_upload_inputs = []
+        for input_name, input in tool.inputs.iteritems():
+            if input.type == "upload_dataset":
+                dataset_upload_inputs.append( input )
+        # Library-specific params
+        params = util.Params( kwd ) # is this filetoolparam safe?
         msg = util.restore_text( params.get( 'msg', ''  ) )
         messagetype = params.get( 'messagetype', 'done' )
-        dbkey = params.get( 'dbkey', '?' )
-        file_type = params.get( 'file_type', 'auto' )
-        data_file = params.get( 'files_0|file_data', '' )
-        url_paste = params.get( 'files_0|url_paste', '' )
         server_dir = util.restore_text( params.get( 'server_dir', '' ) )
         if replace_dataset not in [ None, 'None' ]:
             replace_id = replace_dataset.id
@@ -217,24 +72,43 @@
                     template_field_contents.append( field_value )
         else:
             template = None
-        if upload_option == 'upload_file' and data_file == '' and url_paste == '':
-                msg = 'Select a file, enter a URL or enter text'
-                err_redirect = True
-        elif upload_option == 'upload_directory':
+        if upload_option == 'upload_directory':
             if server_dir in [ None, 'None', '' ]:
                 err_redirect = True
-            # See if our request is from the Admin view or the Libraries view
-            if trans.request.browser_url.find( 'admin' ) >= 0:
+            if controller == 'library_admin':
                 import_dir = trans.app.config.library_import_dir
                 import_dir_desc = 'library_import_dir'
+                full_dir = os.path.join( import_dir, server_dir )
             else:
                 import_dir = trans.app.config.user_library_import_dir
                 import_dir_desc = 'user_library_import_dir'
+                if server_dir == trans.user.email:
+                    full_dir = os.path.join( import_dir, server_dir )
+                else:
+                    full_dir = os.path.join( import_dir, trans.user.email, server_dir )
             if import_dir:
                 msg = 'Select a directory'
             else:
                 msg = '"%s" is not defined in the Galaxy configuration file' % import_dir_desc
+        roles = []
+        for role_id in util.listify( params.get( 'roles', [] ) ):
+            roles.append( trans.app.model.Role.get( role_id ) )
+        # Proceed with (mostly) regular upload processing
+        precreated_datasets = upload_common.get_precreated_datasets( trans, tool_params, trans.app.model.HistoryDatasetAssociation )
+        if upload_option == 'upload_file':
+            tool_params = upload_common.persist_uploads( tool_params )
+            json_file_path, data_list = upload_common.create_paramfile( trans, tool_params, precreated_datasets, dataset_upload_inputs, replace_dataset, folder, template, template_field_contents, roles, message )
+        elif upload_option == 'upload_directory':
+            json_file_path, data_list = self.create_server_dir_paramfile( trans, params, full_dir, import_dir_desc, folder, template, template_field_contents, roles, message, err_redirect, msg )
+        upload_common.cleanup_unused_precreated_datasets( precreated_datasets )
+        if upload_option == 'upload_file' and not data_list:
+            msg = 'Select a file, enter a URL or enter text'
+            err_redirect = True
         if err_redirect:
+            try:
+                os.remove( json_file_path )
+            except:
+                pass
             trans.response.send_redirect( web.url_for( controller=controller,
                                                        action='library_dataset_dataset_association',
                                                        library_id=library_id,
@@ -243,226 +117,49 @@
                                                        upload_option=upload_option,
                                                        msg=util.sanitize_text( msg ),
                                                        messagetype='error' ) )
-        space_to_tab = params.get( 'files_0|space_to_tab', False )
-        if space_to_tab and space_to_tab not in [ "None", None ]:
-            space_to_tab = True
-        roles = []
-        for role_id in util.listify( params.get( 'roles', [] ) ):
-            roles.append( trans.app.model.Role.get( role_id ) )
+        return upload_common.create_job( trans, tool_params, tool, json_file_path, data_list, folder=folder )
+    def create_server_dir_paramfile( self, trans, params, full_dir, import_dir_desc, folder, template,
+                                     template_field_contents, roles, message, err_redirect, msg ):
+        """
+        Create JSON param file for the upload tool when using the server_dir upload.
+        """
+        files = []
+        try:
+            for entry in os.listdir( full_dir ):
+                # Only import regular files
+                if os.path.isfile( os.path.join( full_dir, entry ) ):
+                    files.append( entry )
+        except Exception, e:
+            msg = "Unable to get file list for configured %s, error: %s" % ( import_dir_desc, str( e ) )
+            err_redirect = True
+            return ( None, None )
+        if not files:
+            msg = "The directory '%s' contains no valid files" % full_dir
+            err_redirect = True
+            return ( None, None )
         data_list = []
-        created_ldda_ids = ''
-        if 'filename' in dir( data_file ):
-            file_name = data_file.filename
-            file_name = file_name.split( '\\' )[-1]
-            file_name = file_name.split( '/' )[-1]
-            try:
-                created_ldda = self.add_file( trans,
-                                              folder,
-                                              data_file.file,
-                                              file_name,
-                                              file_type,
-                                              dbkey,
-                                              roles,
-                                              info="uploaded file",
-                                              space_to_tab=space_to_tab,
-                                              replace_dataset=replace_dataset,
-                                              template=template,
-                                              template_field_contents=template_field_contents,
-                                              message=message )
-                created_ldda_ids = str( created_ldda.id )
-            except Exception, e:
-                log.exception( 'exception in upload_dataset using file_name %s: %s' % ( str( file_name ), str( e ) ) )
-                return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
-        elif url_paste not in [ None, "" ]:
-            if url_paste.lower().find( 'http://' ) >= 0 or url_paste.lower().find( 'ftp://' ) >= 0:
-                url_paste = url_paste.replace( '\r', '' ).split( '\n' )
-                # If we are setting the name from the line, it needs to be the line that creates that dataset
-                name_set_from_line = False
-                for line in url_paste:
-                    line = line.rstrip( '\r\n' )
-                    if line:
-                        if not line or name_set_from_line:
-                            name_set_from_line = True
-                        try:
-                            created_ldda = self.add_file( trans,
-                                                          folder,
-                                                          urllib.urlopen( line ),
-                                                          line,
-                                                          file_type,
-                                                          dbkey,
-                                                          roles,
-                                                          info="uploaded url",
-                                                          space_to_tab=space_to_tab,
-                                                          replace_dataset=replace_dataset,
-                                                          template=template,
-                                                          template_field_contents=template_field_contents,
-                                                          message=message )
-                            created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
-                        except Exception, e:
-                            log.exception( 'exception in upload_dataset using url_paste %s' % str( e ) )
-                            return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
-            else:
-                is_valid = False
-                for line in url_paste:
-                    line = line.rstrip( '\r\n' )
-                    if line:
-                        is_valid = True
-                        break
-                if is_valid:
-                    try:
-                        created_ldda = self.add_file( trans,
-                                                      folder,
-                                                      StringIO.StringIO( url_paste ),
-                                                      'Pasted Entry',
-                                                      file_type,
-                                                      dbkey,
-                                                      roles,
-                                                      info="pasted entry",
-                                                      space_to_tab=space_to_tab,
-                                                      replace_dataset=replace_dataset,
-                                                      template=template,
-                                                      template_field_contents=template_field_contents,
-                                                      message=message )
-                        created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
-                    except Exception, e:
-                        log.exception( 'exception in add_file using StringIO.StringIO( url_paste ) %s' % str( e ) )
-                        return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
-        elif server_dir not in [ None, "", "None" ]:
-            # See if our request is from the Admin view or the Libraries view
-            if trans.request.browser_url.find( 'admin' ) >= 0:
-                import_dir = trans.app.config.library_import_dir
-                import_dir_desc = 'library_import_dir'
-                full_dir = os.path.join( import_dir, server_dir )
-            else:
-                imrport_dir = trans.app.config.user_library_import_dir
-                import_dir_desc = 'user_library_import_dir'
-                # From the Libraries view, users are restricted to the directory named the same as
-                # their email within the configured user_library_import_dir.  If this directory contains
-                # sub-directories, server_dir will be the name of the selected sub-directory.  Otherwise
-                # server_dir will be the user's email address.
-                if server_dir == trans.user.email:
-                    full_dir = os.path.join( import_dir, server_dir )
-                else:
-                    full_dir = os.path.join( import_dir, trans.user.email, server_dir )
-            files = []
-            try:
-                for entry in os.listdir( full_dir ):
-                    # Only import regular files
-                    if os.path.isfile( os.path.join( full_dir, entry ) ):
-                        files.append( entry )
-            except Exception, e:
-                msg = "Unable to get file list for configured %s, error: %s" % ( import_dir_desc, str( e ) )
-                return self.upload_empty( trans, controller, library_id, folder_id, "Error:", msg )
-            if not files:
-                msg = "The directory '%s' contains no valid files" % full_dir
-                return self.upload_empty( trans, controller, library_id, folder_id, "Error:", msg )
-            for file in files:
-                full_file = os.path.join( full_dir, file )
-                if not os.path.isfile( full_file ):
-                    continue
-                try:
-                    created_ldda = self.add_file( trans,
-                                                  folder,
-                                                  open( full_file, 'rb' ),
-                                                  file,
-                                                  file_type,
-                                                  dbkey,
-                                                  roles,
-                                                  info="imported file",
-                                                  space_to_tab=space_to_tab,
-                                                  replace_dataset=replace_dataset,
-                                                  template=template,
-                                                  template_field_contents=template_field_contents,
-                                                  message=message )
-                    created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
-                except Exception, e:
-                    log.exception( 'exception in add_file using server_dir %s' % str( e ) )
-                    return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
-        if created_ldda_ids:
-            created_ldda_ids = created_ldda_ids.lstrip( ',' )
-            return created_ldda_ids
-        else:
-            return ''
-    def check_gzip( self, temp_name ):
-        temp = open( temp_name, "U" )
-        magic_check = temp.read( 2 )
-        temp.close()
-        if magic_check != util.gzip_magic:
-            return ( False, False )
-        CHUNK_SIZE = 2**15 # 32Kb
-        gzipped_file = gzip.GzipFile( temp_name )
-        chunk = gzipped_file.read( CHUNK_SIZE )
-        gzipped_file.close()
-        if self.check_html( temp_name, chunk=chunk ) or self.check_binary( temp_name, chunk=chunk ):
-            return( True, False )
-        return ( True, True )
-    def check_zip( self, temp_name ):
-        if not zipfile.is_zipfile( temp_name ):
-            return ( False, False, None )
-        zip_file = zipfile.ZipFile( temp_name, "r" )
-        # Make sure the archive consists of valid files.  The current rules are:
-        # 1. Archives can only include .ab1, .scf or .txt files
-        # 2. All file file_types within an archive must be the same
-        name = zip_file.namelist()[0]
-        test_ext = name.split( "." )[1].strip().lower()
-        if not ( test_ext == 'scf' or test_ext == 'ab1' or test_ext == 'txt' ):
-            return ( True, False, test_ext )
-        for name in zip_file.namelist():
-            ext = name.split( "." )[1].strip().lower()
-            if ext != test_ext:
-                return ( True, False, test_ext )
-        return ( True, True, test_ext )
-    def check_html( self, temp_name, chunk=None ):
-        if chunk is None:
-            temp = open(temp_name, "U")
-        else:
-            temp = chunk
-        regexp1 = re.compile( "<A\s+[^>]*HREF[^>]+>", re.I )
-        regexp2 = re.compile( "<IFRAME[^>]*>", re.I )
-        regexp3 = re.compile( "<FRAMESET[^>]*>", re.I )
-        regexp4 = re.compile( "<META[^>]*>", re.I )
-        lineno = 0
-        for line in temp:
-            lineno += 1
-            matches = regexp1.search( line ) or regexp2.search( line ) or regexp3.search( line ) or regexp4.search( line )
-            if matches:
-                if chunk is None:
-                    temp.close()
-                return True
-            if lineno > 100:
-                break
-        if chunk is None:
-            temp.close()
-        return False
-    def check_binary( self, temp_name, chunk=None ):
-        if chunk is None:
-            temp = open( temp_name, "U" )
-        else:
-            temp = chunk
-        lineno = 0
-        for line in temp:
-            lineno += 1
-            line = line.strip()
-            if line:
-                if util.is_multi_byte( line ):
-                    return False
-                for char in line:
-                    if ord( char ) > 128:
-                        if chunk is None:
-                            temp.close()
-                        return True
-            if lineno > 10:
-                break
-        if chunk is None:
-            temp.close()
-        return False
-    def upload_empty( self, trans, controller, library_id, folder_id, err_code, err_msg ):
-        msg = err_code + err_msg
-        return trans.response.send_redirect( web.url_for( controller=controller,
-                                                          action='library_dataset_dataset_association',
-                                                          library_id=library_id,
-                                                          folder_id=folder_id,
-                                                          msg=util.sanitize_text( msg ),
-                                                          messagetype='error' ) )
-class BadFileException( Exception ):
-    pass
+        json_file = tempfile.mkstemp()
+        json_file_path = json_file[1]
+        json_file = os.fdopen( json_file[0], 'w' )
+        for file in files:
+            full_file = os.path.join( full_dir, file )
+            if not os.path.isfile( full_file ):
+                continue
+            uploaded_dataset = util.bunch.Bunch()
+            uploaded_dataset.name = file
+            uploaded_dataset.file_type = params.file_type
+            uploaded_dataset.dbkey = params.dbkey
+            data = upload_common.new_library_upload( trans, uploaded_dataset, None, folder, template, template_field_contents, roles, message )
+            json = dict( file_type = uploaded_dataset.file_type,
+                         ext = None,
+                         name = uploaded_dataset.name,
+                         dataset_id = data.dataset.id,
+                         dbkey = uploaded_dataset.dbkey,
+                         type = 'server_dir',
+                         is_binary = None,
+                         space_to_tab = params.space_to_tab,
+                         path = full_file )
+            json_file.write( to_json_string( json ) + '\n' )
+            data_list.append( data )
+        json_file.close()
+        return ( json_file_path, data_list )
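
With this refactoring the library controllers no longer call add_file() themselves; each incoming file becomes one JSON record in a temporary parameter file that the queued upload1 job reads later. A minimal sketch of what one server_dir record looks like, using the field names from the dict above; the values, and the standard json module standing in for Galaxy's to_json_string, are illustrative assumptions only:

    # Sketch only: one JSON record per file, one record per line, mirroring
    # create_server_dir_paramfile() above.  All values are placeholders.
    import json, os, tempfile

    record = dict( file_type = 'auto',            # user-selected type; 'auto' lets the tool sniff it
                   ext = None,                    # filled in by the upload tool
                   name = '2.fasta',              # file name found in the server directory
                   dataset_id = 42,               # id of the dataset created by new_library_upload()
                   dbkey = 'hg18',
                   type = 'server_dir',           # makes upload.py copy instead of move the source
                   is_binary = None,
                   space_to_tab = False,
                   path = '/galaxy/import/run1/2.fasta' )

    fd, json_file_path = tempfile.mkstemp()
    json_file = os.fdopen( fd, 'w' )
    json_file.write( json.dumps( record ) + '\n' )   # stands in for to_json_string()
    json_file.close()
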
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 static/june_2007_style/blue/library.css
--- a/static/june_2007_style/blue/library.css	Fri Sep 25 17:06:45 2009 -0400
+++ b/static/june_2007_style/blue/library.css	Fri Sep 25 17:07:13 2009 -0400
@@ -1,7 +1,7 @@
 .libraryRow{background-color:#ebd9b2;}
 .datasetHighlighted{background-color:#C1C9E5;}
 .libraryItemDeleted-True{font-style:italic;}
-div.historyItemBody{padding:4px 4px 2px 4px;}
+div.libraryItemBody{padding:4px 4px 2px 4px;}
 li.folderRow,li.datasetRow{border-top:solid 1px #ddd;}
 li.folderRow:hover,li.datasetRow:hover{background-color:#C1C9E5;}
 img.expanderIcon{padding-right:4px;}
@@ -15,3 +15,6 @@
 span.expandLink{width:16px;height:16px;display:inline-block;vertical-align:middle;background:url(../images/silk/resultset_next.png);}
 .folderRow.expanded span.expandLink{background:url(../images/silk/resultset_bottom.png);}
 .folderRow span.rowIcon{width:16px;height:16px;display:inline-block;vertical-align:middle;background:url(../images/silk/folder.png);}
+.libraryItem-error{margin-right:2px;padding:0 2px 0 2px;border:1px solid #AA6666;background:#FFCCCC;}
+.libraryItem-queued{margin-right:2px;padding:0 2px 0 2px;border:1px solid #888888;background:#EEEEEE;}
+.libraryItem-running{margin-right:2px;padding:0 2px 0 2px;border:1px solid #AAAA66;background:#FFFFCC;}
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 static/june_2007_style/library.css.tmpl
--- a/static/june_2007_style/library.css.tmpl	Fri Sep 25 17:06:45 2009 -0400
+++ b/static/june_2007_style/library.css.tmpl	Fri Sep 25 17:07:13 2009 -0400
@@ -10,7 +10,7 @@
     font-style: italic;
 }
 
-div.historyItemBody {
+div.libraryItemBody {
     padding: 4px 4px 2px 4px;
 }
 
@@ -88,3 +88,24 @@
     background: url(../images/silk/folder.png);
 }
 
+.libraryItem-error {
+    margin-right: 2px;
+    padding: 0 2px 0 2px;
+    border: 1px solid $history_error_border;
+    background: $history_error_bg;
+}
+
+.libraryItem-queued {
+    margin-right: 2px;
+    padding: 0 2px 0 2px;
+    border: 1px solid $history_queued_border;
+    background: $history_queued_bg;
+}
+
+.libraryItem-running {
+    margin-right: 2px;
+    padding: 0 2px 0 2px;
+    border: 1px solid $history_running_border;
+    background: $history_running_bg;
+}
+
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/admin/library/browse_library.mako
--- a/templates/admin/library/browse_library.mako	Fri Sep 25 17:06:45 2009 -0400
+++ b/templates/admin/library/browse_library.mako	Fri Sep 25 17:07:13 2009 -0400
@@ -1,5 +1,6 @@
 <%inherit file="/base.mako"/>
 <%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/library/library_item_info.mako" import="render_library_item_info" />
 <%
     from time import strftime
     from galaxy import util
@@ -10,6 +11,8 @@
     <link href="${h.url_for('/static/style/base.css')}" rel="stylesheet" type="text/css" />
     <link href="${h.url_for('/static/style/library.css')}" rel="stylesheet" type="text/css" />
 </%def>
+
+<% tracked_datasets = {} %>
 
 <script type="text/javascript">
     $( document ).ready( function () {
@@ -35,29 +38,6 @@
                 $(this).children().find("img.rowIcon").each( function() { this.src = icon_open; });
             }
         });
-        // Hide all dataset bodies
-        $("div.historyItemBody").hide();
-        // Handle the dataset body hide/show link.
-        $("div.historyItemWrapper").each( function() {
-            var id = this.id;
-            var li = $(this).parent();
-            var body = $(this).children( "div.historyItemBody" );
-            var peek = body.find( "pre.peek" )
-            $(this).children( ".historyItemTitleBar" ).find( ".historyItemTitle" ).wrap( "<a href='#'></a>" ).click( function() {
-                if ( body.is(":visible") ) {
-                    if ( $.browser.mozilla ) { peek.css( "overflow", "hidden" ) }
-                    body.slideUp( "fast" );
-                    li.removeClass( "datasetHighlighted" );
-                } 
-                else {
-                    body.slideDown( "fast", function() { 
-                        if ( $.browser.mozilla ) { peek.css( "overflow", "auto" ); } 
-                    });
-                    li.addClass( "datasetHighlighted" );
-                }
-                return false;
-            });
-        });
     });
     function checkForm() {
         if ( $("select#action_on_datasets_select option:selected").text() == "delete" ) {
@@ -68,6 +48,54 @@
             }
         }
     }
+    // Looks for changes in dataset state using an async request. Keeps
+    // calling itself (via setTimeout) until all datasets are in a terminal
+    // state.
+    var updater = function ( tracked_datasets ) {
+        // Check if there are any items left to track
+        var empty = true;
+        for ( i in tracked_datasets ) {
+            empty = false;
+            break;
+        }
+        if ( ! empty ) {
+            setTimeout( function() { updater_callback( tracked_datasets ) }, 3000 );
+        }
+    };
+    var updater_callback = function ( tracked_datasets ) {
+        // Build request data
+        var ids = []
+        var states = []
+        $.each( tracked_datasets, function ( id, state ) {
+            ids.push( id );
+            states.push( state );
+        });
+        // Make ajax call
+        $.ajax( {
+            type: "POST",
+            url: "${h.url_for( controller='library_dataset', action='library_item_updates' )}",
+            dataType: "json",
+            data: { ids: ids.join( "," ), states: states.join( "," ) },
+            success : function ( data ) {
+                $.each( data, function( id, val ) {
+                    // Replace HTML
+                    var cell = $("#libraryItem-" + id).find("#libraryItemInfo");
+                    cell.html( val.html );
+                    // If new state was terminal, stop tracking
+                    if (( val.state == "ok") || ( val.state == "error") || ( val.state == "empty") || ( val.state == "deleted" ) || ( val.state == "discarded" )) {
+                        delete tracked_datasets[ parseInt(id) ];
+                    } else {
+                        tracked_datasets[ parseInt(id) ] = val.state;
+                    }
+                });
+                updater( tracked_datasets ); 
+            },
+            error: function() {
+                // Just retry, like the old method, should try to be smarter
+                updater( tracked_datasets );
+            }
+        });
+    };
 </script>
 
 <%def name="render_dataset( ldda, library_dataset, selected, library, folder, deleted, show_deleted )">
@@ -84,11 +112,13 @@
             current_version = True
         else:
             current_version = False
+        if current_version and ldda.state not in ( 'ok', 'error', 'empty', 'deleted', 'discarded' ):
+            tracked_datasets[ldda.id] = ldda.state
     %>
     %if current_version:
-        <div class="historyItemWrapper historyItem historyItem-${ldda.state}" id="libraryItem-${ldda.id}">
+        <div class="libraryItemWrapper libraryItem" id="libraryItem-${ldda.id}">
             ## Header row for library items (name, state, action buttons)
-            <div class="historyItemTitleBar"> 
+            <div class="libraryItemTitleBar"> 
                 <table cellspacing="0" cellpadding="0" border="0" width="100%">
                     <tr>
                         <td width="*">
@@ -119,7 +149,7 @@
                                 </div>
                             %endif
                         </td>
-                        <td width="300">${ldda.message}</td>
+                        <td width="300" id="libraryItemInfo">${render_library_item_info( ldda )}</td>
                         <td width="150">${uploaded_by}</td>
                         <td width="60">${ldda.create_time.strftime( "%Y-%m-%d" )}</td>
                     </tr>
@@ -287,3 +317,11 @@
         </p>
     %endif
 </form>
+
+%if tracked_datasets:
+    <script type="text/javascript">
+        // Updater
+        updater({${ ",".join( [ '"%s" : "%s"' % ( k, v ) for k, v in tracked_datasets.iteritems() ] ) }});
+    </script>
+    <!-- running: do not change this comment, used by TwillTestCase.library_wait -->
+%endif
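
The updater script above polls the library_dataset controller's library_item_updates action and swaps each row's info cell for the HTML it gets back. Only the tail of that action (the returned state/html dict) appears in this changeset; the sketch below fills in the surrounding lookup loop as an assumption, with decorators and model access following the patterns used elsewhere in the controllers, for illustration only:

    # Hedged sketch of the polled action; the returned structure matches the
    # fragment near the top of this changeset, everything else is assumed.
    @web.expose
    @web.json   # assumed: framework serializes the returned dict to JSON
    def library_item_updates( self, trans, ids=None, states=None ):
        # ids/states arrive as comma-separated strings built by the updater JS
        rval = {}
        if ids is not None and states is not None:
            ids = map( int, ids.split( "," ) )
            states = states.split( "," )
            for id, state in zip( ids, states ):
                data = trans.app.model.LibraryDatasetDatasetAssociation.get( id )
                if data.state != state:
                    # State changed since the page was rendered: send fresh info HTML
                    rval[ id ] = {
                        "state": data.state,
                        "html": unicode( trans.fill_template( "library/library_item_info.mako", ldda=data ), 'utf-8' )
                    }
        return rval
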
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/admin/library/new_library.mako
--- a/templates/admin/library/new_library.mako	Fri Sep 25 17:06:45 2009 -0400
+++ b/templates/admin/library/new_library.mako	Fri Sep 25 17:07:13 2009 -0400
@@ -29,7 +29,9 @@
                 </div>
               <div style="clear: both"></div>
             </div>
-            <input type="submit" name="create_library_button" value="Create"/>
+            <div class="form-row">
+                <input type="submit" name="create_library_button" value="Create"/>
+            </div>
         </form>
     </div>
 </div>
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/base_panels.mako
--- a/templates/base_panels.mako	Fri Sep 25 17:06:45 2009 -0400
+++ b/templates/base_panels.mako	Fri Sep 25 17:07:13 2009 -0400
@@ -167,8 +167,9 @@
         <div class="submenu">
         <ul>            
             <li><a href="${app.config.get( "bugs_email", "mailto:galaxy-bugs@bx.psu.edu"  )}">Email comments, bug reports, or suggestions</a></li>
-            <li><a target="_blank" href="${app.config.get( "wiki_url", "http://g2.trac.bx.psu.edu/" )}">Galaxy Wiki</a></li>             
-            <li><a target="_blank" href="${app.config.get( "screencasts_url", "http://g2.trac.bx.psu.edu/wiki/ScreenCasts" )}">Video tutorials (screencasts)</a></li>
+            <li><a target="_blank" href="${app.config.get( "wiki_url", "http://bitbucket.org/galaxy/galaxy-central/wiki" )}">Galaxy Wiki</a></li>             
+            <li><a target="_blank" href="${app.config.get( "screencasts_url", "http://galaxycast.org" )}">Video tutorials (screencasts)</a></li>
+            <li><a target="_blank" href="${app.config.get( "citation_url", "http://bitbucket.org/galaxy/galaxy-central/wiki/Citations" )}">How to Cite Galaxy</a></li>
         </ul>
         </div>
     </td>
@@ -282,7 +283,7 @@
     </head>
     
     <body scroll="no" class="${self.body_class}">
-	<div id="everything" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; min-width: 600px;">
+	<div id="everything" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; min-width: 960px;">
         ## Background displays first
         <div id="background"></div>
         ## Layer iframes over backgrounds
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/library/browse_library.mako
--- a/templates/library/browse_library.mako	Fri Sep 25 17:06:45 2009 -0400
+++ b/templates/library/browse_library.mako	Fri Sep 25 17:07:13 2009 -0400
@@ -1,5 +1,6 @@
 <%inherit file="/base.mako"/>
 <%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/library/library_item_info.mako" import="render_library_item_info" />
 <% 
     from galaxy import util
     from galaxy.web.controllers.library import active_folders
@@ -12,6 +13,8 @@
     <link href="${h.url_for('/static/style/base.css')}" rel="stylesheet" type="text/css" />
     <link href="${h.url_for('/static/style/library.css')}" rel="stylesheet" type="text/css" />
 </%def>
+
+<% tracked_datasets = {} %>
 
 <%
 class RowCounter( object ):
@@ -77,6 +80,54 @@
            });
         });
     });
+    // Looks for changes in dataset state using an async request. Keeps
+    // calling itself (via setTimeout) until all datasets are in a terminal
+    // state.
+    var updater = function ( tracked_datasets ) {
+        // Check if there are any items left to track
+        var empty = true;
+        for ( i in tracked_datasets ) {
+            empty = false;
+            break;
+        }
+        if ( ! empty ) {
+            setTimeout( function() { updater_callback( tracked_datasets ) }, 3000 );
+        }
+    };
+    var updater_callback = function ( tracked_datasets ) {
+        // Build request data
+        var ids = []
+        var states = []
+        $.each( tracked_datasets, function ( id, state ) {
+            ids.push( id );
+            states.push( state );
+        });
+        // Make ajax call
+        $.ajax( {
+            type: "POST",
+            url: "${h.url_for( controller='library_dataset', action='library_item_updates' )}",
+            dataType: "json",
+            data: { ids: ids.join( "," ), states: states.join( "," ) },
+            success : function ( data ) {
+                $.each( data, function( id, val ) {
+                    // Replace HTML
+                    var cell = $("#libraryItem-" + id).find("#libraryItemInfo");
+                    cell.html( val.html );
+                    // If new state was terminal, stop tracking
+                    if (( val.state == "ok") || ( val.state == "error") || ( val.state == "empty") || ( val.state == "deleted" ) || ( val.state == "discarded" )) {
+                        delete tracked_datasets[ parseInt(id) ];
+                    } else {
+                        tracked_datasets[ parseInt(id) ] = val.state;
+                    }
+                });
+                updater( tracked_datasets ); 
+            },
+            error: function() {
+                // Just retry, like the old method, should try to be smarter
+                updater( tracked_datasets );
+            }
+        });
+    };
 </script>
 
 <%def name="render_dataset( ldda, library_dataset, selected, library, folder, pad, parent, row_conter )">
@@ -95,6 +146,8 @@
             can_manage_library_dataset = trans.app.security_agent.can_manage_library_item( user, roles, library_dataset )
         else:
             current_version = False
+        if current_version and ldda.state not in ( 'ok', 'error', 'empty', 'deleted', 'discarded' ):
+            tracked_datasets[ldda.id] = ldda.state
     %>
     %if current_version:
         <tr class="datasetRow"
@@ -102,7 +155,7 @@
             parent="${parent}"
             style="display: none;"
         %endif
-        >
+        id="libraryItem-${ldda.id}">
             <td style="padding-left: ${pad+20}px;">
                 %if selected:
                     <input type="checkbox" name="ldda_ids" value="${ldda.id}" checked/>
@@ -129,7 +182,7 @@
                     %endif
                 </div>
             </td>
-            <td>${ldda.message}</td>
+            <td id="libraryItemInfo">${render_library_item_info( ldda )}</td>
             <td>${uploaded_by}</td>
             <td>${ldda.create_time.strftime( "%Y-%m-%d" )}</td>
         </tr>     
@@ -305,6 +358,14 @@
     </table>
 </form>
 
+%if tracked_datasets:
+    <script type="text/javascript">
+        // Updater
+        updater({${ ",".join( [ '"%s" : "%s"' % ( k, v ) for k, v in tracked_datasets.iteritems() ] ) }});
+    </script>
+    <!-- running: do not change this comment, used by TwillTestCase.library_wait -->
+%endif
+
 ## Help about compression types
 
 %if len( comptypes ) > 1:
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/library/library_dataset_common.mako
--- a/templates/library/library_dataset_common.mako	Fri Sep 25 17:06:45 2009 -0400
+++ b/templates/library/library_dataset_common.mako	Fri Sep 25 17:07:13 2009 -0400
@@ -40,7 +40,8 @@
                         <div class="form-row">
                             <label>File:</label>
                             <div class="form-row-input">
-                                <input type="file" name="files_0|file_data" galaxy-ajax-upload="true"/>
+                                ##<input type="file" name="files_0|file_data" galaxy-ajax-upload="true"/>
+                                <input type="file" name="files_0|file_data"/>
                             </div>
                             <div style="clear: both"></div>
                         </div>
@@ -109,11 +110,16 @@
                             Convert spaces to tabs:
                         </label>
                         <div class="form-row-input">
-                            <input type="checkbox" name="files_0|space_to_tab" value="Yes"/>Yes
+                            ## The files grouping only makes sense in the upload_file context.
+                            %if upload_option == 'upload_file':
+                                <input type="checkbox" name="files_0|space_to_tab" value="Yes"/>Yes
+                            %else:
+                                <input type="checkbox" name="space_to_tab" value="Yes"/>Yes
+                            %endif
                         </div>
-                    </div>
-                    <div class="toolParamHelp" style="clear: both;">
-                        Use this option if you are entering intervals by hand.
+                        <div class="toolParamHelp" style="clear: both;">
+                            Use this option if you are entering intervals by hand.
+                        </div>
                     </div>
                     <div style="clear: both"></div>
                     <div class="form-row">
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 templates/library/library_item_info.mako
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/templates/library/library_item_info.mako	Fri Sep 25 17:07:13 2009 -0400
@@ -0,0 +1,13 @@
+<%def name="render_library_item_info( ldda )">
+                            %if ldda.state == 'error':
+                                <div class="libraryItem-${ldda.state}">Job error <i>(click name for more info)</i></div>
+                            %elif ldda.state == 'queued':
+                                <div class="libraryItem-${ldda.state}">This job is queued</div>
+                            %elif ldda.state == 'running':
+                                <div class="libraryItem-${ldda.state}">This job is running</div>
+                            %else:
+                                ${ldda.message}
+                            %endif
+</%def>
+
+${render_library_item_info( ldda )}
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 test-data/groupby_out1.dat
--- a/test-data/groupby_out1.dat	Fri Sep 25 17:06:45 2009 -0400
+++ b/test-data/groupby_out1.dat	Fri Sep 25 17:07:13 2009 -0400
@@ -17,4 +17,4 @@
 chr7	1.15958e+08
 chr8	1.18881e+08
 chr9	1.28843e+08
-chrX	1.45195e+08
+chrx	1.45195e+08
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 test-data/users/test3(a)bx.psu.edu/run1/2.fasta
--- a/test-data/users/test3(a)bx.psu.edu/run1/2.fasta	Fri Sep 25 17:06:45 2009 -0400
+++ b/test-data/users/test3(a)bx.psu.edu/run1/2.fasta	Fri Sep 25 17:07:13 2009 -0400
@@ -8,4 +8,4 @@
 ctcaatgttc atgttcttag gttgttttgg ataatatgcg gtcagtttaa tcttcgttgt
 ttcttcttaa aatatttatt catggtttaa tttttggttt gtacttgttc aggggccagt
 tcattattta ctctgtttgt atacagcagt tcttttattt ttagtatgat tttaatttaa
-aacaattcta atggtcaaaa a
\ No newline at end of file
+aacaattcta atggtcaaaa a
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 test/base/twilltestcase.py
--- a/test/base/twilltestcase.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/test/base/twilltestcase.py	Fri Sep 25 17:07:13 2009 -0400
@@ -1274,6 +1274,7 @@
         else:
             check_str = "Added 1 datasets to the folder '%s' ( each is selected )." % folder_name
         self.check_page_for_string( check_str )
+        self.library_wait( library_id )
         self.home()
     def set_library_dataset_permissions( self, library_id, folder_id, ldda_id, ldda_name, role_id, permissions_in, permissions_out ):
         url = "library_admin/library_dataset_dataset_association?library_id=%s&folder_id=%s&&id=%s&permissions=True&update_roles_button=Save" % \
@@ -1359,25 +1360,7 @@
         tc.submit( "runtool_btn" )
         check_str = "Added 1 dataset versions to the library dataset '%s' in the folder '%s'." % ( ldda_name, folder_name )
         self.check_page_for_string( check_str )
-        self.home()
-    def upload_new_dataset_versions( self, library_id, folder_id, folder_name, library_dataset_id, ldda_name, file_type='auto',
-                                    dbkey='hg18', message='', template_field_name1='', template_field_contents1='' ):
-        """Upload new version(s) of a dataset using a directory of files"""
-        self.home()
-        self.visit_url( "%s/library_admin/library_dataset_dataset_association?upload_option=upload_directory&library_id=%s&folder_id=%s&replace_id=%s" \
-                        % ( self.url, library_id, folder_id, library_dataset_id ) )
-        self.check_page_for_string( 'Upload a directory of files' )
-        self.check_page_for_string( 'You are currently selecting a new file to replace' )
-        tc.fv( "1", "file_type", file_type )
-        tc.fv( "1", "dbkey", dbkey )
-        tc.fv( "1", "message", message.replace( '+', ' ' ) )
-        tc.fv( "1", "server_dir", "library" )
-        # Add template field contents, if any...
-        if template_field_name1:
-            tc.fv( "1", template_field_name1, template_field_contents1 )
-        tc.submit( "runtool_btn" )
-        check_str = "Added 3 dataset versions to the library dataset '%s' in the folder '%s'." % ( ldda_name, folder_name )
-        self.check_page_for_string( check_str )
+        self.library_wait( library_id )
         self.home()
     def add_history_datasets_to_library( self, library_id, folder_id, folder_name, hda_id, root=False ):
         """Copy a dataset from the current history to a library folder"""
@@ -1410,6 +1393,7 @@
         tc.submit( "runtool_btn" )
         if check_str_after_submit:
             self.check_page_for_string( check_str_after_submit )
+        self.library_wait( library_id )
         self.home()
     def add_dir_of_files_from_libraries_view( self, library_id, folder_id, selected_dir, file_type='auto', dbkey='hg18', roles_tuple=[],
                                               message='', check_str_after_submit='', template_field_name1='', template_field_contents1='' ):
@@ -1432,6 +1416,7 @@
         tc.submit( "runtool_btn" )
         if check_str_after_submit:
             self.check_page_for_string( check_str_after_submit )
+        self.library_wait( library_id, controller='library' )
         self.home()
     def delete_library_item( self, library_id, library_item_id, library_item_name, library_item_type='library_dataset' ):
         """Mark a library item as deleted"""
@@ -1464,3 +1449,18 @@
         check_str = "Library '%s' and all of its contents have been purged" % library_name
         self.check_page_for_string( check_str )
         self.home()
+    def library_wait( self, library_id, controller='library_admin', maxiter=20 ):
+        """Waits for the tools to finish"""
+        count = 0
+        sleep_amount = 1
+        self.home()
+        while count < maxiter:
+            count += 1
+            self.visit_url( "%s/%s/browse_library?id=%s" % ( self.url, controller, library_id ) )
+            page = tc.browser.get_html()
+            if page.find( '<!-- running: do not change this comment, used by TwillTestCase.library_wait -->' ) > -1:
+                time.sleep( sleep_amount )
+                sleep_amount += 1
+            else:
+                break
+        self.assertNotEqual(count, maxiter)
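
With uploads now running as real jobs, the functional tests can no longer assert on the browse page immediately; library_wait() polls browse_library until the 'running' marker comment disappears, sleeping a little longer on each pass. A small usage sketch follows; the test name, ids and check string are placeholders, only the helper signatures come from the diff above:

    # Hedged usage sketch inside a TwillTestCase subclass; ids and strings are placeholders.
    def test_upload_dir_from_libraries_view( self ):
        """Upload a directory of files, then block until the upload jobs finish"""
        self.add_dir_of_files_from_libraries_view( self.library_id, self.folder_id, 'run1',
                                                   check_str_after_submit='Added 2 datasets' )
        # The helper above already ends with library_wait(), so the
        # '<!-- running: ... -->' marker is gone when it returns; an explicit
        # extra wait is harmless if a later step needs to block again.
        self.library_wait( self.library_id, controller='library' )
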
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 test/functional/__init__.py
--- a/test/functional/__init__.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/test/functional/__init__.py	Fri Sep 25 17:07:13 2009 -0400
@@ -79,8 +79,8 @@
                                    allow_user_creation = True,
                                    allow_user_deletion = True,
                                    admin_users = 'test(a)bx.psu.edu',
-                                   library_import_dir = galaxy_test_file_dir,
-                                   user_library_import_dir = os.path.join( galaxy_test_file_dir, 'users' ),
+                                   library_import_dir = os.path.join( os.getcwd(), galaxy_test_file_dir ),
+                                   user_library_import_dir = os.path.join( os.getcwd(), galaxy_test_file_dir, 'users' ),
                                    global_conf = { "__file__": "universe_wsgi.ini.sample" } )
                                    
         log.info( "Embedded Universe application started" )
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/data_source/upload.py
--- a/tools/data_source/upload.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/data_source/upload.py	Fri Sep 25 17:07:13 2009 -0400
@@ -137,7 +137,7 @@
 
     # See if we have an empty file
     if not os.path.exists( dataset.path ):
-        file_err( 'Uploaded temporary file (%s) does not exist.  Please' % dataset.path, dataset, json_file )
+        file_err( 'Uploaded temporary file (%s) does not exist.' % dataset.path, dataset, json_file )
         return
     if not os.path.getsize( dataset.path ) > 0:
         file_err( 'The uploaded file is empty', dataset, json_file )
@@ -237,7 +237,10 @@
     if ext == 'auto':
         ext = 'data'
     # Move the dataset to its "real" path
-    shutil.move( dataset.path, output_path )
+    if dataset.type == 'server_dir':
+        shutil.copy( dataset.path, output_path )
+    else:
+        shutil.move( dataset.path, output_path )
     # Write the job info
     info = dict( type = 'dataset',
                  dataset_id = dataset.dataset_id,
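
On the tool side, upload.py consumes the same parameter file one JSON line at a time; server_dir records are the ones that take the new copy branch above, so files in the configured import directory are left in place. A rough sketch of that consumption loop (the record keys match the writer shown earlier; the function and its output-path callable are illustrative, not copied from upload.py):

    # Hedged sketch of reading the JSON-lines parameter file; not the real upload.py loop.
    import json, shutil

    def process_uploads( json_file_path, output_path_for ):
        # output_path_for: placeholder callable mapping a dataset_id to its final path
        for line in open( json_file_path ):
            dataset = json.loads( line )                         # one dataset description per line
            output_path = output_path_for( dataset[ 'dataset_id' ] )
            if dataset[ 'type' ] == 'server_dir':
                shutil.copy( dataset[ 'path' ], output_path )    # keep the import directory intact
            else:
                shutil.move( dataset[ 'path' ], output_path )
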
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/fastx_toolkit/fastq_to_fasta.xml
--- a/tools/fastx_toolkit/fastq_to_fasta.xml	Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/fastx_toolkit/fastq_to_fasta.xml	Fri Sep 25 17:07:13 2009 -0400
@@ -3,7 +3,7 @@
 	<command>gunzip -cf $input | fastq_to_fasta $SKIPN $RENAMESEQ -o $output -v </command>
 
 	<inputs>
-		<param format="fastqsolexa" name="input" type="data" label="FASTQ Library to convert" />
+		<param format="fastqsolexa,fastqsanger" name="input" type="data" label="FASTQ Library to convert" />
 
 		<param name="SKIPN" type="select" label="Discard sequences with unknown (N) bases ">
 			<option value="">yes</option>
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/samtools/pileup_parser.xml
--- a/tools/samtools/pileup_parser.xml	Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/samtools/pileup_parser.xml	Fri Sep 25 17:07:13 2009 -0400
@@ -1,5 +1,5 @@
-<tool id="pileup_parser" name="Parse pileup">
-  <description>to find variants</description>
+<tool id="pileup_parser" name="Filter pileup">
+  <description>on coverage and SNPs</description>
   <command interpreter="perl">
     #if   $pileup_type.type_select == "six":    #pileup_parser.pl $input "3" "5" "6" "4" $qv_cutoff $cvrg_cutoff $snps_only $interval "2" $out_file1
     #elif $pileup_type.type_select == "ten":    #pileup_parser.pl $input "3" "9" "10" "8" $qv_cutoff $cvrg_cutoff $snps_only $interval "2" $out_file1
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/samtools/sam_pileup.xml
--- a/tools/samtools/sam_pileup.xml	Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/samtools/sam_pileup.xml	Fri Sep 25 17:07:13 2009 -0400
@@ -1,5 +1,5 @@
-<tool id="sam_pileup" name="SAM Pileup Format" version="1.0.0">
-  <description>generates the pileup format for a provided BAM file</description>
+<tool id="sam_pileup" name="Generate pileup" version="1.0.0">
+  <description>from BAM dataset</description>
   <command interpreter="python">
     sam_pileup.py 
 	    --input1=$input1
@@ -85,9 +85,9 @@
 
 **Types of pileup datasets**
 
-The description of pileup format below is largely based on information that can be found on SAMTools_ documentation page. The 6- and 10-column variants are described below.
+The description of pileup format below is largely based on information that can be found on SAMTools Pileup_ documentation page. The 6- and 10-column variants are described below.
 
-.. _SAMTools: http://samtools.sourceforge.net/pileup.shtml
+.. _Pileup: http://samtools.sourceforge.net/pileup.shtml
 
 **Six column pileup**::
 
@@ -111,7 +111,7 @@
        
 **Ten column pileup**
 
-The `ten-column`__ pileup incorporates additional consensus information generated with *-c* option of *samtools pileup* command::
+The `ten-column` (consensus_) pileup incorporates additional consensus information generated with *-c* option of *samtools pileup* command::
 
 
     1    2  3  4   5   6   7   8       9       10
@@ -137,7 +137,7 @@
       10 Quality values (phred33 scale, see Galaxy wiki for more)
 
 
-.. __: http://samtools.sourceforge.net/cns0.shtml
+.. _consensus: http://samtools.sourceforge.net/cns0.shtml
 
 
   </help>
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/stats/grouping.py
--- a/tools/stats/grouping.py	Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/stats/grouping.py	Fri Sep 25 17:07:13 2009 -0400
@@ -12,13 +12,13 @@
 
 def main():
     inputfile = sys.argv[2]
-    
+    ignorecase = int(sys.argv[4])
     ops = []
     cols = []
     rounds = []
     elems = []
     
-    for var in sys.argv[4:]:
+    for var in sys.argv[5:]:
         ops.append(var.split()[0])
         cols.append(var.split()[1])
         rounds.append(var.split()[2])
@@ -71,7 +71,10 @@
         we need to add 1 to group_col.
         if POS2 is not specified, the newer versions of sort will consider the entire line for sorting. To prevent this, we set POS2=POS1.
         """
-        command_line = "sort -f -k " + str(group_col+1) +"," + str(group_col+1) + " -o " + tmpfile.name + " " + inputfile
+        case = ''
+        if ignorecase == 1:
+            case = '-f' 
+        command_line = "sort -t $'\t' " + case + " -k" + str(group_col+1) +"," + str(group_col+1) + " -o " + tmpfile.name + " " + inputfile
     except Exception, exc:
         stop_err( 'Initialization error -> %s' %str(exc) )
     
@@ -95,6 +98,8 @@
             try:
                 fields = line.split("\t")
                 item = fields[group_col]
+                if ignorecase == 1:
+                    item = item.lower()
                 if prev_item != "":
                     # At this level, we're grouping on values (item and prev_item) in group_col
                     if item == prev_item:
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 tools/stats/grouping.xml
--- a/tools/stats/grouping.xml	Fri Sep 25 17:06:45 2009 -0400
+++ b/tools/stats/grouping.xml	Fri Sep 25 17:07:13 2009 -0400
@@ -1,10 +1,11 @@
-<tool id="Grouping1" name="Group" version="1.7.0">
+<tool id="Grouping1" name="Group" version="1.8.0">
   <description>data by a column and perform aggregate operation on other columns.</description>
   <command interpreter="python">
     grouping.py 
       $out_file1
       $input1
       $groupcol
+      $ignorecase
       #for $op in $operations
        '${op.optype}
         ${op.opcol}
@@ -14,6 +15,9 @@
   <inputs>
     <param format="tabular" name="input1" type="data" label="Select data" help="Query missing? See TIP below."/>
     <param name="groupcol" label="Group by column" type="data_column" data_ref="input1" />
+    <param name="ignorecase" type="boolean" truevalue="1" falsevalue="0">
+      <label>Ignore case while grouping?</label>
+    </param>
     <repeat name="operations" title="Operation">
       <param name="optype" type="select" label="Type">
         <option value="mean">Mean</option>
@@ -44,6 +48,7 @@
     <test>
       <param name="input1" value="1.bed"/>
       <param name="groupcol" value="1"/>
+      <param name="ignorecase" value="true"/>
       <param name="optype" value="mean"/>
       <param name="opcol" value="2"/>
       <param name="opround" value="no"/>
@@ -54,6 +59,7 @@
     <test>
       <param name="input1" value="1.tabular"/>
       <param name="groupcol" value="1"/>
+      <param name="ignorecase" value="true"/>
       <param name="optype" value="mean"/>
       <param name="opcol" value="2"/>
       <param name="opround" value="no"/>
@@ -80,15 +86,22 @@
 
 - For the following input::
 
-   chr22  1000  NM_17
-   chr22  2000  NM_18
-   chr10  2200  NM_10
-   chr10  1200  NM_11
-   chr22  1600  NM_19
+   chr22  1000  1003  TTT
+   chr22  2000  2003  aaa
+   chr10  2200  2203  TTT
+   chr10  1200  1203  ttt
+   chr22  1600  1603  AAA
 
-- running this tool with **Group by column 1**, Operations **Mean on column 2** and **Concatenate on column 3** will return::
+- **Grouping on column 4** while ignoring case, and performing operation **Count on column 1** will return::
 
-   chr10    1700.00 NM_11,NM_10
-   chr22    1533.33 NM_17,NM_19,NM_18
+   AAA    2
+   TTT    3
+   
+- **Grouping on column 4** while not ignoring case, and performing operation **Count on column 1** will return::
+
+   aaa    1
+   AAA    1
+   ttt    1
+   TTT    2
   </help>
 </tool>
diff -r 83dc9981e3c4 -r 6f8b5f1e8ec9 universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample	Fri Sep 25 17:06:45 2009 -0400
+++ b/universe_wsgi.ini.sample	Fri Sep 25 17:07:13 2009 -0400
@@ -102,9 +102,11 @@
 ## Brand: appends "/[brand]" to the "Galaxy" text in the masthead
 ## wiki_url: replaces the default galaxy main wiki
 ## bugs_email: replaces the default galaxy bugs email list
+##citation_url: point to a URL listing citations
 #brand = Private local mirror
 #wiki_url = /path/to/my/local/wiki
 #bugs_email = mailto:galaxy-bugs@example.org
+#citation_url = /path/to/my/citations
 
 # ---- Logging and Debugging ------------------------------------------------
 
                    
                  
                  
                          
                            
                          
                        
                    
                    
                        details:   http://www.bx.psu.edu/hg/galaxy/rev/c4c409bda49b
changeset: 2779:c4c409bda49b
user:      Anton Nekrutenko <anton(a)bx.psu.edu>
date:      Fri Sep 25 14:06:43 2009 -0400
description:
Moving cite to help
1 file(s) affected in this change:
templates/base_panels.mako
diffs (19 lines):
diff -r 40f58d95a051 -r c4c409bda49b templates/base_panels.mako
--- a/templates/base_panels.mako	Fri Sep 25 13:58:15 2009 -0400
+++ b/templates/base_panels.mako	Fri Sep 25 14:06:43 2009 -0400
@@ -169,14 +169,10 @@
             <li><a href="${app.config.get( "bugs_email", "mailto:galaxy-bugs@bx.psu.edu"  )}">Email comments, bug reports, or suggestions</a></li>
             <li><a target="_blank" href="${app.config.get( "wiki_url", "http://bitbucket.org/galaxy/galaxy-central/wiki" )}">Galaxy Wiki</a></li>             
             <li><a target="_blank" href="${app.config.get( "screencasts_url", "http://galaxycast.org" )}">Video tutorials (screencasts)</a></li>
+            <li><a target="_blank" href="${app.config.get( "citation_url", "http://bitbucket.org/galaxy/galaxy-central/wiki/Citations" )}">How to Cite Galaxy</a></li>
         </ul>
         </div>
     </td>
-    
-    <td class="tab">
-        <a target="_blank" href="${app.config.get( "citation_url", "http://bitbucket.org/galaxy/galaxy-central/wiki/Citations" )}">Cite</a>
-    </td>
-
     
     <td class="tab">
         <a>User</a>
                    
                  
                  
                          
                            
                          
                        
                     
                        
                    02 Oct '09
                    
                        details:   http://www.bx.psu.edu/hg/galaxy/rev/dd50d8d45177
changeset: 2781:dd50d8d45177
user:      gua110
date:      Fri Sep 25 14:50:34 2009 -0400
description:
Adding an option to Group tool to ignore case while grouping.
3 file(s) affected in this change:
test-data/groupby_out1.dat
tools/stats/grouping.py
tools/stats/grouping.xml
diffs (122 lines):
diff -r 3559f7377b9c -r dd50d8d45177 test-data/groupby_out1.dat
--- a/test-data/groupby_out1.dat	Fri Sep 25 14:36:12 2009 -0400
+++ b/test-data/groupby_out1.dat	Fri Sep 25 14:50:34 2009 -0400
@@ -17,4 +17,4 @@
 chr7	1.15958e+08
 chr8	1.18881e+08
 chr9	1.28843e+08
-chrX	1.45195e+08
+chrx	1.45195e+08
diff -r 3559f7377b9c -r dd50d8d45177 tools/stats/grouping.py
--- a/tools/stats/grouping.py	Fri Sep 25 14:36:12 2009 -0400
+++ b/tools/stats/grouping.py	Fri Sep 25 14:50:34 2009 -0400
@@ -12,13 +12,13 @@
 
 def main():
     inputfile = sys.argv[2]
-    
+    ignorecase = int(sys.argv[4])
     ops = []
     cols = []
     rounds = []
     elems = []
     
-    for var in sys.argv[4:]:
+    for var in sys.argv[5:]:
         ops.append(var.split()[0])
         cols.append(var.split()[1])
         rounds.append(var.split()[2])
@@ -71,7 +71,10 @@
         we need to add 1 to group_col.
         if POS2 is not specified, the newer versions of sort will consider the entire line for sorting. To prevent this, we set POS2=POS1.
         """
-        command_line = "sort -f -k " + str(group_col+1) +"," + str(group_col+1) + " -o " + tmpfile.name + " " + inputfile
+        case = ''
+        if ignorecase == 1:
+            case = '-f' 
+        command_line = "sort -t $'\t' " + case + " -k" + str(group_col+1) +"," + str(group_col+1) + " -o " + tmpfile.name + " " + inputfile
     except Exception, exc:
         stop_err( 'Initialization error -> %s' %str(exc) )
     
@@ -95,6 +98,8 @@
             try:
                 fields = line.split("\t")
                 item = fields[group_col]
+                if ignorecase == 1:
+                    item = item.lower()
                 if prev_item != "":
                     # At this level, we're grouping on values (item and prev_item) in group_col
                     if item == prev_item:
diff -r 3559f7377b9c -r dd50d8d45177 tools/stats/grouping.xml
--- a/tools/stats/grouping.xml	Fri Sep 25 14:36:12 2009 -0400
+++ b/tools/stats/grouping.xml	Fri Sep 25 14:50:34 2009 -0400
@@ -1,10 +1,11 @@
-<tool id="Grouping1" name="Group" version="1.7.0">
+<tool id="Grouping1" name="Group" version="1.8.0">
   <description>data by a column and perform aggregate operation on other columns.</description>
   <command interpreter="python">
     grouping.py 
       $out_file1
       $input1
       $groupcol
+      $ignorecase
       #for $op in $operations
        '${op.optype}
         ${op.opcol}
@@ -14,6 +15,9 @@
   <inputs>
     <param format="tabular" name="input1" type="data" label="Select data" help="Query missing? See TIP below."/>
     <param name="groupcol" label="Group by column" type="data_column" data_ref="input1" />
+    <param name="ignorecase" type="boolean" truevalue="1" falsevalue="0">
+      <label>Ignore case while grouping?</label>
+    </param>
     <repeat name="operations" title="Operation">
       <param name="optype" type="select" label="Type">
         <option value="mean">Mean</option>
@@ -44,6 +48,7 @@
     <test>
       <param name="input1" value="1.bed"/>
       <param name="groupcol" value="1"/>
+      <param name="ignorecase" value="true"/>
       <param name="optype" value="mean"/>
       <param name="opcol" value="2"/>
       <param name="opround" value="no"/>
@@ -54,6 +59,7 @@
     <test>
       <param name="input1" value="1.tabular"/>
       <param name="groupcol" value="1"/>
+      <param name="ignorecase" value="true"/>
       <param name="optype" value="mean"/>
       <param name="opcol" value="2"/>
       <param name="opround" value="no"/>
@@ -80,15 +86,22 @@
 
 - For the following input::
 
-   chr22  1000  NM_17
-   chr22  2000  NM_18
-   chr10  2200  NM_10
-   chr10  1200  NM_11
-   chr22  1600  NM_19
+   chr22  1000  1003  TTT
+   chr22  2000  2003  aaa
+   chr10  2200  2203  TTT
+   chr10  1200  1203  ttt
+   chr22  1600  1603  AAA
 
-- running this tool with **Group by column 1**, Operations **Mean on column 2** and **Concatenate on column 3** will return::
+- **Grouping on column 4** while ignoring case, and performing operation **Count on column 1** will return::
 
-   chr10    1700.00 NM_11,NM_10
-   chr22    1533.33 NM_17,NM_19,NM_18
+   AAA    2
+   TTT    3
+   
+- **Grouping on column 4** while not ignoring case, and performing operation **Count on column 1** will return::
+
+   aaa    1
+   AAA    1
+   ttt    1
+   TTT    2
   </help>
 </tool>
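
The new ignorecase flag is passed as sys.argv[4]; when it is 1 the pre-sort gets -f and the group key is lowercased before comparison, so 'TTT', 'ttt' and 'Ttt' fall into one group. The same idea in a small self-contained sketch, using pure Python instead of the external sort, for illustration only:

    # Hedged sketch of case-insensitive grouping equivalent to the -f / lower() change.
    from itertools import groupby

    rows = [ ( 'chr22', 'TTT' ), ( 'chr22', 'aaa' ), ( 'chr10', 'TTT' ),
             ( 'chr10', 'ttt' ), ( 'chr22', 'AAA' ) ]
    ignorecase = True

    if ignorecase:
        keyfunc = lambda row: row[1].lower()
    else:
        keyfunc = lambda row: row[1]

    counts = {}
    for key, group in groupby( sorted( rows, key=keyfunc ), keyfunc ):
        counts[ key ] = len( list( group ) )
    # counts == { 'aaa': 2, 'ttt': 3 } when ignorecase is True
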
                    
                  
                  
                          
                            
                          
                        
                     
                        
                    02 Oct '09
                    
                        details:   http://www.bx.psu.edu/hg/galaxy/rev/3559f7377b9c
changeset: 2780:3559f7377b9c
user:      Nate Coraor <nate(a)bx.psu.edu>
date:      Fri Sep 25 14:36:12 2009 -0400
description:
Real Job(tm) support for the library upload tool.  Does not include iframe upload for the library side yet.
20 file(s) affected in this change:
lib/galaxy/jobs/__init__.py
lib/galaxy/model/__init__.py
lib/galaxy/model/mapping.py
lib/galaxy/model/migrate/versions/0020_library_upload_job.py
lib/galaxy/tools/actions/upload.py
lib/galaxy/tools/actions/upload_common.py
lib/galaxy/web/controllers/library.py
lib/galaxy/web/controllers/library_admin.py
lib/galaxy/web/controllers/library_dataset.py
static/june_2007_style/blue/library.css
static/june_2007_style/library.css.tmpl
templates/admin/library/browse_library.mako
templates/admin/library/new_library.mako
templates/library/browse_library.mako
templates/library/library_dataset_common.mako
templates/library/library_item_info.mako
test-data/users/test3(a)bx.psu.edu/run1/2.fasta
test/base/twilltestcase.py
test/functional/__init__.py
tools/data_source/upload.py
diffs (1850 lines):
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py	Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/jobs/__init__.py	Fri Sep 25 14:36:12 2009 -0400
@@ -357,13 +357,14 @@
         # Restore input / output data lists
         inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
         out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
+        out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
         # These can be passed on the command line if wanted as $userId $userEmail
-        if job.history.user: # check for anonymous user!
-             userId = '%d' % job.history.user.id
-             userEmail = str(job.history.user.email)
+        if job.history and job.history.user: # check for anonymous user!
+            userId = '%d' % job.history.user.id
+            userEmail = str(job.history.user.email)
         else:
-             userId = 'Anonymous'
-             userEmail = 'Anonymous'
+            userId = 'Anonymous'
+            userEmail = 'Anonymous'
         incoming['userId'] = userId
         incoming['userEmail'] = userEmail
         # Build params, done before hook so hook can use
@@ -424,7 +425,7 @@
                         log.debug( "fail(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
                     except ( IOError, OSError ), e:
                         log.error( "fail(): Missing output file in working directory: %s" % e )
-            for dataset_assoc in job.output_datasets:
+            for dataset_assoc in job.output_datasets + job.output_library_datasets:
                 dataset = dataset_assoc.dataset
                 dataset.refresh()
                 dataset.state = dataset.states.ERROR
@@ -444,7 +445,7 @@
     def change_state( self, state, info = False ):
         job = model.Job.get( self.job_id )
         job.refresh()
-        for dataset_assoc in job.output_datasets:
+        for dataset_assoc in job.output_datasets + job.output_library_datasets:
             dataset = dataset_assoc.dataset
             dataset.refresh()
             dataset.state = state
@@ -504,10 +505,10 @@
                         self.fail( "Job %s's output dataset(s) could not be read" % job.id )
                         return
         job_context = ExpressionContext( dict( stdout = stdout, stderr = stderr ) )
-        for dataset_assoc in job.output_datasets:
+        for dataset_assoc in job.output_datasets + job.output_library_datasets:
             context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
             #should this also be checking library associations? - can a library item be added from a history before the job has ended? - lets not allow this to occur
-            for dataset in dataset_assoc.dataset.dataset.history_associations: #need to update all associated output hdas, i.e. history was shared with job running
+            for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations: #need to update all associated output hdas, i.e. history was shared with job running
                 dataset.blurb = 'done'
                 dataset.peek  = 'no peek'
                 dataset.info  = context['stdout'] + context['stderr']
@@ -576,6 +577,7 @@
         # custom post process setup
         inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
         out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
+        out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
         param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows
         param_dict = self.tool.params_from_strings( param_dict, self.app )
         # Check for and move associated_files
@@ -647,11 +649,11 @@
         job = model.Job.get( self.job_id )
         if self.app.config.outputs_to_working_directory:
             self.output_paths = []
-            for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets ]:
+            for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets + job.output_library_datasets ]:
                 false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % data.id ) )
                 self.output_paths.append( DatasetPath( data.id, data.file_name, false_path ) )
         else:
-            self.output_paths = [ DatasetPath( da.dataset.dataset.id, da.dataset.file_name ) for da in job.output_datasets ]
+            self.output_paths = [ DatasetPath( da.dataset.dataset.id, da.dataset.file_name ) for da in job.output_datasets + job.output_library_datasets ]
         return self.output_paths
 
     def get_output_file_id( self, file ):
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py	Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/model/__init__.py	Fri Sep 25 14:36:12 2009 -0400
@@ -74,6 +74,7 @@
         self.parameters = []
         self.input_datasets = []
         self.output_datasets = []
+        self.output_library_datasets = []
         self.state = Job.states.NEW
         self.info = None
         self.job_runner_name = None
@@ -84,6 +85,8 @@
         self.input_datasets.append( JobToInputDatasetAssociation( name, dataset ) )
     def add_output_dataset( self, name, dataset ):
         self.output_datasets.append( JobToOutputDatasetAssociation( name, dataset ) )
+    def add_output_library_dataset( self, name, dataset ):
+        self.output_library_datasets.append( JobToOutputLibraryDatasetAssociation( name, dataset ) )
     def set_state( self, state ):
         self.state = state
         # For historical reasons state propogates down to datasets
@@ -138,6 +141,11 @@
         self.dataset = dataset
         
 class JobToOutputDatasetAssociation( object ):
+    def __init__( self, name, dataset ):
+        self.name = name
+        self.dataset = dataset
+
+class JobToOutputLibraryDatasetAssociation( object ):
     def __init__( self, name, dataset ):
         self.name = name
         self.dataset = dataset
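The reason for adding a JobToOutputLibraryDatasetAssociation that mirrors the history-side class is that both expose the same .name/.dataset shape, so the job wrapper can simply concatenate job.output_datasets and job.output_library_datasets wherever it previously walked only the former (see the jobs/__init__.py hunks earlier in this changeset). A stripped-down sketch of that pattern, using plain stand-in classes rather than the SQLAlchemy-mapped ones:

    # Stand-ins mirroring the associations added in model/__init__.py; the real
    # classes are mapped to tables in mapping.py, so this is illustrative only.
    class JobToOutputDatasetAssociation(object):
        def __init__(self, name, dataset):
            self.name, self.dataset = name, dataset

    class JobToOutputLibraryDatasetAssociation(object):
        def __init__(self, name, dataset):
            self.name, self.dataset = name, dataset

    class Job(object):
        def __init__(self):
            self.output_datasets = []
            self.output_library_datasets = []
        def add_output_dataset(self, name, dataset):
            self.output_datasets.append(JobToOutputDatasetAssociation(name, dataset))
        def add_output_library_dataset(self, name, dataset):
            self.output_library_datasets.append(JobToOutputLibraryDatasetAssociation(name, dataset))

    job = Job()
    job.add_output_dataset("output0", "hda-1")            # history output
    job.add_output_library_dataset("output1", "ldda-7")   # library output
    # Consumers iterate both lists together, exactly as the job wrapper now does:
    out_data = dict((da.name, da.dataset) for da in job.output_datasets + job.output_library_datasets)
    print(out_data)  # {'output0': 'hda-1', 'output1': 'ldda-7'}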
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py	Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/model/mapping.py	Fri Sep 25 14:36:12 2009 -0400
@@ -107,7 +107,7 @@
     Column( "id", Integer, primary_key=True ),
     Column( "create_time", DateTime, default=now ),
     Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
-    Column( "state", TrimmedString( 64 ) ),
+    Column( "state", TrimmedString( 64 ), index=True ),
     Column( "deleted", Boolean, index=True, default=False ),
     Column( "purged", Boolean, index=True, default=False ),
     Column( "purgable", Boolean, default=True ),
@@ -307,6 +307,7 @@
     Column( "create_time", DateTime, default=now ),
     Column( "update_time", DateTime, default=now, onupdate=now ),
     Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ),
+    Column( "library_folder_id", Integer, ForeignKey( "library_folder.id" ), index=True ),
     Column( "tool_id", String( 255 ) ),
     Column( "tool_version", TEXT, default="1.0.0" ),
     Column( "state", String( 64 ), index=True ),
@@ -337,6 +338,12 @@
     Column( "id", Integer, primary_key=True ),
     Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
     Column( "dataset_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ),
+    Column( "name", String(255) ) )
+    
+JobToOutputLibraryDatasetAssociation.table = Table( "job_to_output_library_dataset", metadata,
+    Column( "id", Integer, primary_key=True ),
+    Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
+    Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ),
     Column( "name", String(255) ) )
     
 JobExternalOutputMetadata.table = Table( "job_external_output_metadata", metadata,
@@ -907,6 +914,9 @@
 assign_mapper( context, JobToOutputDatasetAssociation, JobToOutputDatasetAssociation.table,
     properties=dict( job=relation( Job ), dataset=relation( HistoryDatasetAssociation, lazy=False ) ) )
 
+assign_mapper( context, JobToOutputLibraryDatasetAssociation, JobToOutputLibraryDatasetAssociation.table,
+    properties=dict( job=relation( Job ), dataset=relation( LibraryDatasetDatasetAssociation, lazy=False ) ) )
+
 assign_mapper( context, JobParameter, JobParameter.table )
 
 assign_mapper( context, JobExternalOutputMetadata, JobExternalOutputMetadata.table,
@@ -917,9 +927,11 @@
 assign_mapper( context, Job, Job.table, 
     properties=dict( galaxy_session=relation( GalaxySession ),
                      history=relation( History ),
+                     library_folder=relation( LibraryFolder ),
                      parameters=relation( JobParameter, lazy=False ),
                      input_datasets=relation( JobToInputDatasetAssociation, lazy=False ),
                      output_datasets=relation( JobToOutputDatasetAssociation, lazy=False ),
+                     output_library_datasets=relation( JobToOutputLibraryDatasetAssociation, lazy=False ),
                      external_output_metadata = relation( JobExternalOutputMetadata, lazy = False ) ) )
 
 assign_mapper( context, Event, Event.table,
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/model/migrate/versions/0020_library_upload_job.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0020_library_upload_job.py	Fri Sep 25 14:36:12 2009 -0400
@@ -0,0 +1,121 @@
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from sqlalchemy.exceptions import *
+from migrate import *
+from migrate.changeset import *
+import datetime
+now = datetime.datetime.utcnow
+import sys, logging
+# Need our custom types, but don't import anything else from model
+from galaxy.model.custom_types import *
+
+log = logging.getLogger( __name__ )
+log.setLevel(logging.DEBUG)
+handler = logging.StreamHandler( sys.stdout )
+format = "%(name)s %(levelname)s %(asctime)s %(message)s"
+formatter = logging.Formatter( format )
+handler.setFormatter( formatter )
+log.addHandler( handler )
+
+metadata = MetaData( migrate_engine )
+db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, transactional=False ) )
+
+def display_migration_details():
+    print ""
+    print "========================================"
+    print """This script creates a job_to_output_library_dataset table for allowing library
+uploads to run as regular jobs.  To support this, a library_folder_id column is
+added to the job table, and library_folder/output_library_datasets relations
+are added to the Job object.  An index is also added to the dataset.state
+column."""
+    print "========================================"
+
+JobToOutputLibraryDatasetAssociation_table = Table( "job_to_output_library_dataset", metadata,
+    Column( "id", Integer, primary_key=True ),
+    Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
+    Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ),
+    Column( "name", String(255) ) )
+
+def upgrade():
+    display_migration_details()
+    # Load existing tables
+    metadata.reflect()
+    # Create the job_to_output_library_dataset table
+    try:
+        JobToOutputLibraryDatasetAssociation_table.create()
+    except Exception, e:
+        print "Creating job_to_output_library_dataset table failed: %s" % str( e )
+        log.debug( "Creating job_to_output_library_dataset table failed: %s" % str( e ) )
+    # Create the library_folder_id column
+    try:
+        Job_table = Table( "job", metadata, autoload=True )
+    except NoSuchTableError:
+        Job_table = None
+        log.debug( "Failed loading table job" )
+    if Job_table:
+        try:
+            col = Column( "library_folder_id", Integer, index=True )
+            col.create( Job_table )
+            assert col is Job_table.c.library_folder_id
+        except Exception, e:
+            log.debug( "Adding column 'library_folder_id' to job table failed: %s" % ( str( e ) ) )
+        try:
+            LibraryFolder_table = Table( "library_folder", metadata, autoload=True )
+        except NoSuchTableError:
+            LibraryFolder_table = None
+            log.debug( "Failed loading table library_folder" )
+        # Add 1 foreign key constraint to the job table
+        if Job_table and LibraryFolder_table:
+            try:
+                cons = ForeignKeyConstraint( [Job_table.c.library_folder_id],
+                                             [LibraryFolder_table.c.id],
+                                             name='job_library_folder_id_fk' )
+                # Create the constraint
+                cons.create()
+            except Exception, e:
+                log.debug( "Adding foreign key constraint 'job_library_folder_id_fk' to table 'library_folder' failed: %s" % ( str( e ) ) )
+    # Create the ix_dataset_state index
+    try:
+        Dataset_table = Table( "dataset", metadata, autoload=True )
+    except NoSuchTableError:
+        Dataset_table = None
+        log.debug( "Failed loading table dataset" )
+    i = Index( "ix_dataset_state", Dataset_table.c.state )
+    try:
+        i.create()
+    except Exception, e:
+        print str(e)
+        log.debug( "Adding index 'ix_dataset_state' to dataset table failed: %s" % str( e ) )
+
+def downgrade():
+    metadata.reflect()
+    # Drop the library_folder_id column
+    try:
+        Job_table = Table( "job", metadata, autoload=True )
+    except NoSuchTableError:
+        Job_table = None
+        log.debug( "Failed loading table job" )
+    if Job_table:
+        try:
+            col = Job_table.c.library_folder_id
+            col.drop()
+        except Exception, e:
+            log.debug( "Dropping column 'library_folder_id' from job table failed: %s" % ( str( e ) ) )
+    # Drop the job_to_output_library_dataset table
+    try:
+        JobToOutputLibraryDatasetAssociation_table.drop()
+    except Exception, e:
+        print str(e)
+        log.debug( "Dropping job_to_output_library_dataset table failed: %s" % str( e ) )
+    # Drop the ix_dataset_state index
+    try:
+        Dataset_table = Table( "dataset", metadata, autoload=True )
+    except NoSuchTableError:
+        Dataset_table = None
+        log.debug( "Failed loading table dataset" )
+    i = Index( "ix_dataset_state", Dataset_table.c.state )
+    try:
+        i.drop()
+    except Exception, e:
+        print str(e)
+        log.debug( "Dropping index 'ix_dataset_state' from dataset table failed: %s" % str( e ) )
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/tools/actions/upload.py
--- a/lib/galaxy/tools/actions/upload.py	Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/tools/actions/upload.py	Fri Sep 25 14:36:12 2009 -0400
@@ -1,126 +1,22 @@
-import os, shutil, urllib, StringIO, re, gzip, tempfile, shutil, zipfile
-from cgi import FieldStorage
+import os
 from __init__ import ToolAction
-from galaxy import datatypes, jobs
-from galaxy.datatypes import sniff
-from galaxy import model, util
-from galaxy.util.json import to_json_string
-
-import sys, traceback
+from galaxy.tools.actions import upload_common
 
 import logging
 log = logging.getLogger( __name__ )
 
 class UploadToolAction( ToolAction ):
-    # Action for uploading files
-    def persist_uploads( self, incoming ):
-        if 'files' in incoming:
-            new_files = []
-            temp_files = []
-            for upload_dataset in incoming['files']:
-                f = upload_dataset['file_data']
-                if isinstance( f, FieldStorage ): 
-                    assert not isinstance( f.file, StringIO.StringIO )
-                    assert f.file.name != '<fdopen>'
-                    local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
-                    f.file.close()
-                    upload_dataset['file_data'] = dict( filename = f.filename,
-                                                        local_filename = local_filename )
-                if upload_dataset['url_paste'].strip() != '':
-                    upload_dataset['url_paste'] = datatypes.sniff.stream_to_file( StringIO.StringIO( upload_dataset['url_paste'] ), prefix="strio_url_paste_" )[0]
-                else:
-                    upload_dataset['url_paste'] = None
-                new_files.append( upload_dataset )
-            incoming['files'] = new_files
-        return incoming
     def execute( self, tool, trans, incoming={}, set_output_hid = True ):
         dataset_upload_inputs = []
         for input_name, input in tool.inputs.iteritems():
             if input.type == "upload_dataset":
                 dataset_upload_inputs.append( input )
         assert dataset_upload_inputs, Exception( "No dataset upload groups were found." )
-        # Get any precreated datasets (when using asynchronous uploads)
-        async_datasets = []
-        self.precreated_datasets = []
-        if incoming.get( 'async_datasets', None ) not in ["None", "", None]:
-            async_datasets = incoming['async_datasets'].split(',')
-        for id in async_datasets:
-            try:
-                data = trans.app.model.HistoryDatasetAssociation.get( int( id ) )
-            except:
-                log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id )
-                continue
-            if trans.user is None and trans.galaxy_session.current_history != data.history:
-               log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)' % ( data.id, trans.galaxy_session.id ) )
-            elif data.history.user != trans.user:
-               log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)' % ( data.id, trans.user.id ) )
-            else:
-                self.precreated_datasets.append( data )
 
-        data_list = []
-
-        incoming = self.persist_uploads( incoming )
-
-        json_file = tempfile.mkstemp()
-        json_file_path = json_file[1]
-        json_file = os.fdopen( json_file[0], 'w' )
-        for dataset_upload_input in dataset_upload_inputs:
-            uploaded_datasets = dataset_upload_input.get_uploaded_datasets( trans, incoming )
-            for uploaded_dataset in uploaded_datasets:
-                data = self.get_precreated_dataset( uploaded_dataset.name )
-                if not data:
-                    data = trans.app.model.HistoryDatasetAssociation( history = trans.history, create_dataset = True )
-                    data.name = uploaded_dataset.name
-                    data.state = data.states.QUEUED
-                    data.extension = uploaded_dataset.file_type
-                    data.dbkey = uploaded_dataset.dbkey
-                    data.flush()
-                    trans.history.add_dataset( data, genome_build = uploaded_dataset.dbkey )
-                    permissions = trans.app.security_agent.history_get_default_permissions( trans.history )
-                    trans.app.security_agent.set_all_dataset_permissions( data.dataset, permissions )
-                else:
-                    data.extension = uploaded_dataset.file_type
-                    data.dbkey = uploaded_dataset.dbkey
-                    data.flush()
-                    trans.history.genome_build = uploaded_dataset.dbkey
-                if uploaded_dataset.type == 'composite':
-                    # we need to init metadata before the job is dispatched
-                    data.init_meta()
-                    for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
-                        setattr( data.metadata, meta_name, meta_value )
-                    data.flush()
-                    json = dict( file_type = uploaded_dataset.file_type,
-                                 dataset_id = data.dataset.id,
-                                 dbkey = uploaded_dataset.dbkey,
-                                 type = uploaded_dataset.type,
-                                 metadata = uploaded_dataset.metadata,
-                                 primary_file = uploaded_dataset.primary_file,
-                                 extra_files_path = data.extra_files_path,
-                                 composite_file_paths = uploaded_dataset.composite_files,
-                                 composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
-                else:
-                    try:
-                        is_binary = uploaded_dataset.datatype.is_binary
-                    except:
-                        is_binary = None
-                    json = dict( file_type = uploaded_dataset.file_type,
-                                 ext = uploaded_dataset.ext,
-                                 name = uploaded_dataset.name,
-                                 dataset_id = data.dataset.id,
-                                 dbkey = uploaded_dataset.dbkey,
-                                 type = uploaded_dataset.type,
-                                 is_binary = is_binary,
-                                 space_to_tab = uploaded_dataset.space_to_tab,
-                                 path = uploaded_dataset.path )
-                json_file.write( to_json_string( json ) + '\n' )
-                data_list.append( data )
-        json_file.close()
-
-        #cleanup unclaimed precreated datasets:
-        for data in self.precreated_datasets:
-            log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( data.id ) )
-            data.state = data.states.ERROR
-            data.info = 'No file contents were available.'
+        precreated_datasets = upload_common.get_precreated_datasets( trans, incoming, trans.app.model.HistoryDatasetAssociation )
+        incoming = upload_common.persist_uploads( incoming )
+        json_file_path, data_list = upload_common.create_paramfile( trans, incoming, precreated_datasets, dataset_upload_inputs )
+        upload_common.cleanup_unused_precreated_datasets( precreated_datasets )
         
         if not data_list:
             try:
@@ -129,38 +25,4 @@
                 pass
             return 'No data was entered in the upload form, please go back and choose data to upload.'
         
-        # Create the job object
-        job = trans.app.model.Job()
-        job.session_id = trans.get_galaxy_session().id
-        job.history_id = trans.history.id
-        job.tool_id = tool.id
-        job.tool_version = tool.version
-        job.state = trans.app.model.Job.states.UPLOAD
-        job.flush()
-        log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
-        trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )
-
-        for name, value in tool.params_to_strings( incoming, trans.app ).iteritems():
-            job.add_parameter( name, value )
-        job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
-        for i, dataset in enumerate( data_list ):
-            job.add_output_dataset( 'output%i' % i, dataset )
-        job.state = trans.app.model.Job.states.NEW
-        trans.app.model.flush()
-        
-        # Queue the job for execution
-        trans.app.job_queue.put( job.id, tool )
-        trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
-        return dict( [ ( i, v ) for i, v in enumerate( data_list ) ] )
-
-    def get_precreated_dataset( self, name ):
-        """
-        Return a dataset matching a name from the list of precreated (via async
-        upload) datasets. If there's more than one upload with the exact same
-        name, we need to pop one (the first) so it isn't chosen next time.
-        """
-        names = [ d.name for d in self.precreated_datasets ]
-        if names.count( name ) > 0:
-            return self.precreated_datasets.pop( names.index( name ) )
-        else:
-            return None
+        return upload_common.create_job( trans, incoming, tool, json_file_path, data_list )
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/tools/actions/upload_common.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/tools/actions/upload_common.py	Fri Sep 25 14:36:12 2009 -0400
@@ -0,0 +1,235 @@
+import os, tempfile, StringIO
+from cgi import FieldStorage
+from galaxy import datatypes, util
+from galaxy.datatypes import sniff
+from galaxy.util.json import to_json_string
+
+import logging
+log = logging.getLogger( __name__ )
+
+def persist_uploads( params ):
+    """
+    Turn any uploads in the submitted form to persisted files.
+    """
+    if 'files' in params:
+        new_files = []
+        temp_files = []
+        for upload_dataset in params['files']:
+            f = upload_dataset['file_data']
+            if isinstance( f, FieldStorage ):
+                assert not isinstance( f.file, StringIO.StringIO )
+                assert f.file.name != '<fdopen>'
+                local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
+                f.file.close()
+                upload_dataset['file_data'] = dict( filename = f.filename,
+                                                    local_filename = local_filename )
+            if upload_dataset['url_paste'].strip() != '':
+                upload_dataset['url_paste'] = datatypes.sniff.stream_to_file( StringIO.StringIO( upload_dataset['url_paste'] ), prefix="strio_url_paste_" )[0]
+            else:
+                upload_dataset['url_paste'] = None
+            new_files.append( upload_dataset )
+        params['files'] = new_files
+    return params
+
+def get_precreated_datasets( trans, params, data_obj ):
+    """
+    Get any precreated datasets (when using asynchronous uploads).
+    """
+    rval = []
+    async_datasets = []
+    if params.get( 'async_datasets', None ) not in ["None", "", None]:
+        async_datasets = params['async_datasets'].split(',')
+    user, roles = trans.get_user_and_roles()
+    for id in async_datasets:
+        try:
+            data = data_obj.get( int( id ) )
+        except:
+            log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id )
+            continue
+        if data_obj is trans.app.model.HistoryDatasetAssociation:
+            if user is None and trans.galaxy_session.current_history != data.history:
+                log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)' % ( data.id, trans.galaxy_session.id ) )
+            elif data.history.user != user:
+                log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)' % ( data.id, user.id ) )
+            else:
+                rval.append( data )
+        elif data_obj is trans.app.model.LibraryDatasetDatasetAssociation:
+            if not trans.app.security_agent.can_add_library_item( user, roles, data.library_dataset.folder ):
+                log.error( 'Got a precreated dataset (%s) but this user (%s) is not allowed to write to it' % ( data.id, user.id ) )
+            else:
+                rval.append( data )
+    return rval
+
+def get_precreated_dataset( precreated_datasets, name ):
+    """
+    Return a dataset matching a name from the list of precreated (via async
+    upload) datasets. If there's more than one upload with the exact same
+    name, we need to pop one (the first) so it isn't chosen next time.
+    """
+    names = [ d.name for d in precreated_datasets ]
+    if names.count( name ) > 0:
+        return precreated_datasets.pop( names.index( name ) )
+    else:
+        return None
+
+def cleanup_unused_precreated_datasets( precreated_datasets ):
+    for data in precreated_datasets:
+        log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( data.id ) )
+        data.state = data.states.ERROR
+        data.info = 'No file contents were available.'
+
+def new_history_upload( trans, uploaded_dataset ):
+    hda = trans.app.model.HistoryDatasetAssociation( name = uploaded_dataset.name,
+                                                     extension = uploaded_dataset.file_type,
+                                                     dbkey = uploaded_dataset.dbkey, 
+                                                     history = trans.history,
+                                                     create_dataset = True )
+    hda.state = hda.states.QUEUED
+    hda.flush()
+    trans.history.add_dataset( hda, genome_build = uploaded_dataset.dbkey )
+    permissions = trans.app.security_agent.history_get_default_permissions( trans.history )
+    trans.app.security_agent.set_all_dataset_permissions( hda.dataset, permissions )
+    return hda
+
+def new_library_upload( trans, uploaded_dataset, replace_dataset, folder,
+                        template, template_field_contents, roles, message ):
+    if replace_dataset:
+        ld = replace_dataset
+    else:
+        ld = trans.app.model.LibraryDataset( folder=folder, name=uploaded_dataset.name )
+        ld.flush()
+        trans.app.security_agent.copy_library_permissions( folder, ld )
+    ldda = trans.app.model.LibraryDatasetDatasetAssociation( name = uploaded_dataset.name,
+                                                             extension = uploaded_dataset.file_type,
+                                                             dbkey = uploaded_dataset.dbkey,
+                                                             library_dataset = ld,
+                                                             user = trans.user,
+                                                             create_dataset = True )
+    ldda.state = ldda.states.QUEUED
+    ldda.message = message
+    ldda.flush()
+    # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
+    trans.app.security_agent.copy_library_permissions( ld, ldda )
+    if replace_dataset:
+        # Copy the Dataset level permissions from replace_dataset to the new LibraryDatasetDatasetAssociation.dataset
+        trans.app.security_agent.copy_dataset_permissions( replace_dataset.library_dataset_dataset_association.dataset, ldda.dataset )
+    else:
+        # Copy the current user's DefaultUserPermissions to the new LibraryDatasetDatasetAssociation.dataset
+        trans.app.security_agent.set_all_dataset_permissions( ldda.dataset, trans.app.security_agent.user_get_default_permissions( trans.user ) )
+        folder.add_library_dataset( ld, genome_build=uploaded_dataset.dbkey )
+        folder.flush()
+    ld.library_dataset_dataset_association_id = ldda.id
+    ld.flush()
+    # Handle template included in the upload form, if any
+    if template and template_field_contents:
+        # Since information templates are inherited, the template fields can be displayed on the upload form.
+        # If the user has added field contents, we'll need to create a new form_values and info_association
+        # for the new library_dataset_dataset_association object.
+        # Create a new FormValues object, using the template we previously retrieved
+        form_values = trans.app.model.FormValues( template, template_field_contents )
+        form_values.flush()
+        # Create a new info_association between the current ldda and form_values
+        info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( ldda, template, form_values )
+        info_association.flush()
+    # If roles were selected upon upload, restrict access to the Dataset to those roles
+    if roles:
+        for role in roles:
+            dp = trans.app.model.DatasetPermissions( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action, ldda.dataset, role )
+            dp.flush()
+    return ldda
+
+def create_paramfile( trans, params, precreated_datasets, dataset_upload_inputs,
+                      replace_dataset=None, folder=None, template=None,
+                      template_field_contents=None, roles=None, message=None ):
+    """
+    Create the upload tool's JSON "param" file.
+    """
+    data_list = []
+    json_file = tempfile.mkstemp()
+    json_file_path = json_file[1]
+    json_file = os.fdopen( json_file[0], 'w' )
+    for dataset_upload_input in dataset_upload_inputs:
+        uploaded_datasets = dataset_upload_input.get_uploaded_datasets( trans, params )
+        for uploaded_dataset in uploaded_datasets:
+            data = get_precreated_dataset( precreated_datasets, uploaded_dataset.name )
+            if not data:
+                if folder:
+                    data = new_library_upload( trans, uploaded_dataset, replace_dataset, folder, template, template_field_contents, roles, message )
+                else:
+                    data = new_history_upload( trans, uploaded_dataset )
+            else:
+                data.extension = uploaded_dataset.file_type
+                data.dbkey = uploaded_dataset.dbkey
+                data.flush()
+                if folder:
+                    folder.genome_build = uploaded_dataset.dbkey
+                    folder.flush()
+                else:
+                    trans.history.genome_build = uploaded_dataset.dbkey
+            if uploaded_dataset.type == 'composite':
+                # we need to init metadata before the job is dispatched
+                data.init_meta()
+                for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
+                    setattr( data.metadata, meta_name, meta_value )
+                data.flush()
+                json = dict( file_type = uploaded_dataset.file_type,
+                             dataset_id = data.dataset.id,
+                             dbkey = uploaded_dataset.dbkey,
+                             type = uploaded_dataset.type,
+                             metadata = uploaded_dataset.metadata,
+                             primary_file = uploaded_dataset.primary_file,
+                             extra_files_path = data.extra_files_path,
+                             composite_file_paths = uploaded_dataset.composite_files,
+                             composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
+            else:
+                try:
+                    is_binary = uploaded_dataset.datatype.is_binary
+                except:
+                    is_binary = None
+                json = dict( file_type = uploaded_dataset.file_type,
+                             ext = uploaded_dataset.ext,
+                             name = uploaded_dataset.name,
+                             dataset_id = data.dataset.id,
+                             dbkey = uploaded_dataset.dbkey,
+                             type = uploaded_dataset.type,
+                             is_binary = is_binary,
+                             space_to_tab = uploaded_dataset.space_to_tab,
+                             path = uploaded_dataset.path )
+            json_file.write( to_json_string( json ) + '\n' )
+            data_list.append( data )
+    json_file.close()
+    return ( json_file_path, data_list )
+
+def create_job( trans, params, tool, json_file_path, data_list, folder=None ):
+    """
+    Create the upload job.
+    """
+    job = trans.app.model.Job()
+    job.session_id = trans.get_galaxy_session().id
+    if folder:
+        job.library_folder_id = folder.id
+    else:
+        job.history_id = trans.history.id
+    job.tool_id = tool.id
+    job.tool_version = tool.version
+    job.state = job.states.UPLOAD
+    job.flush()
+    log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
+    trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )
+
+    for name, value in tool.params_to_strings( params, trans.app ).iteritems():
+        job.add_parameter( name, value )
+    job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
+    if folder:
+        for i, dataset in enumerate( data_list ):
+            job.add_output_library_dataset( 'output%i' % i, dataset )
+    else:
+        for i, dataset in enumerate( data_list ):
+            job.add_output_dataset( 'output%i' % i, dataset )
+    job.state = job.states.NEW
+    trans.app.model.flush()
+
+    # Queue the job for execution
+    trans.app.job_queue.put( job.id, tool )
+    trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
+    return dict( [ ( 'output%i' % i, v ) for i, v in enumerate( data_list ) ] )
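For reference, create_paramfile() writes one JSON object per uploaded dataset, newline-delimited, and the upload tool script (tools/data_source/upload.py) reads those lines back when the job actually runs. A small sketch of such a consumer; the example line is fabricated from the non-composite keys used above, so the concrete values (dataset_id 42, the /tmp path, and so on) are placeholders, not anything the changeset produces:

    # Sketch: build one newline-delimited JSON record and read it back,
    # the way the upload job consumes its "paramfile".
    import json, tempfile

    line = json.dumps({
        "file_type": "bed",
        "ext": "bed",
        "name": "my upload",
        "dataset_id": 42,
        "dbkey": "hg18",
        "type": "file",
        "is_binary": False,
        "space_to_tab": False,
        "path": "/tmp/upload_file_data_xyz",
    })

    with tempfile.NamedTemporaryFile("w+", suffix=".json", delete=False) as fh:
        fh.write(line + "\n")
        paramfile = fh.name

    for raw in open(paramfile):
        dataset = json.loads(raw)
        print(dataset["dataset_id"], dataset["file_type"], dataset["path"])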
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/web/controllers/library.py
--- a/lib/galaxy/web/controllers/library.py	Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/web/controllers/library.py	Fri Sep 25 14:36:12 2009 -0400
@@ -726,17 +726,17 @@
                     template_id = 'None'
                     widgets = []
                 upload_option = params.get( 'upload_option', 'upload_file' )
-                created_ldda_ids = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
-                                                                                                 controller='library', 
-                                                                                                 library_id=library_id,
-                                                                                                 folder_id=folder_id,
-                                                                                                 template_id=template_id,
-                                                                                                 widgets=widgets,
-                                                                                                 replace_dataset=replace_dataset,
-                                                                                                 **kwd )
-                if created_ldda_ids:
-                    ldda_id_list = created_ldda_ids.split( ',' )
-                    total_added = len( ldda_id_list )
+                created_outputs = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
+                                                                                                controller='library', 
+                                                                                                library_id=library_id,
+                                                                                                folder_id=folder_id,
+                                                                                                template_id=template_id,
+                                                                                                widgets=widgets,
+                                                                                                replace_dataset=replace_dataset,
+                                                                                                **kwd )
+                if created_outputs:
+                    ldda_id_list = [ str( v.id ) for v in created_outputs.values() ]
+                    total_added = len( created_outputs.values() )
                     if replace_dataset:
                         msg = "Added %d dataset versions to the library dataset '%s' in the folder '%s'." % ( total_added, replace_dataset.name, folder.name )
                     else:
@@ -760,7 +760,7 @@
                                                                action='browse_library',
                                                                id=library_id,
                                                                default_action=default_action,
-                                                               created_ldda_ids=created_ldda_ids, 
+                                                               created_ldda_ids=",".join( ldda_id_list ), 
                                                                msg=util.sanitize_text( msg ), 
                                                                messagetype='done' ) )
                     
@@ -769,7 +769,7 @@
                     trans.response.send_redirect( web.url_for( controller='library',
                                                                action='browse_library',
                                                                id=library_id,
-                                                               created_ldda_ids=created_ldda_ids, 
+                                                               created_ldda_ids=",".join( ldda_id_list ), 
                                                                msg=util.sanitize_text( msg ), 
                                                                messagetype='error' ) )
         if not id or replace_dataset:
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/web/controllers/library_admin.py
--- a/lib/galaxy/web/controllers/library_admin.py	Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/web/controllers/library_admin.py	Fri Sep 25 14:36:12 2009 -0400
@@ -438,16 +438,16 @@
                 template_id = 'None'
                 widgets = []
             upload_option = params.get( 'upload_option', 'upload_file' )
-            created_ldda_ids = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
-                                                                                             controller='library_admin',
-                                                                                             library_id=library_id,
-                                                                                             folder_id=folder_id,
-                                                                                             template_id=template_id,
-                                                                                             widgets=widgets,
-                                                                                             replace_dataset=replace_dataset,
-                                                                                             **kwd )
-            if created_ldda_ids:
-                total_added = len( created_ldda_ids.split( ',' ) )
+            created_outputs = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
+                                                                                            controller='library_admin',
+                                                                                            library_id=library_id,
+                                                                                            folder_id=folder_id,
+                                                                                            template_id=template_id,
+                                                                                            widgets=widgets,
+                                                                                            replace_dataset=replace_dataset,
+                                                                                            **kwd )
+            if created_outputs:
+                total_added = len( created_outputs.values() )
                 if replace_dataset:
                     msg = "Added %d dataset versions to the library dataset '%s' in the folder '%s'." % ( total_added, replace_dataset.name, folder.name )
                 else:
@@ -464,7 +464,7 @@
             trans.response.send_redirect( web.url_for( controller='library_admin',
                                                        action='browse_library',
                                                        id=library_id,
-                                                       created_ldda_ids=created_ldda_ids,
+                                                       created_ldda_ids=",".join( [ str( v.id ) for v in created_outputs.values() ] ),
                                                        msg=util.sanitize_text( msg ),
                                                        messagetype=messagetype ) )
         elif not id or replace_dataset:
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/web/controllers/library_dataset.py
--- a/lib/galaxy/web/controllers/library_dataset.py	Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/web/controllers/library_dataset.py	Fri Sep 25 14:36:12 2009 -0400
@@ -3,196 +3,51 @@
 from galaxy import util, jobs
 from galaxy.datatypes import sniff
 from galaxy.security import RBACAgent
+from galaxy.util.json import to_json_string
+from galaxy.tools.actions import upload_common
 
 log = logging.getLogger( __name__ )
 
 class UploadLibraryDataset( BaseController ):
-    def remove_tempfile( self, filename ):
-        try:
-            os.unlink( filename )
-        except:
-            log.exception( 'failure removing temporary file: %s' % filename )
-    def add_file( self, trans, folder, file_obj, name, file_type, dbkey, roles,
-                  info='no info', space_to_tab=False, replace_dataset=None,
-                  template=None, template_field_contents=[], message=None ):
-        data_type = None
-        line_count = 0
-        temp_name, is_multi_byte = sniff.stream_to_file( file_obj )
-        # See if we have an empty file
-        if not os.path.getsize( temp_name ) > 0:
-            raise BadFileException( "you attempted to upload an empty file." )
-        if is_multi_byte:
-            ext = sniff.guess_ext( temp_name, is_multi_byte=True )
-        else:
-            if not data_type:
-                # See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress on the fly.
-                is_gzipped, is_valid = self.check_gzip( temp_name )
-                if is_gzipped and not is_valid:
-                    raise BadFileException( "you attempted to upload an inappropriate file." )
-                elif is_gzipped and is_valid:
-                    # We need to uncompress the temp_name file
-                    CHUNK_SIZE = 2**20 # 1Mb   
-                    fd, uncompressed = tempfile.mkstemp()   
-                    gzipped_file = gzip.GzipFile( temp_name )
-                    while 1:
-                        try:
-                            chunk = gzipped_file.read( CHUNK_SIZE )
-                        except IOError:
-                            os.close( fd )
-                            os.remove( uncompressed )
-                            raise BadFileException( 'problem uncompressing gzipped data.' )
-                        if not chunk:
-                            break
-                        os.write( fd, chunk )
-                    os.close( fd )
-                    gzipped_file.close()
-                    # Replace the gzipped file with the decompressed file
-                    shutil.move( uncompressed, temp_name )
-                    name = name.rstrip( '.gz' )
-                    data_type = 'gzip'
-            ext = ''
-            if not data_type:
-                # See if we have a zip archive
-                is_zipped, is_valid, test_ext = self.check_zip( temp_name )
-                if is_zipped and not is_valid:
-                    raise BadFileException( "you attempted to upload an inappropriate file." )
-                elif is_zipped and is_valid:
-                    # Currently, we force specific tools to handle this case.  We also require the user
-                    # to manually set the incoming file_type
-                    if ( test_ext == 'ab1' or test_ext == 'scf' ) and file_type != 'binseq.zip':
-                        raise BadFileException( "Invalid 'File Format' for archive consisting of binary files - use 'Binseq.zip'." )
-                    elif test_ext == 'txt' and file_type != 'txtseq.zip':
-                        raise BadFileException( "Invalid 'File Format' for archive consisting of text files - use 'Txtseq.zip'." )
-                    if not ( file_type == 'binseq.zip' or file_type == 'txtseq.zip' ):
-                        raise BadFileException( "you must manually set the 'File Format' to either 'Binseq.zip' or 'Txtseq.zip' when uploading zip files." )
-                    data_type = 'zip'
-                    ext = file_type
-            if not data_type:
-                if self.check_binary( temp_name ):
-                    try:
-                        ext = name.split( "." )[1].strip().lower()
-                    except:
-                        ext = ''
-                    try:
-                        is_pdf = open( temp_name ).read( len( '%PDF' ) ) == '%PDF'
-                    except:
-                        is_pdf = False #file failed to open or contents are smaller than pdf header
-                    if is_pdf:
-                        file_type = 'pdf' #allow the upload of PDFs to library via the admin interface.
-                    else:
-                        if not( ext == 'ab1' or ext == 'scf' ):
-                            raise BadFileException( "you attempted to upload an inappropriate file." )
-                        if ext == 'ab1' and file_type != 'ab1':
-                            raise BadFileException( "you must manually set the 'File Format' to 'Ab1' when uploading ab1 files." )
-                        elif ext == 'scf' and file_type != 'scf':
-                            raise BadFileException( "you must manually set the 'File Format' to 'Scf' when uploading scf files." )
-                    data_type = 'binary'
-            if not data_type:
-                # We must have a text file
-                if self.check_html( temp_name ):
-                    raise BadFileException( "you attempted to upload an inappropriate file." )
-            if data_type != 'binary' and data_type != 'zip':
-                if space_to_tab:
-                    line_count = sniff.convert_newlines_sep2tabs( temp_name )
-                elif os.stat( temp_name ).st_size < 262144000: # 250MB
-                    line_count = sniff.convert_newlines( temp_name )
-                else:
-                    if sniff.check_newlines( temp_name ):
-                        line_count = sniff.convert_newlines( temp_name )
-                    else:
-                        line_count = None
-                if file_type == 'auto':
-                    ext = sniff.guess_ext( temp_name, sniff_order=trans.app.datatypes_registry.sniff_order )    
-                else:
-                    ext = file_type
-                data_type = ext
-        if info is None:
-            info = 'uploaded %s file' % data_type
-        if file_type == 'auto':
-            data_type = sniff.guess_ext( temp_name, sniff_order=trans.app.datatypes_registry.sniff_order )    
-        else:
-            data_type = file_type
-        if replace_dataset:
-            # The replace_dataset param ( when not None ) refers to a LibraryDataset that is being replaced with a new version.
-            library_dataset = replace_dataset
-        else:
-            # If replace_dataset is None, the Library level permissions will be taken from the folder and applied to the new 
-            # LibraryDataset, and the current user's DefaultUserPermissions will be applied to the associated Dataset.
-            library_dataset = trans.app.model.LibraryDataset( folder=folder, name=name, info=info )
-            library_dataset.flush()
-            trans.app.security_agent.copy_library_permissions( folder, library_dataset )
-        ldda = trans.app.model.LibraryDatasetDatasetAssociation( name=name, 
-                                                                 info=info, 
-                                                                 extension=data_type, 
-                                                                 dbkey=dbkey, 
-                                                                 library_dataset=library_dataset,
-                                                                 user=trans.get_user(),
-                                                                 create_dataset=True )
-        ldda.message = message
-        ldda.flush()
-        # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
-        trans.app.security_agent.copy_library_permissions( library_dataset, ldda )
-        if replace_dataset:
-            # Copy the Dataset level permissions from replace_dataset to the new LibraryDatasetDatasetAssociation.dataset
-            trans.app.security_agent.copy_dataset_permissions( replace_dataset.library_dataset_dataset_association.dataset, ldda.dataset )
-        else:
-            # Copy the current user's DefaultUserPermissions to the new LibraryDatasetDatasetAssociation.dataset
-            trans.app.security_agent.set_all_dataset_permissions( ldda.dataset, trans.app.security_agent.user_get_default_permissions( trans.get_user() ) )
-            folder.add_library_dataset( library_dataset, genome_build=dbkey )
-            folder.flush()
-        library_dataset.library_dataset_dataset_association_id = ldda.id
-        library_dataset.flush()
-        # Handle template included in the upload form, if any
-        if template and template_field_contents:
-            # Since information templates are inherited, the template fields can be displayed on the upload form.
-            # If the user has added field contents, we'll need to create a new form_values and info_association
-            # for the new library_dataset_dataset_association object.
-            # Create a new FormValues object, using the template we previously retrieved
-            form_values = trans.app.model.FormValues( template, template_field_contents )
-            form_values.flush()
-            # Create a new info_association between the current ldda and form_values
-            info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( ldda, template, form_values )
-            info_association.flush()
-        # If roles were selected upon upload, restrict access to the Dataset to those roles
-        if roles:
-            for role in roles:
-                dp = trans.app.model.DatasetPermissions( RBACAgent.permitted_actions.DATASET_ACCESS.action, ldda.dataset, role )
-                dp.flush()
-        shutil.move( temp_name, ldda.dataset.file_name )
-        ldda.state = ldda.states.OK
-        ldda.init_meta()
-        if line_count:
-            try:
-                if is_multi_byte:
-                    ldda.set_multi_byte_peek( line_count=line_count )
-                else:
-                    ldda.set_peek( line_count=line_count )
-            except:
-                if is_multi_byte:
-                    ldda.set_multi_byte_peek()
-                else:
-                    ldda.set_peek()
-        else:
-            if is_multi_byte:
-                ldda.set_multi_byte_peek()
-            else:
-                ldda.set_peek()
-        ldda.set_size()
-        if ldda.missing_meta():
-            ldda.datatype.set_meta( ldda )
-        ldda.flush()
-        return ldda
+    @web.json
+    def library_item_updates( self, trans, ids=None, states=None ):
+        # Avoid caching
+        trans.response.headers['Pragma'] = 'no-cache'
+        trans.response.headers['Expires'] = '0'
+        # Create new HTML for any that have changed
+        rval = {}
+        if ids is not None and states is not None:
+            ids = map( int, ids.split( "," ) )
+            states = states.split( "," )
+            for id, state in zip( ids, states ):
+                data = self.app.model.LibraryDatasetDatasetAssociation.get( id )
+                if data.state != state:
+                    job_ldda = data
+                    while job_ldda.copied_from_library_dataset_dataset_association:
+                        job_ldda = job_ldda.copied_from_library_dataset_dataset_association
+                    force_history_refresh = False
+                    rval[id] = {
+                        "state": data.state,
+                        "html": unicode( trans.fill_template( "library/library_item_info.mako", ldda=data ), 'utf-8' )
+                        #"force_history_refresh": force_history_refresh
+                    }
+        return rval
     @web.expose
     def upload_dataset( self, trans, controller, library_id, folder_id, replace_dataset=None, **kwd ):
-        # This method is called from both the admin and library controllers.  The replace_dataset param ( when
-        # not None ) refers to a LibraryDataset that is being replaced with a new version.
-        params = util.Params( kwd )
+        # Set up the traditional tool state/params
+        tool_id = 'upload1'
+        tool = trans.app.toolbox.tools_by_id[ tool_id ]
+        state = tool.new_state( trans )
+        errors = tool.update_state( trans, tool.inputs_by_page[0], state.inputs, kwd, changed_dependencies={} )
+        tool_params = state.inputs
+        dataset_upload_inputs = []
+        for input_name, input in tool.inputs.iteritems():
+            if input.type == "upload_dataset":
+                dataset_upload_inputs.append( input )
+        # Library-specific params
+        params = util.Params( kwd ) # is this filetoolparam safe?
         msg = util.restore_text( params.get( 'msg', ''  ) )
         messagetype = params.get( 'messagetype', 'done' )
-        dbkey = params.get( 'dbkey', '?' )
-        file_type = params.get( 'file_type', 'auto' )
-        data_file = params.get( 'files_0|file_data', '' )
-        url_paste = params.get( 'files_0|url_paste', '' )
         server_dir = util.restore_text( params.get( 'server_dir', '' ) )
         if replace_dataset not in [ None, 'None' ]:
             replace_id = replace_dataset.id
@@ -217,24 +72,43 @@
                     template_field_contents.append( field_value )
         else:
             template = None
-        if upload_option == 'upload_file' and data_file == '' and url_paste == '':
-                msg = 'Select a file, enter a URL or enter text'
-                err_redirect = True
-        elif upload_option == 'upload_directory':
+        if upload_option == 'upload_directory':
             if server_dir in [ None, 'None', '' ]:
                 err_redirect = True
-            # See if our request is from the Admin view or the Libraries view
-            if trans.request.browser_url.find( 'admin' ) >= 0:
+            if controller == 'library_admin':
                 import_dir = trans.app.config.library_import_dir
                 import_dir_desc = 'library_import_dir'
+                full_dir = os.path.join( import_dir, server_dir )
             else:
                 import_dir = trans.app.config.user_library_import_dir
                 import_dir_desc = 'user_library_import_dir'
+                if server_dir == trans.user.email:
+                    full_dir = os.path.join( import_dir, server_dir )
+                else:
+                    full_dir = os.path.join( import_dir, trans.user.email, server_dir )
             if import_dir:
                 msg = 'Select a directory'
             else:
                 msg = '"%s" is not defined in the Galaxy configuration file' % import_dir_desc
+        roles = []
+        for role_id in util.listify( params.get( 'roles', [] ) ):
+            roles.append( trans.app.model.Role.get( role_id ) )
+        # Proceed with (mostly) regular upload processing
+        precreated_datasets = upload_common.get_precreated_datasets( trans, tool_params, trans.app.model.HistoryDatasetAssociation )
+        if upload_option == 'upload_file':
+            tool_params = upload_common.persist_uploads( tool_params )
+            json_file_path, data_list = upload_common.create_paramfile( trans, tool_params, precreated_datasets, dataset_upload_inputs, replace_dataset, folder, template, template_field_contents, roles, message )
+        elif upload_option == 'upload_directory':
+            json_file_path, data_list = self.create_server_dir_paramfile( trans, params, full_dir, import_dir_desc, folder, template, template_field_contents, roles, message, err_redirect, msg )
+        upload_common.cleanup_unused_precreated_datasets( precreated_datasets )
+        if upload_option == 'upload_file' and not data_list:
+            msg = 'Select a file, enter a URL or enter text'
+            err_redirect = True
         if err_redirect:
+            try:
+                os.remove( json_file_path )
+            except:
+                pass
             trans.response.send_redirect( web.url_for( controller=controller,
                                                        action='library_dataset_dataset_association',
                                                        library_id=library_id,
@@ -243,226 +117,49 @@
                                                        upload_option=upload_option,
                                                        msg=util.sanitize_text( msg ),
                                                        messagetype='error' ) )
-        space_to_tab = params.get( 'files_0|space_to_tab', False )
-        if space_to_tab and space_to_tab not in [ "None", None ]:
-            space_to_tab = True
-        roles = []
-        for role_id in util.listify( params.get( 'roles', [] ) ):
-            roles.append( trans.app.model.Role.get( role_id ) )
+        return upload_common.create_job( trans, tool_params, tool, json_file_path, data_list, folder=folder )
+    def create_server_dir_paramfile( self, trans, params, full_dir, import_dir_desc, folder, template,
+                                     template_field_contents, roles, message, err_redirect, msg ):
+        """
+        Create JSON param file for the upload tool when using the server_dir upload.
+        """
+        files = []
+        try:
+            for entry in os.listdir( full_dir ):
+                # Only import regular files
+                if os.path.isfile( os.path.join( full_dir, entry ) ):
+                    files.append( entry )
+        except Exception, e:
+            msg = "Unable to get file list for configured %s, error: %s" % ( import_dir_desc, str( e ) )
+            err_redirect = True
+            return ( None, None )
+        if not files:
+            msg = "The directory '%s' contains no valid files" % full_dir
+            err_redirect = True
+            return ( None, None )
         data_list = []
-        created_ldda_ids = ''
-        if 'filename' in dir( data_file ):
-            file_name = data_file.filename
-            file_name = file_name.split( '\\' )[-1]
-            file_name = file_name.split( '/' )[-1]
-            try:
-                created_ldda = self.add_file( trans,
-                                              folder,
-                                              data_file.file,
-                                              file_name,
-                                              file_type,
-                                              dbkey,
-                                              roles,
-                                              info="uploaded file",
-                                              space_to_tab=space_to_tab,
-                                              replace_dataset=replace_dataset,
-                                              template=template,
-                                              template_field_contents=template_field_contents,
-                                              message=message )
-                created_ldda_ids = str( created_ldda.id )
-            except Exception, e:
-                log.exception( 'exception in upload_dataset using file_name %s: %s' % ( str( file_name ), str( e ) ) )
-                return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
-        elif url_paste not in [ None, "" ]:
-            if url_paste.lower().find( 'http://' ) >= 0 or url_paste.lower().find( 'ftp://' ) >= 0:
-                url_paste = url_paste.replace( '\r', '' ).split( '\n' )
-                # If we are setting the name from the line, it needs to be the line that creates that dataset
-                name_set_from_line = False
-                for line in url_paste:
-                    line = line.rstrip( '\r\n' )
-                    if line:
-                        if not line or name_set_from_line:
-                            name_set_from_line = True
-                        try:
-                            created_ldda = self.add_file( trans,
-                                                          folder,
-                                                          urllib.urlopen( line ),
-                                                          line,
-                                                          file_type,
-                                                          dbkey,
-                                                          roles,
-                                                          info="uploaded url",
-                                                          space_to_tab=space_to_tab,
-                                                          replace_dataset=replace_dataset,
-                                                          template=template,
-                                                          template_field_contents=template_field_contents,
-                                                          message=message )
-                            created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
-                        except Exception, e:
-                            log.exception( 'exception in upload_dataset using url_paste %s' % str( e ) )
-                            return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
-            else:
-                is_valid = False
-                for line in url_paste:
-                    line = line.rstrip( '\r\n' )
-                    if line:
-                        is_valid = True
-                        break
-                if is_valid:
-                    try:
-                        created_ldda = self.add_file( trans,
-                                                      folder,
-                                                      StringIO.StringIO( url_paste ),
-                                                      'Pasted Entry',
-                                                      file_type,
-                                                      dbkey,
-                                                      roles,
-                                                      info="pasted entry",
-                                                      space_to_tab=space_to_tab,
-                                                      replace_dataset=replace_dataset,
-                                                      template=template,
-                                                      template_field_contents=template_field_contents,
-                                                      message=message )
-                        created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
-                    except Exception, e:
-                        log.exception( 'exception in add_file using StringIO.StringIO( url_paste ) %s' % str( e ) )
-                        return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
-        elif server_dir not in [ None, "", "None" ]:
-            # See if our request is from the Admin view or the Libraries view
-            if trans.request.browser_url.find( 'admin' ) >= 0:
-                import_dir = trans.app.config.library_import_dir
-                import_dir_desc = 'library_import_dir'
-                full_dir = os.path.join( import_dir, server_dir )
-            else:
-                imrport_dir = trans.app.config.user_library_import_dir
-                import_dir_desc = 'user_library_import_dir'
-                # From the Libraries view, users are restricted to the directory named the same as
-                # their email within the configured user_library_import_dir.  If this directory contains
-                # sub-directories, server_dir will be the name of the selected sub-directory.  Otherwise
-                # server_dir will be the user's email address.
-                if server_dir == trans.user.email:
-                    full_dir = os.path.join( import_dir, server_dir )
-                else:
-                    full_dir = os.path.join( import_dir, trans.user.email, server_dir )
-            files = []
-            try:
-                for entry in os.listdir( full_dir ):
-                    # Only import regular files
-                    if os.path.isfile( os.path.join( full_dir, entry ) ):
-                        files.append( entry )
-            except Exception, e:
-                msg = "Unable to get file list for configured %s, error: %s" % ( import_dir_desc, str( e ) )
-                return self.upload_empty( trans, controller, library_id, folder_id, "Error:", msg )
-            if not files:
-                msg = "The directory '%s' contains no valid files" % full_dir
-                return self.upload_empty( trans, controller, library_id, folder_id, "Error:", msg )
-            for file in files:
-                full_file = os.path.join( full_dir, file )
-                if not os.path.isfile( full_file ):
-                    continue
-                try:
-                    created_ldda = self.add_file( trans,
-                                                  folder,
-                                                  open( full_file, 'rb' ),
-                                                  file,
-                                                  file_type,
-                                                  dbkey,
-                                                  roles,
-                                                  info="imported file",
-                                                  space_to_tab=space_to_tab,
-                                                  replace_dataset=replace_dataset,
-                                                  template=template,
-                                                  template_field_contents=template_field_contents,
-                                                  message=message )
-                    created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
-                except Exception, e:
-                    log.exception( 'exception in add_file using server_dir %s' % str( e ) )
-                    return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
-        if created_ldda_ids:
-            created_ldda_ids = created_ldda_ids.lstrip( ',' )
-            return created_ldda_ids
-        else:
-            return ''
-    def check_gzip( self, temp_name ):
-        temp = open( temp_name, "U" )
-        magic_check = temp.read( 2 )
-        temp.close()
-        if magic_check != util.gzip_magic:
-            return ( False, False )
-        CHUNK_SIZE = 2**15 # 32Kb
-        gzipped_file = gzip.GzipFile( temp_name )
-        chunk = gzipped_file.read( CHUNK_SIZE )
-        gzipped_file.close()
-        if self.check_html( temp_name, chunk=chunk ) or self.check_binary( temp_name, chunk=chunk ):
-            return( True, False )
-        return ( True, True )
-    def check_zip( self, temp_name ):
-        if not zipfile.is_zipfile( temp_name ):
-            return ( False, False, None )
-        zip_file = zipfile.ZipFile( temp_name, "r" )
-        # Make sure the archive consists of valid files.  The current rules are:
-        # 1. Archives can only include .ab1, .scf or .txt files
-        # 2. All file file_types within an archive must be the same
-        name = zip_file.namelist()[0]
-        test_ext = name.split( "." )[1].strip().lower()
-        if not ( test_ext == 'scf' or test_ext == 'ab1' or test_ext == 'txt' ):
-            return ( True, False, test_ext )
-        for name in zip_file.namelist():
-            ext = name.split( "." )[1].strip().lower()
-            if ext != test_ext:
-                return ( True, False, test_ext )
-        return ( True, True, test_ext )
-    def check_html( self, temp_name, chunk=None ):
-        if chunk is None:
-            temp = open(temp_name, "U")
-        else:
-            temp = chunk
-        regexp1 = re.compile( "<A\s+[^>]*HREF[^>]+>", re.I )
-        regexp2 = re.compile( "<IFRAME[^>]*>", re.I )
-        regexp3 = re.compile( "<FRAMESET[^>]*>", re.I )
-        regexp4 = re.compile( "<META[^>]*>", re.I )
-        lineno = 0
-        for line in temp:
-            lineno += 1
-            matches = regexp1.search( line ) or regexp2.search( line ) or regexp3.search( line ) or regexp4.search( line )
-            if matches:
-                if chunk is None:
-                    temp.close()
-                return True
-            if lineno > 100:
-                break
-        if chunk is None:
-            temp.close()
-        return False
-    def check_binary( self, temp_name, chunk=None ):
-        if chunk is None:
-            temp = open( temp_name, "U" )
-        else:
-            temp = chunk
-        lineno = 0
-        for line in temp:
-            lineno += 1
-            line = line.strip()
-            if line:
-                if util.is_multi_byte( line ):
-                    return False
-                for char in line:
-                    if ord( char ) > 128:
-                        if chunk is None:
-                            temp.close()
-                        return True
-            if lineno > 10:
-                break
-        if chunk is None:
-            temp.close()
-        return False
-    def upload_empty( self, trans, controller, library_id, folder_id, err_code, err_msg ):
-        msg = err_code + err_msg
-        return trans.response.send_redirect( web.url_for( controller=controller,
-                                                          action='library_dataset_dataset_association',
-                                                          library_id=library_id,
-                                                          folder_id=folder_id,
-                                                          msg=util.sanitize_text( msg ),
-                                                          messagetype='error' ) )
-class BadFileException( Exception ):
-    pass
+        json_file = tempfile.mkstemp()
+        json_file_path = json_file[1]
+        json_file = os.fdopen( json_file[0], 'w' )
+        for file in files:
+            full_file = os.path.join( full_dir, file )
+            if not os.path.isfile( full_file ):
+                continue
+            uploaded_dataset = util.bunch.Bunch()
+            uploaded_dataset.name = file
+            uploaded_dataset.file_type = params.file_type
+            uploaded_dataset.dbkey = params.dbkey
+            data = upload_common.new_library_upload( trans, uploaded_dataset, None, folder, template, template_field_contents, roles, message )
+            json = dict( file_type = uploaded_dataset.file_type,
+                         ext = None,
+                         name = uploaded_dataset.name,
+                         dataset_id = data.dataset.id,
+                         dbkey = uploaded_dataset.dbkey,
+                         type = 'server_dir',
+                         is_binary = None,
+                         space_to_tab = params.space_to_tab,
+                         path = full_file )
+            json_file.write( to_json_string( json ) + '\n' )
+            data_list.append( data )
+        json_file.close()
+        return ( json_file_path, data_list )
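For reference, create_server_dir_paramfile writes one JSON dict per imported file and hands the resulting temp file to the upload tool via upload_common.create_job. A minimal sketch of the line format (not part of the changeset), with illustrative values and simplejson standing in for to_json_string:

    import simplejson  # stands in for the to_json_string helper used above

    def example_paramfile_line( name, path, file_type='auto', dbkey='?', space_to_tab=False ):
        """Shape of one line in the upload tool's JSON param file (values illustrative)."""
        return simplejson.dumps( dict( file_type = file_type,
                                       ext = None,
                                       name = name,
                                       dataset_id = 42,        # id of the pre-created library dataset (illustrative)
                                       dbkey = dbkey,
                                       type = 'server_dir',
                                       is_binary = None,
                                       space_to_tab = space_to_tab,
                                       path = path ) ) + '\n'
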
diff -r c4c409bda49b -r 3559f7377b9c static/june_2007_style/blue/library.css
--- a/static/june_2007_style/blue/library.css	Fri Sep 25 14:06:43 2009 -0400
+++ b/static/june_2007_style/blue/library.css	Fri Sep 25 14:36:12 2009 -0400
@@ -1,7 +1,7 @@
 .libraryRow{background-color:#ebd9b2;}
 .datasetHighlighted{background-color:#C1C9E5;}
 .libraryItemDeleted-True{font-style:italic;}
-div.historyItemBody{padding:4px 4px 2px 4px;}
+div.libraryItemBody{padding:4px 4px 2px 4px;}
 li.folderRow,li.datasetRow{border-top:solid 1px #ddd;}
 li.folderRow:hover,li.datasetRow:hover{background-color:#C1C9E5;}
 img.expanderIcon{padding-right:4px;}
@@ -15,3 +15,6 @@
 span.expandLink{width:16px;height:16px;display:inline-block;vertical-align:middle;background:url(../images/silk/resultset_next.png);}
 .folderRow.expanded span.expandLink{background:url(../images/silk/resultset_bottom.png);}
 .folderRow span.rowIcon{width:16px;height:16px;display:inline-block;vertical-align:middle;background:url(../images/silk/folder.png);}
+.libraryItem-error{margin-right:2px;padding:0 2px 0 2px;border:1px solid #AA6666;background:#FFCCCC;}
+.libraryItem-queued{margin-right:2px;padding:0 2px 0 2px;border:1px solid #888888;background:#EEEEEE;}
+.libraryItem-running{margin-right:2px;padding:0 2px 0 2px;border:1px solid #AAAA66;background:#FFFFCC;}
diff -r c4c409bda49b -r 3559f7377b9c static/june_2007_style/library.css.tmpl
--- a/static/june_2007_style/library.css.tmpl	Fri Sep 25 14:06:43 2009 -0400
+++ b/static/june_2007_style/library.css.tmpl	Fri Sep 25 14:36:12 2009 -0400
@@ -10,7 +10,7 @@
     font-style: italic;
 }
 
-div.historyItemBody {
+div.libraryItemBody {
     padding: 4px 4px 2px 4px;
 }
 
@@ -88,3 +88,24 @@
     background: url(../images/silk/folder.png);
 }
 
+.libraryItem-error {
+    margin-right: 2px;
+    padding: 0 2px 0 2px;
+    border: 1px solid $history_error_border;
+    background: $history_error_bg;
+}
+
+.libraryItem-queued {
+    margin-right: 2px;
+    padding: 0 2px 0 2px;
+    border: 1px solid $history_queued_border;
+    background: $history_queued_bg;
+}
+
+.libraryItem-running {
+    margin-right: 2px;
+    padding: 0 2px 0 2px;
+    border: 1px solid $history_running_border;
+    background: $history_running_bg;
+}
+
diff -r c4c409bda49b -r 3559f7377b9c templates/admin/library/browse_library.mako
--- a/templates/admin/library/browse_library.mako	Fri Sep 25 14:06:43 2009 -0400
+++ b/templates/admin/library/browse_library.mako	Fri Sep 25 14:36:12 2009 -0400
@@ -1,5 +1,6 @@
 <%inherit file="/base.mako"/>
 <%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/library/library_item_info.mako" import="render_library_item_info" />
 <%
     from time import strftime
     from galaxy import util
@@ -10,6 +11,8 @@
     <link href="${h.url_for('/static/style/base.css')}" rel="stylesheet" type="text/css" />
     <link href="${h.url_for('/static/style/library.css')}" rel="stylesheet" type="text/css" />
 </%def>
+
+<% tracked_datasets = {} %>
 
 <script type="text/javascript">
     $( document ).ready( function () {
@@ -35,29 +38,6 @@
                 $(this).children().find("img.rowIcon").each( function() { this.src = icon_open; });
             }
         });
-        // Hide all dataset bodies
-        $("div.historyItemBody").hide();
-        // Handle the dataset body hide/show link.
-        $("div.historyItemWrapper").each( function() {
-            var id = this.id;
-            var li = $(this).parent();
-            var body = $(this).children( "div.historyItemBody" );
-            var peek = body.find( "pre.peek" )
-            $(this).children( ".historyItemTitleBar" ).find( ".historyItemTitle" ).wrap( "<a href='#'></a>" ).click( function() {
-                if ( body.is(":visible") ) {
-                    if ( $.browser.mozilla ) { peek.css( "overflow", "hidden" ) }
-                    body.slideUp( "fast" );
-                    li.removeClass( "datasetHighlighted" );
-                } 
-                else {
-                    body.slideDown( "fast", function() { 
-                        if ( $.browser.mozilla ) { peek.css( "overflow", "auto" ); } 
-                    });
-                    li.addClass( "datasetHighlighted" );
-                }
-                return false;
-            });
-        });
     });
     function checkForm() {
         if ( $("select#action_on_datasets_select option:selected").text() == "delete" ) {
@@ -68,6 +48,54 @@
             }
         }
     }
+    // Looks for changes in dataset state using an async request. Keeps
+    // calling itself (via setTimeout) until all datasets are in a terminal
+    // state.
+    var updater = function ( tracked_datasets ) {
+        // Check if there are any items left to track
+        var empty = true;
+        for ( i in tracked_datasets ) {
+            empty = false;
+            break;
+        }
+        if ( ! empty ) {
+            setTimeout( function() { updater_callback( tracked_datasets ) }, 3000 );
+        }
+    };
+    var updater_callback = function ( tracked_datasets ) {
+        // Build request data
+        var ids = []
+        var states = []
+        $.each( tracked_datasets, function ( id, state ) {
+            ids.push( id );
+            states.push( state );
+        });
+        // Make ajax call
+        $.ajax( {
+            type: "POST",
+            url: "${h.url_for( controller='library_dataset', action='library_item_updates' )}",
+            dataType: "json",
+            data: { ids: ids.join( "," ), states: states.join( "," ) },
+            success : function ( data ) {
+                $.each( data, function( id, val ) {
+                    // Replace HTML
+                    var cell = $("#libraryItem-" + id).find("#libraryItemInfo");
+                    cell.html( val.html );
+                    // If new state was terminal, stop tracking
+                    if (( val.state == "ok") || ( val.state == "error") || ( val.state == "empty") || ( val.state == "deleted" ) || ( val.state == "discarded" )) {
+                        delete tracked_datasets[ parseInt(id) ];
+                    } else {
+                        tracked_datasets[ parseInt(id) ] = val.state;
+                    }
+                });
+                updater( tracked_datasets ); 
+            },
+            error: function() {
+                // Just retry, like the old method, should try to be smarter
+                updater( tracked_datasets );
+            }
+        });
+    };
 </script>
 
 <%def name="render_dataset( ldda, library_dataset, selected, library, folder, deleted, show_deleted )">
@@ -84,11 +112,13 @@
             current_version = True
         else:
             current_version = False
+        if current_version and ldda.state not in ( 'ok', 'error', 'empty', 'deleted', 'discarded' ):
+            tracked_datasets[ldda.id] = ldda.state
     %>
     %if current_version:
-        <div class="historyItemWrapper historyItem historyItem-${ldda.state}" id="libraryItem-${ldda.id}">
+        <div class="libraryItemWrapper libraryItem" id="libraryItem-${ldda.id}">
             ## Header row for library items (name, state, action buttons)
-            <div class="historyItemTitleBar"> 
+            <div class="libraryItemTitleBar"> 
                 <table cellspacing="0" cellpadding="0" border="0" width="100%">
                     <tr>
                         <td width="*">
@@ -119,7 +149,7 @@
                                 </div>
                             %endif
                         </td>
-                        <td width="300">${ldda.message}</td>
+                        <td width="300" id="libraryItemInfo">${render_library_item_info( ldda )}</td>
                         <td width="150">${uploaded_by}</td>
                         <td width="60">${ldda.create_time.strftime( "%Y-%m-%d" )}</td>
                     </tr>
@@ -287,3 +317,11 @@
         </p>
     %endif
 </form>
+
+%if tracked_datasets:
+    <script type="text/javascript">
+        // Updater
+        updater({${ ",".join( [ '"%s" : "%s"' % ( k, v ) for k, v in tracked_datasets.iteritems() ] ) }});
+    </script>
+    <!-- running: do not change this comment, used by TwillTestCase.library_wait -->
+%endif
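The updater script above polls the new library_item_updates action every three seconds and stops tracking a dataset once it reaches a terminal state; the server only returns entries whose state changed. A rough Python equivalent of that client loop (a sketch only, assuming the /library_dataset/library_item_updates URL and an already-authenticated urllib2 opener, and ignoring the returned HTML):

    import time, urllib, urllib2, simplejson

    TERMINAL_STATES = ( 'ok', 'error', 'empty', 'deleted', 'discarded' )

    def poll_library_item_updates( opener, base_url, tracked ):
        """tracked maps ldda id -> last known state; returns when all are terminal."""
        while tracked:
            data = urllib.urlencode( { 'ids': ','.join( map( str, tracked.keys() ) ),
                                       'states': ','.join( tracked.values() ) } )
            response = opener.open( base_url + '/library_dataset/library_item_updates', data )
            for id, val in simplejson.loads( response.read() ).items():
                if val['state'] in TERMINAL_STATES:
                    del tracked[ int( id ) ]           # finished; stop tracking
                else:
                    tracked[ int( id ) ] = val['state']
            if tracked:
                time.sleep( 3 )
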
diff -r c4c409bda49b -r 3559f7377b9c templates/admin/library/new_library.mako
--- a/templates/admin/library/new_library.mako	Fri Sep 25 14:06:43 2009 -0400
+++ b/templates/admin/library/new_library.mako	Fri Sep 25 14:36:12 2009 -0400
@@ -29,7 +29,9 @@
                 </div>
               <div style="clear: both"></div>
             </div>
-            <input type="submit" name="create_library_button" value="Create"/>
+            <div class="form-row">
+                <input type="submit" name="create_library_button" value="Create"/>
+            </div>
         </form>
     </div>
 </div>
diff -r c4c409bda49b -r 3559f7377b9c templates/library/browse_library.mako
--- a/templates/library/browse_library.mako	Fri Sep 25 14:06:43 2009 -0400
+++ b/templates/library/browse_library.mako	Fri Sep 25 14:36:12 2009 -0400
@@ -1,5 +1,6 @@
 <%inherit file="/base.mako"/>
 <%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/library/library_item_info.mako" import="render_library_item_info" />
 <% 
     from galaxy import util
     from galaxy.web.controllers.library import active_folders
@@ -12,6 +13,8 @@
     <link href="${h.url_for('/static/style/base.css')}" rel="stylesheet" type="text/css" />
     <link href="${h.url_for('/static/style/library.css')}" rel="stylesheet" type="text/css" />
 </%def>
+
+<% tracked_datasets = {} %>
 
 <%
 class RowCounter( object ):
@@ -77,6 +80,54 @@
            });
         });
     });
+    // Looks for changes in dataset state using an async request. Keeps
+    // calling itself (via setTimeout) until all datasets are in a terminal
+    // state.
+    var updater = function ( tracked_datasets ) {
+        // Check if there are any items left to track
+        var empty = true;
+        for ( i in tracked_datasets ) {
+            empty = false;
+            break;
+        }
+        if ( ! empty ) {
+            setTimeout( function() { updater_callback( tracked_datasets ) }, 3000 );
+        }
+    };
+    var updater_callback = function ( tracked_datasets ) {
+        // Build request data
+        var ids = []
+        var states = []
+        $.each( tracked_datasets, function ( id, state ) {
+            ids.push( id );
+            states.push( state );
+        });
+        // Make ajax call
+        $.ajax( {
+            type: "POST",
+            url: "${h.url_for( controller='library_dataset', action='library_item_updates' )}",
+            dataType: "json",
+            data: { ids: ids.join( "," ), states: states.join( "," ) },
+            success : function ( data ) {
+                $.each( data, function( id, val ) {
+                    // Replace HTML
+                    var cell = $("#libraryItem-" + id).find("#libraryItemInfo");
+                    cell.html( val.html );
+                    // If new state was terminal, stop tracking
+                    if (( val.state == "ok") || ( val.state == "error") || ( val.state == "empty") || ( val.state == "deleted" ) || ( val.state == "discarded" )) {
+                        delete tracked_datasets[ parseInt(id) ];
+                    } else {
+                        tracked_datasets[ parseInt(id) ] = val.state;
+                    }
+                });
+                updater( tracked_datasets ); 
+            },
+            error: function() {
+                // Just retry, like the old method, should try to be smarter
+                updater( tracked_datasets );
+            }
+        });
+    };
 </script>
 
 <%def name="render_dataset( ldda, library_dataset, selected, library, folder, pad, parent, row_conter )">
@@ -95,6 +146,8 @@
             can_manage_library_dataset = trans.app.security_agent.can_manage_library_item( user, roles, library_dataset )
         else:
             current_version = False
+        if current_version and ldda.state not in ( 'ok', 'error', 'empty', 'deleted', 'discarded' ):
+            tracked_datasets[ldda.id] = ldda.state
     %>
     %if current_version:
         <tr class="datasetRow"
@@ -102,7 +155,7 @@
             parent="${parent}"
             style="display: none;"
         %endif
-        >
+        id="libraryItem-${ldda.id}">
             <td style="padding-left: ${pad+20}px;">
                 %if selected:
                     <input type="checkbox" name="ldda_ids" value="${ldda.id}" checked/>
@@ -129,7 +182,7 @@
                     %endif
                 </div>
             </td>
-            <td>${ldda.message}</td>
+            <td id="libraryItemInfo">${render_library_item_info( ldda )}</td>
             <td>${uploaded_by}</td>
             <td>${ldda.create_time.strftime( "%Y-%m-%d" )}</td>
         </tr>     
@@ -305,6 +358,14 @@
     </table>
 </form>
 
+%if tracked_datasets:
+    <script type="text/javascript">
+        // Updater
+        updater({${ ",".join( [ '"%s" : "%s"' % ( k, v ) for k, v in tracked_datasets.iteritems() ] ) }});
+    </script>
+    <!-- running: do not change this comment, used by TwillTestCase.library_wait -->
+%endif
+
 ## Help about compression types
 
 %if len( comptypes ) > 1:
diff -r c4c409bda49b -r 3559f7377b9c templates/library/library_dataset_common.mako
--- a/templates/library/library_dataset_common.mako	Fri Sep 25 14:06:43 2009 -0400
+++ b/templates/library/library_dataset_common.mako	Fri Sep 25 14:36:12 2009 -0400
@@ -40,7 +40,8 @@
                         <div class="form-row">
                             <label>File:</label>
                             <div class="form-row-input">
-                                <input type="file" name="files_0|file_data" galaxy-ajax-upload="true"/>
+                                ##<input type="file" name="files_0|file_data" galaxy-ajax-upload="true"/>
+                                <input type="file" name="files_0|file_data"/>
                             </div>
                             <div style="clear: both"></div>
                         </div>
@@ -109,11 +110,16 @@
                             Convert spaces to tabs:
                         </label>
                         <div class="form-row-input">
-                            <input type="checkbox" name="files_0|space_to_tab" value="Yes"/>Yes
+                            ## The files grouping only makes sense in the upload_file context.
+                            %if upload_option == 'upload_file':
+                                <input type="checkbox" name="files_0|space_to_tab" value="Yes"/>Yes
+                            %else:
+                                <input type="checkbox" name="space_to_tab" value="Yes"/>Yes
+                            %endif
                         </div>
-                    </div>
-                    <div class="toolParamHelp" style="clear: both;">
-                        Use this option if you are entering intervals by hand.
+                        <div class="toolParamHelp" style="clear: both;">
+                            Use this option if you are entering intervals by hand.
+                        </div>
                     </div>
                     <div style="clear: both"></div>
                     <div class="form-row">
diff -r c4c409bda49b -r 3559f7377b9c templates/library/library_item_info.mako
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/templates/library/library_item_info.mako	Fri Sep 25 14:36:12 2009 -0400
@@ -0,0 +1,13 @@
+<%def name="render_library_item_info( ldda )">
+                            %if ldda.state == 'error':
+                                <div class="libraryItem-${ldda.state}">Job error <i>(click name for more info)</i></div>
+                            %elif ldda.state == 'queued':
+                                <div class="libraryItem-${ldda.state}">This job is queued</div>
+                            %elif ldda.state == 'running':
+                                <div class="libraryItem-${ldda.state}">This job is running</div>
+                            %else:
+                                ${ldda.message}
+                            %endif
+</%def>
+
+${render_library_item_info( ldda )}
diff -r c4c409bda49b -r 3559f7377b9c test-data/users/test3(a)bx.psu.edu/run1/2.fasta
--- a/test-data/users/test3(a)bx.psu.edu/run1/2.fasta	Fri Sep 25 14:06:43 2009 -0400
+++ b/test-data/users/test3(a)bx.psu.edu/run1/2.fasta	Fri Sep 25 14:36:12 2009 -0400
@@ -8,4 +8,4 @@
 ctcaatgttc atgttcttag gttgttttgg ataatatgcg gtcagtttaa tcttcgttgt
 ttcttcttaa aatatttatt catggtttaa tttttggttt gtacttgttc aggggccagt
 tcattattta ctctgtttgt atacagcagt tcttttattt ttagtatgat tttaatttaa
-aacaattcta atggtcaaaa a
\ No newline at end of file
+aacaattcta atggtcaaaa a
diff -r c4c409bda49b -r 3559f7377b9c test/base/twilltestcase.py
--- a/test/base/twilltestcase.py	Fri Sep 25 14:06:43 2009 -0400
+++ b/test/base/twilltestcase.py	Fri Sep 25 14:36:12 2009 -0400
@@ -1274,6 +1274,7 @@
         else:
             check_str = "Added 1 datasets to the folder '%s' ( each is selected )." % folder_name
         self.check_page_for_string( check_str )
+        self.library_wait( library_id )
         self.home()
     def set_library_dataset_permissions( self, library_id, folder_id, ldda_id, ldda_name, role_id, permissions_in, permissions_out ):
         url = "library_admin/library_dataset_dataset_association?library_id=%s&folder_id=%s&&id=%s&permissions=True&update_roles_button=Save" % \
@@ -1359,25 +1360,7 @@
         tc.submit( "runtool_btn" )
         check_str = "Added 1 dataset versions to the library dataset '%s' in the folder '%s'." % ( ldda_name, folder_name )
         self.check_page_for_string( check_str )
-        self.home()
-    def upload_new_dataset_versions( self, library_id, folder_id, folder_name, library_dataset_id, ldda_name, file_type='auto',
-                                    dbkey='hg18', message='', template_field_name1='', template_field_contents1='' ):
-        """Upload new version(s) of a dataset using a directory of files"""
-        self.home()
-        self.visit_url( "%s/library_admin/library_dataset_dataset_association?upload_option=upload_directory&library_id=%s&folder_id=%s&replace_id=%s" \
-                        % ( self.url, library_id, folder_id, library_dataset_id ) )
-        self.check_page_for_string( 'Upload a directory of files' )
-        self.check_page_for_string( 'You are currently selecting a new file to replace' )
-        tc.fv( "1", "file_type", file_type )
-        tc.fv( "1", "dbkey", dbkey )
-        tc.fv( "1", "message", message.replace( '+', ' ' ) )
-        tc.fv( "1", "server_dir", "library" )
-        # Add template field contents, if any...
-        if template_field_name1:
-            tc.fv( "1", template_field_name1, template_field_contents1 )
-        tc.submit( "runtool_btn" )
-        check_str = "Added 3 dataset versions to the library dataset '%s' in the folder '%s'." % ( ldda_name, folder_name )
-        self.check_page_for_string( check_str )
+        self.library_wait( library_id )
         self.home()
     def add_history_datasets_to_library( self, library_id, folder_id, folder_name, hda_id, root=False ):
         """Copy a dataset from the current history to a library folder"""
@@ -1410,6 +1393,7 @@
         tc.submit( "runtool_btn" )
         if check_str_after_submit:
             self.check_page_for_string( check_str_after_submit )
+        self.library_wait( library_id )
         self.home()
     def add_dir_of_files_from_libraries_view( self, library_id, folder_id, selected_dir, file_type='auto', dbkey='hg18', roles_tuple=[],
                                               message='', check_str_after_submit='', template_field_name1='', template_field_contents1='' ):
@@ -1432,6 +1416,7 @@
         tc.submit( "runtool_btn" )
         if check_str_after_submit:
             self.check_page_for_string( check_str_after_submit )
+        self.library_wait( library_id, controller='library' )
         self.home()
     def delete_library_item( self, library_id, library_item_id, library_item_name, library_item_type='library_dataset' ):
         """Mark a library item as deleted"""
@@ -1464,3 +1449,18 @@
         check_str = "Library '%s' and all of its contents have been purged" % library_name
         self.check_page_for_string( check_str )
         self.home()
+    def library_wait( self, library_id, controller='library_admin', maxiter=20 ):
+        """Waits for the tools to finish"""
+        count = 0
+        sleep_amount = 1
+        self.home()
+        while count < maxiter:
+            count += 1
+            self.visit_url( "%s/%s/browse_library?id=%s" % ( self.url, controller, library_id ) )
+            page = tc.browser.get_html()
+            if page.find( '<!-- running: do not change this comment, used by TwillTestCase.library_wait -->' ) > -1:
+                time.sleep( sleep_amount )
+                sleep_amount += 1
+            else:
+                break
+        self.assertNotEqual(count, maxiter)
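library_wait reloads the browse page and backs off by one extra second per attempt until the "running" marker comment disappears. The same wait-with-linear-backoff pattern in isolation (a sketch, with page_source and marker as stand-ins for the Twill calls):

    import time

    def wait_for_marker_to_clear( page_source, marker, maxiter=20 ):
        """Poll page_source() until marker is gone; sleep 1s, 2s, 3s... between tries."""
        sleep_amount = 1
        for attempt in range( maxiter ):
            if marker not in page_source():
                return attempt                     # datasets finished
            time.sleep( sleep_amount )
            sleep_amount += 1
        raise AssertionError( "still waiting after %d polls" % maxiter )
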
diff -r c4c409bda49b -r 3559f7377b9c test/functional/__init__.py
--- a/test/functional/__init__.py	Fri Sep 25 14:06:43 2009 -0400
+++ b/test/functional/__init__.py	Fri Sep 25 14:36:12 2009 -0400
@@ -79,8 +79,8 @@
                                    allow_user_creation = True,
                                    allow_user_deletion = True,
                                    admin_users = 'test(a)bx.psu.edu',
-                                   library_import_dir = galaxy_test_file_dir,
-                                   user_library_import_dir = os.path.join( galaxy_test_file_dir, 'users' ),
+                                   library_import_dir = os.path.join( os.getcwd(), galaxy_test_file_dir ),
+                                   user_library_import_dir = os.path.join( os.getcwd(), galaxy_test_file_dir, 'users' ),
                                    global_conf = { "__file__": "universe_wsgi.ini.sample" } )
                                    
         log.info( "Embedded Universe application started" )
diff -r c4c409bda49b -r 3559f7377b9c tools/data_source/upload.py
--- a/tools/data_source/upload.py	Fri Sep 25 14:06:43 2009 -0400
+++ b/tools/data_source/upload.py	Fri Sep 25 14:36:12 2009 -0400
@@ -137,7 +137,7 @@
 
     # See if we have an empty file
     if not os.path.exists( dataset.path ):
-        file_err( 'Uploaded temporary file (%s) does not exist.  Please' % dataset.path, dataset, json_file )
+        file_err( 'Uploaded temporary file (%s) does not exist.' % dataset.path, dataset, json_file )
         return
     if not os.path.getsize( dataset.path ) > 0:
         file_err( 'The uploaded file is empty', dataset, json_file )
@@ -237,7 +237,10 @@
     if ext == 'auto':
         ext = 'data'
     # Move the dataset to its "real" path
-    shutil.move( dataset.path, output_path )
+    if dataset.type == 'server_dir':
+        shutil.copy( dataset.path, output_path )
+    else:
+        shutil.move( dataset.path, output_path )
     # Write the job info
     info = dict( type = 'dataset',
                  dataset_id = dataset.dataset_id,
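The copy-versus-move distinction matters because server_dir datasets point directly at files inside the configured import directory, so moving them would remove the originals from that directory; regular uploads are temp files and can safely be moved. A standalone sketch of the rule, with dataset standing in for the object the upload tool builds from the JSON param file:

    import shutil

    def place_uploaded_file( dataset, output_path ):
        if dataset.type == 'server_dir':
            shutil.copy( dataset.path, output_path )   # leave the source in the import directory
        else:
            shutil.move( dataset.path, output_path )   # temp upload file; move it into place
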
                            0