galaxy-dev
02 Oct '09
details: http://www.bx.psu.edu/hg/galaxy/rev/3559f7377b9c
changeset: 2780:3559f7377b9c
user: Nate Coraor <nate@bx.psu.edu>
date: Fri Sep 25 14:36:12 2009 -0400
description:
Real Job(tm) support for the library upload tool. Does not include iframe upload for the library side yet.
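For orientation before the 1850-line diff: the heart of the change is that uploading to a data library now creates a real Job exactly like a history upload, except that the Job points at a LibraryFolder (new job.library_folder_id column) and its outputs are LibraryDatasetDatasetAssociations tracked through the new job_to_output_library_dataset table. A condensed sketch of that dispatch, gathered from upload_common.create_job() in the diff below (not new code, just the relevant lines in one place):

job = trans.app.model.Job()
job.session_id = trans.get_galaxy_session().id
if folder:                                   # library upload: attach the job to the folder
    job.library_folder_id = folder.id
else:                                        # ordinary history upload
    job.history_id = trans.history.id
...
for i, dataset in enumerate( data_list ):
    if folder:
        job.add_output_library_dataset( 'output%i' % i, dataset )   # outputs are LDDAs
    else:
        job.add_output_dataset( 'output%i' % i, dataset )           # outputs are HDAs
job.state = job.states.NEW
trans.app.model.flush()
trans.app.job_queue.put( job.id, tool )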
20 file(s) affected in this change:
lib/galaxy/jobs/__init__.py
lib/galaxy/model/__init__.py
lib/galaxy/model/mapping.py
lib/galaxy/model/migrate/versions/0020_library_upload_job.py
lib/galaxy/tools/actions/upload.py
lib/galaxy/tools/actions/upload_common.py
lib/galaxy/web/controllers/library.py
lib/galaxy/web/controllers/library_admin.py
lib/galaxy/web/controllers/library_dataset.py
static/june_2007_style/blue/library.css
static/june_2007_style/library.css.tmpl
templates/admin/library/browse_library.mako
templates/admin/library/new_library.mako
templates/library/browse_library.mako
templates/library/library_dataset_common.mako
templates/library/library_item_info.mako
test-data/users/test3@bx.psu.edu/run1/2.fasta
test/base/twilltestcase.py
test/functional/__init__.py
tools/data_source/upload.py
diffs (1850 lines):
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/jobs/__init__.py Fri Sep 25 14:36:12 2009 -0400
@@ -357,13 +357,14 @@
# Restore input / output data lists
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
+ out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
# These can be passed on the command line if wanted as $userId $userEmail
- if job.history.user: # check for anonymous user!
- userId = '%d' % job.history.user.id
- userEmail = str(job.history.user.email)
+ if job.history and job.history.user: # check for anonymous user!
+ userId = '%d' % job.history.user.id
+ userEmail = str(job.history.user.email)
else:
- userId = 'Anonymous'
- userEmail = 'Anonymous'
+ userId = 'Anonymous'
+ userEmail = 'Anonymous'
incoming['userId'] = userId
incoming['userEmail'] = userEmail
# Build params, done before hook so hook can use
@@ -424,7 +425,7 @@
log.debug( "fail(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
except ( IOError, OSError ), e:
log.error( "fail(): Missing output file in working directory: %s" % e )
- for dataset_assoc in job.output_datasets:
+ for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset = dataset_assoc.dataset
dataset.refresh()
dataset.state = dataset.states.ERROR
@@ -444,7 +445,7 @@
def change_state( self, state, info = False ):
job = model.Job.get( self.job_id )
job.refresh()
- for dataset_assoc in job.output_datasets:
+ for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset = dataset_assoc.dataset
dataset.refresh()
dataset.state = state
@@ -504,10 +505,10 @@
self.fail( "Job %s's output dataset(s) could not be read" % job.id )
return
job_context = ExpressionContext( dict( stdout = stdout, stderr = stderr ) )
- for dataset_assoc in job.output_datasets:
+ for dataset_assoc in job.output_datasets + job.output_library_datasets:
context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
#should this also be checking library associations? - can a library item be added from a history before the job has ended? - lets not allow this to occur
- for dataset in dataset_assoc.dataset.dataset.history_associations: #need to update all associated output hdas, i.e. history was shared with job running
+ for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations: #need to update all associated output hdas, i.e. history was shared with job running
dataset.blurb = 'done'
dataset.peek = 'no peek'
dataset.info = context['stdout'] + context['stderr']
@@ -576,6 +577,7 @@
# custom post process setup
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
+ out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows
param_dict = self.tool.params_from_strings( param_dict, self.app )
# Check for and move associated_files
@@ -647,11 +649,11 @@
job = model.Job.get( self.job_id )
if self.app.config.outputs_to_working_directory:
self.output_paths = []
- for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets ]:
+ for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets + job.output_library_datasets ]:
false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % data.id ) )
self.output_paths.append( DatasetPath( data.id, data.file_name, false_path ) )
else:
- self.output_paths = [ DatasetPath( da.dataset.dataset.id, da.dataset.file_name ) for da in job.output_datasets ]
+ self.output_paths = [ DatasetPath( da.dataset.dataset.id, da.dataset.file_name ) for da in job.output_datasets + job.output_library_datasets ]
return self.output_paths
def get_output_file_id( self, file ):
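The recurring edit in the JobWrapper hunks above is mechanical: every loop over job.output_datasets is widened to job.output_datasets + job.output_library_datasets, so failure handling, state changes and finish-time bookkeeping treat history outputs (HDAs) and library outputs (LDDAs) the same way. A toy, self-contained sketch of that pattern, using made-up stand-in classes rather than Galaxy's models:

class Dataset( object ):
    def __init__( self ):
        self.state = 'queued'

class Assoc( object ):
    def __init__( self, name, dataset ):
        self.name = name
        self.dataset = dataset

class Job( object ):
    def __init__( self ):
        self.output_datasets = []            # history outputs
        self.output_library_datasets = []    # library outputs, new in this changeset

def change_state( job, state ):
    # Same shape as JobWrapper.change_state() above: one loop covers both lists.
    for dataset_assoc in job.output_datasets + job.output_library_datasets:
        dataset_assoc.dataset.state = state

job = Job()
job.output_datasets.append( Assoc( 'output0', Dataset() ) )
job.output_library_datasets.append( Assoc( 'output1', Dataset() ) )
change_state( job, 'running' )
assert all( a.dataset.state == 'running' for a in job.output_datasets + job.output_library_datasets )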
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/model/__init__.py Fri Sep 25 14:36:12 2009 -0400
@@ -74,6 +74,7 @@
self.parameters = []
self.input_datasets = []
self.output_datasets = []
+ self.output_library_datasets = []
self.state = Job.states.NEW
self.info = None
self.job_runner_name = None
@@ -84,6 +85,8 @@
self.input_datasets.append( JobToInputDatasetAssociation( name, dataset ) )
def add_output_dataset( self, name, dataset ):
self.output_datasets.append( JobToOutputDatasetAssociation( name, dataset ) )
+ def add_output_library_dataset( self, name, dataset ):
+ self.output_library_datasets.append( JobToOutputLibraryDatasetAssociation( name, dataset ) )
def set_state( self, state ):
self.state = state
# For historical reasons state propogates down to datasets
@@ -138,6 +141,11 @@
self.dataset = dataset
class JobToOutputDatasetAssociation( object ):
+ def __init__( self, name, dataset ):
+ self.name = name
+ self.dataset = dataset
+
+class JobToOutputLibraryDatasetAssociation( object ):
def __init__( self, name, dataset ):
self.name = name
self.dataset = dataset
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/model/mapping.py Fri Sep 25 14:36:12 2009 -0400
@@ -107,7 +107,7 @@
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
- Column( "state", TrimmedString( 64 ) ),
+ Column( "state", TrimmedString( 64 ), index=True ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "purged", Boolean, index=True, default=False ),
Column( "purgable", Boolean, default=True ),
@@ -307,6 +307,7 @@
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ),
+ Column( "library_folder_id", Integer, ForeignKey( "library_folder.id" ), index=True ),
Column( "tool_id", String( 255 ) ),
Column( "tool_version", TEXT, default="1.0.0" ),
Column( "state", String( 64 ), index=True ),
@@ -337,6 +338,12 @@
Column( "id", Integer, primary_key=True ),
Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
Column( "dataset_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ),
+ Column( "name", String(255) ) )
+
+JobToOutputLibraryDatasetAssociation.table = Table( "job_to_output_library_dataset", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
+ Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ),
Column( "name", String(255) ) )
JobExternalOutputMetadata.table = Table( "job_external_output_metadata", metadata,
@@ -907,6 +914,9 @@
assign_mapper( context, JobToOutputDatasetAssociation, JobToOutputDatasetAssociation.table,
properties=dict( job=relation( Job ), dataset=relation( HistoryDatasetAssociation, lazy=False ) ) )
+assign_mapper( context, JobToOutputLibraryDatasetAssociation, JobToOutputLibraryDatasetAssociation.table,
+ properties=dict( job=relation( Job ), dataset=relation( LibraryDatasetDatasetAssociation, lazy=False ) ) )
+
assign_mapper( context, JobParameter, JobParameter.table )
assign_mapper( context, JobExternalOutputMetadata, JobExternalOutputMetadata.table,
@@ -917,9 +927,11 @@
assign_mapper( context, Job, Job.table,
properties=dict( galaxy_session=relation( GalaxySession ),
history=relation( History ),
+ library_folder=relation( LibraryFolder ),
parameters=relation( JobParameter, lazy=False ),
input_datasets=relation( JobToInputDatasetAssociation, lazy=False ),
output_datasets=relation( JobToOutputDatasetAssociation, lazy=False ),
+ output_library_datasets=relation( JobToOutputLibraryDatasetAssociation, lazy=False ),
external_output_metadata = relation( JobExternalOutputMetadata, lazy = False ) ) )
assign_mapper( context, Event, Event.table,
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/model/migrate/versions/0020_library_upload_job.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0020_library_upload_job.py Fri Sep 25 14:36:12 2009 -0400
@@ -0,0 +1,121 @@
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from sqlalchemy.exceptions import *
+from migrate import *
+from migrate.changeset import *
+import datetime
+now = datetime.datetime.utcnow
+import sys, logging
+# Need our custom types, but don't import anything else from model
+from galaxy.model.custom_types import *
+
+log = logging.getLogger( __name__ )
+log.setLevel(logging.DEBUG)
+handler = logging.StreamHandler( sys.stdout )
+format = "%(name)s %(levelname)s %(asctime)s %(message)s"
+formatter = logging.Formatter( format )
+handler.setFormatter( formatter )
+log.addHandler( handler )
+
+metadata = MetaData( migrate_engine )
+db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, transactional=False ) )
+
+def display_migration_details():
+ print ""
+ print "========================================"
+ print """This script creates a job_to_output_library_dataset table for allowing library
+uploads to run as regular jobs. To support this, a library_folder_id column is
+added to the job table, and library_folder/output_library_datasets relations
+are added to the Job object. An index is also added to the dataset.state
+column."""
+ print "========================================"
+
+JobToOutputLibraryDatasetAssociation_table = Table( "job_to_output_library_dataset", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "job_id", Integer, ForeignKey( "job.id" ), index=True ),
+ Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ),
+ Column( "name", String(255) ) )
+
+def upgrade():
+ display_migration_details()
+ # Load existing tables
+ metadata.reflect()
+ # Create the job_to_output_library_dataset table
+ try:
+ JobToOutputLibraryDatasetAssociation_table.create()
+ except Exception, e:
+ print "Creating job_to_output_library_dataset table failed: %s" % str( e )
+ log.debug( "Creating job_to_output_library_dataset table failed: %s" % str( e ) )
+ # Create the library_folder_id column
+ try:
+ Job_table = Table( "job", metadata, autoload=True )
+ except NoSuchTableError:
+ Job_table = None
+ log.debug( "Failed loading table job" )
+ if Job_table:
+ try:
+ col = Column( "library_folder_id", Integer, index=True )
+ col.create( Job_table )
+ assert col is Job_table.c.library_folder_id
+ except Exception, e:
+ log.debug( "Adding column 'library_folder_id' to job table failed: %s" % ( str( e ) ) )
+ try:
+ LibraryFolder_table = Table( "library_folder", metadata, autoload=True )
+ except NoSuchTableError:
+ LibraryFolder_table = None
+ log.debug( "Failed loading table library_folder" )
+ # Add 1 foreign key constraint to the job table
+ if Job_table and LibraryFolder_table:
+ try:
+ cons = ForeignKeyConstraint( [Job_table.c.library_folder_id],
+ [LibraryFolder_table.c.id],
+ name='job_library_folder_id_fk' )
+ # Create the constraint
+ cons.create()
+ except Exception, e:
+ log.debug( "Adding foreign key constraint 'job_library_folder_id_fk' to table 'library_folder' failed: %s" % ( str( e ) ) )
+ # Create the ix_dataset_state index
+ try:
+ Dataset_table = Table( "dataset", metadata, autoload=True )
+ except NoSuchTableError:
+ Dataset_table = None
+ log.debug( "Failed loading table dataset" )
+ i = Index( "ix_dataset_state", Dataset_table.c.state )
+ try:
+ i.create()
+ except Exception, e:
+ print str(e)
+ log.debug( "Adding index 'ix_dataset_state' to dataset table failed: %s" % str( e ) )
+
+def downgrade():
+ metadata.reflect()
+ # Drop the library_folder_id column
+ try:
+ Job_table = Table( "job", metadata, autoload=True )
+ except NoSuchTableError:
+ Job_table = None
+ log.debug( "Failed loading table job" )
+ if Job_table:
+ try:
+ col = Job_table.c.library_folder_id
+ col.drop()
+ except Exception, e:
+ log.debug( "Dropping column 'library_folder_id' from job table failed: %s" % ( str( e ) ) )
+ # Drop the job_to_output_library_dataset table
+ try:
+ JobToOutputLibraryDatasetAssociation_table.drop()
+ except Exception, e:
+ print str(e)
+ log.debug( "Dropping job_to_output_library_dataset table failed: %s" % str( e ) )
+ # Drop the ix_dataset_state index
+ try:
+ Dataset_table = Table( "dataset", metadata, autoload=True )
+ except NoSuchTableError:
+ Dataset_table = None
+ log.debug( "Failed loading table dataset" )
+ i = Index( "ix_dataset_state", Dataset_table.c.state )
+ try:
+ i.drop()
+ except Exception, e:
+ print str(e)
+ log.debug( "Dropping index 'ix_dataset_state' from dataset table failed: %s" % str( e ) )
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/tools/actions/upload.py
--- a/lib/galaxy/tools/actions/upload.py Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/tools/actions/upload.py Fri Sep 25 14:36:12 2009 -0400
@@ -1,126 +1,22 @@
-import os, shutil, urllib, StringIO, re, gzip, tempfile, shutil, zipfile
-from cgi import FieldStorage
+import os
from __init__ import ToolAction
-from galaxy import datatypes, jobs
-from galaxy.datatypes import sniff
-from galaxy import model, util
-from galaxy.util.json import to_json_string
-
-import sys, traceback
+from galaxy.tools.actions import upload_common
import logging
log = logging.getLogger( __name__ )
class UploadToolAction( ToolAction ):
- # Action for uploading files
- def persist_uploads( self, incoming ):
- if 'files' in incoming:
- new_files = []
- temp_files = []
- for upload_dataset in incoming['files']:
- f = upload_dataset['file_data']
- if isinstance( f, FieldStorage ):
- assert not isinstance( f.file, StringIO.StringIO )
- assert f.file.name != '<fdopen>'
- local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
- f.file.close()
- upload_dataset['file_data'] = dict( filename = f.filename,
- local_filename = local_filename )
- if upload_dataset['url_paste'].strip() != '':
- upload_dataset['url_paste'] = datatypes.sniff.stream_to_file( StringIO.StringIO( upload_dataset['url_paste'] ), prefix="strio_url_paste_" )[0]
- else:
- upload_dataset['url_paste'] = None
- new_files.append( upload_dataset )
- incoming['files'] = new_files
- return incoming
def execute( self, tool, trans, incoming={}, set_output_hid = True ):
dataset_upload_inputs = []
for input_name, input in tool.inputs.iteritems():
if input.type == "upload_dataset":
dataset_upload_inputs.append( input )
assert dataset_upload_inputs, Exception( "No dataset upload groups were found." )
- # Get any precreated datasets (when using asynchronous uploads)
- async_datasets = []
- self.precreated_datasets = []
- if incoming.get( 'async_datasets', None ) not in ["None", "", None]:
- async_datasets = incoming['async_datasets'].split(',')
- for id in async_datasets:
- try:
- data = trans.app.model.HistoryDatasetAssociation.get( int( id ) )
- except:
- log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id )
- continue
- if trans.user is None and trans.galaxy_session.current_history != data.history:
- log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)' % ( data.id, trans.galaxy_session.id ) )
- elif data.history.user != trans.user:
- log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)' % ( data.id, trans.user.id ) )
- else:
- self.precreated_datasets.append( data )
- data_list = []
-
- incoming = self.persist_uploads( incoming )
-
- json_file = tempfile.mkstemp()
- json_file_path = json_file[1]
- json_file = os.fdopen( json_file[0], 'w' )
- for dataset_upload_input in dataset_upload_inputs:
- uploaded_datasets = dataset_upload_input.get_uploaded_datasets( trans, incoming )
- for uploaded_dataset in uploaded_datasets:
- data = self.get_precreated_dataset( uploaded_dataset.name )
- if not data:
- data = trans.app.model.HistoryDatasetAssociation( history = trans.history, create_dataset = True )
- data.name = uploaded_dataset.name
- data.state = data.states.QUEUED
- data.extension = uploaded_dataset.file_type
- data.dbkey = uploaded_dataset.dbkey
- data.flush()
- trans.history.add_dataset( data, genome_build = uploaded_dataset.dbkey )
- permissions = trans.app.security_agent.history_get_default_permissions( trans.history )
- trans.app.security_agent.set_all_dataset_permissions( data.dataset, permissions )
- else:
- data.extension = uploaded_dataset.file_type
- data.dbkey = uploaded_dataset.dbkey
- data.flush()
- trans.history.genome_build = uploaded_dataset.dbkey
- if uploaded_dataset.type == 'composite':
- # we need to init metadata before the job is dispatched
- data.init_meta()
- for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
- setattr( data.metadata, meta_name, meta_value )
- data.flush()
- json = dict( file_type = uploaded_dataset.file_type,
- dataset_id = data.dataset.id,
- dbkey = uploaded_dataset.dbkey,
- type = uploaded_dataset.type,
- metadata = uploaded_dataset.metadata,
- primary_file = uploaded_dataset.primary_file,
- extra_files_path = data.extra_files_path,
- composite_file_paths = uploaded_dataset.composite_files,
- composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
- else:
- try:
- is_binary = uploaded_dataset.datatype.is_binary
- except:
- is_binary = None
- json = dict( file_type = uploaded_dataset.file_type,
- ext = uploaded_dataset.ext,
- name = uploaded_dataset.name,
- dataset_id = data.dataset.id,
- dbkey = uploaded_dataset.dbkey,
- type = uploaded_dataset.type,
- is_binary = is_binary,
- space_to_tab = uploaded_dataset.space_to_tab,
- path = uploaded_dataset.path )
- json_file.write( to_json_string( json ) + '\n' )
- data_list.append( data )
- json_file.close()
-
- #cleanup unclaimed precreated datasets:
- for data in self.precreated_datasets:
- log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( data.id ) )
- data.state = data.states.ERROR
- data.info = 'No file contents were available.'
+ precreated_datasets = upload_common.get_precreated_datasets( trans, incoming, trans.app.model.HistoryDatasetAssociation )
+ incoming = upload_common.persist_uploads( incoming )
+ json_file_path, data_list = upload_common.create_paramfile( trans, incoming, precreated_datasets, dataset_upload_inputs )
+ upload_common.cleanup_unused_precreated_datasets( precreated_datasets )
if not data_list:
try:
@@ -129,38 +25,4 @@
pass
return 'No data was entered in the upload form, please go back and choose data to upload.'
- # Create the job object
- job = trans.app.model.Job()
- job.session_id = trans.get_galaxy_session().id
- job.history_id = trans.history.id
- job.tool_id = tool.id
- job.tool_version = tool.version
- job.state = trans.app.model.Job.states.UPLOAD
- job.flush()
- log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
- trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )
-
- for name, value in tool.params_to_strings( incoming, trans.app ).iteritems():
- job.add_parameter( name, value )
- job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
- for i, dataset in enumerate( data_list ):
- job.add_output_dataset( 'output%i' % i, dataset )
- job.state = trans.app.model.Job.states.NEW
- trans.app.model.flush()
-
- # Queue the job for execution
- trans.app.job_queue.put( job.id, tool )
- trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
- return dict( [ ( i, v ) for i, v in enumerate( data_list ) ] )
-
- def get_precreated_dataset( self, name ):
- """
- Return a dataset matching a name from the list of precreated (via async
- upload) datasets. If there's more than one upload with the exact same
- name, we need to pop one (the first) so it isn't chosen next time.
- """
- names = [ d.name for d in self.precreated_datasets ]
- if names.count( name ) > 0:
- return self.precreated_datasets.pop( names.index( name ) )
- else:
- return None
+ return upload_common.create_job( trans, incoming, tool, json_file_path, data_list )
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/tools/actions/upload_common.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/tools/actions/upload_common.py Fri Sep 25 14:36:12 2009 -0400
@@ -0,0 +1,235 @@
+import os, tempfile, StringIO
+from cgi import FieldStorage
+from galaxy import datatypes, util
+from galaxy.datatypes import sniff
+from galaxy.util.json import to_json_string
+
+import logging
+log = logging.getLogger( __name__ )
+
+def persist_uploads( params ):
+ """
+ Turn any uploads in the submitted form to persisted files.
+ """
+ if 'files' in params:
+ new_files = []
+ temp_files = []
+ for upload_dataset in params['files']:
+ f = upload_dataset['file_data']
+ if isinstance( f, FieldStorage ):
+ assert not isinstance( f.file, StringIO.StringIO )
+ assert f.file.name != '<fdopen>'
+ local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
+ f.file.close()
+ upload_dataset['file_data'] = dict( filename = f.filename,
+ local_filename = local_filename )
+ if upload_dataset['url_paste'].strip() != '':
+ upload_dataset['url_paste'] = datatypes.sniff.stream_to_file( StringIO.StringIO( upload_dataset['url_paste'] ), prefix="strio_url_paste_" )[0]
+ else:
+ upload_dataset['url_paste'] = None
+ new_files.append( upload_dataset )
+ params['files'] = new_files
+ return params
+
+def get_precreated_datasets( trans, params, data_obj ):
+ """
+ Get any precreated datasets (when using asynchronous uploads).
+ """
+ rval = []
+ async_datasets = []
+ if params.get( 'async_datasets', None ) not in ["None", "", None]:
+ async_datasets = params['async_datasets'].split(',')
+ user, roles = trans.get_user_and_roles()
+ for id in async_datasets:
+ try:
+ data = data_obj.get( int( id ) )
+ except:
+ log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id )
+ continue
+ if data_obj is trans.app.model.HistoryDatasetAssociation:
+ if user is None and trans.galaxy_session.current_history != data.history:
+ log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)' % ( data.id, trans.galaxy_session.id ) )
+ elif data.history.user != user:
+ log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)' % ( data.id, user.id ) )
+ else:
+ rval.append( data )
+ elif data_obj is trans.app.model.LibraryDatasetDatasetAssociation:
+ if not trans.app.security_agent.can_add_library_item( user, roles, data.library_dataset.folder ):
+ log.error( 'Got a precreated dataset (%s) but this user (%s) is not allowed to write to it' % ( data.id, user.id ) )
+ else:
+ rval.append( data )
+ return rval
+
+def get_precreated_dataset( precreated_datasets, name ):
+ """
+ Return a dataset matching a name from the list of precreated (via async
+ upload) datasets. If there's more than one upload with the exact same
+ name, we need to pop one (the first) so it isn't chosen next time.
+ """
+ names = [ d.name for d in precreated_datasets ]
+ if names.count( name ) > 0:
+ return precreated_datasets.pop( names.index( name ) )
+ else:
+ return None
+
+def cleanup_unused_precreated_datasets( precreated_datasets ):
+ for data in precreated_datasets:
+ log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( data.id ) )
+ data.state = data.states.ERROR
+ data.info = 'No file contents were available.'
+
+def new_history_upload( trans, uploaded_dataset ):
+ hda = trans.app.model.HistoryDatasetAssociation( name = uploaded_dataset.name,
+ extension = uploaded_dataset.file_type,
+ dbkey = uploaded_dataset.dbkey,
+ history = trans.history,
+ create_dataset = True )
+ hda.state = hda.states.QUEUED
+ hda.flush()
+ trans.history.add_dataset( hda, genome_build = uploaded_dataset.dbkey )
+ permissions = trans.app.security_agent.history_get_default_permissions( trans.history )
+ trans.app.security_agent.set_all_dataset_permissions( hda.dataset, permissions )
+ return hda
+
+def new_library_upload( trans, uploaded_dataset, replace_dataset, folder,
+ template, template_field_contents, roles, message ):
+ if replace_dataset:
+ ld = replace_dataset
+ else:
+ ld = trans.app.model.LibraryDataset( folder=folder, name=uploaded_dataset.name )
+ ld.flush()
+ trans.app.security_agent.copy_library_permissions( folder, ld )
+ ldda = trans.app.model.LibraryDatasetDatasetAssociation( name = uploaded_dataset.name,
+ extension = uploaded_dataset.file_type,
+ dbkey = uploaded_dataset.dbkey,
+ library_dataset = ld,
+ user = trans.user,
+ create_dataset = True )
+ ldda.state = ldda.states.QUEUED
+ ldda.message = message
+ ldda.flush()
+ # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
+ trans.app.security_agent.copy_library_permissions( ld, ldda )
+ if replace_dataset:
+ # Copy the Dataset level permissions from replace_dataset to the new LibraryDatasetDatasetAssociation.dataset
+ trans.app.security_agent.copy_dataset_permissions( replace_dataset.library_dataset_dataset_association.dataset, ldda.dataset )
+ else:
+ # Copy the current user's DefaultUserPermissions to the new LibraryDatasetDatasetAssociation.dataset
+ trans.app.security_agent.set_all_dataset_permissions( ldda.dataset, trans.app.security_agent.user_get_default_permissions( trans.user ) )
+ folder.add_library_dataset( ld, genome_build=uploaded_dataset.dbkey )
+ folder.flush()
+ ld.library_dataset_dataset_association_id = ldda.id
+ ld.flush()
+ # Handle template included in the upload form, if any
+ if template and template_field_contents:
+ # Since information templates are inherited, the template fields can be displayed on the upload form.
+ # If the user has added field contents, we'll need to create a new form_values and info_association
+ # for the new library_dataset_dataset_association object.
+ # Create a new FormValues object, using the template we previously retrieved
+ form_values = trans.app.model.FormValues( template, template_field_contents )
+ form_values.flush()
+ # Create a new info_association between the current ldda and form_values
+ info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( ldda, template, form_values )
+ info_association.flush()
+ # If roles were selected upon upload, restrict access to the Dataset to those roles
+ if roles:
+ for role in roles:
+ dp = trans.app.model.DatasetPermissions( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action, ldda.dataset, role )
+ dp.flush()
+ return ldda
+
+def create_paramfile( trans, params, precreated_datasets, dataset_upload_inputs,
+ replace_dataset=None, folder=None, template=None,
+ template_field_contents=None, roles=None, message=None ):
+ """
+ Create the upload tool's JSON "param" file.
+ """
+ data_list = []
+ json_file = tempfile.mkstemp()
+ json_file_path = json_file[1]
+ json_file = os.fdopen( json_file[0], 'w' )
+ for dataset_upload_input in dataset_upload_inputs:
+ uploaded_datasets = dataset_upload_input.get_uploaded_datasets( trans, params )
+ for uploaded_dataset in uploaded_datasets:
+ data = get_precreated_dataset( precreated_datasets, uploaded_dataset.name )
+ if not data:
+ if folder:
+ data = new_library_upload( trans, uploaded_dataset, replace_dataset, folder, template, template_field_contents, roles, message )
+ else:
+ data = new_history_upload( trans, uploaded_dataset )
+ else:
+ data.extension = uploaded_dataset.file_type
+ data.dbkey = uploaded_dataset.dbkey
+ data.flush()
+ if folder:
+ folder.genome_build = uploaded_dataset.dbkey
+ folder.flush()
+ else:
+ trans.history.genome_build = uploaded_dataset.dbkey
+ if uploaded_dataset.type == 'composite':
+ # we need to init metadata before the job is dispatched
+ data.init_meta()
+ for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
+ setattr( data.metadata, meta_name, meta_value )
+ data.flush()
+ json = dict( file_type = uploaded_dataset.file_type,
+ dataset_id = data.dataset.id,
+ dbkey = uploaded_dataset.dbkey,
+ type = uploaded_dataset.type,
+ metadata = uploaded_dataset.metadata,
+ primary_file = uploaded_dataset.primary_file,
+ extra_files_path = data.extra_files_path,
+ composite_file_paths = uploaded_dataset.composite_files,
+ composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
+ else:
+ try:
+ is_binary = uploaded_dataset.datatype.is_binary
+ except:
+ is_binary = None
+ json = dict( file_type = uploaded_dataset.file_type,
+ ext = uploaded_dataset.ext,
+ name = uploaded_dataset.name,
+ dataset_id = data.dataset.id,
+ dbkey = uploaded_dataset.dbkey,
+ type = uploaded_dataset.type,
+ is_binary = is_binary,
+ space_to_tab = uploaded_dataset.space_to_tab,
+ path = uploaded_dataset.path )
+ json_file.write( to_json_string( json ) + '\n' )
+ data_list.append( data )
+ json_file.close()
+ return ( json_file_path, data_list )
+
+def create_job( trans, params, tool, json_file_path, data_list, folder=None ):
+ """
+ Create the upload job.
+ """
+ job = trans.app.model.Job()
+ job.session_id = trans.get_galaxy_session().id
+ if folder:
+ job.library_folder_id = folder.id
+ else:
+ job.history_id = trans.history.id
+ job.tool_id = tool.id
+ job.tool_version = tool.version
+ job.state = job.states.UPLOAD
+ job.flush()
+ log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
+ trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )
+
+ for name, value in tool.params_to_strings( params, trans.app ).iteritems():
+ job.add_parameter( name, value )
+ job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
+ if folder:
+ for i, dataset in enumerate( data_list ):
+ job.add_output_library_dataset( 'output%i' % i, dataset )
+ else:
+ for i, dataset in enumerate( data_list ):
+ job.add_output_dataset( 'output%i' % i, dataset )
+ job.state = job.states.NEW
+ trans.app.model.flush()
+
+ # Queue the job for execution
+ trans.app.job_queue.put( job.id, tool )
+ trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
+ return dict( [ ( 'output%i' % i, v ) for i, v in enumerate( data_list ) ] )
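upload_common factors the upload flow into a short calling sequence that the history tool action (upload.py above) and the library controllers (library_dataset.py below) now share; the only real difference is whether a library folder is threaded through create_paramfile() and create_job(). Condensed from the diff, names as they appear there:

# History upload (UploadToolAction.execute, above):
precreated = upload_common.get_precreated_datasets( trans, incoming, trans.app.model.HistoryDatasetAssociation )
incoming = upload_common.persist_uploads( incoming )
json_file_path, data_list = upload_common.create_paramfile( trans, incoming, precreated, dataset_upload_inputs )
upload_common.cleanup_unused_precreated_datasets( precreated )
upload_common.create_job( trans, incoming, tool, json_file_path, data_list )

# Library upload (UploadLibraryDataset.upload_dataset, below): same steps, but
# create_paramfile() also receives the target folder, template, roles and message,
# and create_job() gets folder=folder, so the outputs become LDDAs tracked in
# job_to_output_library_dataset rather than HDAs.
json_file_path, data_list = upload_common.create_paramfile( trans, tool_params, precreated_datasets,
                                                            dataset_upload_inputs, replace_dataset, folder,
                                                            template, template_field_contents, roles, message )
upload_common.create_job( trans, tool_params, tool, json_file_path, data_list, folder=folder )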
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/web/controllers/library.py
--- a/lib/galaxy/web/controllers/library.py Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/web/controllers/library.py Fri Sep 25 14:36:12 2009 -0400
@@ -726,17 +726,17 @@
template_id = 'None'
widgets = []
upload_option = params.get( 'upload_option', 'upload_file' )
- created_ldda_ids = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
- controller='library',
- library_id=library_id,
- folder_id=folder_id,
- template_id=template_id,
- widgets=widgets,
- replace_dataset=replace_dataset,
- **kwd )
- if created_ldda_ids:
- ldda_id_list = created_ldda_ids.split( ',' )
- total_added = len( ldda_id_list )
+ created_outputs = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
+ controller='library',
+ library_id=library_id,
+ folder_id=folder_id,
+ template_id=template_id,
+ widgets=widgets,
+ replace_dataset=replace_dataset,
+ **kwd )
+ if created_outputs:
+ ldda_id_list = [ str( v.id ) for v in created_outputs.values() ]
+ total_added = len( created_outputs.values() )
if replace_dataset:
msg = "Added %d dataset versions to the library dataset '%s' in the folder '%s'." % ( total_added, replace_dataset.name, folder.name )
else:
@@ -760,7 +760,7 @@
action='browse_library',
id=library_id,
default_action=default_action,
- created_ldda_ids=created_ldda_ids,
+ created_ldda_ids=",".join( ldda_id_list ),
msg=util.sanitize_text( msg ),
messagetype='done' ) )
@@ -769,7 +769,7 @@
trans.response.send_redirect( web.url_for( controller='library',
action='browse_library',
id=library_id,
- created_ldda_ids=created_ldda_ids,
+ created_ldda_ids=",".join( ldda_id_list ),
msg=util.sanitize_text( msg ),
messagetype='error' ) )
if not id or replace_dataset:
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/web/controllers/library_admin.py
--- a/lib/galaxy/web/controllers/library_admin.py Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/web/controllers/library_admin.py Fri Sep 25 14:36:12 2009 -0400
@@ -438,16 +438,16 @@
template_id = 'None'
widgets = []
upload_option = params.get( 'upload_option', 'upload_file' )
- created_ldda_ids = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
- controller='library_admin',
- library_id=library_id,
- folder_id=folder_id,
- template_id=template_id,
- widgets=widgets,
- replace_dataset=replace_dataset,
- **kwd )
- if created_ldda_ids:
- total_added = len( created_ldda_ids.split( ',' ) )
+ created_outputs = trans.webapp.controllers[ 'library_dataset' ].upload_dataset( trans,
+ controller='library_admin',
+ library_id=library_id,
+ folder_id=folder_id,
+ template_id=template_id,
+ widgets=widgets,
+ replace_dataset=replace_dataset,
+ **kwd )
+ if created_outputs:
+ total_added = len( created_outputs.values() )
if replace_dataset:
msg = "Added %d dataset versions to the library dataset '%s' in the folder '%s'." % ( total_added, replace_dataset.name, folder.name )
else:
@@ -464,7 +464,7 @@
trans.response.send_redirect( web.url_for( controller='library_admin',
action='browse_library',
id=library_id,
- created_ldda_ids=created_ldda_ids,
+ created_ldda_ids=",".join( [ str( v.id ) for v in created_outputs.values() ] ),
msg=util.sanitize_text( msg ),
messagetype=messagetype ) )
elif not id or replace_dataset:
diff -r c4c409bda49b -r 3559f7377b9c lib/galaxy/web/controllers/library_dataset.py
--- a/lib/galaxy/web/controllers/library_dataset.py Fri Sep 25 14:06:43 2009 -0400
+++ b/lib/galaxy/web/controllers/library_dataset.py Fri Sep 25 14:36:12 2009 -0400
@@ -3,196 +3,51 @@
from galaxy import util, jobs
from galaxy.datatypes import sniff
from galaxy.security import RBACAgent
+from galaxy.util.json import to_json_string
+from galaxy.tools.actions import upload_common
log = logging.getLogger( __name__ )
class UploadLibraryDataset( BaseController ):
- def remove_tempfile( self, filename ):
- try:
- os.unlink( filename )
- except:
- log.exception( 'failure removing temporary file: %s' % filename )
- def add_file( self, trans, folder, file_obj, name, file_type, dbkey, roles,
- info='no info', space_to_tab=False, replace_dataset=None,
- template=None, template_field_contents=[], message=None ):
- data_type = None
- line_count = 0
- temp_name, is_multi_byte = sniff.stream_to_file( file_obj )
- # See if we have an empty file
- if not os.path.getsize( temp_name ) > 0:
- raise BadFileException( "you attempted to upload an empty file." )
- if is_multi_byte:
- ext = sniff.guess_ext( temp_name, is_multi_byte=True )
- else:
- if not data_type:
- # See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress on the fly.
- is_gzipped, is_valid = self.check_gzip( temp_name )
- if is_gzipped and not is_valid:
- raise BadFileException( "you attempted to upload an inappropriate file." )
- elif is_gzipped and is_valid:
- # We need to uncompress the temp_name file
- CHUNK_SIZE = 2**20 # 1Mb
- fd, uncompressed = tempfile.mkstemp()
- gzipped_file = gzip.GzipFile( temp_name )
- while 1:
- try:
- chunk = gzipped_file.read( CHUNK_SIZE )
- except IOError:
- os.close( fd )
- os.remove( uncompressed )
- raise BadFileException( 'problem uncompressing gzipped data.' )
- if not chunk:
- break
- os.write( fd, chunk )
- os.close( fd )
- gzipped_file.close()
- # Replace the gzipped file with the decompressed file
- shutil.move( uncompressed, temp_name )
- name = name.rstrip( '.gz' )
- data_type = 'gzip'
- ext = ''
- if not data_type:
- # See if we have a zip archive
- is_zipped, is_valid, test_ext = self.check_zip( temp_name )
- if is_zipped and not is_valid:
- raise BadFileException( "you attempted to upload an inappropriate file." )
- elif is_zipped and is_valid:
- # Currently, we force specific tools to handle this case. We also require the user
- # to manually set the incoming file_type
- if ( test_ext == 'ab1' or test_ext == 'scf' ) and file_type != 'binseq.zip':
- raise BadFileException( "Invalid 'File Format' for archive consisting of binary files - use 'Binseq.zip'." )
- elif test_ext == 'txt' and file_type != 'txtseq.zip':
- raise BadFileException( "Invalid 'File Format' for archive consisting of text files - use 'Txtseq.zip'." )
- if not ( file_type == 'binseq.zip' or file_type == 'txtseq.zip' ):
- raise BadFileException( "you must manually set the 'File Format' to either 'Binseq.zip' or 'Txtseq.zip' when uploading zip files." )
- data_type = 'zip'
- ext = file_type
- if not data_type:
- if self.check_binary( temp_name ):
- try:
- ext = name.split( "." )[1].strip().lower()
- except:
- ext = ''
- try:
- is_pdf = open( temp_name ).read( len( '%PDF' ) ) == '%PDF'
- except:
- is_pdf = False #file failed to open or contents are smaller than pdf header
- if is_pdf:
- file_type = 'pdf' #allow the upload of PDFs to library via the admin interface.
- else:
- if not( ext == 'ab1' or ext == 'scf' ):
- raise BadFileException( "you attempted to upload an inappropriate file." )
- if ext == 'ab1' and file_type != 'ab1':
- raise BadFileException( "you must manually set the 'File Format' to 'Ab1' when uploading ab1 files." )
- elif ext == 'scf' and file_type != 'scf':
- raise BadFileException( "you must manually set the 'File Format' to 'Scf' when uploading scf files." )
- data_type = 'binary'
- if not data_type:
- # We must have a text file
- if self.check_html( temp_name ):
- raise BadFileException( "you attempted to upload an inappropriate file." )
- if data_type != 'binary' and data_type != 'zip':
- if space_to_tab:
- line_count = sniff.convert_newlines_sep2tabs( temp_name )
- elif os.stat( temp_name ).st_size < 262144000: # 250MB
- line_count = sniff.convert_newlines( temp_name )
- else:
- if sniff.check_newlines( temp_name ):
- line_count = sniff.convert_newlines( temp_name )
- else:
- line_count = None
- if file_type == 'auto':
- ext = sniff.guess_ext( temp_name, sniff_order=trans.app.datatypes_registry.sniff_order )
- else:
- ext = file_type
- data_type = ext
- if info is None:
- info = 'uploaded %s file' % data_type
- if file_type == 'auto':
- data_type = sniff.guess_ext( temp_name, sniff_order=trans.app.datatypes_registry.sniff_order )
- else:
- data_type = file_type
- if replace_dataset:
- # The replace_dataset param ( when not None ) refers to a LibraryDataset that is being replaced with a new version.
- library_dataset = replace_dataset
- else:
- # If replace_dataset is None, the Library level permissions will be taken from the folder and applied to the new
- # LibraryDataset, and the current user's DefaultUserPermissions will be applied to the associated Dataset.
- library_dataset = trans.app.model.LibraryDataset( folder=folder, name=name, info=info )
- library_dataset.flush()
- trans.app.security_agent.copy_library_permissions( folder, library_dataset )
- ldda = trans.app.model.LibraryDatasetDatasetAssociation( name=name,
- info=info,
- extension=data_type,
- dbkey=dbkey,
- library_dataset=library_dataset,
- user=trans.get_user(),
- create_dataset=True )
- ldda.message = message
- ldda.flush()
- # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
- trans.app.security_agent.copy_library_permissions( library_dataset, ldda )
- if replace_dataset:
- # Copy the Dataset level permissions from replace_dataset to the new LibraryDatasetDatasetAssociation.dataset
- trans.app.security_agent.copy_dataset_permissions( replace_dataset.library_dataset_dataset_association.dataset, ldda.dataset )
- else:
- # Copy the current user's DefaultUserPermissions to the new LibraryDatasetDatasetAssociation.dataset
- trans.app.security_agent.set_all_dataset_permissions( ldda.dataset, trans.app.security_agent.user_get_default_permissions( trans.get_user() ) )
- folder.add_library_dataset( library_dataset, genome_build=dbkey )
- folder.flush()
- library_dataset.library_dataset_dataset_association_id = ldda.id
- library_dataset.flush()
- # Handle template included in the upload form, if any
- if template and template_field_contents:
- # Since information templates are inherited, the template fields can be displayed on the upload form.
- # If the user has added field contents, we'll need to create a new form_values and info_association
- # for the new library_dataset_dataset_association object.
- # Create a new FormValues object, using the template we previously retrieved
- form_values = trans.app.model.FormValues( template, template_field_contents )
- form_values.flush()
- # Create a new info_association between the current ldda and form_values
- info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( ldda, template, form_values )
- info_association.flush()
- # If roles were selected upon upload, restrict access to the Dataset to those roles
- if roles:
- for role in roles:
- dp = trans.app.model.DatasetPermissions( RBACAgent.permitted_actions.DATASET_ACCESS.action, ldda.dataset, role )
- dp.flush()
- shutil.move( temp_name, ldda.dataset.file_name )
- ldda.state = ldda.states.OK
- ldda.init_meta()
- if line_count:
- try:
- if is_multi_byte:
- ldda.set_multi_byte_peek( line_count=line_count )
- else:
- ldda.set_peek( line_count=line_count )
- except:
- if is_multi_byte:
- ldda.set_multi_byte_peek()
- else:
- ldda.set_peek()
- else:
- if is_multi_byte:
- ldda.set_multi_byte_peek()
- else:
- ldda.set_peek()
- ldda.set_size()
- if ldda.missing_meta():
- ldda.datatype.set_meta( ldda )
- ldda.flush()
- return ldda
+ @web.json
+ def library_item_updates( self, trans, ids=None, states=None ):
+ # Avoid caching
+ trans.response.headers['Pragma'] = 'no-cache'
+ trans.response.headers['Expires'] = '0'
+ # Create new HTML for any that have changed
+ rval = {}
+ if ids is not None and states is not None:
+ ids = map( int, ids.split( "," ) )
+ states = states.split( "," )
+ for id, state in zip( ids, states ):
+ data = self.app.model.LibraryDatasetDatasetAssociation.get( id )
+ if data.state != state:
+ job_ldda = data
+ while job_ldda.copied_from_library_dataset_dataset_association:
+ job_ldda = job_ldda.copied_from_library_dataset_dataset_association
+ force_history_refresh = False
+ rval[id] = {
+ "state": data.state,
+ "html": unicode( trans.fill_template( "library/library_item_info.mako", ldda=data ), 'utf-8' )
+ #"force_history_refresh": force_history_refresh
+ }
+ return rval
@web.expose
def upload_dataset( self, trans, controller, library_id, folder_id, replace_dataset=None, **kwd ):
- # This method is called from both the admin and library controllers. The replace_dataset param ( when
- # not None ) refers to a LibraryDataset that is being replaced with a new version.
- params = util.Params( kwd )
+ # Set up the traditional tool state/params
+ tool_id = 'upload1'
+ tool = trans.app.toolbox.tools_by_id[ tool_id ]
+ state = tool.new_state( trans )
+ errors = tool.update_state( trans, tool.inputs_by_page[0], state.inputs, kwd, changed_dependencies={} )
+ tool_params = state.inputs
+ dataset_upload_inputs = []
+ for input_name, input in tool.inputs.iteritems():
+ if input.type == "upload_dataset":
+ dataset_upload_inputs.append( input )
+ # Library-specific params
+ params = util.Params( kwd ) # is this filetoolparam safe?
msg = util.restore_text( params.get( 'msg', '' ) )
messagetype = params.get( 'messagetype', 'done' )
- dbkey = params.get( 'dbkey', '?' )
- file_type = params.get( 'file_type', 'auto' )
- data_file = params.get( 'files_0|file_data', '' )
- url_paste = params.get( 'files_0|url_paste', '' )
server_dir = util.restore_text( params.get( 'server_dir', '' ) )
if replace_dataset not in [ None, 'None' ]:
replace_id = replace_dataset.id
@@ -217,24 +72,43 @@
template_field_contents.append( field_value )
else:
template = None
- if upload_option == 'upload_file' and data_file == '' and url_paste == '':
- msg = 'Select a file, enter a URL or enter text'
- err_redirect = True
- elif upload_option == 'upload_directory':
+ if upload_option == 'upload_directory':
if server_dir in [ None, 'None', '' ]:
err_redirect = True
- # See if our request is from the Admin view or the Libraries view
- if trans.request.browser_url.find( 'admin' ) >= 0:
+ if controller == 'library_admin':
import_dir = trans.app.config.library_import_dir
import_dir_desc = 'library_import_dir'
+ full_dir = os.path.join( import_dir, server_dir )
else:
import_dir = trans.app.config.user_library_import_dir
import_dir_desc = 'user_library_import_dir'
+ if server_dir == trans.user.email:
+ full_dir = os.path.join( import_dir, server_dir )
+ else:
+ full_dir = os.path.join( import_dir, trans.user.email, server_dir )
if import_dir:
msg = 'Select a directory'
else:
msg = '"%s" is not defined in the Galaxy configuration file' % import_dir_desc
+ roles = []
+ for role_id in util.listify( params.get( 'roles', [] ) ):
+ roles.append( trans.app.model.Role.get( role_id ) )
+ # Proceed with (mostly) regular upload processing
+ precreated_datasets = upload_common.get_precreated_datasets( trans, tool_params, trans.app.model.HistoryDatasetAssociation )
+ if upload_option == 'upload_file':
+ tool_params = upload_common.persist_uploads( tool_params )
+ json_file_path, data_list = upload_common.create_paramfile( trans, tool_params, precreated_datasets, dataset_upload_inputs, replace_dataset, folder, template, template_field_contents, roles, message )
+ elif upload_option == 'upload_directory':
+ json_file_path, data_list = self.create_server_dir_paramfile( trans, params, full_dir, import_dir_desc, folder, template, template_field_contents, roles, message, err_redirect, msg )
+ upload_common.cleanup_unused_precreated_datasets( precreated_datasets )
+ if upload_option == 'upload_file' and not data_list:
+ msg = 'Select a file, enter a URL or enter text'
+ err_redirect = True
if err_redirect:
+ try:
+ os.remove( json_file_path )
+ except:
+ pass
trans.response.send_redirect( web.url_for( controller=controller,
action='library_dataset_dataset_association',
library_id=library_id,
@@ -243,226 +117,49 @@
upload_option=upload_option,
msg=util.sanitize_text( msg ),
messagetype='error' ) )
- space_to_tab = params.get( 'files_0|space_to_tab', False )
- if space_to_tab and space_to_tab not in [ "None", None ]:
- space_to_tab = True
- roles = []
- for role_id in util.listify( params.get( 'roles', [] ) ):
- roles.append( trans.app.model.Role.get( role_id ) )
+ return upload_common.create_job( trans, tool_params, tool, json_file_path, data_list, folder=folder )
+ def create_server_dir_paramfile( self, trans, params, full_dir, import_dir_desc, folder, template,
+ template_field_contents, roles, message, err_redirect, msg ):
+ """
+ Create JSON param file for the upload tool when using the server_dir upload.
+ """
+ files = []
+ try:
+ for entry in os.listdir( full_dir ):
+ # Only import regular files
+ if os.path.isfile( os.path.join( full_dir, entry ) ):
+ files.append( entry )
+ except Exception, e:
+ msg = "Unable to get file list for configured %s, error: %s" % ( import_dir_desc, str( e ) )
+ err_redirect = True
+ return ( None, None )
+ if not files:
+ msg = "The directory '%s' contains no valid files" % full_dir
+ err_redirect = True
+ return ( None, None )
data_list = []
- created_ldda_ids = ''
- if 'filename' in dir( data_file ):
- file_name = data_file.filename
- file_name = file_name.split( '\\' )[-1]
- file_name = file_name.split( '/' )[-1]
- try:
- created_ldda = self.add_file( trans,
- folder,
- data_file.file,
- file_name,
- file_type,
- dbkey,
- roles,
- info="uploaded file",
- space_to_tab=space_to_tab,
- replace_dataset=replace_dataset,
- template=template,
- template_field_contents=template_field_contents,
- message=message )
- created_ldda_ids = str( created_ldda.id )
- except Exception, e:
- log.exception( 'exception in upload_dataset using file_name %s: %s' % ( str( file_name ), str( e ) ) )
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
- elif url_paste not in [ None, "" ]:
- if url_paste.lower().find( 'http://' ) >= 0 or url_paste.lower().find( 'ftp://' ) >= 0:
- url_paste = url_paste.replace( '\r', '' ).split( '\n' )
- # If we are setting the name from the line, it needs to be the line that creates that dataset
- name_set_from_line = False
- for line in url_paste:
- line = line.rstrip( '\r\n' )
- if line:
- if not line or name_set_from_line:
- name_set_from_line = True
- try:
- created_ldda = self.add_file( trans,
- folder,
- urllib.urlopen( line ),
- line,
- file_type,
- dbkey,
- roles,
- info="uploaded url",
- space_to_tab=space_to_tab,
- replace_dataset=replace_dataset,
- template=template,
- template_field_contents=template_field_contents,
- message=message )
- created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
- except Exception, e:
- log.exception( 'exception in upload_dataset using url_paste %s' % str( e ) )
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
- else:
- is_valid = False
- for line in url_paste:
- line = line.rstrip( '\r\n' )
- if line:
- is_valid = True
- break
- if is_valid:
- try:
- created_ldda = self.add_file( trans,
- folder,
- StringIO.StringIO( url_paste ),
- 'Pasted Entry',
- file_type,
- dbkey,
- roles,
- info="pasted entry",
- space_to_tab=space_to_tab,
- replace_dataset=replace_dataset,
- template=template,
- template_field_contents=template_field_contents,
- message=message )
- created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
- except Exception, e:
- log.exception( 'exception in add_file using StringIO.StringIO( url_paste ) %s' % str( e ) )
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
- elif server_dir not in [ None, "", "None" ]:
- # See if our request is from the Admin view or the Libraries view
- if trans.request.browser_url.find( 'admin' ) >= 0:
- import_dir = trans.app.config.library_import_dir
- import_dir_desc = 'library_import_dir'
- full_dir = os.path.join( import_dir, server_dir )
- else:
- imrport_dir = trans.app.config.user_library_import_dir
- import_dir_desc = 'user_library_import_dir'
- # From the Libraries view, users are restricted to the directory named the same as
- # their email within the configured user_library_import_dir. If this directory contains
- # sub-directories, server_dir will be the name of the selected sub-directory. Otherwise
- # server_dir will be the user's email address.
- if server_dir == trans.user.email:
- full_dir = os.path.join( import_dir, server_dir )
- else:
- full_dir = os.path.join( import_dir, trans.user.email, server_dir )
- files = []
- try:
- for entry in os.listdir( full_dir ):
- # Only import regular files
- if os.path.isfile( os.path.join( full_dir, entry ) ):
- files.append( entry )
- except Exception, e:
- msg = "Unable to get file list for configured %s, error: %s" % ( import_dir_desc, str( e ) )
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", msg )
- if not files:
- msg = "The directory '%s' contains no valid files" % full_dir
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", msg )
- for file in files:
- full_file = os.path.join( full_dir, file )
- if not os.path.isfile( full_file ):
- continue
- try:
- created_ldda = self.add_file( trans,
- folder,
- open( full_file, 'rb' ),
- file,
- file_type,
- dbkey,
- roles,
- info="imported file",
- space_to_tab=space_to_tab,
- replace_dataset=replace_dataset,
- template=template,
- template_field_contents=template_field_contents,
- message=message )
- created_ldda_ids = '%s,%s' % ( created_ldda_ids, str( created_ldda.id ) )
- except Exception, e:
- log.exception( 'exception in add_file using server_dir %s' % str( e ) )
- return self.upload_empty( trans, controller, library_id, folder_id, "Error:", str( e ) )
- if created_ldda_ids:
- created_ldda_ids = created_ldda_ids.lstrip( ',' )
- return created_ldda_ids
- else:
- return ''
- def check_gzip( self, temp_name ):
- temp = open( temp_name, "U" )
- magic_check = temp.read( 2 )
- temp.close()
- if magic_check != util.gzip_magic:
- return ( False, False )
- CHUNK_SIZE = 2**15 # 32Kb
- gzipped_file = gzip.GzipFile( temp_name )
- chunk = gzipped_file.read( CHUNK_SIZE )
- gzipped_file.close()
- if self.check_html( temp_name, chunk=chunk ) or self.check_binary( temp_name, chunk=chunk ):
- return( True, False )
- return ( True, True )
- def check_zip( self, temp_name ):
- if not zipfile.is_zipfile( temp_name ):
- return ( False, False, None )
- zip_file = zipfile.ZipFile( temp_name, "r" )
- # Make sure the archive consists of valid files. The current rules are:
- # 1. Archives can only include .ab1, .scf or .txt files
- # 2. All file file_types within an archive must be the same
- name = zip_file.namelist()[0]
- test_ext = name.split( "." )[1].strip().lower()
- if not ( test_ext == 'scf' or test_ext == 'ab1' or test_ext == 'txt' ):
- return ( True, False, test_ext )
- for name in zip_file.namelist():
- ext = name.split( "." )[1].strip().lower()
- if ext != test_ext:
- return ( True, False, test_ext )
- return ( True, True, test_ext )
- def check_html( self, temp_name, chunk=None ):
- if chunk is None:
- temp = open(temp_name, "U")
- else:
- temp = chunk
- regexp1 = re.compile( "<A\s+[^>]*HREF[^>]+>", re.I )
- regexp2 = re.compile( "<IFRAME[^>]*>", re.I )
- regexp3 = re.compile( "<FRAMESET[^>]*>", re.I )
- regexp4 = re.compile( "<META[^>]*>", re.I )
- lineno = 0
- for line in temp:
- lineno += 1
- matches = regexp1.search( line ) or regexp2.search( line ) or regexp3.search( line ) or regexp4.search( line )
- if matches:
- if chunk is None:
- temp.close()
- return True
- if lineno > 100:
- break
- if chunk is None:
- temp.close()
- return False
- def check_binary( self, temp_name, chunk=None ):
- if chunk is None:
- temp = open( temp_name, "U" )
- else:
- temp = chunk
- lineno = 0
- for line in temp:
- lineno += 1
- line = line.strip()
- if line:
- if util.is_multi_byte( line ):
- return False
- for char in line:
- if ord( char ) > 128:
- if chunk is None:
- temp.close()
- return True
- if lineno > 10:
- break
- if chunk is None:
- temp.close()
- return False
- def upload_empty( self, trans, controller, library_id, folder_id, err_code, err_msg ):
- msg = err_code + err_msg
- return trans.response.send_redirect( web.url_for( controller=controller,
- action='library_dataset_dataset_association',
- library_id=library_id,
- folder_id=folder_id,
- msg=util.sanitize_text( msg ),
- messagetype='error' ) )
-class BadFileException( Exception ):
- pass
+ json_file = tempfile.mkstemp()
+ json_file_path = json_file[1]
+ json_file = os.fdopen( json_file[0], 'w' )
+ for file in files:
+ full_file = os.path.join( full_dir, file )
+ if not os.path.isfile( full_file ):
+ continue
+ uploaded_dataset = util.bunch.Bunch()
+ uploaded_dataset.name = file
+ uploaded_dataset.file_type = params.file_type
+ uploaded_dataset.dbkey = params.dbkey
+ data = upload_common.new_library_upload( trans, uploaded_dataset, None, folder, template, template_field_contents, roles, message )
+ json = dict( file_type = uploaded_dataset.file_type,
+ ext = None,
+ name = uploaded_dataset.name,
+ dataset_id = data.dataset.id,
+ dbkey = uploaded_dataset.dbkey,
+ type = 'server_dir',
+ is_binary = None,
+ space_to_tab = params.space_to_tab,
+ path = full_file )
+ json_file.write( to_json_string( json ) + '\n' )
+ data_list.append( data )
+ json_file.close()
+ return ( json_file_path, data_list )
diff -r c4c409bda49b -r 3559f7377b9c static/june_2007_style/blue/library.css
--- a/static/june_2007_style/blue/library.css Fri Sep 25 14:06:43 2009 -0400
+++ b/static/june_2007_style/blue/library.css Fri Sep 25 14:36:12 2009 -0400
@@ -1,7 +1,7 @@
.libraryRow{background-color:#ebd9b2;}
.datasetHighlighted{background-color:#C1C9E5;}
.libraryItemDeleted-True{font-style:italic;}
-div.historyItemBody{padding:4px 4px 2px 4px;}
+div.libraryItemBody{padding:4px 4px 2px 4px;}
li.folderRow,li.datasetRow{border-top:solid 1px #ddd;}
li.folderRow:hover,li.datasetRow:hover{background-color:#C1C9E5;}
img.expanderIcon{padding-right:4px;}
@@ -15,3 +15,6 @@
span.expandLink{width:16px;height:16px;display:inline-block;vertical-align:middle;background:url(../images/silk/resultset_next.png);}
.folderRow.expanded span.expandLink{background:url(../images/silk/resultset_bottom.png);}
.folderRow span.rowIcon{width:16px;height:16px;display:inline-block;vertical-align:middle;background:url(../images/silk/folder.png);}
+.libraryItem-error{margin-right:2px;padding:0 2px 0 2px;border:1px solid #AA6666;background:#FFCCCC;}
+.libraryItem-queued{margin-right:2px;padding:0 2px 0 2px;border:1px solid #888888;background:#EEEEEE;}
+.libraryItem-running{margin-right:2px;padding:0 2px 0 2px;border:1px solid #AAAA66;background:#FFFFCC;}
diff -r c4c409bda49b -r 3559f7377b9c static/june_2007_style/library.css.tmpl
--- a/static/june_2007_style/library.css.tmpl Fri Sep 25 14:06:43 2009 -0400
+++ b/static/june_2007_style/library.css.tmpl Fri Sep 25 14:36:12 2009 -0400
@@ -10,7 +10,7 @@
font-style: italic;
}
-div.historyItemBody {
+div.libraryItemBody {
padding: 4px 4px 2px 4px;
}
@@ -88,3 +88,24 @@
background: url(../images/silk/folder.png);
}
+.libraryItem-error {
+ margin-right: 2px;
+ padding: 0 2px 0 2px;
+ border: 1px solid $history_error_border;
+ background: $history_error_bg;
+}
+
+.libraryItem-queued {
+ margin-right: 2px;
+ padding: 0 2px 0 2px;
+ border: 1px solid $history_queued_border;
+ background: $history_queued_bg;
+}
+
+.libraryItem-running {
+ margin-right: 2px;
+ padding: 0 2px 0 2px;
+ border: 1px solid $history_running_border;
+ background: $history_running_bg;
+}
+
diff -r c4c409bda49b -r 3559f7377b9c templates/admin/library/browse_library.mako
--- a/templates/admin/library/browse_library.mako Fri Sep 25 14:06:43 2009 -0400
+++ b/templates/admin/library/browse_library.mako Fri Sep 25 14:36:12 2009 -0400
@@ -1,5 +1,6 @@
<%inherit file="/base.mako"/>
<%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/library/library_item_info.mako" import="render_library_item_info" />
<%
from time import strftime
from galaxy import util
@@ -10,6 +11,8 @@
<link href="${h.url_for('/static/style/base.css')}" rel="stylesheet" type="text/css" />
<link href="${h.url_for('/static/style/library.css')}" rel="stylesheet" type="text/css" />
</%def>
+
+<% tracked_datasets = {} %>
<script type="text/javascript">
$( document ).ready( function () {
@@ -35,29 +38,6 @@
$(this).children().find("img.rowIcon").each( function() { this.src = icon_open; });
}
});
- // Hide all dataset bodies
- $("div.historyItemBody").hide();
- // Handle the dataset body hide/show link.
- $("div.historyItemWrapper").each( function() {
- var id = this.id;
- var li = $(this).parent();
- var body = $(this).children( "div.historyItemBody" );
- var peek = body.find( "pre.peek" )
- $(this).children( ".historyItemTitleBar" ).find( ".historyItemTitle" ).wrap( "<a href='#'></a>" ).click( function() {
- if ( body.is(":visible") ) {
- if ( $.browser.mozilla ) { peek.css( "overflow", "hidden" ) }
- body.slideUp( "fast" );
- li.removeClass( "datasetHighlighted" );
- }
- else {
- body.slideDown( "fast", function() {
- if ( $.browser.mozilla ) { peek.css( "overflow", "auto" ); }
- });
- li.addClass( "datasetHighlighted" );
- }
- return false;
- });
- });
});
function checkForm() {
if ( $("select#action_on_datasets_select option:selected").text() == "delete" ) {
@@ -68,6 +48,54 @@
}
}
}
+ // Looks for changes in dataset state using an async request. Keeps
+ // calling itself (via setTimeout) until all datasets are in a terminal
+ // state.
+ var updater = function ( tracked_datasets ) {
+ // Check if there are any items left to track
+ var empty = true;
+ for ( i in tracked_datasets ) {
+ empty = false;
+ break;
+ }
+ if ( ! empty ) {
+ setTimeout( function() { updater_callback( tracked_datasets ) }, 3000 );
+ }
+ };
+ var updater_callback = function ( tracked_datasets ) {
+ // Build request data
+ var ids = []
+ var states = []
+ $.each( tracked_datasets, function ( id, state ) {
+ ids.push( id );
+ states.push( state );
+ });
+ // Make ajax call
+ $.ajax( {
+ type: "POST",
+ url: "${h.url_for( controller='library_dataset', action='library_item_updates' )}",
+ dataType: "json",
+ data: { ids: ids.join( "," ), states: states.join( "," ) },
+ success : function ( data ) {
+ $.each( data, function( id, val ) {
+ // Replace HTML
+ var cell = $("#libraryItem-" + id).find("#libraryItemInfo");
+ cell.html( val.html );
+ // If new state was terminal, stop tracking
+ if (( val.state == "ok") || ( val.state == "error") || ( val.state == "empty") || ( val.state == "deleted" ) || ( val.state == "discarded" )) {
+ delete tracked_datasets[ parseInt(id) ];
+ } else {
+ tracked_datasets[ parseInt(id) ] = val.state;
+ }
+ });
+ updater( tracked_datasets );
+ },
+ error: function() {
+ // Just retry, like the old method, should try to be smarter
+ updater( tracked_datasets );
+ }
+ });
+ };
</script>
<%def name="render_dataset( ldda, library_dataset, selected, library, folder, deleted, show_deleted )">
@@ -84,11 +112,13 @@
current_version = True
else:
current_version = False
+ if current_version and ldda.state not in ( 'ok', 'error', 'empty', 'deleted', 'discarded' ):
+ tracked_datasets[ldda.id] = ldda.state
%>
%if current_version:
- <div class="historyItemWrapper historyItem historyItem-${ldda.state}" id="libraryItem-${ldda.id}">
+ <div class="libraryItemWrapper libraryItem" id="libraryItem-${ldda.id}">
## Header row for library items (name, state, action buttons)
- <div class="historyItemTitleBar">
+ <div class="libraryItemTitleBar">
<table cellspacing="0" cellpadding="0" border="0" width="100%">
<tr>
<td width="*">
@@ -119,7 +149,7 @@
</div>
%endif
</td>
- <td width="300">${ldda.message}</td>
+ <td width="300" id="libraryItemInfo">${render_library_item_info( ldda )}</td>
<td width="150">${uploaded_by}</td>
<td width="60">${ldda.create_time.strftime( "%Y-%m-%d" )}</td>
</tr>
@@ -287,3 +317,11 @@
</p>
%endif
</form>
+
+%if tracked_datasets:
+ <script type="text/javascript">
+ // Updater
+ updater({${ ",".join( [ '"%s" : "%s"' % ( k, v ) for k, v in tracked_datasets.iteritems() ] ) }});
+ </script>
+ <!-- running: do not change this comment, used by TwillTestCase.library_wait -->
+%endif
diff -r c4c409bda49b -r 3559f7377b9c templates/admin/library/new_library.mako
--- a/templates/admin/library/new_library.mako Fri Sep 25 14:06:43 2009 -0400
+++ b/templates/admin/library/new_library.mako Fri Sep 25 14:36:12 2009 -0400
@@ -29,7 +29,9 @@
</div>
<div style="clear: both"></div>
</div>
- <input type="submit" name="create_library_button" value="Create"/>
+ <div class="form-row">
+ <input type="submit" name="create_library_button" value="Create"/>
+ </div>
</form>
</div>
</div>
diff -r c4c409bda49b -r 3559f7377b9c templates/library/browse_library.mako
--- a/templates/library/browse_library.mako Fri Sep 25 14:06:43 2009 -0400
+++ b/templates/library/browse_library.mako Fri Sep 25 14:36:12 2009 -0400
@@ -1,5 +1,6 @@
<%inherit file="/base.mako"/>
<%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/library/library_item_info.mako" import="render_library_item_info" />
<%
from galaxy import util
from galaxy.web.controllers.library import active_folders
@@ -12,6 +13,8 @@
<link href="${h.url_for('/static/style/base.css')}" rel="stylesheet" type="text/css" />
<link href="${h.url_for('/static/style/library.css')}" rel="stylesheet" type="text/css" />
</%def>
+
+<% tracked_datasets = {} %>
<%
class RowCounter( object ):
@@ -77,6 +80,54 @@
});
});
});
+ // Looks for changes in dataset state using an async request. Keeps
+ // calling itself (via setTimeout) until all datasets are in a terminal
+ // state.
+ var updater = function ( tracked_datasets ) {
+ // Check if there are any items left to track
+ var empty = true;
+ for ( i in tracked_datasets ) {
+ empty = false;
+ break;
+ }
+ if ( ! empty ) {
+ setTimeout( function() { updater_callback( tracked_datasets ) }, 3000 );
+ }
+ };
+ var updater_callback = function ( tracked_datasets ) {
+ // Build request data
+ var ids = []
+ var states = []
+ $.each( tracked_datasets, function ( id, state ) {
+ ids.push( id );
+ states.push( state );
+ });
+ // Make ajax call
+ $.ajax( {
+ type: "POST",
+ url: "${h.url_for( controller='library_dataset', action='library_item_updates' )}",
+ dataType: "json",
+ data: { ids: ids.join( "," ), states: states.join( "," ) },
+ success : function ( data ) {
+ $.each( data, function( id, val ) {
+ // Replace HTML
+ var cell = $("#libraryItem-" + id).find("#libraryItemInfo");
+ cell.html( val.html );
+ // If new state was terminal, stop tracking
+ if (( val.state == "ok") || ( val.state == "error") || ( val.state == "empty") || ( val.state == "deleted" ) || ( val.state == "discarded" )) {
+ delete tracked_datasets[ parseInt(id) ];
+ } else {
+ tracked_datasets[ parseInt(id) ] = val.state;
+ }
+ });
+ updater( tracked_datasets );
+ },
+ error: function() {
+ // Just retry, like the old method, should try to be smarter
+ updater( tracked_datasets );
+ }
+ });
+ };
</script>
<%def name="render_dataset( ldda, library_dataset, selected, library, folder, pad, parent, row_conter )">
@@ -95,6 +146,8 @@
can_manage_library_dataset = trans.app.security_agent.can_manage_library_item( user, roles, library_dataset )
else:
current_version = False
+ if current_version and ldda.state not in ( 'ok', 'error', 'empty', 'deleted', 'discarded' ):
+ tracked_datasets[ldda.id] = ldda.state
%>
%if current_version:
<tr class="datasetRow"
@@ -102,7 +155,7 @@
parent="${parent}"
style="display: none;"
%endif
- >
+ id="libraryItem-${ldda.id}">
<td style="padding-left: ${pad+20}px;">
%if selected:
<input type="checkbox" name="ldda_ids" value="${ldda.id}" checked/>
@@ -129,7 +182,7 @@
%endif
</div>
</td>
- <td>${ldda.message}</td>
+ <td id="libraryItemInfo">${render_library_item_info( ldda )}</td>
<td>${uploaded_by}</td>
<td>${ldda.create_time.strftime( "%Y-%m-%d" )}</td>
</tr>
@@ -305,6 +358,14 @@
</table>
</form>
+%if tracked_datasets:
+ <script type="text/javascript">
+ // Updater
+ updater({${ ",".join( [ '"%s" : "%s"' % ( k, v ) for k, v in tracked_datasets.iteritems() ] ) }});
+ </script>
+ <!-- running: do not change this comment, used by TwillTestCase.library_wait -->
+%endif
+
## Help about compression types
%if len( comptypes ) > 1:
diff -r c4c409bda49b -r 3559f7377b9c templates/library/library_dataset_common.mako
--- a/templates/library/library_dataset_common.mako Fri Sep 25 14:06:43 2009 -0400
+++ b/templates/library/library_dataset_common.mako Fri Sep 25 14:36:12 2009 -0400
@@ -40,7 +40,8 @@
<div class="form-row">
<label>File:</label>
<div class="form-row-input">
- <input type="file" name="files_0|file_data" galaxy-ajax-upload="true"/>
+ ##<input type="file" name="files_0|file_data" galaxy-ajax-upload="true"/>
+ <input type="file" name="files_0|file_data"/>
</div>
<div style="clear: both"></div>
</div>
@@ -109,11 +110,16 @@
Convert spaces to tabs:
</label>
<div class="form-row-input">
- <input type="checkbox" name="files_0|space_to_tab" value="Yes"/>Yes
+ ## The files grouping only makes sense in the upload_file context.
+ %if upload_option == 'upload_file':
+ <input type="checkbox" name="files_0|space_to_tab" value="Yes"/>Yes
+ %else:
+ <input type="checkbox" name="space_to_tab" value="Yes"/>Yes
+ %endif
</div>
- </div>
- <div class="toolParamHelp" style="clear: both;">
- Use this option if you are entering intervals by hand.
+ <div class="toolParamHelp" style="clear: both;">
+ Use this option if you are entering intervals by hand.
+ </div>
</div>
<div style="clear: both"></div>
<div class="form-row">
diff -r c4c409bda49b -r 3559f7377b9c templates/library/library_item_info.mako
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/templates/library/library_item_info.mako Fri Sep 25 14:36:12 2009 -0400
@@ -0,0 +1,13 @@
+<%def name="render_library_item_info( ldda )">
+ %if ldda.state == 'error':
+ <div class="libraryItem-${ldda.state}">Job error <i>(click name for more info)</i></div>
+ %elif ldda.state == 'queued':
+ <div class="libraryItem-${ldda.state}">This job is queued</div>
+ %elif ldda.state == 'running':
+ <div class="libraryItem-${ldda.state}">This job is running</div>
+ %else:
+ ${ldda.message}
+ %endif
+</%def>
+
+${render_library_item_info( ldda )}
diff -r c4c409bda49b -r 3559f7377b9c test-data/users/test3@bx.psu.edu/run1/2.fasta
--- a/test-data/users/test3@bx.psu.edu/run1/2.fasta Fri Sep 25 14:06:43 2009 -0400
+++ b/test-data/users/test3@bx.psu.edu/run1/2.fasta Fri Sep 25 14:36:12 2009 -0400
@@ -8,4 +8,4 @@
ctcaatgttc atgttcttag gttgttttgg ataatatgcg gtcagtttaa tcttcgttgt
ttcttcttaa aatatttatt catggtttaa tttttggttt gtacttgttc aggggccagt
tcattattta ctctgtttgt atacagcagt tcttttattt ttagtatgat tttaatttaa
-aacaattcta atggtcaaaa a
\ No newline at end of file
+aacaattcta atggtcaaaa a
diff -r c4c409bda49b -r 3559f7377b9c test/base/twilltestcase.py
--- a/test/base/twilltestcase.py Fri Sep 25 14:06:43 2009 -0400
+++ b/test/base/twilltestcase.py Fri Sep 25 14:36:12 2009 -0400
@@ -1274,6 +1274,7 @@
else:
check_str = "Added 1 datasets to the folder '%s' ( each is selected )." % folder_name
self.check_page_for_string( check_str )
+ self.library_wait( library_id )
self.home()
def set_library_dataset_permissions( self, library_id, folder_id, ldda_id, ldda_name, role_id, permissions_in, permissions_out ):
url = "library_admin/library_dataset_dataset_association?library_id=%s&folder_id=%s&&id=%s&permissions=True&update_roles_button=Save" % \
@@ -1359,25 +1360,7 @@
tc.submit( "runtool_btn" )
check_str = "Added 1 dataset versions to the library dataset '%s' in the folder '%s'." % ( ldda_name, folder_name )
self.check_page_for_string( check_str )
- self.home()
- def upload_new_dataset_versions( self, library_id, folder_id, folder_name, library_dataset_id, ldda_name, file_type='auto',
- dbkey='hg18', message='', template_field_name1='', template_field_contents1='' ):
- """Upload new version(s) of a dataset using a directory of files"""
- self.home()
- self.visit_url( "%s/library_admin/library_dataset_dataset_association?upload_option=upload_directory&library_id=%s&folder_id=%s&replace_id=%s" \
- % ( self.url, library_id, folder_id, library_dataset_id ) )
- self.check_page_for_string( 'Upload a directory of files' )
- self.check_page_for_string( 'You are currently selecting a new file to replace' )
- tc.fv( "1", "file_type", file_type )
- tc.fv( "1", "dbkey", dbkey )
- tc.fv( "1", "message", message.replace( '+', ' ' ) )
- tc.fv( "1", "server_dir", "library" )
- # Add template field contents, if any...
- if template_field_name1:
- tc.fv( "1", template_field_name1, template_field_contents1 )
- tc.submit( "runtool_btn" )
- check_str = "Added 3 dataset versions to the library dataset '%s' in the folder '%s'." % ( ldda_name, folder_name )
- self.check_page_for_string( check_str )
+ self.library_wait( library_id )
self.home()
def add_history_datasets_to_library( self, library_id, folder_id, folder_name, hda_id, root=False ):
"""Copy a dataset from the current history to a library folder"""
@@ -1410,6 +1393,7 @@
tc.submit( "runtool_btn" )
if check_str_after_submit:
self.check_page_for_string( check_str_after_submit )
+ self.library_wait( library_id )
self.home()
def add_dir_of_files_from_libraries_view( self, library_id, folder_id, selected_dir, file_type='auto', dbkey='hg18', roles_tuple=[],
message='', check_str_after_submit='', template_field_name1='', template_field_contents1='' ):
@@ -1432,6 +1416,7 @@
tc.submit( "runtool_btn" )
if check_str_after_submit:
self.check_page_for_string( check_str_after_submit )
+ self.library_wait( library_id, controller='library' )
self.home()
def delete_library_item( self, library_id, library_item_id, library_item_name, library_item_type='library_dataset' ):
"""Mark a library item as deleted"""
@@ -1464,3 +1449,18 @@
check_str = "Library '%s' and all of its contents have been purged" % library_name
self.check_page_for_string( check_str )
self.home()
+ def library_wait( self, library_id, controller='library_admin', maxiter=20 ):
+ """Waits for the tools to finish"""
+ count = 0
+ sleep_amount = 1
+ self.home()
+ while count < maxiter:
+ count += 1
+ self.visit_url( "%s/%s/browse_library?id=%s" % ( self.url, controller, library_id ) )
+ page = tc.browser.get_html()
+ if page.find( '<!-- running: do not change this comment, used by TwillTestCase.library_wait -->' ) > -1:
+ time.sleep( sleep_amount )
+ sleep_amount += 1
+ else:
+ break
+ self.assertNotEqual(count, maxiter)
diff -r c4c409bda49b -r 3559f7377b9c test/functional/__init__.py
--- a/test/functional/__init__.py Fri Sep 25 14:06:43 2009 -0400
+++ b/test/functional/__init__.py Fri Sep 25 14:36:12 2009 -0400
@@ -79,8 +79,8 @@
allow_user_creation = True,
allow_user_deletion = True,
admin_users = 'test@bx.psu.edu',
- library_import_dir = galaxy_test_file_dir,
- user_library_import_dir = os.path.join( galaxy_test_file_dir, 'users' ),
+ library_import_dir = os.path.join( os.getcwd(), galaxy_test_file_dir ),
+ user_library_import_dir = os.path.join( os.getcwd(), galaxy_test_file_dir, 'users' ),
global_conf = { "__file__": "universe_wsgi.ini.sample" } )
log.info( "Embedded Universe application started" )
diff -r c4c409bda49b -r 3559f7377b9c tools/data_source/upload.py
--- a/tools/data_source/upload.py Fri Sep 25 14:06:43 2009 -0400
+++ b/tools/data_source/upload.py Fri Sep 25 14:36:12 2009 -0400
@@ -137,7 +137,7 @@
# See if we have an empty file
if not os.path.exists( dataset.path ):
- file_err( 'Uploaded temporary file (%s) does not exist. Please' % dataset.path, dataset, json_file )
+ file_err( 'Uploaded temporary file (%s) does not exist.' % dataset.path, dataset, json_file )
return
if not os.path.getsize( dataset.path ) > 0:
file_err( 'The uploaded file is empty', dataset, json_file )
@@ -237,7 +237,10 @@
if ext == 'auto':
ext = 'data'
# Move the dataset to its "real" path
- shutil.move( dataset.path, output_path )
+ if dataset.type == 'server_dir':
+ shutil.copy( dataset.path, output_path )
+ else:
+ shutil.move( dataset.path, output_path )
# Write the job info
info = dict( type = 'dataset',
dataset_id = dataset.dataset_id,
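The new upload path hands work to tools/data_source/upload.py through a temporary file of JSON lines, one line per dataset to create. Distilled from the dict built in the server_dir branch above, a single line looks roughly like this (the values shown are illustrative, not defaults):

    {"file_type": "auto", "ext": null, "name": "2.fasta", "dataset_id": 42, "dbkey": "?", "type": "server_dir", "is_binary": null, "space_to_tab": false, "path": "/data/import/run1/2.fasta"}

upload.py consumes these lines to locate and process each file, and for type 'server_dir' it now copies the source into place rather than moving it, as the last hunk above shows.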
details: http://www.bx.psu.edu/hg/galaxy/rev/c4c409bda49b
changeset: 2779:c4c409bda49b
user: Anton Nekrutenko <anton@bx.psu.edu>
date: Fri Sep 25 14:06:43 2009 -0400
description:
Moving cite to help
1 file(s) affected in this change:
templates/base_panels.mako
diffs (19 lines):
diff -r 40f58d95a051 -r c4c409bda49b templates/base_panels.mako
--- a/templates/base_panels.mako Fri Sep 25 13:58:15 2009 -0400
+++ b/templates/base_panels.mako Fri Sep 25 14:06:43 2009 -0400
@@ -169,14 +169,10 @@
<li><a href="${app.config.get( "bugs_email", "mailto:galaxy-bugs@bx.psu.edu" )}">Email comments, bug reports, or suggestions</a></li>
<li><a target="_blank" href="${app.config.get( "wiki_url", "http://bitbucket.org/galaxy/galaxy-central/wiki" )}">Galaxy Wiki</a></li>
<li><a target="_blank" href="${app.config.get( "screencasts_url", "http://galaxycast.org" )}">Video tutorials (screencasts)</a></li>
+ <li><a target="_blank" href="${app.config.get( "citation_url", "http://bitbucket.org/galaxy/galaxy-central/wiki/Citations" )}">How to Cite Galaxy</a></li>
</ul>
</div>
</td>
-
- <td class="tab">
- <a target="_blank" href="${app.config.get( "citation_url", "http://bitbucket.org/galaxy/galaxy-central/wiki/Citations" )}">Cite</a>
- </td>
-
<td class="tab">
<a>User</a>
02 Oct '09
details: http://www.bx.psu.edu/hg/galaxy/rev/dd50d8d45177
changeset: 2781:dd50d8d45177
user: gua110
date: Fri Sep 25 14:50:34 2009 -0400
description:
Adding an option to Group tool to ignore case while grouping.
3 file(s) affected in this change:
test-data/groupby_out1.dat
tools/stats/grouping.py
tools/stats/grouping.xml
diffs (122 lines):
diff -r 3559f7377b9c -r dd50d8d45177 test-data/groupby_out1.dat
--- a/test-data/groupby_out1.dat Fri Sep 25 14:36:12 2009 -0400
+++ b/test-data/groupby_out1.dat Fri Sep 25 14:50:34 2009 -0400
@@ -17,4 +17,4 @@
chr7 1.15958e+08
chr8 1.18881e+08
chr9 1.28843e+08
-chrX 1.45195e+08
+chrx 1.45195e+08
diff -r 3559f7377b9c -r dd50d8d45177 tools/stats/grouping.py
--- a/tools/stats/grouping.py Fri Sep 25 14:36:12 2009 -0400
+++ b/tools/stats/grouping.py Fri Sep 25 14:50:34 2009 -0400
@@ -12,13 +12,13 @@
def main():
inputfile = sys.argv[2]
-
+ ignorecase = int(sys.argv[4])
ops = []
cols = []
rounds = []
elems = []
- for var in sys.argv[4:]:
+ for var in sys.argv[5:]:
ops.append(var.split()[0])
cols.append(var.split()[1])
rounds.append(var.split()[2])
@@ -71,7 +71,10 @@
we need to add 1 to group_col.
if POS2 is not specified, the newer versions of sort will consider the entire line for sorting. To prevent this, we set POS2=POS1.
"""
- command_line = "sort -f -k " + str(group_col+1) +"," + str(group_col+1) + " -o " + tmpfile.name + " " + inputfile
+ case = ''
+ if ignorecase == 1:
+ case = '-f'
+ command_line = "sort -t $'\t' " + case + " -k" + str(group_col+1) +"," + str(group_col+1) + " -o " + tmpfile.name + " " + inputfile
except Exception, exc:
stop_err( 'Initialization error -> %s' %str(exc) )
@@ -95,6 +98,8 @@
try:
fields = line.split("\t")
item = fields[group_col]
+ if ignorecase == 1:
+ item = item.lower()
if prev_item != "":
# At this level, we're grouping on values (item and prev_item) in group_col
if item == prev_item:
diff -r 3559f7377b9c -r dd50d8d45177 tools/stats/grouping.xml
--- a/tools/stats/grouping.xml Fri Sep 25 14:36:12 2009 -0400
+++ b/tools/stats/grouping.xml Fri Sep 25 14:50:34 2009 -0400
@@ -1,10 +1,11 @@
-<tool id="Grouping1" name="Group" version="1.7.0">
+<tool id="Grouping1" name="Group" version="1.8.0">
<description>data by a column and perform aggregate operation on other columns.</description>
<command interpreter="python">
grouping.py
$out_file1
$input1
$groupcol
+ $ignorecase
#for $op in $operations
'${op.optype}
${op.opcol}
@@ -14,6 +15,9 @@
<inputs>
<param format="tabular" name="input1" type="data" label="Select data" help="Query missing? See TIP below."/>
<param name="groupcol" label="Group by column" type="data_column" data_ref="input1" />
+ <param name="ignorecase" type="boolean" truevalue="1" falsevalue="0">
+ <label>Ignore case while grouping?</label>
+ </param>
<repeat name="operations" title="Operation">
<param name="optype" type="select" label="Type">
<option value="mean">Mean</option>
@@ -44,6 +48,7 @@
<test>
<param name="input1" value="1.bed"/>
<param name="groupcol" value="1"/>
+ <param name="ignorecase" value="true"/>
<param name="optype" value="mean"/>
<param name="opcol" value="2"/>
<param name="opround" value="no"/>
@@ -54,6 +59,7 @@
<test>
<param name="input1" value="1.tabular"/>
<param name="groupcol" value="1"/>
+ <param name="ignorecase" value="true"/>
<param name="optype" value="mean"/>
<param name="opcol" value="2"/>
<param name="opround" value="no"/>
@@ -80,15 +86,22 @@
- For the following input::
- chr22 1000 NM_17
- chr22 2000 NM_18
- chr10 2200 NM_10
- chr10 1200 NM_11
- chr22 1600 NM_19
+ chr22 1000 1003 TTT
+ chr22 2000 2003 aaa
+ chr10 2200 2203 TTT
+ chr10 1200 1203 ttt
+ chr22 1600 1603 AAA
-- running this tool with **Group by column 1**, Operations **Mean on column 2** and **Concatenate on column 3** will return::
+- **Grouping on column 4** while ignoring case, and performing operation **Count on column 1** will return::
- chr10 1700.00 NM_11,NM_10
- chr22 1533.33 NM_17,NM_19,NM_18
+ AAA 2
+ TTT 3
+
+- **Grouping on column 4** while not ignoring case, and performing operation **Count on column 1** will return::
+
+ aaa 1
+ AAA 1
+ ttt 1
+ TTT 2
</help>
</tool>
details: http://www.bx.psu.edu/hg/galaxy/rev/40f58d95a051
changeset: 2778:40f58d95a051
user: Anton Nekrutenko <anton@bx.psu.edu>
date: Fri Sep 25 13:58:15 2009 -0400
description:
Added Cite to masthead
5 file(s) affected in this change:
templates/base_panels.mako
tools/fastx_toolkit/fastq_to_fasta.xml
tools/samtools/pileup_parser.xml
tools/samtools/sam_pileup.xml
universe_wsgi.ini.sample
diffs (80 lines):
diff -r 6a86a558f405 -r 40f58d95a051 templates/base_panels.mako
--- a/templates/base_panels.mako Fri Sep 25 11:32:47 2009 -0400
+++ b/templates/base_panels.mako Fri Sep 25 13:58:15 2009 -0400
@@ -167,11 +167,16 @@
<div class="submenu">
<ul>
<li><a href="${app.config.get( "bugs_email", "mailto:galaxy-bugs@bx.psu.edu" )}">Email comments, bug reports, or suggestions</a></li>
- <li><a target="_blank" href="${app.config.get( "wiki_url", "http://g2.trac.bx.psu.edu/" )}">Galaxy Wiki</a></li>
- <li><a target="_blank" href="${app.config.get( "screencasts_url", "http://g2.trac.bx.psu.edu/wiki/ScreenCasts" )}">Video tutorials (screencasts)</a></li>
+ <li><a target="_blank" href="${app.config.get( "wiki_url", "http://bitbucket.org/galaxy/galaxy-central/wiki" )}">Galaxy Wiki</a></li>
+ <li><a target="_blank" href="${app.config.get( "screencasts_url", "http://galaxycast.org" )}">Video tutorials (screencasts)</a></li>
</ul>
</div>
</td>
+
+ <td class="tab">
+ <a target="_blank" href="${app.config.get( "citation_url", "http://bitbucket.org/galaxy/galaxy-central/wiki/Citations" )}">Cite</a>
+ </td>
+
<td class="tab">
<a>User</a>
@@ -282,7 +287,7 @@
</head>
<body scroll="no" class="${self.body_class}">
- <div id="everything" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; min-width: 600px;">
+ <div id="everything" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; min-width: 960px;">
## Background displays first
<div id="background"></div>
## Layer iframes over backgrounds
diff -r 6a86a558f405 -r 40f58d95a051 tools/fastx_toolkit/fastq_to_fasta.xml
--- a/tools/fastx_toolkit/fastq_to_fasta.xml Fri Sep 25 11:32:47 2009 -0400
+++ b/tools/fastx_toolkit/fastq_to_fasta.xml Fri Sep 25 13:58:15 2009 -0400
@@ -3,7 +3,7 @@
<command>gunzip -cf $input | fastq_to_fasta $SKIPN $RENAMESEQ -o $output -v </command>
<inputs>
- <param format="fastqsolexa" name="input" type="data" label="FASTQ Library to convert" />
+ <param format="fastqsolexa,fastqsanger" name="input" type="data" label="FASTQ Library to convert" />
<param name="SKIPN" type="select" label="Discard sequences with unknown (N) bases ">
<option value="">yes</option>
diff -r 6a86a558f405 -r 40f58d95a051 tools/samtools/pileup_parser.xml
--- a/tools/samtools/pileup_parser.xml Fri Sep 25 11:32:47 2009 -0400
+++ b/tools/samtools/pileup_parser.xml Fri Sep 25 13:58:15 2009 -0400
@@ -1,5 +1,5 @@
-<tool id="pileup_parser" name="Parse pileup">
- <description>to find variants</description>
+<tool id="pileup_parser" name="Filter pileup">
+ <description>on coverage and SNPs</description>
<command interpreter="perl">
#if $pileup_type.type_select == "six": #pileup_parser.pl $input "3" "5" "6" "4" $qv_cutoff $cvrg_cutoff $snps_only $interval "2" $out_file1
#elif $pileup_type.type_select == "ten": #pileup_parser.pl $input "3" "9" "10" "8" $qv_cutoff $cvrg_cutoff $snps_only $interval "2" $out_file1
diff -r 6a86a558f405 -r 40f58d95a051 tools/samtools/sam_pileup.xml
--- a/tools/samtools/sam_pileup.xml Fri Sep 25 11:32:47 2009 -0400
+++ b/tools/samtools/sam_pileup.xml Fri Sep 25 13:58:15 2009 -0400
@@ -1,5 +1,5 @@
-<tool id="sam_pileup" name="SAM Pileup Format" version="1.0.0">
- <description>generates the pileup format for a provided BAM file</description>
+<tool id="sam_pileup" name="Generate pileup" version="1.0.0">
+ <description>from BAM dataset</description>
<command interpreter="python">
sam_pileup.py
--input1=$input1
diff -r 6a86a558f405 -r 40f58d95a051 universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample Fri Sep 25 11:32:47 2009 -0400
+++ b/universe_wsgi.ini.sample Fri Sep 25 13:58:15 2009 -0400
@@ -102,9 +102,11 @@
## Brand: appends "/[brand]" to the "Galaxy" text in the masthead
## wiki_url: replaces the default galaxy main wiki
## bugs_email: replaces the default galaxy bugs email list
+##citation_url: point to a URL listing citations
#brand = Private local mirror
#wiki_url = /path/to/my/local/wiki
#bugs_email = mailto:galaxy-bugs@example.org
+#citation_url = /path/to/my/citations
# ---- Logging and Debugging ------------------------------------------------
Hello,
we're in the process of installing bowtie and bwa to get them working
with Galaxy and have a question. Looking at the left-hand menu item "NGS:
Mapping" on the home page of the Galaxy instance we have just installed, we see:
NGS: Mapping
Megablast compare short reads against nt and wgs databases
Parse blast XML output
Digging in tools_config.xml we see:
<section name="NGS: Mapping" id="solexa_tools">
<!-- <tool file="sr_mapping/lastz_wrapper.xml" /> -->
<tool file="metag_tools/megablast_wrapper.xml" />
<tool file="metag_tools/megablast_xml_parser.xml" />
<tool file="sr_mapping/bowtie_wrapper.xml" />
<tool file="sr_mapping/bwa_wrapper.xml" />
</section>
Looking in .../tools/sr_mapping we see both:
bowtie_wrapper.xml
bwa_wrapper.xml
And checking, say, bowtie_wrapper.xml we see:
<tool id="bowtie_wrapper" name="Bowtie" version="1.0.0">
<description>fast alignment of reads against reference sequence</description>
Everything looks good so why do we not see sub-menu items for bowtie
and bwa? All we see under NGS: Mapping are the two items I listed
initially, "Megablast compare short reads against nt and wgs
databases" and "Parse blast XML output".
Thanks for your help.
Mark.
Hello,
About the "Galaxy as a subdirectory" section of the documentation, I have a
couple of questions.
Assuming the rewrite rule changes are made to the Apache httpd.conf file, should
they be part of a directory tag, and should the directory specified just be
the path to the galaxy_dist directory in the main server configuration?
Also, to change the port from 8080 to another port, are the only changes
necessary the following rewrite rule: RewriteRule ^/galaxy(.*)
http://localhost:8080$1 [P] and "port=8080" in the server:main section of
the universe_wsgi.ini file?
Thanks,
Kimberly
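For what it's worth, a minimal sketch of the two places the port number appears, based only on the settings quoted in the question above (9090 is just an example port; the proxy rule assumes mod_rewrite and mod_proxy are enabled):

    # apache httpd.conf -- proxy requests under /galaxy to the Galaxy server
    RewriteEngine on
    RewriteRule ^/galaxy(.*) http://localhost:9090$1 [P]

    # universe_wsgi.ini
    [server:main]
    host = 127.0.0.1
    port = 9090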
28 Sep '09
Matthias Dodt wrote:
> We would like to integrate a new data source into our Galaxy system. We
> want data to be inserted into Galaxy by copying it directly from
> file servers instead of uploading it through an HTTP connection - maybe
> via some browser-based "explorer" that gives access to certain folders.
> Is there any built-in Galaxy feature which may support something like this?
Hi Matthias,
I have relocated this question to galaxy-dev since it deals with local
implementation.
This is available now via the 'library_import_dir' and
'user_library_import_dir' options in universe_wsgi.ini.
library_import_dir: A directory containing subdirectories, from which
all contents may be uploaded into a data library via the "upload a
directory" version of the upload form. We often symlink files to these
subdirectories to avoid double copies.
user_library_import_dir: A directory containing subdirectories named after
Galaxy user email addresses, which work as above, except that regular
users may upload from these directories to libraries (assuming they have
"add to library" permissions).
--nate
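As a rough sketch of the layout nate describes (the paths below are placeholders, not defaults; only the two option names come from the text above):

    # universe_wsgi.ini
    library_import_dir = /data/galaxy/library_import
    user_library_import_dir = /data/galaxy/library_import/users

    # matching directory layout on the server
    /data/galaxy/library_import/run1/reads.fa
        <- admins pick the 'run1' subdirectory on the upload form
    /data/galaxy/library_import/users/user@example.org/run1/reads.fa
        <- regular users are restricted to the subdirectory matching their email

Symlinking large files into these subdirectories, as nate notes, avoids keeping two copies on disk.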
Hi,
a minor suggestion: could you place the tool_runner configuration
(local, sge, etc.) into tool_conf.xml, or even into the tool's XML file?
To check whether a tool runs locally I have to look up the location of
the tool's XML file in tool_conf.xml, then find the tool's id in that file,
and then look into universe_wsgi.ini to find its tool_runner.
Quite confusing.
regards, Andreas
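For readers following along, the lookup chain Andreas describes runs through three files; here is a sketch using the bowtie wrapper from the earlier thread (the [galaxy:tool_runners] section name and the pbs:/// runner URL are taken from the sample config of this era as an assumption and may differ in your install):

    <!-- tool_conf.xml: section -> tool XML file -->
    <section name="NGS: Mapping" id="solexa_tools">
      <tool file="sr_mapping/bowtie_wrapper.xml" />
    </section>

    <!-- sr_mapping/bowtie_wrapper.xml: file -> tool id -->
    <tool id="bowtie_wrapper" name="Bowtie" version="1.0.0">

    # universe_wsgi.ini: tool id -> runner URL; tools not listed typically fall back to the local runner
    [galaxy:tool_runners]
    bowtie_wrapper = pbs:///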
25 Sep '09
details: http://www.bx.psu.edu/hg/galaxy/rev/6a86a558f405
changeset: 2777:6a86a558f405
user: Greg Von Kuster <greg@bx.psu.edu>
date: Fri Sep 25 11:32:47 2009 -0400
description:
Fix a bug in the security agent where multiple roles associated with the same permission on the same object were not properly handled.
1 file(s) affected in this change:
lib/galaxy/security/__init__.py
diffs (100 lines):
diff -r a2849c4b7219 -r 6a86a558f405 lib/galaxy/security/__init__.py
--- a/lib/galaxy/security/__init__.py Fri Sep 25 11:02:02 2009 -0400
+++ b/lib/galaxy/security/__init__.py Fri Sep 25 11:32:47 2009 -0400
@@ -33,7 +33,7 @@
def get_actions( self ):
"""Get all permitted actions as a list of Action objects"""
return self.permitted_actions.__dict__.values()
- def get_item_action( self, action, item ):
+ def get_item_actions( self, action, item ):
raise 'No valid method of retrieving action (%s) for item %s.' % ( action, item )
def guess_derived_permissions_for_datasets( self, datasets = [] ):
raise "Unimplemented Method"
@@ -102,10 +102,15 @@
Returns true when user has permission to perform an action on an
instance of Dataset.
"""
- dataset_action = self.get_item_action( action, dataset )
- if dataset_action is None:
+ dataset_actions = self.get_item_actions( action, dataset )
+ if not dataset_actions:
return action.model == 'restrict'
- return dataset_action.role in roles
+ ret_val = False
+ for dataset_action in dataset_actions:
+ if dataset_action.role in roles:
+ ret_val = True
+ break
+ return ret_val
def can_access_dataset( self, roles, dataset ):
return self.allow_dataset_action( roles, self.permitted_actions.DATASET_ACCESS, dataset )
def can_manage_dataset( self, roles, dataset ):
@@ -120,23 +125,25 @@
# All permissions are granted, so non-users cannot have permissions
return False
# Check to see if user has access to any of the roles associated with action
- item_action = self.get_item_action( action, item )
- if item_action is None:
+ item_actions = self.get_item_actions( action, item )
+ if not item_actions:
# All permissions are granted, so item must have action
return False
- return item_action.role in roles
+ ret_val = False
+ for item_action in item_actions:
+ if item_action.role in roles:
+ ret_val = True
+ break
+ return ret_val
def can_add_library_item( self, user, roles, item ):
return self.allow_library_item_action( user, roles, self.permitted_actions.LIBRARY_ADD, item )
def can_modify_library_item( self, user, roles, item ):
return self.allow_library_item_action( user, roles, self.permitted_actions.LIBRARY_MODIFY, item )
def can_manage_library_item( self, user, roles, item ):
return self.allow_library_item_action( user, roles, self.permitted_actions.LIBRARY_MANAGE, item )
- def get_item_action( self, action, item ):
+ def get_item_actions( self, action, item ):
# item must be one of: Dataset, Library, LibraryFolder, LibraryDataset, LibraryDatasetDatasetAssociation
- for permission in item.actions:
- if permission.action == action.action:
- return permission
- return None
+ return [ permission for permission in item.actions if permission.action == action.action ]
def guess_derived_permissions_for_datasets( self, datasets=[] ):
"""Returns a dict of { action : [ role, role, ... ] } for the output dataset based upon provided datasets"""
perms = {}
@@ -427,12 +434,12 @@
on library_item. Otherwise, cycle through all sub-folders in library_item until one is found that meets
this criteria, if it exists. This method does not necessarily scan the entire library as it returns
when it finds the first library_item that allows user to perform any one action in actions_to_check.
- """
+ """
for action in actions_to_check:
if self.allow_library_item_action( user, roles, action, library_item ):
return True, hidden_folder_ids
if isinstance( library_item, self.model.Library ):
- return self.show_library_item( user, roles, library_item.root_folder, actions_to_check, hidden_folder_ids=hidden_folder_ids )
+ return self.show_library_item( user, roles, library_item.root_folder, actions_to_check, hidden_folder_ids='' )
if isinstance( library_item, self.model.LibraryFolder ):
for folder in library_item.active_folders:
can_show, hidden_folder_ids = self.show_library_item( user, roles, folder, actions_to_check, hidden_folder_ids=hidden_folder_ids )
@@ -525,13 +532,14 @@
.options( eagerload_all( "dataset.actions" ) ) \
.all()
for ldda in lddas:
- ldda_access = self.get_item_action( action, ldda.dataset )
- if ldda_access is None:
+ ldda_access_permissions = self.get_item_actions( action, ldda.dataset )
+ if not ldda_access_permissions:
# Dataset is public
return True, hidden_folder_ids
- if ldda_access.role in roles:
- # The current user has access permission on the dataset
- return True, hidden_folder_ids
+ for ldda_access_permission in ldda_access_permissions:
+ if ldda_access_permission.role in roles:
+ # The current user has access permission on the dataset
+ return True, hidden_folder_ids
for sub_folder in folder.active_folders:
can_access, hidden_folder_ids = self.check_folder_contents( user, roles, sub_folder, hidden_folder_ids=hidden_folder_ids )
if can_access:
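Distilled from the diff above, the fix replaces the single-permission lookup with a list and allows the action if any matching permission carries one of the user's roles. A simplified sketch of the pattern (not the actual security agent API):

    # Simplified sketch; 'item.actions' holds permission objects with .action and .role,
    # mirroring the objects used in the diff above.
    def get_item_actions( action, item ):
        # Collect every permission matching the action instead of stopping at the first one.
        return [ permission for permission in item.actions if permission.action == action.action ]

    def allow_action( roles, action, item ):
        item_actions = get_item_actions( action, item )
        if not item_actions:
            # No permissions recorded; the real code's behavior here depends on the
            # permission model ('restrict' vs. grant-all), see allow_dataset_action above.
            return False
        # Allowed if any matching permission's role is among the user's roles.
        return any( item_action.role in roles for item_action in item_actions )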
25 Sep '09
details: http://www.bx.psu.edu/hg/galaxy/rev/a2849c4b7219
changeset: 2776:a2849c4b7219
user: rc
date: Fri Sep 25 11:02:02 2009 -0400
description:
Fixed a request grids bug. Filters are working now.
2 file(s) affected in this change:
lib/galaxy/web/controllers/requests.py
lib/galaxy/web/controllers/requests_admin.py
diffs (47 lines):
diff -r 200e243a28e0 -r a2849c4b7219 lib/galaxy/web/controllers/requests.py
--- a/lib/galaxy/web/controllers/requests.py Fri Sep 25 10:45:10 2009 -0400
+++ b/lib/galaxy/web/controllers/requests.py Fri Sep 25 11:02:02 2009 -0400
@@ -51,7 +51,11 @@
def get_request_type(self, trans, request):
return request.type.name
def apply_default_filter( self, trans, query, **kwargs ):
- return query.filter_by( user=trans.user )
+ query = query.filter_by( user=trans.user )
+ if self.default_filter:
+ return query.filter_by( **self.default_filter )
+ else:
+ return query
def number_of_samples(self, trans, request):
return str(len(request.samples))
def get_state(self, trans, request):
@@ -475,6 +479,7 @@
params = util.Params( kwd )
lib_id = params.get( 'library_id', 'none' )
# if editing a request
+ selected_lib = None
if request and lib_id == 'none':
if request.library:
lib_id = str(request.library.id)
diff -r 200e243a28e0 -r a2849c4b7219 lib/galaxy/web/controllers/requests_admin.py
--- a/lib/galaxy/web/controllers/requests_admin.py Fri Sep 25 10:45:10 2009 -0400
+++ b/lib/galaxy/web/controllers/requests_admin.py Fri Sep 25 11:02:02 2009 -0400
@@ -54,6 +54,11 @@
return request_type.name
def number_of_samples(self, trans, request):
return str(len(request.samples))
+ def apply_default_filter( self, trans, query, **kwargs ):
+ if self.default_filter:
+ return query.filter_by( **self.default_filter )
+ else:
+ return query
class Requests( BaseController ):
request_grid = RequestsListGrid()
@@ -721,6 +726,7 @@
params = util.Params( kwd )
lib_id = params.get( 'library_id', 'none' )
# if editing a request
+ selected_lib = None
if request and lib_id == 'none':
if request.library:
lib_id = str(request.library.id)