galaxy-dev
October 2008: 4 participants, 57 discussions
details: http://www.bx.psu.edu/hg/galaxy/rev/20591fa0d05d
changeset: 1542:20591fa0d05d
user: Greg Von Kuster <greg(a)bx.psu.edu>
date: Thu Oct 02 15:51:17 2008 -0400
description:
Reverting to rev 1459.
1 file(s) affected in this change:
lib/galaxy/web/controllers/root.py
diffs (458 lines):
diff -r 993bed7b5e26 -r 20591fa0d05d lib/galaxy/web/controllers/root.py
--- a/lib/galaxy/web/controllers/root.py Thu Oct 02 13:18:50 2008 -0400
+++ b/lib/galaxy/web/controllers/root.py Thu Oct 02 15:51:17 2008 -0400
@@ -21,7 +21,9 @@
@web.expose
def index(self, trans, id=None, tool_id=None, mode=None, m_c=None, m_a=None, **kwd):
- return trans.fill_template( "root/index.mako", tool_id=tool_id, m_c=m_c, m_a=m_a )
+ return trans.fill_template( "root/index.mako",
+ tool_id=tool_id,
+ m_c=m_c, m_a=m_a )
## ---- Tool related -----------------------------------------------------
@@ -66,10 +68,10 @@
@web.expose
def dataset_state ( self, trans, id=None, stamp=None ):
if id is not None:
- try:
- data = self.app.model.Dataset.get( id )
+ try:
+ data = self.app.model.HistoryDatasetAssociation.get( id )
except:
- return trans.show_error_message( "Unable to check dataset id %s." %str( id ) )
+ return trans.show_error_message( "Unable to check dataset %s." %str( id ) )
trans.response.headers['X-Dataset-State'] = data.state
trans.response.headers['Pragma'] = 'no-cache'
trans.response.headers['Expires'] = '0'
@@ -80,13 +82,13 @@
@web.expose
def dataset_code( self, trans, id=None, hid=None, stamp=None ):
if id is not None:
- try:
- hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
+ try:
+ data = self.app.model.HistoryDatasetAssociation.get( id )
except:
- return trans.show_error_message( "Unable to check dataset id %s." % str( id ) )
+ return trans.show_error_message( "Unable to check dataset %s." %str( id ) )
trans.response.headers['Pragma'] = 'no-cache'
trans.response.headers['Expires'] = '0'
- return trans.fill_template( "root/history_item.mako", data=hda.dataset, hid=hid )
+ return trans.fill_template("root/history_item.mako", data=data, hid=hid)
else:
return trans.show_error_message( "Must specify a dataset id.")
@@ -101,11 +103,11 @@
ids = map( int, ids.split( "," ) )
states = states.split( "," )
for id, state in zip( ids, states ):
- hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
- if hda.dataset.state != state:
+ data = self.app.model.HistoryDatasetAssociation.get( id )
+ if data.state != state:
rval[id] = {
- "state": hda.dataset.state,
- "html": trans.fill_template( "root/history_item.mako", data=hda, hid=hda.hid )
+ "state": data.state,
+ "html": trans.fill_template( "root/history_item.mako", data=data, hid=data.hid )
}
return rval
@@ -123,36 +125,36 @@
except:
return "hid '%s' is invalid" %str( hid )
history = trans.get_history()
- for hda in history.datasets:
- if hda.hid == hid:
- history_dataset_assoc = hda
+ for dataset in history.datasets:
+ if dataset.hid == hid:
+ data = dataset
break
else:
- raise Exception( "History_dataset_association with hid '%s' does not exist." % str( hid ) )
+ raise Exception( "No dataset with hid '%d'" % hid )
else:
try:
- history_dataset_assoc = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
+ data = self.app.model.HistoryDatasetAssociation.get( id )
except:
- return "Dataset id '%s' is invalid." %str( id )
- if history_dataset_assoc:
- mime = trans.app.datatypes_registry.get_mimetype_by_extension( history_dataset_assoc.extension.lower() )
+ return "Dataset id '%s' is invalid" %str( id )
+ if data:
+ mime = trans.app.datatypes_registry.get_mimetype_by_extension( data.extension.lower() )
trans.response.set_content_type(mime)
if tofile:
- fStat = os.stat(history_dataset_assoc.file_name)
+ fStat = os.stat(data.file_name)
trans.response.headers['Content-Length'] = int(fStat.st_size)
if toext[0:1] != ".":
toext = "." + toext
valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
- fname = history_dataset_assoc.name
+ fname = data.name
fname = ''.join(c in valid_chars and c or '_' for c in fname)[0:150]
- trans.response.headers["Content-Disposition"] = "attachment; filename=GalaxyHistoryItem-%s-[%s]%s" % (history_dataset_assoc.hid, fname, toext)
- trans.log_event( "Display dataset id: '%s'." % str(id) )
+ trans.response.headers["Content-Disposition"] = "attachment; filename=GalaxyHistoryItem-%s-[%s]%s" % (data.hid, fname, toext)
+ trans.log_event( "Display dataset id: %s" % str(id) )
try:
- return open( history_dataset_assoc.file_name )
+ return open( data.file_name )
except:
- return "Dataset id '%s' contains no content." % str( id )
+ return "This dataset contains no content"
else:
- return "Dataset id '%s' does not exist." % str( id )
+ return "No dataset with id '%s'" % str( id )
@web.expose
def display_child(self, trans, parent_id=None, designation=None, tofile=None, toext=".txt"):
@@ -160,36 +162,36 @@
Returns child data directly into the browser, based upon parent_id and designation.
"""
try:
- hda = self.app.model.HistoryDatasetAssociation.get( parent_id )
- if hda:
- child = hda.get_child_by_designation( designation )
+ data = self.app.model.HistoryDatasetAssociation.get( parent_id )
+ if data:
+ child = data.get_child_by_designation(designation)
if child:
return self.display(trans, id=child.id, tofile=tofile, toext=toext)
except Exception:
pass
- return "A child named '%s' could not be found for history_dataset_association id '%s'" % ( designation, str( parent_id ) )
+ return "A child named %s could not be found for data %s" % ( designation, parent_id )
@web.expose
def display_as( self, trans, id=None, display_app=None, **kwd ):
"""Returns a file in a format that can successfully be displayed in display_app"""
- hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
- if hda:
- trans.response.set_content_type( hda.get_mime() )
+ data = self.app.model.HistoryDatasetAssociation.get( id )
+ if data:
+ trans.response.set_content_type(data.get_mime())
trans.log_event( "Formatted dataset id %s for display at %s" % ( str(id), display_app ) )
- return hda.as_display_type( display_app, **kwd )
+ return data.as_display_type(display_app, **kwd)
else:
- return "Dataset 'id' %s does not exist." % str( id )
+ return "No data with id=%d" % id
@web.expose
def peek(self, trans, id=None):
"""Returns a 'peek' at the data"""
- hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
- if hda:
+ data = self.app.model.HistoryDatasetAssociation.get( id )
+ if data:
yield "<html><body><pre>"
- yield hda.peek
+ yield data.peek
yield "</pre></body></html>"
else:
- yield "Dataset 'id' %s does not exist." % str( id )
+ yield "No data with id=%d" % id
@web.expose
def edit(self, trans, id=None, hid=None, **kwd):
@@ -197,75 +199,76 @@
if hid is not None:
history = trans.get_history()
# TODO: hid handling
- hda = history.datasets[ int( hid ) - 1 ]
+ data = history.datasets[ int( hid ) - 1 ]
elif id is None:
- return trans.show_error_message( "Problem loading dataset id '%s' with history id '%s'." % ( str( id ), str( hid ) ) )
+ return trans.show_error_message( "Problem loading dataset id %s with history id %s." % ( str( id ), str( hid ) ) )
else:
- hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
- if hda is None:
- return trans.show_error_message( "Problem retrieving dataset 'id' %s with history id '%s'." % ( str( id ), str( hid ) ) )
+ data = self.app.model.HistoryDatasetAssociation.get( id )
+ if data is None:
+ return trans.show_error_message( "Problem retrieving dataset id %s with history id %s." % ( str( id ), str( hid ) ) )
p = util.Params(kwd, safe=False)
if p.change:
# The user clicked the Save button on the 'Change data type' form
- trans.app.datatypes_registry.change_datatype( hda, p.datatype )
+ trans.app.datatypes_registry.change_datatype( data, p.datatype )
trans.app.model.flush()
elif p.save:
# The user clicked the Save button on the 'Edit Attributes' form
- hda.name = p.name
- hda.info = p.info
+ data.name = p.name
+ data.info = p.info
# The following for loop will save all metadata_spec items
- for name, spec in hda.datatype.metadata_spec.items():
+ for name, spec in data.datatype.metadata_spec.items():
if spec.get("readonly"):
continue
optional = p.get("is_"+name, None)
if optional and optional == 'true':
# optional element... == 'true' actually means it is NOT checked (and therefore ommitted)
- setattr( hda.metadata, name, None )
+ setattr(data.metadata,name,None)
else:
- setattr( hda.metadata, name, spec.unwrap( p.get( name, None ), p ) )
+ setattr(data.metadata,name,spec.unwrap(p.get(name, None), p))
- hda.datatype.after_edit( hda )
+ data.datatype.after_edit( data )
trans.app.model.flush()
return trans.show_ok_message( "Attributes updated", refresh_frames=['history'] )
elif p.detect:
# The user clicked the Auto-detect button on the 'Edit Attributes' form
- for name, spec in hda.datatype.metadata_spec.items():
+ for name, spec in data.datatype.metadata_spec.items():
# We need to be careful about the attributes we are resetting
if name != 'name' and name != 'info' and name != 'dbkey':
if spec.get( 'default' ):
- setattr( hda.metadata, name, spec.unwrap( spec.get( 'default' ), spec ) )
- hda.datatype.set_meta( hda )
- hda.datatype.after_edit( hda )
+ setattr( data.metadata,name,spec.unwrap( spec.get( 'default' ), spec ))
+ data.datatype.set_meta( data )
+ data.datatype.after_edit( data )
trans.app.model.flush()
return trans.show_ok_message( "Attributes updated", refresh_frames=['history'] )
elif p.convert_data:
"""The user clicked the Convert button on the 'Convert to new format' form"""
target_type = kwd.get("target_type", None)
if target_type:
- msg = hda.datatype.convert_dataset(trans, hda, target_type)
+ msg = data.datatype.convert_dataset(trans, data, target_type)
return trans.show_ok_message( msg, refresh_frames=['history'] )
- hda.datatype.before_edit( hda )
+ data.datatype.before_edit( data )
- if "dbkey" in hda.datatype.metadata_spec and not hda.metadata.dbkey:
+ if "dbkey" in data.datatype.metadata_spec and not data.metadata.dbkey:
# Copy dbkey into metadata, for backwards compatability
# This looks like it does nothing, but getting the dbkey
# returns the metadata dbkey unless it is None, in which
# case it resorts to the old dbkey. Setting the dbkey
# sets it properly in the metadata
- hda.metadata.dbkey = hda.dbkey
+ data.metadata.dbkey = data.dbkey
metadata = list()
# a list of MetadataParemeters
- for name, spec in hda.datatype.metadata_spec.items():
+ for name, spec in data.datatype.metadata_spec.items():
if spec.visible:
- metadata.append( spec.wrap( hda.metadata.get( name ), hda ) )
+ metadata.append( spec.wrap( data.metadata.get(name), data ) )
# let's not overwrite the imported datatypes module with the variable datatypes?
ldatatypes = [x for x in trans.app.datatypes_registry.datatypes_by_extension.iterkeys()]
ldatatypes.sort()
- trans.log_event( "Opened edit view on dataset id '%s'" % str( id ) )
- return trans.fill_template( "/dataset/edit_attributes.mako", data=hda, metadata=metadata, datatypes=ldatatypes, err=None )
+ trans.log_event( "Opened edit view on dataset %s" % str(id) )
+ return trans.fill_template( "/dataset/edit_attributes.mako", data=data, metadata=metadata,
+ datatypes=ldatatypes, err=None )
@web.expose
def delete( self, trans, id = None, **kwd):
@@ -280,21 +283,21 @@
int( id )
except:
continue
- hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
- if hda:
- # Walk up parent hdas to find the containing history
- topmost_parent = hda
+ data = self.app.model.HistoryDatasetAssociation.get( id )
+ if data:
+ # Walk up parent datasets to find the containing history
+ topmost_parent = data
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in history.datasets, "Data does not belong to current history"
# Mark deleted and cleanup
- hda.mark_deleted()
- hda.clear_associated_files()
+ data.mark_deleted()
+ data.clear_associated_files()
self.app.model.flush()
- trans.log_event( "Dataset id '%s' marked as deleted" % str( id ) )
- if hda.parent_id is None:
+ trans.log_event( "Dataset id %s marked as deleted" % str(id) )
+ if data.parent_id is None:
try:
- self.app.job_stop_queue.put( hda.creating_job_associations[0].job )
+ self.app.job_stop_queue.put( data.creating_job_associations[0].job )
except IndexError:
pass # upload tool will cause this since it doesn't have a job
return self.history( trans )
@@ -307,21 +310,21 @@
except:
return "Dataset id '%s' is invalid" %str( id )
history = trans.get_history()
- hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
- if hda:
+ data = self.app.model.HistoryDatasetAssociation.get( id )
+ if data:
# Walk up parent datasets to find the containing history
- topmost_parent = hda
+ topmost_parent = data
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in history.datasets, "Data does not belong to current history"
# Mark deleted and cleanup
- hda.mark_deleted()
- hda.clear_associated_files()
+ data.mark_deleted()
+ data.clear_associated_files()
self.app.model.flush()
- trans.log_event( "Dataset id '%s' marked as deleted async" % str( id ) )
- if hda.parent_id is None:
+ trans.log_event( "Dataset id %s marked as deleted async" % str(id) )
+ if data.parent_id is None:
try:
- self.app.job_stop_queue.put( hda.creating_job_associations[0].job )
+ self.app.job_stop_queue.put( data.creating_job_associations[0].job )
except IndexError:
pass # upload tool will cause this since it doesn't have a job
return "OK"
@@ -331,7 +334,8 @@
@web.expose
def history_options( self, trans ):
"""Displays a list of history related actions"""
- return trans.fill_template( "/history/options.mako", user=trans.get_user(), history=trans.get_history() )
+ return trans.fill_template( "/history/options.mako",
+ user = trans.get_user(), history = trans.get_history() )
@web.expose
def history_delete( self, trans, id=None, **kwd):
@@ -363,16 +367,16 @@
trans.log_event( "History id %s marked as deleted" % str(hid) )
else:
return trans.show_message( "You must select at least one history to delete." )
- return trans.show_message( "History deleted: %s" % ",".join(history_names), refresh_frames=['history'] )
+ return trans.show_message( "History deleted: %s" % ",".join(history_names),
+ refresh_frames=['history'])
@web.expose
def clear_history( self, trans ):
"""Clears the history for a user"""
history = trans.get_history()
- for hda in history.datasets:
- hda.deleted = True
- hda.dataset.deleted = True
- hda.clear_associated_files()
+ for dataset in history.datasets:
+ dataset.deleted = True
+ dataset.clear_associated_files()
self.app.model.flush()
trans.log_event( "History id %s cleared" % (str(history.id)) )
trans.response.send_redirect( url_for("/index" ) )
@@ -420,7 +424,9 @@
if not isinstance( id, list ):
id = [ id ]
trans.log_event( "History id %s available" % str( id ) )
- return trans.fill_template( "/history/list.mako", ids=id, user=trans.get_user(), current_history=trans.get_history() )
+ return trans.fill_template( "/history/list.mako", ids=id,
+ user=trans.get_user(),
+ current_history=trans.get_history() )
@web.expose
def history_import( self, trans, id=None, confirm=False, **kwd ):
@@ -546,23 +552,23 @@
"""Adds a POSTed file to a History"""
try:
history = trans.app.model.History.get( history_id )
- hda = trans.app.model.HistoryDatasetAssociation( name=name, info=info, extension=ext, dbkey=dbkey, create_dataset=True )
- hda.flush()
- data_file = open( hda.file_name, "wb" )
+ data = trans.app.model.HistoryDatasetAssociation( name = name, info = info, extension = ext, dbkey = dbkey, create_dataset = True )
+ data.flush()
+ data_file = open( data.file_name, "wb" )
file_data.file.seek( 0 )
data_file.write( file_data.file.read() )
data_file.close()
- hda.dataset.state = hda.dataset.states.OK
- hda.init_meta()
- hda.set_meta()
- hda.flush()
- history.add_dataset( hda )
+ data.state = data.states.OK
+ data.init_meta()
+ data.set_meta()
+ data.flush()
+ history.add_dataset( data )
history.flush()
- hda.set_peek()
- hda.set_size()
- hda.flush()
- trans.log_event( "Added dataset id '%s' to history id '%s'." % ( str( hda.dataset_id ), str( history_id ) ) )
- return trans.show_ok_message( "Dataset id " + str( hda.dataset_id ) + " added to history id " + str( history_id ) + "." )
+ data.set_peek()
+ data.set_size()
+ data.flush()
+ trans.log_event("Added dataset %d to history %d" %(data.id, trans.history.id))
+ return trans.show_ok_message("Dataset "+str(data.hid)+" added to history "+str(history_id)+".")
except:
return trans.show_error_message("Adding File to History has Failed")
@@ -570,11 +576,13 @@
def dataset_make_primary( self, trans, id=None):
"""Copies a dataset and makes primary"""
try:
- old_hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
- new_hda = old_hda.copy()
+ old_data = self.app.model.HistoryDatasetAssociation.get( id )
+ new_data = old_data.copy()
+ ## new_data.parent = None
+ ## history = trans.app.model.History.get( old_data.history_id )
history = trans.get_history()
- history.add_dataset( new_hda )
- new_hda.flush()
+ history.add_dataset(new_data)
+ new_data.flush()
return trans.show_message( "<p>Secondary dataset has been made primary.</p>", refresh_frames=['history'] )
except:
return trans.show_error_message( "<p>Failed to make secondary dataset primary.</p>" )
@@ -594,7 +602,7 @@
@web.expose
def dataset_errors( self, trans, id=None, **kwd ):
"""View/fix errors associated with dataset"""
- hda = trans.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
+ data = trans.app.model.HistoryDatasetAssociation.get( id )
p = kwd
if p.get("fix_errors", None):
# launch tool to create new, (hopefully) error free dataset
@@ -602,19 +610,18 @@
tool_params["tool_id"] = 'fix_errors'
tool_params["runtool_btn"] = 'T'
tool_params["input"] = id
- tool_params["ext"] = hda.ext
+ tool_params["ext"] = data.ext
# send methods selected
- repair_methods = hda.datatype.repair_methods( hda )
+ repair_methods = data.datatype.repair_methods( data )
methods = []
for method, description in repair_methods:
- if method in p:
- methods.append( method )
+ if method in p: methods.append(method)
tool_params["methods"] = ",".join(methods)
url = "/tool_runner/index?" + urllib.urlencode(tool_params)
trans.response.send_redirect(url)
else:
- history = trans.app.model.History.get( hda.history_id )
- return trans.fill_template('dataset/validation.tmpl', data=hda, history=history)
+ history = trans.app.model.History.get( data.history_id )
+ return trans.fill_template('dataset/validation.tmpl', data=data, history=history)
# ---- Debug methods ----------------------------------------------------
details: http://www.bx.psu.edu/hg/galaxy/rev/993bed7b5e26
changeset: 1541:993bed7b5e26
user: Greg Von Kuster <greg(a)bx.psu.edu>
date: Thu Oct 02 13:18:50 2008 -0400
description:
Remove a debug statement from root controller...
1 file(s) affected in this change:
lib/galaxy/web/controllers/root.py
diffs (11 lines):
diff -r 0041f2ba49ed -r 993bed7b5e26 lib/galaxy/web/controllers/root.py
--- a/lib/galaxy/web/controllers/root.py Thu Oct 02 12:45:03 2008 -0400
+++ b/lib/galaxy/web/controllers/root.py Thu Oct 02 13:18:50 2008 -0400
@@ -281,7 +281,6 @@
except:
continue
hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
- log.debug("***hda.dataset_id: %s" % str( hda.dataset_id))
if hda:
# Walk up parent hdas to find the containing history
topmost_parent = hda
details: http://www.bx.psu.edu/hg/galaxy/rev/0041f2ba49ed
changeset: 1540:0041f2ba49ed
user: Greg Von Kuster <greg(a)bx.psu.edu>
date: Thu Oct 02 12:45:03 2008 -0400
description:
Fix all root controller methods so that history_dataset_association.id is not confused with history_dataset_association.dataset_id.
1 file(s) affected in this change:
lib/galaxy/web/controllers/root.py
diffs (415 lines):
diff -r dd58d5aeb274 -r 0041f2ba49ed lib/galaxy/web/controllers/root.py
--- a/lib/galaxy/web/controllers/root.py Wed Oct 01 12:43:59 2008 -0400
+++ b/lib/galaxy/web/controllers/root.py Thu Oct 02 12:45:03 2008 -0400
@@ -21,9 +21,7 @@
@web.expose
def index(self, trans, id=None, tool_id=None, mode=None, m_c=None, m_a=None, **kwd):
- return trans.fill_template( "root/index.mako",
- tool_id=tool_id,
- m_c=m_c, m_a=m_a )
+ return trans.fill_template( "root/index.mako", tool_id=tool_id, m_c=m_c, m_a=m_a )
## ---- Tool related -----------------------------------------------------
@@ -68,10 +66,10 @@
@web.expose
def dataset_state ( self, trans, id=None, stamp=None ):
if id is not None:
- try:
- data = self.app.model.HistoryDatasetAssociation.get( id )
+ try:
+ data = self.app.model.Dataset.get( id )
except:
- return trans.show_error_message( "Unable to check dataset %s." %str( id ) )
+ return trans.show_error_message( "Unable to check dataset id %s." %str( id ) )
trans.response.headers['X-Dataset-State'] = data.state
trans.response.headers['Pragma'] = 'no-cache'
trans.response.headers['Expires'] = '0'
@@ -82,13 +80,13 @@
@web.expose
def dataset_code( self, trans, id=None, hid=None, stamp=None ):
if id is not None:
- try:
- data = self.app.model.HistoryDatasetAssociation.get( id )
+ try:
+ hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
except:
- return trans.show_error_message( "Unable to check dataset %s." %str( id ) )
+ return trans.show_error_message( "Unable to check dataset id %s." % str( id ) )
trans.response.headers['Pragma'] = 'no-cache'
trans.response.headers['Expires'] = '0'
- return trans.fill_template("root/history_item.mako", data=data, hid=hid)
+ return trans.fill_template( "root/history_item.mako", data=hda.dataset, hid=hid )
else:
return trans.show_error_message( "Must specify a dataset id.")
@@ -103,11 +101,11 @@
ids = map( int, ids.split( "," ) )
states = states.split( "," )
for id, state in zip( ids, states ):
- data = self.app.model.HistoryDatasetAssociation.get( id )
- if data.state != state:
+ hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
+ if hda.dataset.state != state:
rval[id] = {
- "state": data.state,
- "html": trans.fill_template( "root/history_item.mako", data=data, hid=data.hid )
+ "state": hda.dataset.state,
+ "html": trans.fill_template( "root/history_item.mako", data=hda, hid=hda.hid )
}
return rval
@@ -133,7 +131,6 @@
raise Exception( "History_dataset_association with hid '%s' does not exist." % str( hid ) )
else:
try:
- id = int( id )
history_dataset_assoc = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
except:
return "Dataset id '%s' is invalid." %str( id )
@@ -163,36 +160,36 @@
Returns child data directly into the browser, based upon parent_id and designation.
"""
try:
- data = self.app.model.HistoryDatasetAssociation.get( parent_id )
- if data:
- child = data.get_child_by_designation(designation)
+ hda = self.app.model.HistoryDatasetAssociation.get( parent_id )
+ if hda:
+ child = hda.get_child_by_designation( designation )
if child:
return self.display(trans, id=child.id, tofile=tofile, toext=toext)
except Exception:
pass
- return "A child named %s could not be found for data %s" % ( designation, parent_id )
+ return "A child named '%s' could not be found for history_dataset_association id '%s'" % ( designation, str( parent_id ) )
@web.expose
def display_as( self, trans, id=None, display_app=None, **kwd ):
"""Returns a file in a format that can successfully be displayed in display_app"""
- data = self.app.model.HistoryDatasetAssociation.get( id )
- if data:
- trans.response.set_content_type(data.get_mime())
+ hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
+ if hda:
+ trans.response.set_content_type( hda.get_mime() )
trans.log_event( "Formatted dataset id %s for display at %s" % ( str(id), display_app ) )
- return data.as_display_type(display_app, **kwd)
+ return hda.as_display_type( display_app, **kwd )
else:
- return "No data with id=%d" % id
+ return "Dataset 'id' %s does not exist." % str( id )
@web.expose
def peek(self, trans, id=None):
"""Returns a 'peek' at the data"""
- data = self.app.model.HistoryDatasetAssociation.get( id )
- if data:
+ hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
+ if hda:
yield "<html><body><pre>"
- yield data.peek
+ yield hda.peek
yield "</pre></body></html>"
else:
- yield "No data with id=%d" % id
+ yield "Dataset 'id' %s does not exist." % str( id )
@web.expose
def edit(self, trans, id=None, hid=None, **kwd):
@@ -200,76 +197,75 @@
if hid is not None:
history = trans.get_history()
# TODO: hid handling
- data = history.datasets[ int( hid ) - 1 ]
+ hda = history.datasets[ int( hid ) - 1 ]
elif id is None:
- return trans.show_error_message( "Problem loading dataset id %s with history id %s." % ( str( id ), str( hid ) ) )
+ return trans.show_error_message( "Problem loading dataset id '%s' with history id '%s'." % ( str( id ), str( hid ) ) )
else:
- data = self.app.model.HistoryDatasetAssociation.get( id )
- if data is None:
- return trans.show_error_message( "Problem retrieving dataset id %s with history id %s." % ( str( id ), str( hid ) ) )
+ hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
+ if hda is None:
+ return trans.show_error_message( "Problem retrieving dataset 'id' %s with history id '%s'." % ( str( id ), str( hid ) ) )
p = util.Params(kwd, safe=False)
if p.change:
# The user clicked the Save button on the 'Change data type' form
- trans.app.datatypes_registry.change_datatype( data, p.datatype )
+ trans.app.datatypes_registry.change_datatype( hda, p.datatype )
trans.app.model.flush()
elif p.save:
# The user clicked the Save button on the 'Edit Attributes' form
- data.name = p.name
- data.info = p.info
+ hda.name = p.name
+ hda.info = p.info
# The following for loop will save all metadata_spec items
- for name, spec in data.datatype.metadata_spec.items():
+ for name, spec in hda.datatype.metadata_spec.items():
if spec.get("readonly"):
continue
optional = p.get("is_"+name, None)
if optional and optional == 'true':
# optional element... == 'true' actually means it is NOT checked (and therefore ommitted)
- setattr(data.metadata,name,None)
+ setattr( hda.metadata, name, None )
else:
- setattr(data.metadata,name,spec.unwrap(p.get(name, None), p))
+ setattr( hda.metadata, name, spec.unwrap( p.get( name, None ), p ) )
- data.datatype.after_edit( data )
+ hda.datatype.after_edit( hda )
trans.app.model.flush()
return trans.show_ok_message( "Attributes updated", refresh_frames=['history'] )
elif p.detect:
# The user clicked the Auto-detect button on the 'Edit Attributes' form
- for name, spec in data.datatype.metadata_spec.items():
+ for name, spec in hda.datatype.metadata_spec.items():
# We need to be careful about the attributes we are resetting
if name != 'name' and name != 'info' and name != 'dbkey':
if spec.get( 'default' ):
- setattr( data.metadata,name,spec.unwrap( spec.get( 'default' ), spec ))
- data.datatype.set_meta( data )
- data.datatype.after_edit( data )
+ setattr( hda.metadata, name, spec.unwrap( spec.get( 'default' ), spec ) )
+ hda.datatype.set_meta( hda )
+ hda.datatype.after_edit( hda )
trans.app.model.flush()
return trans.show_ok_message( "Attributes updated", refresh_frames=['history'] )
elif p.convert_data:
"""The user clicked the Convert button on the 'Convert to new format' form"""
target_type = kwd.get("target_type", None)
if target_type:
- msg = data.datatype.convert_dataset(trans, data, target_type)
+ msg = hda.datatype.convert_dataset(trans, hda, target_type)
return trans.show_ok_message( msg, refresh_frames=['history'] )
- data.datatype.before_edit( data )
+ hda.datatype.before_edit( hda )
- if "dbkey" in data.datatype.metadata_spec and not data.metadata.dbkey:
+ if "dbkey" in hda.datatype.metadata_spec and not hda.metadata.dbkey:
# Copy dbkey into metadata, for backwards compatability
# This looks like it does nothing, but getting the dbkey
# returns the metadata dbkey unless it is None, in which
# case it resorts to the old dbkey. Setting the dbkey
# sets it properly in the metadata
- data.metadata.dbkey = data.dbkey
+ hda.metadata.dbkey = hda.dbkey
metadata = list()
# a list of MetadataParemeters
- for name, spec in data.datatype.metadata_spec.items():
+ for name, spec in hda.datatype.metadata_spec.items():
if spec.visible:
- metadata.append( spec.wrap( data.metadata.get(name), data ) )
+ metadata.append( spec.wrap( hda.metadata.get( name ), hda ) )
# let's not overwrite the imported datatypes module with the variable datatypes?
ldatatypes = [x for x in trans.app.datatypes_registry.datatypes_by_extension.iterkeys()]
ldatatypes.sort()
- trans.log_event( "Opened edit view on dataset %s" % str(id) )
- return trans.fill_template( "/dataset/edit_attributes.mako", data=data, metadata=metadata,
- datatypes=ldatatypes, err=None )
+ trans.log_event( "Opened edit view on dataset id '%s'" % str( id ) )
+ return trans.fill_template( "/dataset/edit_attributes.mako", data=hda, metadata=metadata, datatypes=ldatatypes, err=None )
@web.expose
def delete( self, trans, id = None, **kwd):
@@ -284,21 +280,22 @@
int( id )
except:
continue
- data = self.app.model.HistoryDatasetAssociation.get( id )
- if data:
- # Walk up parent datasets to find the containing history
- topmost_parent = data
+ hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
+ log.debug("***hda.dataset_id: %s" % str( hda.dataset_id))
+ if hda:
+ # Walk up parent hdas to find the containing history
+ topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in history.datasets, "Data does not belong to current history"
# Mark deleted and cleanup
- data.mark_deleted()
- data.clear_associated_files()
+ hda.mark_deleted()
+ hda.clear_associated_files()
self.app.model.flush()
- trans.log_event( "Dataset id %s marked as deleted" % str(id) )
- if data.parent_id is None:
+ trans.log_event( "Dataset id '%s' marked as deleted" % str( id ) )
+ if hda.parent_id is None:
try:
- self.app.job_stop_queue.put( data.creating_job_associations[0].job )
+ self.app.job_stop_queue.put( hda.creating_job_associations[0].job )
except IndexError:
pass # upload tool will cause this since it doesn't have a job
return self.history( trans )
@@ -311,21 +308,21 @@
except:
return "Dataset id '%s' is invalid" %str( id )
history = trans.get_history()
- data = self.app.model.HistoryDatasetAssociation.get( id )
- if data:
+ hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
+ if hda:
# Walk up parent datasets to find the containing history
- topmost_parent = data
+ topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in history.datasets, "Data does not belong to current history"
# Mark deleted and cleanup
- data.mark_deleted()
- data.clear_associated_files()
+ hda.mark_deleted()
+ hda.clear_associated_files()
self.app.model.flush()
- trans.log_event( "Dataset id %s marked as deleted async" % str(id) )
- if data.parent_id is None:
+ trans.log_event( "Dataset id '%s' marked as deleted async" % str( id ) )
+ if hda.parent_id is None:
try:
- self.app.job_stop_queue.put( data.creating_job_associations[0].job )
+ self.app.job_stop_queue.put( hda.creating_job_associations[0].job )
except IndexError:
pass # upload tool will cause this since it doesn't have a job
return "OK"
@@ -335,8 +332,7 @@
@web.expose
def history_options( self, trans ):
"""Displays a list of history related actions"""
- return trans.fill_template( "/history/options.mako",
- user = trans.get_user(), history = trans.get_history() )
+ return trans.fill_template( "/history/options.mako", user=trans.get_user(), history=trans.get_history() )
@web.expose
def history_delete( self, trans, id=None, **kwd):
@@ -368,16 +364,16 @@
trans.log_event( "History id %s marked as deleted" % str(hid) )
else:
return trans.show_message( "You must select at least one history to delete." )
- return trans.show_message( "History deleted: %s" % ",".join(history_names),
- refresh_frames=['history'])
+ return trans.show_message( "History deleted: %s" % ",".join(history_names), refresh_frames=['history'] )
@web.expose
def clear_history( self, trans ):
"""Clears the history for a user"""
history = trans.get_history()
- for dataset in history.datasets:
- dataset.deleted = True
- dataset.clear_associated_files()
+ for hda in history.datasets:
+ hda.deleted = True
+ hda.dataset.deleted = True
+ hda.clear_associated_files()
self.app.model.flush()
trans.log_event( "History id %s cleared" % (str(history.id)) )
trans.response.send_redirect( url_for("/index" ) )
@@ -425,9 +421,7 @@
if not isinstance( id, list ):
id = [ id ]
trans.log_event( "History id %s available" % str( id ) )
- return trans.fill_template( "/history/list.mako", ids=id,
- user=trans.get_user(),
- current_history=trans.get_history() )
+ return trans.fill_template( "/history/list.mako", ids=id, user=trans.get_user(), current_history=trans.get_history() )
@web.expose
def history_import( self, trans, id=None, confirm=False, **kwd ):
@@ -553,23 +547,23 @@
"""Adds a POSTed file to a History"""
try:
history = trans.app.model.History.get( history_id )
- data = trans.app.model.HistoryDatasetAssociation( name = name, info = info, extension = ext, dbkey = dbkey, create_dataset = True )
- data.flush()
- data_file = open( data.file_name, "wb" )
+ hda = trans.app.model.HistoryDatasetAssociation( name=name, info=info, extension=ext, dbkey=dbkey, create_dataset=True )
+ hda.flush()
+ data_file = open( hda.file_name, "wb" )
file_data.file.seek( 0 )
data_file.write( file_data.file.read() )
data_file.close()
- data.state = data.states.OK
- data.init_meta()
- data.set_meta()
- data.flush()
- history.add_dataset( data )
+ hda.dataset.state = hda.dataset.states.OK
+ hda.init_meta()
+ hda.set_meta()
+ hda.flush()
+ history.add_dataset( hda )
history.flush()
- data.set_peek()
- data.set_size()
- data.flush()
- trans.log_event("Added dataset %d to history %d" %(data.id, trans.history.id))
- return trans.show_ok_message("Dataset "+str(data.hid)+" added to history "+str(history_id)+".")
+ hda.set_peek()
+ hda.set_size()
+ hda.flush()
+ trans.log_event( "Added dataset id '%s' to history id '%s'." % ( str( hda.dataset_id ), str( history_id ) ) )
+ return trans.show_ok_message( "Dataset id " + str( hda.dataset_id ) + " added to history id " + str( history_id ) + "." )
except:
return trans.show_error_message("Adding File to History has Failed")
@@ -577,13 +571,11 @@
def dataset_make_primary( self, trans, id=None):
"""Copies a dataset and makes primary"""
try:
- old_data = self.app.model.HistoryDatasetAssociation.get( id )
- new_data = old_data.copy()
- ## new_data.parent = None
- ## history = trans.app.model.History.get( old_data.history_id )
+ old_hda = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
+ new_hda = old_hda.copy()
history = trans.get_history()
- history.add_dataset(new_data)
- new_data.flush()
+ history.add_dataset( new_hda )
+ new_hda.flush()
return trans.show_message( "<p>Secondary dataset has been made primary.</p>", refresh_frames=['history'] )
except:
return trans.show_error_message( "<p>Failed to make secondary dataset primary.</p>" )
@@ -603,7 +595,7 @@
@web.expose
def dataset_errors( self, trans, id=None, **kwd ):
"""View/fix errors associated with dataset"""
- data = trans.app.model.HistoryDatasetAssociation.get( id )
+ hda = trans.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
p = kwd
if p.get("fix_errors", None):
# launch tool to create new, (hopefully) error free dataset
@@ -611,18 +603,19 @@
tool_params["tool_id"] = 'fix_errors'
tool_params["runtool_btn"] = 'T'
tool_params["input"] = id
- tool_params["ext"] = data.ext
+ tool_params["ext"] = hda.ext
# send methods selected
- repair_methods = data.datatype.repair_methods( data )
+ repair_methods = hda.datatype.repair_methods( hda )
methods = []
for method, description in repair_methods:
- if method in p: methods.append(method)
+ if method in p:
+ methods.append( method )
tool_params["methods"] = ",".join(methods)
url = "/tool_runner/index?" + urllib.urlencode(tool_params)
trans.response.send_redirect(url)
else:
- history = trans.app.model.History.get( data.history_id )
- return trans.fill_template('dataset/validation.tmpl', data=data, history=history)
+ history = trans.app.model.History.get( hda.history_id )
+ return trans.fill_template('dataset/validation.tmpl', data=hda, history=history)
# ---- Debug methods ----------------------------------------------------
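Background on the id mix-up this changeset untangles: a HistoryDatasetAssociation (HDA) row has its own primary key (HDA.id) plus a foreign key to the underlying Dataset row (HDA.dataset_id). The two sequences start out in step but drift apart as soon as HDAs are copied, at which point looking an HDA up by the wrong kind of id silently returns the wrong row. A self-contained sketch of the failure mode (plain classes standing in for the mapped models; the ids are made up):

class Dataset(object):
    def __init__(self, id):
        self.id = id

class HistoryDatasetAssociation(object):
    def __init__(self, id, dataset):
        self.id = id                  # the HDA's own primary key
        self.dataset_id = dataset.id  # foreign key to the file on disk
        self.dataset = dataset

d = Dataset(id=7)
original = HistoryDatasetAssociation(id=7, dataset=d)   # ids happen to agree
copied = HistoryDatasetAssociation(id=12, dataset=d)    # ids no longer agree

# HistoryDatasetAssociation.get( id ) keys on HDA.id (7 or 12 above), while
# HistoryDatasetAssociation.filter_by( dataset_id=id ).first() keys on
# Dataset.id (always 7 above); feeding one kind of id to the other query is
# exactly the confusion this diff fixes across the root controller methods.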
details: http://www.bx.psu.edu/hg/galaxy/rev/dd58d5aeb274
changeset: 1539:dd58d5aeb274
user: Greg Von Kuster <greg(a)bx.psu.edu>
date: Wed Oct 01 12:43:59 2008 -0400
description:
Fix for root/display() - history_dataset_association.dataset_id no longer confused with history_dataset_association.id.
1 file(s) affected in this change:
lib/galaxy/web/controllers/root.py
diffs (56 lines):
diff -r f9431ad593b4 -r dd58d5aeb274 lib/galaxy/web/controllers/root.py
--- a/lib/galaxy/web/controllers/root.py Wed Oct 01 10:17:58 2008 -0400
+++ b/lib/galaxy/web/controllers/root.py Wed Oct 01 12:43:59 2008 -0400
@@ -125,36 +125,37 @@
except:
return "hid '%s' is invalid" %str( hid )
history = trans.get_history()
- for dataset in history.datasets:
- if dataset.hid == hid:
- data = dataset
+ for hda in history.datasets:
+ if hda.hid == hid:
+ history_dataset_assoc = hda
break
else:
- raise Exception( "No dataset with hid '%d'" % hid )
+ raise Exception( "History_dataset_association with hid '%s' does not exist." % str( hid ) )
else:
try:
- data = self.app.model.HistoryDatasetAssociation.get( id )
+ id = int( id )
+ history_dataset_assoc = self.app.model.HistoryDatasetAssociation.filter_by( dataset_id=id ).first()
except:
- return "Dataset id '%s' is invalid" %str( id )
- if data:
- mime = trans.app.datatypes_registry.get_mimetype_by_extension( data.extension.lower() )
+ return "Dataset id '%s' is invalid." %str( id )
+ if history_dataset_assoc:
+ mime = trans.app.datatypes_registry.get_mimetype_by_extension( history_dataset_assoc.extension.lower() )
trans.response.set_content_type(mime)
if tofile:
- fStat = os.stat(data.file_name)
+ fStat = os.stat(history_dataset_assoc.file_name)
trans.response.headers['Content-Length'] = int(fStat.st_size)
if toext[0:1] != ".":
toext = "." + toext
valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
- fname = data.name
+ fname = history_dataset_assoc.name
fname = ''.join(c in valid_chars and c or '_' for c in fname)[0:150]
- trans.response.headers["Content-Disposition"] = "attachment; filename=GalaxyHistoryItem-%s-[%s]%s" % (data.hid, fname, toext)
- trans.log_event( "Display dataset id: %s" % str(id) )
+ trans.response.headers["Content-Disposition"] = "attachment; filename=GalaxyHistoryItem-%s-[%s]%s" % (history_dataset_assoc.hid, fname, toext)
+ trans.log_event( "Display dataset id: '%s'." % str(id) )
try:
- return open( data.file_name )
+ return open( history_dataset_assoc.file_name )
except:
- return "This dataset contains no content"
+ return "Dataset id '%s' contains no content." % str( id )
else:
- return "No dataset with id '%s'" % str( id )
+ return "Dataset id '%s' does not exist." % str( id )
@web.expose
def display_child(self, trans, parent_id=None, designation=None, tofile=None, toext=".txt"):
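A small detail in the hunk above: the added `id = int( id )` coerces the request parameter, which always arrives as a string, before it reaches the query, so a malformed id trips the except branch immediately instead of failing somewhere inside the ORM. A minimal sketch of that guard (hypothetical handler, not Galaxy code):

def display(id=None):
    try:
        id = int(id)  # web request parameters are strings
    except (TypeError, ValueError):
        return "Dataset id '%s' is invalid." % str(id)
    return "would query HistoryDatasetAssociation with dataset_id=%d" % id

print(display("42"))     # would query HistoryDatasetAssociation with dataset_id=42
print(display("forty"))  # Dataset id 'forty' is invalid.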
details: http://www.bx.psu.edu/hg/galaxy/rev/1dbd88fb9e57
changeset: 1537:1dbd88fb9e57
user: wychung
date: Tue Sep 30 16:40:42 2008 -0400
description:
Add an error message to shrimp_wrapper for memory errors and zero hits.
Convert the last column of megablast output to float.
3 file(s) affected in this change:
tools/metag_tools/megablast_wrapper.py
tools/metag_tools/shrimp_wrapper.py
tools/metag_tools/shrimp_wrapper.xml
diffs (111 lines):
diff -r 8ab38aa72998 -r 1dbd88fb9e57 tools/metag_tools/megablast_wrapper.py
--- a/tools/metag_tools/megablast_wrapper.py Tue Sep 30 16:17:12 2008 -0400
+++ b/tools/metag_tools/megablast_wrapper.py Tue Sep 30 16:40:42 2008 -0400
@@ -55,7 +55,9 @@
chunk = db[(db_build)]
megablast_command = "megablast -d %s -i %s -o %s -m 8 -a 8 -W %s -p %s -e %s -F %s > /dev/null 2>&1 " \
% ( chunk, query_filename, mega_temp_output, mega_word_size, mega_iden_cutoff, mega_evalue_cutoff, mega_filter )
-
+
+ print megablast_command
+
try:
os.system( megablast_command )
except Exception, e:
@@ -67,8 +69,12 @@
line = line.rstrip( '\r\n' )
fields = line.split()
try:
+ # get gi and length of that gi seq
gi, gi_len = fields[1].split('_')
- new_line = "%s\t%s\t%s\t%s" % ( fields[0], gi, gi_len, '\t'.join( fields[2:] ) )
+ # convert the last column (causing problem in filter tool) to float
+ fields[-1] = float(fields[-1])
+
+ new_line = "%s\t%s\t%s\t%s\t%0.1f" % ( fields[0], gi, gi_len, '\t'.join( fields[2:-1] ), fields[-1] )
except:
new_line = line
invalid_lines += 1
diff -r 8ab38aa72998 -r 1dbd88fb9e57 tools/metag_tools/shrimp_wrapper.py
--- a/tools/metag_tools/shrimp_wrapper.py Tue Sep 30 16:17:12 2008 -0400
+++ b/tools/metag_tools/shrimp_wrapper.py Tue Sep 30 16:40:42 2008 -0400
@@ -1,15 +1,21 @@
#! /usr/bin/python
"""
+TODO
+1. decrease memory usage
+2. multi-fasta fastq file, ex. 454
+3. split reads into small chuncks?
+
SHRiMP wrapper
Inputs:
- reference seq and reads
+1. reference seq
+2. reads
Outputs:
- table of 8 columns:
+1. table of 8 columns:
chrom ref_loc read_id read_loc ref_nuc read_nuc quality coverage
- SHRiMP output
+2. SHRiMP output
Parameters:
-s Spaced Seed (default: 111111011111)
@@ -37,13 +43,9 @@
>7:2:1147:982/1 chr3 + 95338194 95338225 4 35 36 2700 9T7C14
>7:2:587:93/1 chr3 + 14913541 14913577 1 35 36 2960 19--16
-Testing:
-%python shrimp_wrapper.py single ~/Desktop/shrimp_wrapper/phix_anc.fa tmp tmp1 ~/Desktop/shrimp_wrapper/phix.10.solexa.fastq
-%python shrimp_wrapper.py paired ~/Desktop/shrimp_wrapper/eca_ref_chrMT.fa tmp tmp1 ~/Desktop/shrimp_wrapper/eca.5.solexa_1.fastq ~/Desktop/shrimp_wrapper/eca.5.solexa_2.fastq
-
"""
-import os, sys, tempfile, os.path
+import os, sys, tempfile, os.path, re
assert sys.version_info[:2] >= (2.4)
@@ -575,6 +577,29 @@
if os.path.exists(query_qual_end1): os.remove(query_qual_end1)
if os.path.exists(query_qual_end2): os.remove(query_qual_end2)
stop_err(str(e))
+
+ # check SHRiMP output: count number of lines
+ num_hits = 0
+ if shrimp_outfile:
+ for i, line in enumerate(file(shrimp_outfile)):
+ line = line.rstrip('\r\n')
+ if not line or line.startswith('#'): continue
+ try:
+ fields = line.split()
+ num_hits += 1
+ except Exception, e:
+ stop_err(str(e))
+
+ if num_hits == 0: # no hits generated
+ err_msg = ''
+ if shrimp_log:
+ for i, line in enumerate(file(shrimp_log)):
+ if line.startswith('error'): # deal with memory error:
+ err_msg += line # error: realloc failed: Cannot allocate memory
+ if re.search('Reads Matched', line): # deal with zero hits
+ if int(line[8:].split()[2]) == 0:
+ err_msg = 'Zero hits found.\n'
+ stop_err('SHRiMP Failed due to:\n' + err_msg)
# convert to table
if type_of_reads == 'single':
diff -r 8ab38aa72998 -r 1dbd88fb9e57 tools/metag_tools/shrimp_wrapper.xml
--- a/tools/metag_tools/shrimp_wrapper.xml Tue Sep 30 16:17:12 2008 -0400
+++ b/tools/metag_tools/shrimp_wrapper.xml Tue Sep 30 16:40:42 2008 -0400
@@ -1,5 +1,5 @@
<tool id="shrimp_wrapper" name="SHRiMP" version="1.0.0">
- <description>SHort Read Mapping Package</description>
+ <description>: SHort Read Mapping Package</description>
<command interpreter="python">
#if ($type_of_reads.single_or_paired=="single" and $param.skip_or_full=="skip"):#shrimp_wrapper.py $input_target $output1 $output2 $input_query
#elif ($type_of_reads.single_or_paired=="paired" and $param.skip_or_full=="skip"):#shrimp_wrapper.py $input_target $output1 $output2 $type_of_reads.input1,$type_of_reads.input2,$type_of_reads.insertion_size
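On the megablast half of this changeset: BLAST tabular (-m 8) output ends with the bit score, and leaving it as a raw string was tripping up the downstream filter tool, hence the coercion to float and the %0.1f reformat. A runnable sketch of the per-line rewrite applied above (the input line is hypothetical):

line = "read1\t1871_5386\t98.5\t36\t0\t0\t1\t36\t200\t235\t1e-15\t71.9"
fields = line.rstrip("\r\n").split("\t")
try:
    gi, gi_len = fields[1].split("_")   # subject field packs gi and length
    fields[-1] = float(fields[-1])      # coerce the bit score to a number
    new_line = "%s\t%s\t%s\t%s\t%0.1f" % (
        fields[0], gi, gi_len, "\t".join(fields[2:-1]), fields[-1])
except (ValueError, IndexError):
    new_line = line                     # counted as invalid, kept unchanged
print(new_line)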
details: http://www.bx.psu.edu/hg/galaxy/rev/f9431ad593b4
changeset: 1538:f9431ad593b4
user: Greg Von Kuster <greg(a)bx.psu.edu>
date: Wed Oct 01 10:17:58 2008 -0400
description:
Fix for histogram tool when input dataset is empty.
1 file(s) affected in this change:
tools/stats/gsummary.py
diffs (11 lines):
diff -r 1dbd88fb9e57 -r f9431ad593b4 tools/stats/gsummary.py
--- a/tools/stats/gsummary.py Tue Sep 30 16:40:42 2008 -0400
+++ b/tools/stats/gsummary.py Wed Oct 01 10:17:58 2008 -0400
@@ -57,6 +57,7 @@
tmp_file.write( "%s\n" % hdr_str )
skipped_lines = 0
first_invalid_line = 0
+ i = 0
for i, line in enumerate( file( datafile ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( '#' ):
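The one-line fix above addresses a classic Python gotcha: a for loop never binds its loop variable when the iterable is empty, so code that uses `i` after the loop raises UnboundLocalError on an empty dataset. A minimal reproduction of the guard:

def last_index(lines):
    i = 0                             # the fix: bind i before the loop
    for i, line in enumerate(lines):
        pass
    return i

print(last_index([]))          # 0 rather than UnboundLocalError
print(last_index(["a", "b"]))  # 1, the index of the last line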
details: http://www.bx.psu.edu/hg/galaxy/rev/8ab38aa72998
changeset: 1536:8ab38aa72998
user: Dan Blankenberg <dan(a)bx.psu.edu>
date: Tue Sep 30 16:17:12 2008 -0400
description:
Add a change_format tag to output datasets in tools. This allows for dynamic switching of output datatype based upon input values.
Several tools have been updated to take advantage of this, eliminating their need for code_files.
12 file(s) affected in this change:
lib/galaxy/tools/__init__.py
lib/galaxy/tools/actions/__init__.py
tools/annotation_profiler/annotation_profiler.xml
tools/annotation_profiler/annotation_profiler_code.py
tools/extract/extract_genomic_dna.xml
tools/extract/extract_genomic_dna_code.py
tools/filters/pasteWrapper.xml
tools/filters/pasteWrapper_code.py
tools/maf/maf_stats.xml
tools/maf/maf_stats_code.py
tools/sr_mapping/lastz_code.py
tools/sr_mapping/lastz_wrapper.xml
diffs (222 lines):
diff -r 931d6ca549d3 -r 8ab38aa72998 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py Tue Sep 30 15:30:57 2008 -0400
+++ b/lib/galaxy/tools/__init__.py Tue Sep 30 16:17:12 2008 -0400
@@ -299,6 +299,7 @@
for data_elem in out_elem.findall("data"):
output = ToolOutput( data_elem.get("name") )
output.format = data_elem.get("format", "data")
+ output.change_format = data_elem.findall("change_format")
output.metadata_source = data_elem.get("metadata_source", "")
output.parent = data_elem.get("parent", None)
output.label = util.xml_text( data_elem, "label" )
diff -r 931d6ca549d3 -r 8ab38aa72998 lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py Tue Sep 30 15:30:57 2008 -0400
+++ b/lib/galaxy/tools/actions/__init__.py Tue Sep 30 16:17:12 2008 -0400
@@ -115,6 +115,21 @@
ext = output.format
if ext == "input":
ext = input_ext
+ #process change_format tags
+ if output.change_format:
+ for change_elem in output.change_format:
+ for when_elem in change_elem.findall( 'when' ):
+ check = incoming.get( when_elem.get( 'input' ), None )
+ if check is not None:
+ if check == when_elem.get( 'value', None ):
+ ext = when_elem.get( 'format', ext )
+ else:
+ check = when_elem.get( 'input_dataset', None )
+ if check is not None:
+ check = inp_data.get( check, None )
+ if check is not None:
+ if str( getattr( check, when_elem.get( 'attribute' ) ) ) == when_elem.get( 'value', None ):
+ ext = when_elem.get( 'format', ext )
data = trans.app.model.HistoryDatasetAssociation( extension=ext, create_dataset=True )
# Commit the dataset immediately so it gets database assigned unique id
data.flush()
diff -r 931d6ca549d3 -r 8ab38aa72998 tools/annotation_profiler/annotation_profiler.xml
--- a/tools/annotation_profiler/annotation_profiler.xml Tue Sep 30 15:30:57 2008 -0400
+++ b/tools/annotation_profiler/annotation_profiler.xml Tue Sep 30 16:17:12 2008 -0400
@@ -16,9 +16,12 @@
<param name="table_names" type="drill_down" display="checkbox" hierarchy="recurse" multiple="true" label="Choose Tables to Use" help="Selecting no tables will result in using all tables." from_file="annotation_profiler_options.xml"/>
</inputs>
<outputs>
- <data format="input" name="out_file1"/>
+ <data format="input" name="out_file1">
+ <change_format>
+ <when input="summary" value="-S" format="tabular" />
+ </change_format>
+ </data>
</outputs>
- <code file="annotation_profiler_code.py" />
<tests>
<test>
<param name="input1" value="4.bed" dbkey="hg18"/>
diff -r 931d6ca549d3 -r 8ab38aa72998 tools/annotation_profiler/annotation_profiler_code.py
--- a/tools/annotation_profiler/annotation_profiler_code.py Tue Sep 30 15:30:57 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,4 +0,0 @@
-#Change format from Interval to tabular if needed
-def exec_before_job(app, inp_data, out_data, param_dict, tool):
- if param_dict['summary']:
- out_data['out_file1'].change_datatype('tabular')
\ No newline at end of file
diff -r 931d6ca549d3 -r 8ab38aa72998 tools/extract/extract_genomic_dna.xml
--- a/tools/extract/extract_genomic_dna.xml Tue Sep 30 15:30:57 2008 -0400
+++ b/tools/extract/extract_genomic_dna.xml Tue Sep 30 16:17:12 2008 -0400
@@ -12,9 +12,12 @@
</param>
</inputs>
<outputs>
- <data format="fasta" name="out_file1" />
+ <data format="fasta" name="out_file1">
+ <change_format>
+ <when input="out_format" value="interval" format="interval" />
+ </change_format>
+ </data>
</outputs>
- <code file="extract_genomic_dna_code.py" />
<tests>
<test>
<param name="input" value="1.bed" dbkey="hg17" ftype="bed" />
diff -r 931d6ca549d3 -r 8ab38aa72998 tools/extract/extract_genomic_dna_code.py
--- a/tools/extract/extract_genomic_dna_code.py Tue Sep 30 15:30:57 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,6 +0,0 @@
-# by dan
-#Change format from FASTA to Interval if needed; use metadata from input file
-def exec_before_job(app, inp_data, out_data, param_dict, tool):
- if param_dict['out_format'] == "interval":
- out_data['out_file1'].change_datatype('interval')
- out_data['out_file1'].init_meta( copy_from=inp_data['input'] )
diff -r 931d6ca549d3 -r 8ab38aa72998 tools/filters/pasteWrapper.xml
--- a/tools/filters/pasteWrapper.xml Tue Sep 30 15:30:57 2008 -0400
+++ b/tools/filters/pasteWrapper.xml Tue Sep 30 16:17:12 2008 -0400
@@ -15,7 +15,11 @@
</param>
</inputs>
<outputs>
- <data format="input" name="out_file1" metadata_source="input1" />
+ <data format="input" name="out_file1" metadata_source="input1">
+ <change_format>
+ <when input_dataset="input1" attribute="ext" value="bed" format="interval"/>
+ </change_format>
+ </data>
</outputs>
<tests>
<test>
@@ -60,5 +64,4 @@
a 3 40
</help>
-<code file="pasteWrapper_code.py"/>
</tool>
diff -r 931d6ca549d3 -r 8ab38aa72998 tools/filters/pasteWrapper_code.py
--- a/tools/filters/pasteWrapper_code.py Tue Sep 30 15:30:57 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-#post processing, if bed file, change to interval file
-from galaxy import datatypes
-def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
- for name, data in out_data.items():
- if data.ext == "bed":
- data = app.datatypes_registry.change_datatype(data, "interval")
- data.flush()
\ No newline at end of file
diff -r 931d6ca549d3 -r 8ab38aa72998 tools/maf/maf_stats.xml
--- a/tools/maf/maf_stats.xml Tue Sep 30 15:30:57 2008 -0400
+++ b/tools/maf/maf_stats.xml Tue Sep 30 16:17:12 2008 -0400
@@ -44,7 +44,11 @@
</param>
</inputs>
<outputs>
- <data format="interval" name="out_file1" metadata_source="input1"/>
+ <data format="interval" name="out_file1" metadata_source="input1">
+ <change_format>
+ <when input="summary" value="true" format="tabular" />
+ </change_format>
+ </data>
</outputs>
<requirements>
<requirement type="python-module">numpy</requirement>
@@ -95,5 +99,4 @@
where **coverage** is the number of nucleotides divided by the total length of the provided intervals.
</help>
- <code file="maf_stats_code.py"/>
</tool>
diff -r 931d6ca549d3 -r 8ab38aa72998 tools/maf/maf_stats_code.py
--- a/tools/maf/maf_stats_code.py Tue Sep 30 15:30:57 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-import os
-
-def load_maf_data( GALAXY_DATA_INDEX_DIR, sep='\t' ):
- # FIXME: this function is duplicated in the DynamicOptions class. It is used here only to
- # set data.name in exec_before_job().
- maf_sets = {}
- filename = "%s/maf_index.loc" % GALAXY_DATA_INDEX_DIR
- for i, line in enumerate( file( filename ) ):
- line = line.rstrip( '\r\n' )
- if line and not line.startswith( '#' ):
- fields = line.split( sep )
- #read each line, if not enough fields, go to next line
- try:
- maf_desc = fields[0]
- maf_uid = fields[1]
- builds = fields[2]
- build_list =[]
- split_builds = builds.split( "," )
- for build in split_builds:
- this_build = build.split( "=" )[0]
- build_list.append( this_build )
- paths = fields[3]
- maf_sets[ maf_uid ] = {}
- maf_sets[ maf_uid ][ 'description' ] = maf_desc
- maf_sets[ maf_uid ][ 'builds' ] = build_list
- except:
- continue
- return maf_sets
-def exec_before_job(app, inp_data, out_data, param_dict, tool):
- maf_sets = load_maf_data( app.config.tool_data_path, sep='\t' )
- if param_dict[ 'maf_source_type' ][ 'maf_source' ] == "cached":
- for name, data in out_data.items():
- try:
- data.name = data.name + " [" + maf_sets[ str( param_dict[ 'maf_source_type' ][ 'mafType' ] ) ][ 'description' ] + "]"
- except KeyError:
- data.name = data.name + " [unknown MAF source specified]"
- if param_dict[ 'summary' ].lower() == "true":
- for name, data in out_data.items():
- data.change_datatype( 'tabular' )
diff -r 931d6ca549d3 -r 8ab38aa72998 tools/sr_mapping/lastz_code.py
--- a/tools/sr_mapping/lastz_code.py Tue Sep 30 15:30:57 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,6 +0,0 @@
-# by dan
-#Change format from tabular to maf if needed; use metadata from input file
-def exec_before_job(app, inp_data, out_data, param_dict, tool):
- if param_dict['out_format'] == "maf":
- out_data['output1'].change_datatype('maf')
-# out_data['output1'].init_meta( copy_from=inp_data['input1'] )
diff -r 931d6ca549d3 -r 8ab38aa72998 tools/sr_mapping/lastz_wrapper.xml
--- a/tools/sr_mapping/lastz_wrapper.xml Tue Sep 30 15:30:57 2008 -0400
+++ b/tools/sr_mapping/lastz_wrapper.xml Tue Sep 30 16:17:12 2008 -0400
@@ -77,13 +77,16 @@
<param name="min_cvrg" type="integer" size="3" value="0" label="Do not report matches that cover less than this fraction (%) of each read"/>
</inputs>
<outputs>
- <data format="tabular" name="output1" />
+ <data format="tabular" name="output1">
+ <change_format>
+ <when input="out_format" value="maf" format="maf" />
+ </change_format>
+ </data>
<data format="tabular" name="output2" />
</outputs>
<requirements>
<requirement type="binary">lastz</requirement>
</requirements>
- <code file="lastz_code.py" />
<tests>
<test>
<param name="input1" value="phiX.fa" ftype="fasta" />
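To make the new tag concrete: each <when> inside <change_format> either compares a submitted parameter against a value (the `input` form) or an attribute of an input dataset against a value (the `input_dataset` form), and on a match swaps in a new output format, which is what the lib/galaxy/tools/actions hunk above implements. A standalone sketch of that resolution logic (plain dicts and a stub dataset stand in for the parsed XML and Galaxy's models; the names here are illustrative, not the Galaxy API):

def resolve_extension(default_ext, change_format, incoming, inp_data):
    ext = default_ext
    for when in change_format:
        if "input" in when:
            # match on a plain tool parameter
            if incoming.get(when["input"]) == when["value"]:
                ext = when.get("format", ext)
        elif "input_dataset" in when:
            # match on an attribute of an input dataset
            dataset = inp_data.get(when["input_dataset"])
            if dataset is not None and \
               str(getattr(dataset, when["attribute"])) == when["value"]:
                ext = when.get("format", ext)
    return ext

class StubDataset(object):
    ext = "bed"

# the pasteWrapper rule above: a bed input turns the output into interval
rules = [{"input_dataset": "input1", "attribute": "ext",
          "value": "bed", "format": "interval"}]
print(resolve_extension("input", rules, {}, {"input1": StubDataset()}))  # interval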