galaxy-commits
details: http://www.bx.psu.edu/hg/galaxy/rev/6056caca2503
changeset: 3814:6056caca2503
user: jeremy goecks <jeremy.goecks@emory.edu>
date: Mon May 24 14:50:20 2010 -0400
description:
Page editor: fix indenting issue for webkit browsers.
diffstat:
templates/page/editor.mako | 26 ++++++++++++--------------
1 files changed, 12 insertions(+), 14 deletions(-)
diffs (49 lines):
diff -r 37ecd71e87f3 -r 6056caca2503 templates/page/editor.mako
--- a/templates/page/editor.mako Mon May 24 14:46:19 2010 -0400
+++ b/templates/page/editor.mako Mon May 24 14:50:20 2010 -0400
@@ -476,8 +476,7 @@
// item_class='History').
var item_elt_id = item_info.iclass + "-" + item_id;
var item_embed_html =
- "\
- <p><div id='" + item_elt_id + "' class='embedded-item " + item_info.singular.toLowerCase() +
+ "<p><div id='" + item_elt_id + "' class='embedded-item " + item_info.singular.toLowerCase() +
" placeholder'> \
<p class='title'>Embedded Galaxy " + item_info.singular + " '" + item_name + "'</p> \
<p class='content'> \
@@ -487,24 +486,23 @@
</div></p>";
// Insert embedded item into document.
+ wym.insert(" "); // Needed to prevent insertion from occurring in child element in webkit browsers.
wym.insert(item_embed_html);
// TODO: can we fix this?
// Due to oddities of wym.insert() [likely due to inserting a <div> and/or a complete paragraph], an
- // empty paragraph may be included either before or after an embedded item. Remove these paragraphs.
+ // empty paragraph (or two!) may be included either before an embedded item. Remove these paragraphs.
$("#" + item_elt_id, wym._doc.body).each( function() {
// Remove previous empty paragraphs.
- var prev_elt = $(this).prev();
- if ( prev_elt.length != 0 && jQuery.trim(prev_elt.text()) == "" )
- prev_elt.remove();
-
- // Remove subsequent empty paragraphs.
- /*
- var next_elt = $(this).next();
- var next_next_elt = next_elt.next();
- if (next_next_elt.length != 0)
- next_elt.remove();
- */
+ var removing = true;
+ while (removing)
+ {
+ var prev_elt = $(this).prev();
+ if ( prev_elt.length != 0 && jQuery.trim(prev_elt.text()) == "" )
+ prev_elt.remove();
+ else
+ removing = false;
+ }
});
});
details: http://www.bx.psu.edu/hg/galaxy/rev/37ecd71e87f3
changeset: 3813:37ecd71e87f3
user: Nate Coraor <nate@bx.psu.edu>
date: Mon May 24 14:46:19 2010 -0400
description:
Added nginx mod_zip support for library downloads
diffstat:
lib/galaxy/config.py | 1 +
lib/galaxy/web/controllers/library_common.py | 40 ++++++++++++++++++++++-----
templates/library/common/common.mako | 8 +++++
3 files changed, 41 insertions(+), 8 deletions(-)
diffs (144 lines):
diff -r 663b2fd4a44c -r 37ecd71e87f3 lib/galaxy/config.py
--- a/lib/galaxy/config.py Mon May 24 14:15:02 2010 -0400
+++ b/lib/galaxy/config.py Mon May 24 14:46:19 2010 -0400
@@ -101,6 +101,7 @@
# Configuration options for taking advantage of nginx features
self.apache_xsendfile = kwargs.get( 'apache_xsendfile', False )
self.nginx_x_accel_redirect_base = kwargs.get( 'nginx_x_accel_redirect_base', False )
+ self.nginx_x_archive_files_base = kwargs.get( 'nginx_x_archive_files_base', False )
self.nginx_upload_store = kwargs.get( 'nginx_upload_store', False )
self.nginx_upload_path = kwargs.get( 'nginx_upload_path', False )
if self.nginx_upload_store:
diff -r 663b2fd4a44c -r 37ecd71e87f3 lib/galaxy/web/controllers/library_common.py
--- a/lib/galaxy/web/controllers/library_common.py Mon May 24 14:15:02 2010 -0400
+++ b/lib/galaxy/web/controllers/library_common.py Mon May 24 14:46:19 2010 -0400
@@ -106,6 +106,9 @@
message += "Don't navigate away from Galaxy or use the browser's \"stop\" or \"reload\" buttons (on this tab) until the "
message += "message \"This job is running\" is cleared from the \"Information\" column below for each selected dataset."
status = "info"
+ comptypes_t = comptypes
+ if trans.app.config.nginx_x_archive_files_base:
+ comptypes_t = ['ngxzip']
return trans.fill_template( '/library/common/browse_library.mako',
cntrller=cntrller,
use_panels=use_panels,
@@ -113,7 +116,7 @@
created_ldda_ids=created_ldda_ids,
hidden_folder_ids=hidden_folder_ids,
show_deleted=show_deleted,
- comptypes=comptypes,
+ comptypes=comptypes_t,
current_user_roles=current_user_roles,
message=message,
status=status )
@@ -1253,6 +1256,19 @@
status=status )
@web.expose
def act_on_multiple_datasets( self, trans, cntrller, library_id, ldda_ids='', **kwd ):
+ class NgxZip( object ):
+ def __init__( self, url_base ):
+ self.files = {}
+ self.url_base = url_base
+ def add( self, file, relpath ):
+ self.files[file] = relpath
+ def __str__( self ):
+ rval = ''
+ for fname, relpath in self.files.items():
+ size = os.stat( fname ).st_size
+ quoted_fname = urllib.quote_plus( fname, '/' )
+ rval += '- %i %s%s %s\n' % ( size, self.url_base, quoted_fname, relpath )
+ return rval
# Perform an action on a list of library datasets.
params = util.Params( kwd )
message = util.restore_text( params.get( 'message', '' ) )
@@ -1319,10 +1335,10 @@
trans.sa_session.add( ld )
trans.sa_session.flush()
message = "The selected datasets have been removed from this data library"
- elif action in ['zip','tgz','tbz']:
+ elif action in ['zip','tgz','tbz','ngxzip']:
error = False
killme = string.punctuation + string.whitespace
- trantab = string.maketrans(killme,'_'*len(killme))
+ trantab = string.maketrans(killme,'_'*len(killme))
try:
outext = 'zip'
if action == 'zip':
@@ -1340,6 +1356,8 @@
elif action == 'tbz':
archive = util.streamball.StreamBall( 'w|bz2' )
outext = 'tbz2'
+ elif action == 'ngxzip':
+ archive = NgxZip( trans.app.config.nginx_x_archive_files_base )
except (OSError, zipfile.BadZipFile):
error = True
log.exception( "Unable to create archive for download" )
@@ -1347,7 +1365,7 @@
status = 'error'
except:
error = True
- log.exception( "Unexpected error %s in create archive for download" % sys.exc_info()[0])
+ log.exception( "Unexpected error %s in create archive for download" % sys.exc_info()[0])
message = "Unable to create archive for download, please report - %s" % sys.exc_info()[0]
status = 'error'
if not error:
@@ -1377,7 +1395,8 @@
seen.append( path )
zpath = os.path.split(path)[-1] # comes as base_name/fname
outfname,zpathext = os.path.splitext(zpath)
- if is_composite: # need to add all the components from the extra_files_path to the zip
+ if is_composite:
+ # need to add all the components from the extra_files_path to the zip
if zpathext == '':
zpath = '%s.html' % zpath # fake the real nature of the html file
try:
@@ -1391,8 +1410,8 @@
flist = glob.glob(os.path.join(ldda.dataset.extra_files_path,'*.*')) # glob returns full paths
for fpath in flist:
efp,fname = os.path.split(fpath)
- if fname > '':
- fname = fname.translate(trantab)
+ if fname > '':
+ fname = fname.translate(trantab)
try:
archive.add( fpath,fname )
except IOError:
@@ -1409,7 +1428,7 @@
log.exception( "Unable to write %s to temporary library download archive" % ldda.dataset.file_name)
message = "Unable to create archive for download, please report this error"
status = 'error'
- if not error:
+ if not error:
if action == 'zip':
archive.close()
tmpfh = open( tmpf )
@@ -1426,6 +1445,11 @@
trans.response.set_content_type( "application/x-zip-compressed" )
trans.response.headers[ "Content-Disposition" ] = "attachment; filename=%s.%s" % (outfname,outext)
return tmpfh
+ elif action == 'ngxzip':
+ #trans.response.set_content_type( "application/x-zip-compressed" )
+ #trans.response.headers[ "Content-Disposition" ] = "attachment; filename=%s.%s" % (outfname,outext)
+ trans.response.headers[ "X-Archive-Files" ] = "zip"
+ return archive
else:
trans.response.set_content_type( "application/x-tar" )
trans.response.headers[ "Content-Disposition" ] = "attachment; filename=%s.%s" % (outfname,outext)
diff -r 663b2fd4a44c -r 37ecd71e87f3 templates/library/common/common.mako
--- a/templates/library/common/common.mako Mon May 24 14:15:02 2010 -0400
+++ b/templates/library/common/common.mako Mon May 24 14:46:19 2010 -0400
@@ -391,6 +391,14 @@
%if 'zip' in comptypes:
<option value="zip">Download as a .zip file</option>
%endif
+ %if 'ngxzip' in comptypes:
+ ## We can safely have two default selected items since ngxzip, if present, will always be the only available type.
+ <option value="ngxzip"
+ %if default_action == 'download':
+ selected
+ %endif>
+ >Download as a .zip file</option>
+ %endif
%endif
</select>
<input type="submit" class="primary-button" name="action_on_datasets_button" id="action_on_datasets_button" value="Go"/>
details: http://www.bx.psu.edu/hg/galaxy/rev/663b2fd4a44c
changeset: 3812:663b2fd4a44c
user: Dan Blankenberg <dan@bx.psu.edu>
date: Mon May 24 14:15:02 2010 -0400
description:
First pass at implementing a method for allowing a maximum file size cutoff for setting optional metadata (e.g. line and sequence counts). Currently csFasta, qualsolid, and fastq make use of this option.
diffstat:
datatypes_conf.xml.sample | 2 +-
lib/galaxy/datatypes/data.py | 14 ++++++++++++++
lib/galaxy/datatypes/qualityscore.py | 7 +++++++
lib/galaxy/datatypes/registry.py | 2 ++
lib/galaxy/datatypes/sequence.py | 7 +++++++
5 files changed, 31 insertions(+), 1 deletions(-)
diffs (96 lines):
diff -r 7faa12ac9746 -r 663b2fd4a44c datatypes_conf.xml.sample
--- a/datatypes_conf.xml.sample Mon May 24 11:22:12 2010 -0400
+++ b/datatypes_conf.xml.sample Mon May 24 14:15:02 2010 -0400
@@ -33,7 +33,7 @@
</datatype>
<datatype extension="customtrack" type="galaxy.datatypes.interval:CustomTrack"/>
<datatype extension="csfasta" type="galaxy.datatypes.sequence:csFasta" display_in_upload="true"/>
- <datatype extension="data" type="galaxy.datatypes.data:Data" mimetype="application/octet-stream"/>
+ <datatype extension="data" type="galaxy.datatypes.data:Data" mimetype="application/octet-stream" max_optional_metadata_filesize="1048576" />
<datatype extension="fasta" type="galaxy.datatypes.sequence:Fasta" display_in_upload="true">
<converter file="fasta_to_tabular_converter.xml" target_datatype="tabular"/>
</datatype>
diff -r 7faa12ac9746 -r 663b2fd4a44c lib/galaxy/datatypes/data.py
--- a/lib/galaxy/datatypes/data.py Mon May 24 11:22:12 2010 -0400
+++ b/lib/galaxy/datatypes/data.py Mon May 24 14:15:02 2010 -0400
@@ -57,6 +57,8 @@
composite_type = None
composite_files = odict()
primary_file_name = 'index'
+ #A per datatype setting (inherited): max file size (in bytes) for setting optional metadata
+ _max_optional_metadata_filesize = None
def __init__(self, **kwd):
"""Initialize the datatype"""
@@ -116,6 +118,18 @@
if not value:
return True
return False
+ def set_max_optional_metadata_filesize( self, max_value ):
+ try:
+ max_value = int( max_value )
+ except:
+ return
+ self.__class__._max_optional_metadata_filesize = max_value
+ def get_max_optional_metadata_filesize( self ):
+ rval = self.__class__._max_optional_metadata_filesize
+ if rval is None:
+ return -1
+ return rval
+ max_optional_metadata_filesize = property( get_max_optional_metadata_filesize, set_max_optional_metadata_filesize )
def set_peek( self, dataset, is_multi_byte=False ):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
diff -r 7faa12ac9746 -r 663b2fd4a44c lib/galaxy/datatypes/qualityscore.py
--- a/lib/galaxy/datatypes/qualityscore.py Mon May 24 11:22:12 2010 -0400
+++ b/lib/galaxy/datatypes/qualityscore.py Mon May 24 14:15:02 2010 -0400
@@ -63,6 +63,13 @@
except:
pass
return False
+
+ def set_meta( self, dataset, **kwd ):
+ if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
+ return
+ return QualityScore.set_meta( self, dataset, **kwd )
+
+
class QualityScore454 ( QualityScore ):
"""
diff -r 7faa12ac9746 -r 663b2fd4a44c lib/galaxy/datatypes/registry.py
--- a/lib/galaxy/datatypes/registry.py Mon May 24 11:22:12 2010 -0400
+++ b/lib/galaxy/datatypes/registry.py Mon May 24 14:15:02 2010 -0400
@@ -64,6 +64,8 @@
self.available_tracks.append( extension )
if display_in_upload:
self.upload_file_formats.append( extension )
+ #max file size cut off for setting optional metadata
+ self.datatypes_by_extension[extension].max_optional_metadata_filesize = elem.get( 'max_optional_metadata_filesize', None )
for converter in elem.findall( 'converter' ):
# Build the list of datatype converters which will later be loaded
# into the calling app's toolbox.
diff -r 7faa12ac9746 -r 663b2fd4a44c lib/galaxy/datatypes/sequence.py
--- a/lib/galaxy/datatypes/sequence.py Mon May 24 11:22:12 2010 -0400
+++ b/lib/galaxy/datatypes/sequence.py Mon May 24 14:15:02 2010 -0400
@@ -148,6 +148,11 @@
except:
pass
return False
+
+ def set_meta( self, dataset, **kwd ):
+ if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
+ return
+ return Sequence.set_meta( self, dataset, **kwd )
class Fastq ( Sequence ):
"""Class representing a generic FASTQ sequence"""
@@ -158,6 +163,8 @@
Set the number of sequences and the number of data lines
in dataset.
"""
+ if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
+ return
data_lines = 0
sequences = 0
seq_counter = 0 # blocks should be 4 lines long
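The pattern in data.py above stores the cutoff as a class-level attribute exposed through a property, so a single value read from datatypes_conf.xml applies to every instance of a datatype, and subclasses guard their set_meta() with it. Here is a condensed sketch of that pattern with illustrative class names; only the mechanism comes from the changeset.

class Data(object):
    # Shared by all instances; set once from datatypes_conf.xml's
    # max_optional_metadata_filesize attribute.
    _max_optional_metadata_filesize = None

    def set_max_optional_metadata_filesize(self, max_value):
        try:
            max_value = int(max_value)
        except (TypeError, ValueError):
            return  # ignore a missing or unparseable config value
        type(self)._max_optional_metadata_filesize = max_value

    def get_max_optional_metadata_filesize(self):
        rval = type(self)._max_optional_metadata_filesize
        return -1 if rval is None else rval

    max_optional_metadata_filesize = property(get_max_optional_metadata_filesize,
                                              set_max_optional_metadata_filesize)

class Fastq(Data):
    def set_meta(self, dataset, **kwd):
        # Skip the expensive line/sequence counting for oversized files.
        if 0 <= self.max_optional_metadata_filesize < dataset.get_size():
            return
        # ... count data lines and sequences here, as in the diff above ...
        pass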
details: http://www.bx.psu.edu/hg/galaxy/rev/7faa12ac9746
changeset: 3811:7faa12ac9746
user: Greg Von Kuster <greg@bx.psu.edu>
date: Mon May 24 11:22:12 2010 -0400
description:
Send the web framework ( trans ) to the grid's build_initial_query() method rather than the web framework's db session so that the method can take advantage of all of the web framework's attributes. Change the library controller to use grids for the data libraries display.
diffstat:
lib/galaxy/web/controllers/admin.py | 12 +-
lib/galaxy/web/controllers/history.py | 8 +-
lib/galaxy/web/controllers/library.py | 88 ++++++++++++++++++----
lib/galaxy/web/controllers/library_admin.py | 14 +-
lib/galaxy/web/controllers/page.py | 4 +-
lib/galaxy/web/controllers/requests.py | 4 +-
lib/galaxy/web/controllers/tracks.py | 4 +-
lib/galaxy/web/controllers/visualization.py | 4 +-
lib/galaxy/web/controllers/workflow.py | 4 +-
lib/galaxy/web/framework/helpers/grids.py | 7 +-
lib/galaxy/webapps/community/controllers/admin.py | 24 +++---
lib/galaxy/webapps/community/controllers/tool.py | 12 +-
templates/library/browse_libraries.mako | 31 --------
templates/library/grid.mako | 1 +
test/functional/test_library_security.py | 2 +-
15 files changed, 121 insertions(+), 98 deletions(-)
diffs (466 lines):
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/web/controllers/admin.py
--- a/lib/galaxy/web/controllers/admin.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/web/controllers/admin.py Mon May 24 11:22:12 2010 -0400
@@ -103,8 +103,8 @@
use_paging = True
def get_current_item( self, trans ):
return trans.user
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
class RoleListGrid( grids.Grid ):
class NameColumn( grids.TextColumn ):
@@ -197,8 +197,8 @@
use_paging = True
def get_current_item( self, trans ):
return None
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
def apply_default_filter( self, trans, query, **kwargs ):
return query.filter( model.Role.type != model.Role.types.PRIVATE )
@@ -275,8 +275,8 @@
use_paging = True
def get_current_item( self, trans ):
return None
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
class AdminGalaxy( BaseController, Admin ):
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/web/controllers/history.py
--- a/lib/galaxy/web/controllers/history.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/web/controllers/history.py Mon May 24 11:22:12 2010 -0400
@@ -110,8 +110,8 @@
grids.GridOperation( "Unshare" )
]
standard_filters = []
- def build_initial_query( self, session ):
- return session.query( self.model_class ).join( 'users_shared_with' )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class ).join( 'users_shared_with' )
def apply_default_filter( self, trans, query, **kwargs ):
return query.filter( model.HistoryUserShareAssociation.user == trans.user )
@@ -138,9 +138,9 @@
key="free-text-search", visible=False, filterable="standard" )
)
operations = []
- def build_initial_query( self, session ):
+ def build_initial_query( self, trans ):
# Join so that searching history.user makes sense.
- return session.query( self.model_class ).join( model.User.table )
+ return trans.sa_session.query( self.model_class ).join( model.User.table )
def apply_default_filter( self, trans, query, **kwargs ):
# A public history is published, has a slug, and is not deleted.
return query.filter( self.model_class.published == True ).filter( self.model_class.slug != None ).filter( self.model_class.deleted == False )
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/web/controllers/library.py
--- a/lib/galaxy/web/controllers/library.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/web/controllers/library.py Mon May 24 11:22:12 2010 -0400
@@ -1,12 +1,73 @@
from galaxy.web.base.controller import *
+from galaxy.web.framework.helpers import time_ago, iff, grids
from galaxy.model.orm import *
from galaxy.datatypes import sniff
-from galaxy import util
+from galaxy import model, util
from galaxy.util.odict import odict
log = logging.getLogger( __name__ )
+class LibraryListGrid( grids.Grid ):
+ class NameColumn( grids.TextColumn ):
+ def get_value( self, trans, grid, library ):
+ return library.name
+ class DescriptionColumn( grids.TextColumn ):
+ def get_value( self, trans, grid, library ):
+ if library.description:
+ return library.description
+ return ''
+ # Grid definition
+ title = "Data Libraries"
+ model_class = model.Library
+ template='/library/grid.mako'
+ default_sort_key = "name"
+ columns = [
+ NameColumn( "Name",
+ key="name",
+ model_class=model.Library,
+ link=( lambda library: dict( operation="browse", id=library.id ) ),
+ attach_popup=False,
+ filterable="advanced" ),
+ DescriptionColumn( "Description",
+ key="description",
+ model_class=model.Library,
+ attach_popup=False,
+ filterable="advanced" ),
+ ]
+ columns.append( grids.MulticolFilterColumn( "Search",
+ cols_to_filter=[ columns[0], columns[1] ],
+ key="free-text-search",
+ visible=False,
+ filterable="standard" ) )
+ standard_filters = []
+ default_filter = dict( name="All", description="All", deleted="False", purged="False" )
+ num_rows_per_page = 50
+ preserve_state = False
+ use_paging = True
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class ).filter( self.model_class.table.c.deleted == False )
+ def apply_default_filter( self, trans, query, **kwd ):
+ current_user_role_ids = [ role.id for role in trans.get_current_user_roles() ]
+ library_access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
+ restricted_library_ids = [ lp.library_id for lp in trans.sa_session.query( trans.model.LibraryPermissions ) \
+ .filter( trans.model.LibraryPermissions.table.c.action == library_access_action ) \
+ .distinct() ]
+ accessible_restricted_library_ids = [ lp.library_id for lp in trans.sa_session.query( trans.model.LibraryPermissions ) \
+ .filter( and_( trans.model.LibraryPermissions.table.c.action == library_access_action,
+ trans.model.LibraryPermissions.table.c.role_id.in_( current_user_role_ids ) ) ) ]
+ if not trans.user:
+ # Filter to get only public libraries, a library whose id
+ # is not in restricted_library_ids is a public library
+ return query.filter( not_( trans.model.Library.table.c.id.in_( restricted_library_ids ) ) )
+ else:
+ # Filter to get libraries accessible by the current user, get both
+ # public libraries and restricted libraries accessible by the current user.
+ return query.filter( or_( not_( trans.model.Library.table.c.id.in_( restricted_library_ids ) ),
+ trans.model.Library.table.c.id.in_( accessible_restricted_library_ids ) ) )
class Library( BaseController ):
+
+ library_list_grid = LibraryListGrid()
+
@web.expose
def index( self, trans, **kwd ):
params = util.Params( kwd )
@@ -18,19 +79,12 @@
status=status )
@web.expose
def browse_libraries( self, trans, **kwd ):
- params = util.Params( kwd )
- message = util.restore_text( params.get( 'message', '' ) )
- status = params.get( 'status', 'done' )
- current_user_roles = trans.get_current_user_roles()
- all_libraries = trans.sa_session.query( trans.app.model.Library ) \
- .filter( trans.app.model.Library.table.c.deleted==False ) \
- .order_by( trans.app.model.Library.name )
- authorized_libraries = []
- for library in all_libraries:
- if trans.app.security_agent.can_access_library( current_user_roles, library ):
- authorized_libraries.append( library )
- return trans.fill_template( '/library/browse_libraries.mako',
- libraries=authorized_libraries,
- default_action=params.get( 'default_action', None ),
- message=message,
- status=status )
+ if 'operation' in kwd:
+ operation = kwd['operation'].lower()
+ if operation == "browse":
+ return trans.response.send_redirect( web.url_for( controller='library_common',
+ action='browse_library',
+ cntrller='library',
+ **kwd ) )
+ # Render the list view
+ return self.library_list_grid( trans, **kwd )
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/web/controllers/library_admin.py
--- a/lib/galaxy/web/controllers/library_admin.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/web/controllers/library_admin.py Mon May 24 11:22:12 2010 -0400
@@ -69,8 +69,8 @@
num_rows_per_page = 50
preserve_state = False
use_paging = True
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
class LibraryAdmin( BaseController ):
@@ -78,16 +78,16 @@
@web.expose
@web.require_admin
- def browse_libraries( self, trans, **kwargs ):
- if 'operation' in kwargs:
- operation = kwargs['operation'].lower()
+ def browse_libraries( self, trans, **kwd ):
+ if 'operation' in kwd:
+ operation = kwd['operation'].lower()
if operation == "browse":
return trans.response.send_redirect( web.url_for( controller='library_common',
action='browse_library',
cntrller='library_admin',
- **kwargs ) )
+ **kwd ) )
# Render the list view
- return self.library_list_grid( trans, **kwargs )
+ return self.library_list_grid( trans, **kwd )
@web.expose
@web.require_admin
def create_library( self, trans, **kwd ):
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/web/controllers/page.py
--- a/lib/galaxy/web/controllers/page.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/web/controllers/page.py Mon May 24 11:22:12 2010 -0400
@@ -71,9 +71,9 @@
cols_to_filter=[ columns[0], columns[1], columns[2], columns[3] ],
key="free-text-search", visible=False, filterable="standard" )
)
- def build_initial_query( self, session ):
+ def build_initial_query( self, trans ):
# Join so that searching history.user makes sense.
- return session.query( self.model_class ).join( model.User.table )
+ return trans.sa_session.query( self.model_class ).join( model.User.table )
def apply_default_filter( self, trans, query, **kwargs ):
return query.filter( self.model_class.deleted==False ).filter( self.model_class.published==True )
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/web/controllers/requests.py
--- a/lib/galaxy/web/controllers/requests.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/web/controllers/requests.py Mon May 24 11:22:12 2010 -0400
@@ -126,8 +126,8 @@
]
def apply_default_filter( self, trans, query, **kwd ):
return query.filter_by( user=trans.user )
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
class Requests( BaseController ):
request_grid = RequestsGrid()
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/web/controllers/tracks.py Mon May 24 11:22:12 2010 -0400
@@ -71,8 +71,8 @@
DbKeyColumn( "Dbkey", key="dbkey", model_class=model.HistoryDatasetAssociation, visible=False )
]
- def build_initial_query( self, session ):
- return session.query( self.model_class ).join( model.History.table).join( model.Dataset.table )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class ).join( model.History.table).join( model.Dataset.table )
def apply_default_filter( self, trans, query, **kwargs ):
if self.available_tracks is None:
self.available_tracks = trans.app.datatypes_registry.get_available_tracks()
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/web/controllers/visualization.py
--- a/lib/galaxy/web/controllers/visualization.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/web/controllers/visualization.py Mon May 24 11:22:12 2010 -0400
@@ -55,9 +55,9 @@
cols_to_filter=[ columns[0], columns[1], columns[2], columns[3] ],
key="free-text-search", visible=False, filterable="standard" )
)
- def build_initial_query( self, session ):
+ def build_initial_query( self, trans ):
# Join so that searching history.user makes sense.
- return session.query( self.model_class ).join( model.User.table )
+ return trans.sa_session.query( self.model_class ).join( model.User.table )
def apply_default_filter( self, trans, query, **kwargs ):
return query.filter( self.model_class.deleted==False ).filter( self.model_class.published==True )
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/web/controllers/workflow.py
--- a/lib/galaxy/web/controllers/workflow.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/web/controllers/workflow.py Mon May 24 11:22:12 2010 -0400
@@ -75,9 +75,9 @@
key="free-text-search", visible=False, filterable="standard" )
)
operations = []
- def build_initial_query( self, session ):
+ def build_initial_query( self, trans ):
# Join so that searching stored_workflow.user makes sense.
- return session.query( self.model_class ).join( model.User.table )
+ return trans.sa_session.query( self.model_class ).join( model.User.table )
def apply_default_filter( self, trans, query, **kwargs ):
# A public workflow is published, has a slug, and is not deleted.
return query.filter( self.model_class.published==True ).filter( self.model_class.slug != None ).filter( self.model_class.deleted == False )
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/web/framework/helpers/grids.py Mon May 24 11:22:12 2010 -0400
@@ -45,7 +45,6 @@
webapp = kwargs.get( 'webapp', 'galaxy' )
status = kwargs.get( 'status', None )
message = kwargs.get( 'message', None )
- session = trans.sa_session
# Build a base filter and sort key that is the combination of the saved state and defaults. Saved state takes preference over defaults.
base_filter = {}
if self.default_filter:
@@ -60,7 +59,7 @@
if pref_name in trans.get_user().preferences:
base_sort_key = from_json_string( trans.get_user().preferences[pref_name] )
# Build initial query
- query = self.build_initial_query( session )
+ query = self.build_initial_query( trans )
query = self.apply_default_filter( trans, query, **kwargs )
# Maintain sort state in generated urls
extra_url_args = {}
@@ -258,8 +257,8 @@
pass
def get_current_item( self, trans ):
return None
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
def apply_default_filter( self, trans, query, **kwargs):
return query
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/webapps/community/controllers/admin.py
--- a/lib/galaxy/webapps/community/controllers/admin.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/webapps/community/controllers/admin.py Mon May 24 11:22:12 2010 -0400
@@ -113,8 +113,8 @@
use_paging = True
def get_current_item( self, trans ):
return trans.user
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
class RoleListGrid( grids.Grid ):
class NameColumn( grids.TextColumn ):
@@ -211,8 +211,8 @@
use_paging = True
def get_current_item( self, trans ):
return None
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
def apply_default_filter( self, trans, query, **kwd ):
return query.filter( model.Role.type != model.Role.types.PRIVATE )
@@ -294,8 +294,8 @@
use_paging = True
def get_current_item( self, trans ):
return None
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
class ManageCategoryListGrid( grids.Grid ):
class NameColumn( grids.TextColumn ):
@@ -360,8 +360,8 @@
use_paging = True
def get_current_item( self, trans ):
return None
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
class ToolsByCategoryListGrid( grids.Grid ):
class NameColumn( grids.TextColumn ):
@@ -423,8 +423,8 @@
num_rows_per_page = 50
preserve_state = False
use_paging = True
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
def apply_default_filter( self, trans, query, **kwd ):
ids = kwd.get( 'ids', False )
if ids:
@@ -546,8 +546,8 @@
num_rows_per_page = 50
preserve_state = False
use_paging = True
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
def apply_default_filter( self, trans, query, **kwd ):
ids = kwd.get( 'ids', False )
if ids:
diff -r d22853e5963d -r 7faa12ac9746 lib/galaxy/webapps/community/controllers/tool.py
--- a/lib/galaxy/webapps/community/controllers/tool.py Mon May 24 10:53:55 2010 -0400
+++ b/lib/galaxy/webapps/community/controllers/tool.py Mon May 24 11:22:12 2010 -0400
@@ -94,8 +94,8 @@
num_rows_per_page = 50
preserve_state = False
use_paging = True
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
def apply_default_filter( self, trans, query, **kwd ):
ids = kwd.get( 'ids', False )
if not ids:
@@ -218,8 +218,8 @@
num_rows_per_page = 50
preserve_state = False
use_paging = True
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
def apply_default_filter( self, trans, query, **kwd ):
ids = kwd.get( 'ids', False )
if not ids:
@@ -295,8 +295,8 @@
num_rows_per_page = 50
preserve_state = False
use_paging = True
- def build_initial_query( self, session ):
- return session.query( self.model_class )
+ def build_initial_query( self, trans ):
+ return trans.sa_session.query( self.model_class )
class ToolController( BaseController ):
diff -r d22853e5963d -r 7faa12ac9746 templates/library/browse_libraries.mako
--- a/templates/library/browse_libraries.mako Mon May 24 10:53:55 2010 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-<%inherit file="/base.mako"/>
-<%namespace file="/message.mako" import="render_msg" />
-
-<%def name="title()">Browse Data Libraries</%def>
-
-<h2>Data Libraries</h2>
-
-%if message:
- ${render_msg( message, status )}
-%endif
-
-%if not libraries:
- You are not authorized to access any libraries
-%else:
- <table class="grid">
- <thead>
- <tr>
- <th>Name</th>
- <th>Description</th>
- </tr>
- </thead>
- <tbody>
- %for library in libraries:
- <tr class="libraryRow libraryOrFolderRow" id="libraryRow">
- <td><a href="${h.url_for( controller='library_common', action='browse_library', cntrller='library', id=trans.security.encode_id( library.id ), hidden_folder_ids='' )}">${library.name}</a></td>
- <td>${library.description}</td>
- </tr>
- %endfor
- </tbody>
- </table>
-%endif
diff -r d22853e5963d -r 7faa12ac9746 templates/library/grid.mako
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/templates/library/grid.mako Mon May 24 11:22:12 2010 -0400
@@ -0,0 +1,1 @@
+<%inherit file="/grid_base.mako"/>
diff -r d22853e5963d -r 7faa12ac9746 test/functional/test_library_security.py
--- a/test/functional/test_library_security.py Mon May 24 10:53:55 2010 -0400
+++ b/test/functional/test_library_security.py Mon May 24 11:22:12 2010 -0400
@@ -192,7 +192,7 @@
# regular_user2 should not be to see the library since they do not have
# Role One which is associated with the LIBRARY_ACCESS permission
self.login( email=regular_user2.email )
- self.browse_libraries_regular_user( check_str1="You are not authorized to access any libraries" )
+ self.browse_libraries_regular_user( check_str1="No Items" )
self.logout()
# regular_user3 should not be able to see 1.bed from the analysis view's access librarys
self.login( email=regular_user3.email )
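The signature change above is mechanical, but the payoff shows in logic like LibraryListGrid.apply_default_filter: with trans in hand, build_initial_query() can consult the current user, the security agent, or app config while building its base query. Below is a hedged sketch of a grid written against the new contract; MyItemListGrid and model.MyItem are hypothetical, while the grids and model imports are the ones used in the diff.

from galaxy.web.framework.helpers import grids
from galaxy import model

class MyItemListGrid(grids.Grid):
    model_class = model.MyItem  # hypothetical model class

    def build_initial_query(self, trans):
        query = trans.sa_session.query(self.model_class)
        if trans.user is None:
            # Per-user filtering like this is exactly what the old
            # build_initial_query( session ) signature could not express.
            query = query.filter(self.model_class.published == True)
        return query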
details: http://www.bx.psu.edu/hg/galaxy/rev/d22853e5963d
changeset: 3810:d22853e5963d
user: jeremy goecks <jeremy.goecks@emory.edu>
date: Mon May 24 10:53:55 2010 -0400
description:
In Page editor, prevent extra paragraphs from being inserted with embedded items.
diffstat:
templates/page/editor.mako | 29 ++++++++++++++++++++++++-----
1 files changed, 24 insertions(+), 5 deletions(-)
diffs (48 lines):
diff -r 18bd3fa93bed -r d22853e5963d templates/page/editor.mako
--- a/templates/page/editor.mako Sun May 23 08:46:07 2010 -0400
+++ b/templates/page/editor.mako Mon May 24 10:53:55 2010 -0400
@@ -474,20 +474,39 @@
// Embedded item HTML; item class is embedded in div container classes; this is necessary because the editor strips
// all non-standard attributes when it returns its content (e.g. it will not return an element attribute of the form
// item_class='History').
+ var item_elt_id = item_info.iclass + "-" + item_id;
var item_embed_html =
"\
- <div id='" + item_info.iclass + "-" + item_id + "' class='embedded-item " + item_info.singular.toLowerCase() +
+ <p><div id='" + item_elt_id + "' class='embedded-item " + item_info.singular.toLowerCase() +
" placeholder'> \
<p class='title'>Embedded Galaxy " + item_info.singular + " '" + item_name + "'</p> \
<p class='content'> \
[Do not edit this block; Galaxy will fill it in with the annotated " +
- item_info.singular.toLowerCase() + " when it is displayed.]</div> \
+ item_info.singular.toLowerCase() + " when it is displayed.] \
</p> \
- </div><p></p>";
+ </div></p>";
- // Insert embedded representation into document.
- // TODO: maybe try replace() instead to handle indenting?
+ // Insert embedded item into document.
wym.insert(item_embed_html);
+
+ // TODO: can we fix this?
+ // Due to oddities of wym.insert() [likely due to inserting a <div> and/or a complete paragraph], an
+ // empty paragraph may be included either before or after an embedded item. Remove these paragraphs.
+ $("#" + item_elt_id, wym._doc.body).each( function() {
+ // Remove previous empty paragraphs.
+ var prev_elt = $(this).prev();
+ if ( prev_elt.length != 0 && jQuery.trim(prev_elt.text()) == "" )
+ prev_elt.remove();
+
+ // Remove subsequent empty paragraphs.
+ /*
+ var next_elt = $(this).next();
+ var next_next_elt = next_elt.next();
+ if (next_next_elt.length != 0)
+ next_elt.remove();
+ */
+ });
+
});
hide_modal();
},
details: http://www.bx.psu.edu/hg/galaxy/rev/18bd3fa93bed
changeset: 3809:18bd3fa93bed
user: fubar: ross Lazarus at gmail period com
date: Sun May 23 08:46:07 2010 -0400
description:
branch merge again?
diffstat:
test-data/sanger_full_range_masked_N.fastqsanger | 8 +
test-data/sanger_full_range_masked_lowercase.fastqsanger | 8 +
tools/fastq/fastq_masker_by_quality.py | 83 ++++++++++++++++
tools/fastq/fastq_masker_by_quality.xml | 53 ++++++++++
4 files changed, 152 insertions(+), 0 deletions(-)
diffs (168 lines):
diff -r f175a156d7e0 -r 18bd3fa93bed test-data/sanger_full_range_masked_N.fastqsanger
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/sanger_full_range_masked_N.fastqsanger Sun May 23 08:46:07 2010 -0400
@@ -0,0 +1,8 @@
+@FAKE0001 Original version has PHRED scores from 0 to 93 inclusive (in that order)
+NNNNNNNNNNNNNNNNNNNNNCGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+@FAKE0002 Original version has PHRED scores from 93 to 0 inclusive (in that order)
+CATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCNNNNNNNNNNNNNNNNNNNNN
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#"!
diff -r f175a156d7e0 -r 18bd3fa93bed test-data/sanger_full_range_masked_lowercase.fastqsanger
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/sanger_full_range_masked_lowercase.fastqsanger Sun May 23 08:46:07 2010 -0400
@@ -0,0 +1,8 @@
+@FAKE0001 Original version has PHRED scores from 0 to 93 inclusive (in that order)
+acgtacgtacgtacgtacgtaCGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTAC
++
+!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
+@FAKE0002 Original version has PHRED scores from 93 to 0 inclusive (in that order)
+CATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCatgcatgcatgcatgcatgca
++
+~}|{zyxwvutsrqponmlkjihgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;:9876543210/.-,+*)('&%$#"!
diff -r f175a156d7e0 -r 18bd3fa93bed tools/fastq/fastq_masker_by_quality.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fastq/fastq_masker_by_quality.py Sun May 23 08:46:07 2010 -0400
@@ -0,0 +1,83 @@
+#Dan Blankenberg
+import string
+from optparse import OptionParser
+from galaxy_utils.sequence.fastq import fastqReader, fastqWriter
+
+
+def get_score_comparer( operator ):
+ if operator == 'gt':
+ return compare_gt
+ elif operator == 'ge':
+ return compare_ge
+ elif operator == 'eq':
+ return compare_eq
+ elif operator == 'lt':
+ return compare_lt
+ elif operator == 'le':
+ return compare_le
+ elif operator == 'ne':
+ return compare_ne
+ raise 'Invalid operator provided: %s' % operator
+
+def compare_gt( quality_score, threshold_value ):
+ return quality_score > threshold_value
+
+def compare_ge( quality_score, threshold_value ):
+ return quality_score >= threshold_value
+
+def compare_eq( quality_score, threshold_value ):
+ return quality_score == threshold_value
+
+def compare_ne( quality_score, threshold_value ):
+ return quality_score != threshold_value
+
+def compare_lt( quality_score, threshold_value ):
+ return quality_score < threshold_value
+
+def compare_le( quality_score, threshold_value ):
+ return quality_score <= threshold_value
+
+class BaseReplacer( object ):
+ def __init__( self, replace_character ):
+ self.replace_character = replace_character
+ def __call__( self, base_character ):
+ return self.replace_character
+
+def main():
+ usage = "usage: %prog [options] input_file output_file"
+ parser = OptionParser( usage=usage )
+ parser.add_option( '-f', '--format', dest='format', type='choice', default='sanger', choices=( 'sanger', 'cssanger', 'solexa', 'illumina' ), help='FASTQ variant type' )
+ parser.add_option( '-m', '--mask_character', dest='mask_character', default='N', help='Mask Character to use' )
+ parser.add_option( '-c', '--score_comparison', type="choice", dest='score_comparison', default='le', choices=('gt','ge','eq','lt', 'le', 'ne' ), help='Mask base when score is' )
+ parser.add_option( '-s', '--quality_score', type="float", dest='quality_score', default='0', help='Quality Score' )
+ parser.add_option( "-l", "--lowercase", action="store_true", dest="lowercase", default=False, help="Use lowercase masking")
+ ( options, args ) = parser.parse_args()
+
+ if len ( args ) != 2:
+ parser.error( "Need to specify an input file and an output file" )
+
+ score_comparer = get_score_comparer( options.score_comparison )
+
+ if options.lowercase:
+ base_masker = string.lower
+ else:
+ base_masker = BaseReplacer( options.mask_character )
+
+ out = fastqWriter( open( args[1], 'wb' ), format = options.format )
+
+ num_reads = None
+ num_reads_excluded = 0
+ for num_reads, fastq_read in enumerate( fastqReader( open( args[0] ), format = options.format ) ):
+ sequence_list = list( fastq_read.sequence )
+ for i, quality_score in enumerate( fastq_read.get_decimal_quality_scores() ):
+ if score_comparer( quality_score, options.quality_score ):
+ sequence_list[ i ] = base_masker( sequence_list[ i ] )
+ fastq_read.sequence = "".join( sequence_list )
+ out.write( fastq_read )
+
+ if num_reads is not None:
+ print "Processed %i %s reads." % ( num_reads + 1, options.format )
+ else:
+ print "No valid FASTQ reads were provided."
+
+if __name__ == "__main__": main()
diff -r f175a156d7e0 -r 18bd3fa93bed tools/fastq/fastq_masker_by_quality.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/fastq/fastq_masker_by_quality.xml Sun May 23 08:46:07 2010 -0400
@@ -0,0 +1,53 @@
+<tool id="fastq_masker_by_quality" name="FASTQ Masker" version="1.0.0">
+ <description>by quality score</description>
+ <command interpreter="python">fastq_masker_by_quality.py '$input_file' '$output_file' -f '${input_file.extension[len( 'fastq' ):]}' -s '${quality_score}' -c '${score_comparison}'
+ #if $mask_type.value == 'lowercase'
+ --lowercase
+ #else
+ -m '${mask_type}'
+ #end if
+ </command>
+ <inputs>
+ <param name="input_file" type="data" format="fastqsanger" label="File to mask" />
+ <param name="mask_type" type="select" label="Mask input with">
+ <option value="N">N's</option>
+ <option value="lowercase">Lowercase</option>
+ </param>
+ <param name="score_comparison" type="select" label="When score is">
+ <option value="le" selected="True">Less than or equal</option>
+ <option value="lt">Less than</option>
+ <option value="eq">Equal to</option>
+ <option value="ne">Not Equal to</option>
+ <option value="ge">Greater than</option>
+ <option value="gt">Greater than or equal</option>
+ </param>
+ <param name="quality_score" type="integer" value="0"/>
+ </inputs>
+ <outputs>
+ <data name="output_file" format="fastqsanger" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="input_file" value="sanger_full_range_original_sanger.fastqsanger" ftype="fastqsanger" />
+ <param name="mask_type" value="N" />
+ <param name="score_comparison" value="le" />
+ <param name="quality_score" value="20" />
+ <output name="output_file" file="sanger_full_range_masked_N.fastqsanger" />
+ </test>
+ <test>
+ <param name="input_file" value="sanger_full_range_original_sanger.fastqsanger" ftype="fastqsanger" />
+ <param name="mask_type" value="lowercase" />
+ <param name="score_comparison" value="le" />
+ <param name="quality_score" value="20" />
+ <output name="output_file" file="sanger_full_range_masked_lowercase.fastqsanger" />
+ </test>
+ </tests>
+ <help>
+**What it does**
+
+This tool allows masking base characters in FASTQ format files dependent upon user specified quality score value and comparison method.
+
+This tool is not available for use on color space (csSanger) formats.
+
+ </help>
+</tool>
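The heart of fastq_masker_by_quality.py is a per-base loop: pick a comparison operator, walk the read's decimal quality scores, and replace (or lowercase) every base whose score satisfies the comparison. A self-contained sketch of that core, without the galaxy_utils fastq reader; mask_read and its arguments are illustrative names.

import operator

# The same six comparisons the tool accepts, via the stdlib operator module.
COMPARERS = {'gt': operator.gt, 'ge': operator.ge, 'eq': operator.eq,
             'ne': operator.ne, 'lt': operator.lt, 'le': operator.le}

def mask_read(sequence, quality_scores, threshold, comparison='le', mask='N'):
    """Mask each base whose decimal quality score satisfies the comparison."""
    compare = COMPARERS[comparison]
    bases = list(sequence)
    for i, score in enumerate(quality_scores):
        if compare(score, threshold):
            # 'lowercase' mirrors the tool's --lowercase mode.
            bases[i] = bases[i].lower() if mask == 'lowercase' else mask
    return ''.join(bases)

# Example: mask everything at or below Q20.
print(mask_read('ACGTACGT', [30, 10, 40, 20, 35, 5, 33, 21], 20))
# -> ANGNANGT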
details: http://www.bx.psu.edu/hg/galaxy/rev/f175a156d7e0
changeset: 3808:f175a156d7e0
user: fubar: ross Lazarus at gmail period com
date: Sun May 23 08:45:01 2010 -0400
description:
Branch merge
diffstat:
eggs.ini | 2 +-
lib/galaxy/jobs/runners/pbs.py | 133 +++++++++++++-----------------------
tool_conf.xml.sample | 1 +
tools/rgenetics/rgtest_one_tool.sh | 16 ++--
4 files changed, 58 insertions(+), 94 deletions(-)
diffs (243 lines):
diff -r 72d709b2c198 -r f175a156d7e0 eggs.ini
--- a/eggs.ini Sat May 22 19:47:50 2010 -0400
+++ b/eggs.ini Sun May 23 08:45:01 2010 -0400
@@ -17,7 +17,7 @@
DRMAA_python = 0.2
MySQL_python = 1.2.3c1
numpy = 1.3.0
-pbs_python = 2.9.8
+pbs_python = 2.9.4
psycopg2 = 2.0.13
pycrypto = 2.0.1
pysam = 0.1.1
diff -r 72d709b2c198 -r f175a156d7e0 lib/galaxy/jobs/runners/pbs.py
--- a/lib/galaxy/jobs/runners/pbs.py Sat May 22 19:47:50 2010 -0400
+++ b/lib/galaxy/jobs/runners/pbs.py Sun May 23 08:45:01 2010 -0400
@@ -50,19 +50,6 @@
%s
"""
-# From pbs' job.h
-JOB_EXIT_STATUS = {
- 0: "job exec successful",
- -1: "job exec failed, before files, no retry",
- -2: "job exec failed, after files, no retry",
- -3: "job execution failed, do retry",
- -4: "job aborted on MOM initialization",
- -5: "job aborted on MOM init, chkpt, no migrate",
- -6: "job aborted on MOM init, chkpt, ok migrate",
- -7: "job restart failed",
- -8: "exec() of user command failed",
-}
-
class PBSJobState( object ):
def __init__( self ):
"""
@@ -78,7 +65,6 @@
self.efile = None
self.runner_url = None
self.check_count = 0
- self.stop_job = False
class PBSJobRunner( object ):
"""
@@ -207,9 +193,8 @@
pbs_options = self.determine_pbs_options( runner_url )
c = pbs.pbs_connect( pbs_server_name )
if c <= 0:
- errno, text = pbs.error()
job_wrapper.fail( "Unable to queue job for execution. Resubmitting the job may succeed." )
- log.error( "Connection to PBS server for submit failed: %s: %s" % ( errno, text ) )
+ log.error( "Connection to PBS server for submit failed" )
return
# define job attributes
@@ -351,78 +336,58 @@
log.debug( "(%s/%s) Skipping state check because PBS server connection failed" % ( galaxy_job_id, job_id ) )
new_watched.append( pbs_job_state )
continue
- try:
+ if statuses.has_key( job_id ):
status = statuses[job_id]
- except KeyError:
+ if status.job_state != old_state:
+ log.debug("(%s/%s) job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
+ if status.job_state == "R" and not pbs_job_state.running:
+ pbs_job_state.running = True
+ pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
+ if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
+ # Every 20th time the job status is checked, do limit checks (if configured)
+ if self.app.config.output_size_limit > 0:
+ # Check the size of the job outputs
+ fail = False
+ for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
+ if size > self.app.config.output_size_limit:
+ pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
+ % nice_size( self.app.config.output_size_limit )
+ log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
+ % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
+ self.work_queue.put( ( 'fail', pbs_job_state ) )
+ fail = True
+ break
+ if fail:
+ continue
+ if self.job_walltime is not None:
+ # Check the job's execution time
+ if status.get( 'resources_used', False ):
+ # resources_used may not be in the status for new jobs
+ h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
+ time_executing = timedelta( 0, s, 0, 0, m, h )
+ if time_executing > self.job_walltime:
+ pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
+ % self.app.config.job_walltime
+ log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
+ % ( galaxy_job_id, job_id ) )
+ self.work_queue.put( ( 'fail', pbs_job_state ) )
+ continue
+ pbs_job_state.old_state = status.job_state
+ new_watched.append( pbs_job_state )
+ else:
try:
- # Recheck to make sure it wasn't a communication problem
+ # recheck to make sure it wasn't a communication problem
self.check_single_job( pbs_server_name, job_id )
- log.warning( "(%s/%s) PBS job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
+ log.warning( "(%s/%s) job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
new_watched.append( pbs_job_state )
except:
errno, text = pbs.error()
- if errno == 15001:
- # 15001 == job not in queue
- log.debug("(%s/%s) PBS job has left queue" % (galaxy_job_id, job_id) )
+ if errno != 15001:
+ log.info("(%s/%s) state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
+ new_watched.append( pbs_job_state )
+ else:
+ log.debug("(%s/%s) job has left queue" % (galaxy_job_id, job_id) )
self.work_queue.put( ( 'finish', pbs_job_state ) )
- else:
- # Unhandled error, continue to monitor
- log.info("(%s/%s) PBS state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
- new_watched.append( pbs_job_state )
- continue
- if status.job_state != old_state:
- log.debug("(%s/%s) PBS job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
- if status.job_state == "R" and not pbs_job_state.running:
- pbs_job_state.running = True
- pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
- if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
- # Every 20th time the job status is checked, do limit checks (if configured)
- if self.app.config.output_size_limit > 0:
- # Check the size of the job outputs
- fail = False
- for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
- if size > self.app.config.output_size_limit:
- pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
- % nice_size( self.app.config.output_size_limit )
- log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
- % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
- pbs_job_state.stop_job = True
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- fail = True
- break
- if fail:
- continue
- if self.job_walltime is not None:
- # Check the job's execution time
- if status.get( 'resources_used', False ):
- # resources_used may not be in the status for new jobs
- h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
- time_executing = timedelta( 0, s, 0, 0, m, h )
- if time_executing > self.job_walltime:
- pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
- % self.app.config.job_walltime
- log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
- % ( galaxy_job_id, job_id ) )
- pbs_job_state.stop_job = True
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- continue
- elif status.job_state == "C":
- # "keep_completed" is enabled in PBS, so try to check exit status
- try:
- assert int( status.exit_status ) == 0
- log.debug("(%s/%s) PBS job has completed successfully" % ( galaxy_job_id, job_id ) )
- except AssertionError:
- pbs_job_state.fail_message = 'Job cannot be completed due to a cluster error. Please retry or'
- log.error( '(%s/%s) PBS job failed: %s' % ( galaxy_job_id, job_id, JOB_EXIT_STATUS.get( int( status.exit_status ), 'Unknown error: %s' % status.exit_status ) ) )
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- continue
- except AttributeError:
- # No exit_status, can't verify proper completion so we just have to assume success.
- log.debug("(%s/%s) PBS job has completed" % ( galaxy_job_id, job_id ) )
- self.work_queue.put( ( 'finish', pbs_job_state ) )
- continue
- pbs_job_state.old_state = status.job_state
- new_watched.append( pbs_job_state )
# Replace the watch list with the updated version
self.watched = new_watched
@@ -446,10 +411,9 @@
log.debug("connection to PBS server %s for state check failed" % pbs_server_name )
failures.append( pbs_server_name )
continue
- stat_attrl = pbs.new_attrl(3)
+ stat_attrl = pbs.new_attrl(2)
stat_attrl[0].name = pbs.ATTR_state
stat_attrl[1].name = pbs.ATTR_used
- stat_attrl[2].name = pbs.ATTR_exitstat
jobs = pbs.pbs_statjob( c, None, stat_attrl, None )
pbs.pbs_disconnect( c )
statuses.update( self.convert_statjob_to_bunches( jobs ) )
@@ -516,8 +480,7 @@
"""
Seperated out so we can use the worker threads for it.
"""
- if pbs_job_state.stop_job:
- self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) )
+ self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) )
pbs_job_state.job_wrapper.fail( pbs_job_state.fail_message )
self.cleanup( ( pbs_job_state.ofile, pbs_job_state.efile, pbs_job_state.job_file ) )
diff -r 72d709b2c198 -r f175a156d7e0 tool_conf.xml.sample
--- a/tool_conf.xml.sample Sat May 22 19:47:50 2010 -0400
+++ b/tool_conf.xml.sample Sun May 23 08:45:01 2010 -0400
@@ -221,6 +221,7 @@
<tool file="fastq/fastq_filter.xml" />
<tool file="fastq/fastq_trimmer.xml" />
<tool file="fastq/fastq_trimmer_by_quality.xml" />
+ <tool file="fastq/fastq_masker_by_quality.xml" />
<tool file="fastq/fastq_manipulation.xml" />
<tool file="fastq/fastq_to_fasta.xml" />
<tool file="fastq/fastq_to_tabular.xml" />
diff -r 72d709b2c198 -r f175a156d7e0 tools/rgenetics/rgtest_one_tool.sh
--- a/tools/rgenetics/rgtest_one_tool.sh Sat May 22 19:47:50 2010 -0400
+++ b/tools/rgenetics/rgtest_one_tool.sh Sun May 23 08:45:01 2010 -0400
@@ -1,20 +1,20 @@
#!/bin/sh
# script to generate all functional test outputs for each rgenetics tool
# could be run at installation to ensure all dependencies are in place?
-case $# in 0) echo "USAGE: ${0##*/} TooltoTest"; exit 1;;
- [2-10]*) echo "Too many arguments - name of tool only"; exit 2;;
+case $# in 0) echo "USAGE: ${0##*/} TooltoTest galaxyRoot outRoot"; exit 1;;
+ [1-3]*) echo "Need ToolToTest and paths for galaxyRoot outRoot as parameters"; exit 2;;
+ [5-10]*) echo "Too many arguments - ToolToTest and paths for galaxyRoot outRoot as parameters"; exit 2;;
*)
esac
-GALAXYROOT=`pwd`
-#PATHTOGALAXY='/opt/galaxy' # whatever
-PATHTOGALAXY='/share/shared/galaxy' # whatever
+GALAXYROOT=$2
+OUTROOT=$3
echo "using $GALAXYROOT"
# change this as needed for your local install
INPATH="${GALAXYROOT}/test-data"
BINPATH="${GALAXYROOT}/tool-data/rg/bin"
-TOOLPATH="${PATHTOGALAXY}/tools/rgenetics"
-OROOT="${GALAXYROOT}/test-data/rgtestouts"
-NORMALOROOT="${GALAXYROOT}/test-data"
+TOOLPATH="${GALAXYROOT}/tools/rgenetics"
+OROOT="${OUTROOT}/test-data/rgtestouts"
+NORMALOROOT="${OUTROOT}/test-data"
case "$1" in
'rgManQQ')
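Buried in the pbs.py hunk above is the walltime enforcement that survives the merge: on every 20th status check, the runner parses PBS's resources_used.walltime (reported as HH:MM:SS) into a timedelta and dequeues the job once it exceeds the configured limit. A hedged standalone sketch of that check follows; parse_walltime and the sample values are illustrative.

from datetime import timedelta

def parse_walltime(walltime):
    """Convert a PBS 'HH:MM:SS' string into a timedelta."""
    h, m, s = (int(part) for part in walltime.split(':'))
    return timedelta(hours=h, minutes=m, seconds=s)

job_walltime = parse_walltime('24:00:00')    # configured limit
time_executing = parse_walltime('25:13:07')  # from status.resources_used.walltime
if time_executing > job_walltime:
    print('Dequeueing job since walltime has been reached')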
details: http://www.bx.psu.edu/hg/galaxy/rev/72d709b2c198
changeset: 3807:72d709b2c198
user: fubar: ross Lazarus at gmail period com
date: Sat May 22 19:47:50 2010 -0400
description:
Require two paths for rgtest.sh: galaxyroot and outroot.
Test with a /tmp path for outroot... this script will recreate all the
snp/wga test outputs if you ask it to - which may or may not be what
you want...
diffstat:
tools/rgenetics/rgEigPCA.xml | 2 +-
tools/rgenetics/rgtest.sh | 27 +++++++++++++++++++++++----
2 files changed, 24 insertions(+), 5 deletions(-)
diffs (53 lines):
diff -r 3b8e4af25be2 -r 72d709b2c198 tools/rgenetics/rgEigPCA.xml
--- a/tools/rgenetics/rgEigPCA.xml Fri May 21 15:50:49 2010 -0400
+++ b/tools/rgenetics/rgEigPCA.xml Sat May 22 19:47:50 2010 -0400
@@ -48,7 +48,7 @@
<param name="t" value="2" />
<param name="s" value="2" />
<output name='out_file1' file='rgtestouts/rgEigPCA/rgEigPCAtest1.html' ftype='html' compare='diff' lines_diff='195'>
- <extra_files type="file" name='rgEigPCAtest1_PCAPlot.pdf' value="rgtestouts/rgEigPCA/rgEigPCAtest1_PCAPlot.pdf" compare="sim_size" delta="30000"/>
+ <extra_files type="file" name='rgEigPCAtest1_PCAPlot.pdf' value="rgtestouts/rgEigPCA/rgEigPCAtest1_PCAPlot.pdf" compare="sim_size" delta="3000"/>
</output>
<output name='pca' file='rgtestouts/rgEigPCA/rgEigPCAtest1.txt' compare='diff'/>
</test>
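
The only change to this test tightens the sim_size delta from 30000 to 3000 bytes.
With compare="sim_size", the test framework checks that the generated file's size
is close to the reference file's size rather than diffing contents; my
understanding of that check, as a sketch (the helper name is hypothetical):

import os

def sim_size_ok(generated, expected, delta=3000):
    # sim_size-style comparison: accept when the two files differ in
    # size by at most delta bytes; the contents are not inspected.
    return abs(os.path.getsize(generated) - os.path.getsize(expected)) <= delta

So the tightened delta means the test PDF must now land within 3000 bytes of the
checked-in rgtestouts/rgEigPCA/rgEigPCAtest1_PCAPlot.pdf.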
diff -r 3b8e4af25be2 -r 72d709b2c198 tools/rgenetics/rgtest.sh
--- a/tools/rgenetics/rgtest.sh Fri May 21 15:50:49 2010 -0400
+++ b/tools/rgenetics/rgtest.sh Sat May 22 19:47:50 2010 -0400
@@ -1,14 +1,33 @@
#!/bin/sh
# script to generate all functional test outputs for each rgenetics tool
# could be run at installation to ensure all dependencies are in place?
-GALAXYROOT=`pwd`
-echo "using $GALAXYROOT"
+if test $# -lt 2
+then
+ echo "We need to agree on 2 parameters - GalaxyRoot and OutRoot - use paths to galaxy and galaxy to re-create all test outputs"
+ echo "or more prudently, galaxy and /tmp/foo for checking without updating all your test-data"
+ echo "Exiting with no changes"
+ exit 1
+fi
+if [ $1 ]
+then
+ GALAXYROOT=$1
+else
+ GALAXYROOT=`pwd`
+fi
+if [ $2 ]
+then
+ OUTROOT=$2
+else
+ OUTROOT=`pwd`
+ OUTROOT="$OUTROOT/test-data"
+fi
+echo "using $GALAXYROOT as galaxyroot and $OUTROOT as outroot"
# change this as needed for your local install
INPATH="${GALAXYROOT}/test-data"
BINPATH="${GALAXYROOT}/tool-data/rg/bin"
TOOLPATH="${GALAXYROOT}/tools/rgenetics"
-OROOT="${GALAXYROOT}/test-data/rgtestouts"
-NORMALOROOT="${GALAXYROOT}/test-data"
+OROOT="${OUTROOT}/test-data/rgtestouts"
+NORMALOROOT="${OUTROOT}/test-data"
mkdir -p $OROOT
rm -rf $OROOT/*
# needed for testing - but tool versions should be bumped if this is rerun?
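
As the new guard message suggests, the cautious invocation is something like
"sh rgtest.sh /path/to/galaxy /tmp/foo": outputs then land under
/tmp/foo/test-data/rgtestouts for inspection instead of clobbering the tracked
test-data, while passing the galaxy root for both arguments regenerates the
committed test outputs in place.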
details: http://www.bx.psu.edu/hg/galaxy/rev/aa84d012cb50
changeset: 3806:aa84d012cb50
user: fubar/ross period lazarus at gmail d0t com
date: Sat May 22 20:04:38 2010 -0400
description:
Require 2 paths for rgtest.sh
This script will overwrite all the snp/wga test outputs if you ask it to
diffstat:
tools/rgenetics/rgtest.sh | 27 +++++++++++++++++++++++----
1 files changed, 23 insertions(+), 4 deletions(-)
diffs (41 lines):
diff -r 88afe0a30dc7 -r aa84d012cb50 tools/rgenetics/rgtest.sh
--- a/tools/rgenetics/rgtest.sh Fri May 21 16:53:42 2010 -0400
+++ b/tools/rgenetics/rgtest.sh Sat May 22 20:04:38 2010 -0400
@@ -1,14 +1,33 @@
#!/bin/sh
# script to generate all functional test outputs for each rgenetics tool
# could be run at installation to ensure all dependencies are in place?
-GALAXYROOT=`pwd`
-echo "using $GALAXYROOT"
+if test $# -lt 2
+then
+ echo "We need to agree on 2 parameters - GalaxyRoot and OutRoot - use paths to galaxy and galaxy to re-create all test outputs"
+ echo "or more prudently, galaxy and /tmp/foo for checking without updating all your test-data"
+ echo "Exiting with no changes"
+ exit 1
+fi
+if [ $1 ]
+then
+ GALAXYROOT=$1
+else
+ GALAXYROOT=`pwd`
+fi
+if [ $2 ]
+then
+ OUTROOT=$2
+else
+ OUTROOT=`pwd`
+ OUTROOT="$OUTROOT/test-data"
+fi
+echo "using $GALAXYROOT as galaxyroot and $OUTROOT as outroot"
# change this as needed for your local install
INPATH="${GALAXYROOT}/test-data"
BINPATH="${GALAXYROOT}/tool-data/rg/bin"
TOOLPATH="${GALAXYROOT}/tools/rgenetics"
-OROOT="${GALAXYROOT}/test-data/rgtestouts"
-NORMALOROOT="${GALAXYROOT}/test-data"
+OROOT="${OUTROOT}/test-data/rgtestouts"
+NORMALOROOT="${OUTROOT}/test-data"
mkdir -p $OROOT
rm -rf $OROOT/*
# needed for testing - but tool versions should be bumped if this is rerun?
details: http://www.bx.psu.edu/hg/galaxy/rev/88afe0a30dc7
changeset: 3805:88afe0a30dc7
user: Nate Coraor <nate(a)bx.psu.edu>
date: Fri May 21 16:53:42 2010 -0400
description:
merge backout
diffstat:
eggs.ini | 2 +-
lib/galaxy/jobs/runners/pbs.py | 133 ++++++++++++++--------------------------
2 files changed, 49 insertions(+), 86 deletions(-)
diffs (200 lines):
diff -r c9b41f94d707 -r 88afe0a30dc7 eggs.ini
--- a/eggs.ini Fri May 21 16:48:59 2010 -0400
+++ b/eggs.ini Fri May 21 16:53:42 2010 -0400
@@ -17,7 +17,7 @@
DRMAA_python = 0.2
MySQL_python = 1.2.3c1
numpy = 1.3.0
-pbs_python = 2.9.8
+pbs_python = 2.9.4
psycopg2 = 2.0.13
pycrypto = 2.0.1
pysam = 0.1.1
diff -r c9b41f94d707 -r 88afe0a30dc7 lib/galaxy/jobs/runners/pbs.py
--- a/lib/galaxy/jobs/runners/pbs.py Fri May 21 16:48:59 2010 -0400
+++ b/lib/galaxy/jobs/runners/pbs.py Fri May 21 16:53:42 2010 -0400
@@ -50,19 +50,6 @@
%s
"""
-# From pbs' job.h
-JOB_EXIT_STATUS = {
- 0: "job exec successful",
- -1: "job exec failed, before files, no retry",
- -2: "job exec failed, after files, no retry",
- -3: "job execution failed, do retry",
- -4: "job aborted on MOM initialization",
- -5: "job aborted on MOM init, chkpt, no migrate",
- -6: "job aborted on MOM init, chkpt, ok migrate",
- -7: "job restart failed",
- -8: "exec() of user command failed",
-}
-
class PBSJobState( object ):
def __init__( self ):
"""
@@ -78,7 +65,6 @@
self.efile = None
self.runner_url = None
self.check_count = 0
- self.stop_job = False
class PBSJobRunner( object ):
"""
@@ -207,9 +193,8 @@
pbs_options = self.determine_pbs_options( runner_url )
c = pbs.pbs_connect( pbs_server_name )
if c <= 0:
- errno, text = pbs.error()
job_wrapper.fail( "Unable to queue job for execution. Resubmitting the job may succeed." )
- log.error( "Connection to PBS server for submit failed: %s: %s" % ( errno, text ) )
+ log.error( "Connection to PBS server for submit failed" )
return
# define job attributes
@@ -351,78 +336,58 @@
log.debug( "(%s/%s) Skipping state check because PBS server connection failed" % ( galaxy_job_id, job_id ) )
new_watched.append( pbs_job_state )
continue
- try:
+ if statuses.has_key( job_id ):
status = statuses[job_id]
- except KeyError:
+ if status.job_state != old_state:
+ log.debug("(%s/%s) job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
+ if status.job_state == "R" and not pbs_job_state.running:
+ pbs_job_state.running = True
+ pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
+ if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
+ # Every 20th time the job status is checked, do limit checks (if configured)
+ if self.app.config.output_size_limit > 0:
+ # Check the size of the job outputs
+ fail = False
+ for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
+ if size > self.app.config.output_size_limit:
+ pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
+ % nice_size( self.app.config.output_size_limit )
+ log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
+ % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
+ self.work_queue.put( ( 'fail', pbs_job_state ) )
+ fail = True
+ break
+ if fail:
+ continue
+ if self.job_walltime is not None:
+ # Check the job's execution time
+ if status.get( 'resources_used', False ):
+ # resources_used may not be in the status for new jobs
+ h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
+ time_executing = timedelta( 0, s, 0, 0, m, h )
+ if time_executing > self.job_walltime:
+ pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
+ % self.app.config.job_walltime
+ log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
+ % ( galaxy_job_id, job_id ) )
+ self.work_queue.put( ( 'fail', pbs_job_state ) )
+ continue
+ pbs_job_state.old_state = status.job_state
+ new_watched.append( pbs_job_state )
+ else:
try:
- # Recheck to make sure it wasn't a communication problem
+ # recheck to make sure it wasn't a communication problem
self.check_single_job( pbs_server_name, job_id )
- log.warning( "(%s/%s) PBS job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
+ log.warning( "(%s/%s) job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
new_watched.append( pbs_job_state )
except:
errno, text = pbs.error()
- if errno == 15001:
- # 15001 == job not in queue
- log.debug("(%s/%s) PBS job has left queue" % (galaxy_job_id, job_id) )
+ if errno != 15001:
+ log.info("(%s/%s) state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
+ new_watched.append( pbs_job_state )
+ else:
+ log.debug("(%s/%s) job has left queue" % (galaxy_job_id, job_id) )
self.work_queue.put( ( 'finish', pbs_job_state ) )
- else:
- # Unhandled error, continue to monitor
- log.info("(%s/%s) PBS state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
- new_watched.append( pbs_job_state )
- continue
- if status.job_state != old_state:
- log.debug("(%s/%s) PBS job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
- if status.job_state == "R" and not pbs_job_state.running:
- pbs_job_state.running = True
- pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
- if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
- # Every 20th time the job status is checked, do limit checks (if configured)
- if self.app.config.output_size_limit > 0:
- # Check the size of the job outputs
- fail = False
- for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
- if size > self.app.config.output_size_limit:
- pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
- % nice_size( self.app.config.output_size_limit )
- log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
- % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
- pbs_job_state.stop_job = True
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- fail = True
- break
- if fail:
- continue
- if self.job_walltime is not None:
- # Check the job's execution time
- if status.get( 'resources_used', False ):
- # resources_used may not be in the status for new jobs
- h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
- time_executing = timedelta( 0, s, 0, 0, m, h )
- if time_executing > self.job_walltime:
- pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
- % self.app.config.job_walltime
- log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
- % ( galaxy_job_id, job_id ) )
- pbs_job_state.stop_job = True
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- continue
- elif status.job_state == "C":
- # "keep_completed" is enabled in PBS, so try to check exit status
- try:
- assert int( status.exit_status ) == 0
- log.debug("(%s/%s) PBS job has completed successfully" % ( galaxy_job_id, job_id ) )
- except AssertionError:
- pbs_job_state.fail_message = 'Job cannot be completed due to a cluster error. Please retry or'
- log.error( '(%s/%s) PBS job failed: %s' % ( galaxy_job_id, job_id, JOB_EXIT_STATUS.get( int( status.exit_status ), 'Unknown error: %s' % status.exit_status ) ) )
- self.work_queue.put( ( 'fail', pbs_job_state ) )
- continue
- except AttributeError:
- # No exit_status, can't verify proper completion so we just have to assume success.
- log.debug("(%s/%s) PBS job has completed" % ( galaxy_job_id, job_id ) )
- self.work_queue.put( ( 'finish', pbs_job_state ) )
- continue
- pbs_job_state.old_state = status.job_state
- new_watched.append( pbs_job_state )
# Replace the watch list with the updated version
self.watched = new_watched
@@ -446,10 +411,9 @@
log.debug("connection to PBS server %s for state check failed" % pbs_server_name )
failures.append( pbs_server_name )
continue
- stat_attrl = pbs.new_attrl(3)
+ stat_attrl = pbs.new_attrl(2)
stat_attrl[0].name = pbs.ATTR_state
stat_attrl[1].name = pbs.ATTR_used
- stat_attrl[2].name = pbs.ATTR_exitstat
jobs = pbs.pbs_statjob( c, None, stat_attrl, None )
pbs.pbs_disconnect( c )
statuses.update( self.convert_statjob_to_bunches( jobs ) )
@@ -516,8 +480,7 @@
"""
Seperated out so we can use the worker threads for it.
"""
- if pbs_job_state.stop_job:
- self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) )
+ self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) )
pbs_job_state.job_wrapper.fail( pbs_job_state.fail_message )
self.cleanup( ( pbs_job_state.ofile, pbs_job_state.efile, pbs_job_state.job_file ) )
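
One detail worth noting in the restored monitor loop: PBS reports
resources_used.walltime as HH:MM:SS, and the diff's timedelta(0, s, 0, 0, m, h)
relies on the positional signature timedelta(days, seconds, microseconds,
milliseconds, minutes, hours). The same parse spelled out with keyword
arguments, as a sketch:

from datetime import timedelta

def parse_walltime(walltime):
    # PBS walltime strings look like "12:34:56" (hours:minutes:seconds).
    h, m, s = [int(part) for part in walltime.split(":")]
    # Same value as timedelta(0, s, 0, 0, m, h) in the diff, with the
    # positional ordering made explicit.
    return timedelta(hours=h, minutes=m, seconds=s)

assert parse_walltime("01:30:00") > timedelta(hours=1)

The runner compares this value against self.job_walltime and dequeues the job
once the configured limit is exceeded.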