details: http://www.bx.psu.edu/hg/galaxy/rev/7ae75e6d9d6a
changeset: 1600:7ae75e6d9d6a
user: Greg Von Kuster <greg@bx.psu.edu>
date: Mon Nov 03 15:31:38 2008 -0500
description:
Integrate "session_create_speed" and "faster_random" patches from James.
8 file(s) affected in this change:
lib/galaxy/model/mapping.py
lib/galaxy/tools/actions/__init__.py
lib/galaxy/web/controllers/root.py
lib/galaxy/web/controllers/tool_runner.py
lib/galaxy/web/controllers/user.py
lib/galaxy/web/framework/__init__.py
lib/galaxy/web/security/__init__.py
templates/root/history.mako
diffs (650 lines):
diff -r 13cbdd1bbd19 -r 7ae75e6d9d6a lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py Mon Nov 03 15:21:03 2008 -0500
+++ b/lib/galaxy/model/mapping.py Mon Nov 03 15:31:38 2008 -0500
@@ -326,6 +326,7 @@
assign_mapper( context, GalaxySession, GalaxySession.table,
properties=dict( histories=relation( GalaxySessionToHistoryAssociation ),
+ current_history=relation( History ),
user=relation( User.mapper ) ) )
assign_mapper( context, GalaxySessionToHistoryAssociation, GalaxySessionToHistoryAssociation.table,
diff -r 13cbdd1bbd19 -r 7ae75e6d9d6a lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py Mon Nov 03 15:21:03 2008 -0500
+++ b/lib/galaxy/tools/actions/__init__.py Mon Nov 03 15:31:38 2008 -0500
@@ -177,7 +177,7 @@
trans.app.model.flush()
# Create the job object
job = trans.app.model.Job()
- job.session_id = trans.get_galaxy_session( create=True ).id
+ job.session_id = trans.get_galaxy_session().id
job.history_id = trans.history.id
job.tool_id = tool.id
try:
diff -r 13cbdd1bbd19 -r 7ae75e6d9d6a lib/galaxy/web/controllers/root.py
--- a/lib/galaxy/web/controllers/root.py Mon Nov 03 15:21:03 2008 -0500
+++ b/lib/galaxy/web/controllers/root.py Mon Nov 03 15:31:38 2008 -0500
@@ -54,10 +54,7 @@
NOTE: No longer accepts "id" or "template" options for security reasons.
"""
- try:
- history = trans.get_history()
- except:
- return self.history_new(trans)
+ history = trans.get_history()
if as_xml:
trans.response.set_content_type('text/xml')
return trans.fill_template_mako( "root/history_as_xml.mako", history=history )
diff -r 13cbdd1bbd19 -r 7ae75e6d9d6a lib/galaxy/web/controllers/tool_runner.py
--- a/lib/galaxy/web/controllers/tool_runner.py Mon Nov 03 15:21:03 2008 -0500
+++ b/lib/galaxy/web/controllers/tool_runner.py Mon Nov 03 15:31:38 2008 -0500
@@ -41,7 +41,6 @@
return "Tool '%s' does not exist, kwd=%s " % (tool_id, kwd)
params = util.Params( kwd, sanitize=tool.options.sanitize, tool=tool )
history = trans.get_history()
- trans.ensure_valid_galaxy_session()
template, vars = tool.handle_input( trans, params.__dict__ )
if len(params) > 0:
trans.log_event( "Tool params: %s" % (str(params)), tool_id=tool_id )
diff -r 13cbdd1bbd19 -r 7ae75e6d9d6a lib/galaxy/web/controllers/user.py
--- a/lib/galaxy/web/controllers/user.py Mon Nov 03 15:21:03 2008 -0500
+++ b/lib/galaxy/web/controllers/user.py Mon Nov 03 15:31:38 2008 -0500
@@ -82,10 +82,7 @@
elif not user.check_password( password ):
password_error = "Invalid password"
else:
- trans.set_user( user )
- trans.ensure_valid_galaxy_session()
- # Associate user with galaxy_session and history
- trans.make_associations()
+ trans.handle_user_login( user )
trans.log_event( "User logged in" )
return trans.show_ok_message( "Now logged in as " + user.email, refresh_frames=['masthead', 'history'] )
return trans.show_form(
@@ -97,7 +94,7 @@
def logout( self, trans ):
# Since logging an event requires a session, we'll log prior to ending the session
trans.log_event( "User logged out" )
- new_galaxy_session = trans.logout_galaxy_session()
+ trans.handle_user_logout()
return trans.show_ok_message( "You are no longer logged in", refresh_frames=['masthead', 'history'] )
@web.expose
@@ -118,12 +115,7 @@
user = trans.app.model.User( email=email )
user.set_password_cleartext( password )
user.flush()
- trans.set_user( user )
- trans.ensure_valid_galaxy_session()
- """
- Associate user with galaxy_session and history
- """
- trans.make_associations()
+ trans.handle_user_login( user )
trans.log_event( "User created a new account" )
trans.log_event( "User logged in" )
#subscribe user to email list
diff -r 13cbdd1bbd19 -r 7ae75e6d9d6a lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py Mon Nov 03 15:21:03 2008 -0500
+++ b/lib/galaxy/web/framework/__init__.py Mon Nov 03 15:31:38 2008 -0500
@@ -109,12 +109,14 @@
self.__history = NOT_SET
self.__galaxy_session = NOT_SET
base.DefaultWebTransaction.__init__( self, environ )
- self.app.model.context.current.clear()
+ self.sa_session.clear()
self.debug = asbool( self.app.config.get( 'debug', False ) )
# Flag indicating whether we are in workflow building mode (means
# that the current history should not be used for parameter values
# and such).
self.workflow_building_mode = False
+ # Always have a valid galaxy session
+ self.__ensure_valid_session()
@property
def sa_session( self ):
"""
@@ -140,7 +142,6 @@
except:
event.history_id = None
event.user = self.user
- self.ensure_valid_galaxy_session()
event.session_id = self.galaxy_session.id
event.flush()
def get_cookie( self, name='galaxysession' ):
@@ -163,277 +164,208 @@
tstamp = time.localtime ( time.time() + 3600 * 24 * age )
self.response.cookies[name]['expires'] = time.strftime( '%a, %d-%b-%Y %H:%M:%S GMT', tstamp )
self.response.cookies[name]['version'] = version
+ #@property
+ #def galaxy_session( self ):
+ # if not self.__galaxy_session:
+ # self.__ensure_valid_session()
+ # return self.__galaxy_session
+ def __ensure_valid_session( self ):
+ """
+ Ensure that a valid Galaxy session exists and is available as
+ trans.session (part of initialization)
+
+ Support for universe_session and universe_user cookies has been
+ removed as of 31 Oct 2008.
+ """
+ sa_session = self.sa_session
+ # Try to load an existing session
+ secure_id = self.get_cookie( name='galaxysession' )
+ galaxy_session = None
+ prev_galaxy_session = None
+ user_for_new_session = None
+ invalidate_existing_session = False
+ # Track whether the session has changed so we can avoid calling flush
+ # in the most common case (session exists and is valid).
+ galaxy_session_requires_flush = False
+ if secure_id:
+ # Decode the cookie value to get the session_key
+ session_key = self.security.decode_session_key( secure_id )
+ # Retrieve the galaxy_session via the unique session_key
+ galaxy_session = sa_session.query( self.app.model.GalaxySession ).filter_by( session_key=session_key, is_valid=True ).first()
+ # If remote user is in use it can invalidate the session, so we need
+ # to check some things now.
+ if self.app.config.use_remote_user:
+ assert "HTTP_REMOTE_USER" in self.environ, \
+ "use_remote_user is set but no HTTP_REMOTE_USER variable"
+ remote_user_email = self.environ[ 'HTTP_REMOTE_USER' ]
+ if galaxy_session:
+ # An existing session, make sure correct association exists
+ if galaxy_session.user is None:
+ # No user, associate
+ galaxy_session.user = self.__get_or_create_remote_user( remote_user_email )
+ galaxy_session_requires_flush = True
+ elif galaxy_session.user.email != remote_user_email:
+ # Session exists but is not associated with the correct
+ # remote user
+ invalidate_existing_session = True
+ user_for_new_session = self.__get_or_create_remote_user( remote_user_email )
+ log.warning( "User logged in as '%s' externally, but has a cookie as '%s' invalidating session",
+ remote_user_email, prev_galaxy_session.user.email )
+ else:
+ if galaxy_session is not None and galaxy_session.user and galaxy_session.user.external:
+ # Remote user support is not enabled, but there is an existing
+ # session with an external user, invalidate
+ invalidate_existing_session = True
+ log.warning( "User '%s' is an external user with an existing session, invalidating session since external auth is disabled",
+ galaxy_session.user.email )
+ # Do we need to invalidate the session for some reason?
+ if invalidate_existing_session:
+ prev_galaxy_session = galaxy_session
+ prev_galaxy_session.is_valid = False
+ galaxy_session = None
+ # No relevant cookies, or couldn't find, or invalid, so create a new session
+ if galaxy_session is None:
+ galaxy_session = self.__create_new_session( prev_galaxy_session, user_for_new_session )
+ galaxy_session_requires_flush = True
+ self.galaxy_session = galaxy_session
+ self.__update_session_cookie()
+ else:
+ self.galaxy_session = galaxy_session
+ # Do we need to flush the session?
+ if galaxy_session_requires_flush:
+ objects_to_flush = [ galaxy_session ]
+ # FIXME: If prev_session is a proper relation this would not
+ # be needed.
+ if prev_galaxy_session:
+ objects_to_flush.append( prev_galaxy_session )
+ sa_session.flush( objects_to_flush )
+ def __create_new_session( self, prev_galaxy_session=None, user_for_new_session=None ):
+ """
+ Create a new GalaxySession for this request, possibly with a connection
+ to a previous session (in `prev_galaxy_session`) and an existing user
+ (in `user_for_new_session`).
+
+ Caller is responsible for flushing the returned session.
+ """
+ session_key = self.security.get_new_session_key()
+ galaxy_session = self.app.model.GalaxySession(
+ session_key=session_key,
+ is_valid=True,
+ remote_host = self.request.remote_host,
+ remote_addr = self.request.remote_addr,
+ referer = self.request.headers.get( 'Referer', None ) )
+ # If we invalidated an existing session for some reason, keep track of it
+ if prev_galaxy_session:
+ galaxy_session.prev_session_id = prev_galaxy_session.id
+ # The new session should be immediately associated with a user
+ if user_for_new_session:
+ galaxy_session.user = user_for_new_session
+ return galaxy_session
+ def __get_or_create_remote_user( self, remote_user_email ):
+ """
+ Return the user corresponding to $HTTP_REMOTE_USER, creating the account if necessary
+
+ Caller is responsible for flushing the returned user.
+ """
+ # remote_user middleware ensures HTTP_REMOTE_USER exists
+ user = self.app.model.User.filter_by( email=remote_user_email ).first()
+ if user is None:
+ user = self.app.model.User( email=remote_user_email )
+ user.set_password_cleartext( 'external' )
+ user.external = True
+ self.log_event( "Automatically created account '%s'" % user.email )
+ return user
+ def __update_session_cookie( self ):
+ """
+ Update the 'galaxysession' cookie to match the current session.
+ """
+ self.set_cookie( name='galaxysession',
+ value=self.security.encode_session_key( self.galaxy_session.session_key ) )
+
+ def handle_user_login( self, user ):
+ """
+ Log in a user (possibly a newly created one):
+ - create a new session
+ - associate the new session with the user
+ - if the old session had a history that was not yet associated with a user, carry it over to the new session and associate it with the user
+ """
+ prev_galaxy_session = self.galaxy_session
+ prev_galaxy_session.is_valid = False
+ self.galaxy_session = self.__create_new_session( prev_galaxy_session, user )
+ if prev_galaxy_session.current_history:
+ history = prev_galaxy_session.current_history
+ if history.user is None:
+ self.galaxy_session.add_history( history )
+ self.galaxy_session.current_history = history
+ history.user = user
+ self.sa_session.flush( [ prev_galaxy_session, self.galaxy_session, history ] )
+ else:
+ self.sa_session.flush( [ prev_galaxy_session, self.galaxy_session ] )
+ self.__update_session_cookie()
+ def handle_user_logout( self ):
+ """
+ Log out the current user:
+ - invalidate the current session
+ - create a new session with no user associated
+ """
+ prev_galaxy_session = self.galaxy_session
+ prev_galaxy_session.is_valid = False
+ self.galaxy_session = self.__create_new_session( prev_galaxy_session, None )
+ self.sa_session.flush( [ prev_galaxy_session, self.galaxy_session ] )
+ self.__update_session_cookie()
+
+ def get_galaxy_session( self ):
+ """
+ Return the current galaxy session
+ """
+ return self.galaxy_session
+
def get_history( self, create=False ):
- """Load the current history"""
- if self.__history is NOT_SET:
- self.__history = None
- # See if we have a galaxysession cookie
- secure_id = self.get_cookie( name='galaxysession' )
- if secure_id:
- session_key = self.security.decode_session_key( secure_id )
- try:
- galaxy_session = self.app.model.GalaxySession.filter_by( session_key=session_key ).first()
- if galaxy_session and galaxy_session.is_valid and galaxy_session.current_history_id:
- history = self.app.model.History.get( galaxy_session.current_history_id )
- if history and not history.deleted:
- self.__history = history
- except Exception, e:
- # This should only occur in development if the cookie is not synced with the db
- pass
- else:
- # See if we have a deprecated universe cookie
- # TODO: this should be eliminated some time after October 1, 2008
- # We'll keep it until then because the old universe cookies are valid for 90 days
- history_id = self.get_cookie( name='universe' )
- if history_id:
- history = self.app.model.History.get( int( history_id ) )
- if history and not history.deleted:
- self.__history = history
- # Expire the universe cookie since it is deprecated
- self.set_cookie( name='universe', value=id, age=0 )
- if self.__history is None:
- return self.new_history()
- if create is True and self.__history is None:
- return self.new_history()
- return self.__history
+ """
+ Load the current history.
+
+ NOTE: It looks like the `create` flag has been ignored for a long time,
+ so this currently creates a new history whenever the session has
+ none, regardless of the flag. This is wasteful, and we should
+ verify that callers are using the create flag correctly and fix it.
+ """
+ history = self.galaxy_session.current_history
+ if history is None:
+ history = self.new_history()
+ return history
+ def set_history( self, history ):
+ if history and not history.deleted:
+ self.galaxy_session.current_history = history
+ self.sa_session.flush( [ self.galaxy_session ] )
+ history = property( get_history, set_history )
def new_history( self ):
+ """
+ Create a new history and associate it with the current session and
+ its associated user (if set).
+ """
+ # Create new history
history = self.app.model.History()
- # Make sure we have an id
- history.flush()
- # Immediately associate the new history with self
- self.__history = history
- # Make sure we have a valid session to associate with the new history
- if self.galaxy_session_is_valid():
- galaxy_session = self.get_galaxy_session()
- else:
- galaxy_session = self.new_galaxy_session()
- # We are associating the last used genome_build with histories, so we will always
- # initialize a new history with the first dbkey in util.dbnames which is currently
- # ? unspecified (?)
+ # Associate with session
+ history.add_galaxy_session( self.galaxy_session )
+ # Make it the session's current history
+ self.galaxy_session.current_history = history
+ # Associate with user
+ if self.galaxy_session.user:
+ history.user = self.galaxy_session.user
+ # Track genome_build with history
history.genome_build = util.dbnames.default_value
- if self.user:
- history.user_id = self.user.id
- galaxy_session.user_id = self.user.id
- try:
- # See if we have already associated the history with the session
- association = self.app.model.GalaxySessionToHistoryAssociation.filter_by( session_id=galaxy_session.id, history_id=history.id ).first()
- except:
- association = None
- history.add_galaxy_session( galaxy_session, association=association )
- history.flush()
- galaxy_session.current_history_id = history.id
- galaxy_session.flush()
- self.__history = history
- return self.__history
- def set_history( self, history ):
- if history and not history.deleted and self.galaxy_session_is_valid():
- galaxy_session = self.get_galaxy_session()
- galaxy_session.current_history_id = history.id
- galaxy_session.flush()
- self.__history = history
- history = property( get_history, set_history )
+ # Save
+ self.sa_session.flush( [ self.galaxy_session, history ] )
+ return history
+
def get_user( self ):
"""Return the current user if logged in or None."""
- if self.__user is NOT_SET:
- self.__user = None
- user = self.get_cookie_user()
- if self.app.config.use_remote_user:
- remote_user = self.get_remote_user()
- if user is not None and user.email != remote_user.email:
- # The user has a cookie for a different user than the one
- # they've authed as.
- log.warning( "User logged in as '%s' externally, but has a cookie as '%s',"
- % ( remote_user.email, user.email ) + " invalidating session" )
- self.logout_galaxy_session()
- if user is None or ( user is not None and user.email != remote_user.email ):
- # The user has no cookie, was not logged in, or the above.
- self.set_user( remote_user )
- self.make_associations()
- self.__user = remote_user
- else:
- if user is not None and user.external:
- # use_remote_user is off, but the user still has a cookie
- # from when it was on. Force the user to get a new
- # unauthenticated session.
- log.warning( "User '%s' is an external user with an existing " % user.email
- + "session, invalidating session since external auth is disabled" )
- self.logout_galaxy_session()
- else:
- self.__user = user
- return self.__user
- def get_remote_user( self ):
- """Return the user in $HTTP_REMOTE_USER and create if necessary"""
- # remote_user middleware ensures HTTP_REMOTE_USER exists
- try:
- user = self.app.model.User.filter_by( email=self.environ[ 'HTTP_REMOTE_USER' ] ).first()
- except:
- user = self.app.model.User( email=self.environ[ 'HTTP_REMOTE_USER' ] )
- user.set_password_cleartext( 'external' )
- user.external = True
- user.flush()
- self.log_event( "Automatically created account '%s'" % user.email )
- return user
- def get_cookie_user( self ):
- """Return the user in the galaxysession cookie"""
- __user = None
- # See if we have a galaxysession cookie
- secure_id = self.get_cookie( name='galaxysession' )
- if secure_id:
- session_key = self.security.decode_session_key( secure_id )
- try:
- galaxy_session = self.app.model.GalaxySession.filter_by( session_key=session_key ).first()
- if galaxy_session and galaxy_session.is_valid and galaxy_session.user_id:
- user = self.app.model.User.get( galaxy_session.user_id )
- if user:
- __user = user
- except:
- # This should only occur in development if the cookie is not synced with the db
- pass
- else:
- # See if we have a deprecated universe_user cookie
- # TODO: this should be eliminated some time after October 1, 2008
- # We'll keep it until then because the old universe cookies are valid for 90 days
- user_id = self.get_cookie( name='universe_user' )
- if user_id:
- user = self.app.model.User.get( int( user_id ) )
- if user:
- __user = user
- # Expire the universe_user cookie since it is deprecated
- self.set_cookie( name='universe_user', value='', age=0 )
- return __user
+ return self.galaxy_session.user
def set_user( self, user ):
- """Set the current user if logged in."""
- if user is not None and self.galaxy_session_is_valid():
- galaxy_session = self.get_galaxy_session()
- if galaxy_session.user_id != user.id:
- galaxy_session.user_id = user.id
- galaxy_session.flush()
- self.__user = user
+ """Set the current user."""
+ self.galaxy_session.user = user
+ self.sa_session.flush( [ self.galaxy_session ] )
user = property( get_user, set_user )
- def get_galaxy_session( self, create=False ):
- """Return the current user's GalaxySession"""
- if self.__galaxy_session is NOT_SET:
- self.__galaxy_session = None
- # See if we have a galaxysession cookie
- secure_id = self.get_cookie( name='galaxysession' )
- if secure_id:
- # Decode the cookie value to get the session_key
- session_key = self.security.decode_session_key( secure_id )
- try:
- # Retrive the galaxy_session id via the unique session_key
- galaxy_session = self.app.model.GalaxySession.filter_by( session_key=session_key ).first()
- if galaxy_session and galaxy_session.is_valid:
- self.__galaxy_session = galaxy_session
- except:
- # This should only occur in development if the cookie is not synced with the db
- pass
- else:
- # See if we have a deprecated universe_session cookie
- # TODO: this should be eliminated some time after October 1, 2008
- # We'll keep it until then because the old universe cookies are valid for 90 days
- session_id = self.get_cookie( name='universe_session' )
- if session_id:
- galaxy_session = self.app.model.GalaxySession.get( int( session_id ) )
- # NOTE: We can't test for is_valid here since the old session records did not include this flag
- if galaxy_session:
- # Set the new galaxysession cookie value, old session records did not have a session_key or is_valid flag
- session_key = self.security.get_new_session_key()
- galaxy_session.session_key = session_key
- galaxy_session.is_valid = True
- galaxy_session.flush()
- secure_id = self.security.encode_session_key( session_key )
- self.set_cookie( name='galaxysession', value=secure_id )
- # Expire the universe_user cookie since it is deprecated
- self.set_cookie( name='universe_session', value='', age=0 )
- self.__galaxy_session = galaxy_session
- if create is True and self.__galaxy_session is None:
- return self.new_galaxy_session()
- return self.__galaxy_session
- def new_galaxy_session( self, prev_session_id=None ):
- """Create a new secure galaxy_session"""
- session_key = self.security.get_new_session_key()
- galaxy_session = self.app.model.GalaxySession( session_key=session_key, is_valid=True, prev_session_id=prev_session_id )
- # Make sure we have an id
- galaxy_session.flush()
- # Immediately associate the new session with self
- self.__galaxy_session = galaxy_session
- if prev_session_id is not None:
- # User logged out, so we need to create a new history for this session
- self.history = self.new_history()
- galaxy_session.current_history_id = self.history.id
- elif self.user is not None:
- galaxy_session.user_id = self.user.id
- # Set this session's current_history_id to the user's last updated history
- h = self.app.model.History
- ht = h.table
- where = ( ht.c.user_id==self.user.id ) & ( ht.c.deleted=='f' )
- history = h.query().filter( where ).order_by( desc( ht.c.update_time ) ).first()
- if history:
- self.history = history
- galaxy_session.current_history_id = self.history.id
- elif self.history:
- galaxy_session.current_history_id = self.history.id
- galaxy_session.remote_host = self.request.remote_host
- galaxy_session.remote_addr = self.request.remote_addr
- try:
- galaxy_session.referer = self.request.headers['Referer']
- except:
- galaxy_session.referer = None
- if self.history is not None:
- # See if we have already associated the session with the history
- try:
- association = self.app.model.GalaxySessionToHistoryAssociation.filter_by( session_id=galaxy_session.id, history_id=self.history.id ).first()
- except:
- association = None
- galaxy_session.add_history( self.history, association=association )
- galaxy_session.flush()
- # Set the cookie value to the encrypted session_key
- self.set_cookie( name='galaxysession', value=self.security.encode_session_key( session_key ) )
- self.__galaxy_session = galaxy_session
- return self.__galaxy_session
- def set_galaxy_session( self, galaxy_session ):
- """Set the current galaxy_session"""
- self.__galaxy_session = galaxy_session
- galaxy_session = property( get_galaxy_session, set_galaxy_session )
- def galaxy_session_is_valid( self ):
- try:
- return self.galaxy_session.is_valid
- except:
- return False
- def ensure_valid_galaxy_session( self ):
- """Make sure we have a valid galaxy session, create a new one if necessary."""
- if not self.galaxy_session_is_valid():
- galaxy_session = self.new_galaxy_session()
- def logout_galaxy_session( self ):
- """
- Logout the current user by setting user to None and galaxy_session.is_valid to False
- in the db. A new galaxy_session is automatically created with prev_session_id is set
- to save a reference to the current one as a way of chaining them together
- """
- if self.galaxy_session_is_valid():
- galaxy_session = self.get_galaxy_session()
- old_session_id = galaxy_session.id
- galaxy_session.is_valid = False
- galaxy_session.flush()
- self.set_user( None )
- return self.new_galaxy_session( prev_session_id=old_session_id )
- else:
- error( "Attempted to logout an invalid galaxy_session" )
- def make_associations( self ):
- history = self.get_history()
- user = self.get_user()
- if self.galaxy_session_is_valid():
- galaxy_session = self.get_galaxy_session()
- if galaxy_session.user_id is None and user is not None:
- galaxy_session.user_id = user.id
- if history is not None:
- galaxy_session.current_history_id = history.id
- galaxy_session.flush()
- self.__galaxy_session = galaxy_session
- if history is not None and user is not None:
- history.user_id = user.id
- history.flush()
- self.__history = history
def get_toolbox(self):
"""Returns the application toolbox"""
diff -r 13cbdd1bbd19 -r 7ae75e6d9d6a lib/galaxy/web/security/__init__.py
--- a/lib/galaxy/web/security/__init__.py Mon Nov 03 15:21:03 2008 -0500
+++ b/lib/galaxy/web/security/__init__.py Mon Nov 03 15:31:38 2008 -0500
@@ -1,9 +1,35 @@
+import os, os.path, logging
+
import pkg_resources
pkg_resources.require( "pycrypto" )
from Crypto.Cipher import Blowfish
from Crypto.Util.randpool import RandomPool
from Crypto.Util import number
+
+log = logging.getLogger( __name__ )
+
+if os.path.exists( "/dev/urandom" ):
+ log.debug("###using /dev/urandom....")
+ # We have urandom, use it as the source of random data
+ random_fd = os.open( "/dev/urandom", os.O_RDONLY )
+ def get_random_bytes( nbytes ):
+ value = os.read( random_fd, nbytes )
+ # Normally we should get as much as we need
+ if len( value ) == nbytes:
+ return value
+ # If we don't, keep reading (this is slow and should never happen)
+ while len( value ) < nbytes:
+ value += os.read( random_fd, nbytes - len( value ) )
+ return value
+else:
+ def get_random_bytes( nbytes ):
+ nbits = nbytes * 8
+ random_pool = RandomPool( 1064 )
+ while random_pool.entropy < nbits:
+ random_pool.add_event()
+ random_pool.stir()
+ return( str( number.getRandomNumber( nbits, random_pool.get_bytes ) ) )
class SecurityHelper( object ):
# TODO: checking if histories/datasets are owned by the current user) will be moved here.
@@ -30,11 +56,5 @@
return self.id_cipher.decrypt( session_key.decode( 'hex' ) ).lstrip( "!" )
def get_new_session_key( self ):
# Generate a unique, high entropy 128 bit random number
- random_pool = RandomPool( 1064 )
- while random_pool.entropy < 128:
- random_pool.add_event()
- random_pool.stir()
- rn = number.getRandomNumber( 128, random_pool.get_bytes )
- # session_key must be a string
- return str( rn )
+ return get_random_bytes( 16 )
\ No newline at end of file
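The get_new_session_key() change above is the "faster_random" half of the patch: instead of spinning PyCrypto's RandomPool to gather entropy on every call, it reads 16 bytes (the same 128 bits of entropy the old code generated) straight from /dev/urandom. A standalone sketch of the same idea using only the standard library (the function name is illustrative):

import os

_urandom_fd = None
if os.path.exists("/dev/urandom"):
    _urandom_fd = os.open("/dev/urandom", os.O_RDONLY)

def random_bytes(nbytes):
    # Read exactly nbytes from the kernel RNG; loop on the (rare) short read.
    if _urandom_fd is None:
        raise RuntimeError("no /dev/urandom; fall back to a userspace entropy pool")
    value = os.read(_urandom_fd, nbytes)
    while len(value) < nbytes:
        value += os.read(_urandom_fd, nbytes - len(value))
    return value

session_key = random_bytes(16)   # 16 bytes == 128 bits for the session key
print len(session_key)           # 16

Judging from decode_session_key() above, the raw key is then encrypted and hex-encoded by encode_session_key() before it reaches the cookie, so the change only affects how the entropy is obtained.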
diff -r 13cbdd1bbd19 -r 7ae75e6d9d6a templates/root/history.mako
--- a/templates/root/history.mako Mon Nov 03 15:21:03 2008 -0500
+++ b/templates/root/history.mako Mon Nov 03 15:31:38 2008 -0500
@@ -231,6 +231,7 @@
<body class="historyPage">
+
<div id="top-links" class="historyLinks">
<a href="${h.url_for('history', show_deleted=show_deleted)}">refresh</a>
%if show_deleted:
details: http://www.bx.psu.edu/hg/galaxy/rev/36a297157114
changeset: 1596:36a297157114
user: Dan Blankenberg <dan@bx.psu.edu>
date: Mon Nov 03 14:48:25 2008 -0500
description:
A fix for the metadata reverting issue seen with the PBS job runner.
1 file(s) affected in this change:
lib/galaxy/jobs/__init__.py
diffs (18 lines):
diff -r 56bc2f789894 -r 36a297157114 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Sat Nov 01 15:30:13 2008 -0400
+++ b/lib/galaxy/jobs/__init__.py Mon Nov 03 14:48:25 2008 -0500
@@ -274,6 +274,7 @@
Prepare the job to run by creating the working directory and the
config files.
"""
+ mapping.context.current.clear() #this prevents the metadata reverting that has been seen in conjunction with the PBS job runner
# Create the working directory
self.working_directory = \
os.path.join( self.app.config.job_working_directory, str( self.job_id ) )
@@ -643,4 +644,4 @@
def put( self, *args ):
return
def shutdown( self ):
- return
\ No newline at end of file
+ return
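The one-line fix above clears the thread-local SQLAlchemy session before preparing the job. A toy illustration (plain dicts, not SQLAlchemy; all names made up) of the failure mode it addresses: a long-lived identity map keeps handing back cached copies of objects, so values written by another process are hidden, and later flushes can push the stale values back, "reverting" the metadata, unless the cache is cleared first.

database = {"dataset_1.metadata": "set after the job finished"}

class ToyIdentityMap(object):
    def __init__(self):
        self.cache = {}
    def get(self, key):
        # Return the cached copy if present, otherwise load from the database.
        if key not in self.cache:
            self.cache[key] = database[key]
        return self.cache[key]
    def clear(self):
        self.cache = {}

session = ToyIdentityMap()
session.cache["dataset_1.metadata"] = "stale value cached before the job ran"
print session.get("dataset_1.metadata")   # stale value cached before the job ran
session.clear()                            # conceptually, mapping.context.current.clear()
print session.get("dataset_1.metadata")   # set after the job finished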
details: http://www.bx.psu.edu/hg/galaxy/rev/ca640ce1abcf
changeset: 1598:ca640ce1abcf
user: Nate Coraor <nate@bx.psu.edu>
date: Mon Nov 03 15:15:22 2008 -0500
description:
Fix a bug: errored jobs weren't actually being deleted.
1 file(s) affected in this change:
lib/galaxy/web/controllers/root.py
diffs (19 lines):
diff -r 21f98b638868 -r ca640ce1abcf lib/galaxy/web/controllers/root.py
--- a/lib/galaxy/web/controllers/root.py Mon Nov 03 15:07:22 2008 -0500
+++ b/lib/galaxy/web/controllers/root.py Mon Nov 03 15:15:22 2008 -0500
@@ -281,11 +281,10 @@
if data.parent_id is None and len( data.creating_job_associations ) > 0:
# Mark associated job for deletion
job = data.creating_job_associations[0].job
- if job.state not in [ model.Job.states.QUEUED, model.Job.states.RUNNING, model.Job.states.NEW ]:
- return
- # Are *all* of the job's other output datasets deleted?
- if job.check_if_output_datasets_deleted():
- job.mark_deleted()
+ if job.state in [ model.Job.states.QUEUED, model.Job.states.RUNNING, model.Job.states.NEW ]:
+ # Are *all* of the job's other output datasets deleted?
+ if job.check_if_output_datasets_deleted():
+ job.mark_deleted()
self.app.model.flush()
@web.expose
details: http://www.bx.psu.edu/hg/galaxy/rev/21f98b638868
changeset: 1597:21f98b638868
user: Dan Blankenberg <dan@bx.psu.edu>
date: Mon Nov 03 15:07:22 2008 -0500
description:
Backed out changeset 36a297157114
Temporarily rolling this change back...
1 file(s) affected in this change:
lib/galaxy/jobs/__init__.py
diffs (18 lines):
diff -r 36a297157114 -r 21f98b638868 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Mon Nov 03 14:48:25 2008 -0500
+++ b/lib/galaxy/jobs/__init__.py Mon Nov 03 15:07:22 2008 -0500
@@ -274,7 +274,6 @@
Prepare the job to run by creating the working directory and the
config files.
"""
- mapping.context.current.clear() #this prevents the metadata reverting that has been seen in conjunction with the PBS job runner
# Create the working directory
self.working_directory = \
os.path.join( self.app.config.job_working_directory, str( self.job_id ) )
@@ -644,4 +643,4 @@
def put( self, *args ):
return
def shutdown( self ):
- return
+ return
\ No newline at end of file
details: http://www.bx.psu.edu/hg/galaxy/rev/c4644668afff
changeset: 1591:c4644668afff
user: Nate Coraor <nate@bx.psu.edu>
date: Fri Oct 31 10:22:46 2008 -0400
description:
James' changes allowing the server to run w/o a job runner, and
nginx-specific performance improvements.
11 file(s) affected in this change:
lib/galaxy/app.py
lib/galaxy/config.py
lib/galaxy/jobs/__init__.py
lib/galaxy/model/__init__.py
lib/galaxy/tools/__init__.py
lib/galaxy/tools/actions/upload.py
lib/galaxy/tools/parameters/__init__.py
lib/galaxy/tools/parameters/basic.py
lib/galaxy/web/controllers/root.py
lib/galaxy/web/framework/base.py
universe_wsgi.ini.sample
diffs (576 lines):
diff -r 7e94f40ee9e8 -r c4644668afff lib/galaxy/app.py
--- a/lib/galaxy/app.py Thu Oct 30 17:34:46 2008 -0400
+++ b/lib/galaxy/app.py Fri Oct 31 10:22:46 2008 -0400
@@ -31,9 +31,11 @@
#Load datatype converters
self.datatypes_registry.load_datatype_converters( self.toolbox )
# Start the job queue
- job_dispatcher = jobs.DefaultJobDispatcher( self )
- self.job_queue = jobs.JobQueue( self, job_dispatcher )
- self.job_stop_queue = jobs.JobStopQueue( self, job_dispatcher )
+ self.job_manager = jobs.JobManager( self )
+ # FIXME: These are exposed directly for backward compatibility
+ self.job_queue = self.job_manager.job_queue
+ self.job_stop_queue = self.job_manager.job_stop_queue
+ # Heartbeat and memdump for thread / heap profiling
self.heartbeat = None
self.memdump = None
# Start the heartbeat process if configured and available
@@ -48,7 +50,6 @@
if memdump.Memdump:
self.memdump = memdump.Memdump()
def shutdown( self ):
- self.job_stop_queue.shutdown()
- self.job_queue.shutdown()
+ self.job_manager.shutdown()
if self.heartbeat:
self.heartbeat.shutdown()
diff -r 7e94f40ee9e8 -r c4644668afff lib/galaxy/config.py
--- a/lib/galaxy/config.py Thu Oct 30 17:34:46 2008 -0400
+++ b/lib/galaxy/config.py Fri Oct 31 10:22:46 2008 -0400
@@ -64,6 +64,11 @@
self.bugs_email = kwargs.get( 'bugs_email', None )
self.blog_url = kwargs.get( 'blog_url', None )
self.screencasts_url = kwargs.get( 'screencasts_url', None )
+ # Configuration options for taking advantage of nginx features
+ self.nginx_x_accel_redirect_base = kwargs.get( 'nginx_x_accel_redirect_base', False )
+ self.nginx_upload_location = kwargs.get( 'nginx_upload_store', False )
+ if self.nginx_upload_location:
+ self.nginx_upload_location = os.path.abspath( self.nginx_upload_location )
# Parse global_conf and save the parser
global_conf = kwargs.get( 'global_conf', None )
global_conf_parser = ConfigParser.ConfigParser()
@@ -78,6 +83,11 @@
self.datatypes_config = kwargs.get( 'datatypes_config_file', 'datatypes_conf.xml' )
def get( self, key, default ):
return self.config_dict.get( key, default )
+ def get_bool( self, key, default ):
+ if key in self.config_dict:
+ return string_as_bool( self.config_dict[ key ] )
+ else:
+ return default
def check( self ):
# Check that required directories exist
for path in self.root, self.file_path, self.tool_path, self.tool_data_path, self.template_path, self.job_working_directory:
diff -r 7e94f40ee9e8 -r c4644668afff lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Thu Oct 30 17:34:46 2008 -0400
+++ b/lib/galaxy/jobs/__init__.py Fri Oct 31 10:22:46 2008 -0400
@@ -16,6 +16,27 @@
# States for running a job. These are NOT the same as data states
JOB_WAIT, JOB_ERROR, JOB_INPUT_ERROR, JOB_INPUT_DELETED, JOB_OK, JOB_READY, JOB_DELETED = 'wait', 'error', 'input_error', 'input_deleted', 'ok', 'ready', 'deleted'
+
+class JobManager( object ):
+ """
+ Highest level interface to job management.
+
+ TODO: Currently the app accesses "job_queue" and "job_stop_queue" directly.
+ This should be decoupled.
+ """
+ def __init__( self, app ):
+ self.app = app
+ if self.app.config.get_bool( "enable_job_running", True ):
+ # The dispatcher launches the underlying job runners
+ self.dispatcher = DefaultJobDispatcher( app )
+ # Queues for starting and stopping jobs
+ self.job_queue = JobQueue( app, self.dispatcher )
+ self.job_stop_queue = JobStopQueue( app, self.dispatcher )
+ else:
+ self.job_queue = self.job_stop_queue = NoopQueue()
+ def shutdown( self ):
+ self.job_queue.shutdown()
+ self.job_stop_queue.shutdown()
class Sleeper( object ):
"""
@@ -594,49 +615,11 @@
pass
for job in jobs:
- # jobs in a non queued/running/new state do not need to be stopped
- if job.state not in [ model.Job.states.QUEUED, model.Job.states.RUNNING, model.Job.states.NEW ]:
- return
- # job has multiple datasets that aren't parent/child and not all of them are deleted.
- if not self.check_if_output_datasets_deleted( job.id ):
- return
- self.mark_deleted( job.id )
# job is in JobQueue or FooJobRunner, will be dequeued due to state change above
if job.job_runner_name is None:
return
# tell the dispatcher to stop the job
self.dispatcher.stop( job )
-
- def check_if_output_datasets_deleted( self, job_id ):
- job = model.Job.get( job_id )
- for dataset_assoc in job.output_datasets:
- dataset = dataset_assoc.dataset
- dataset.refresh()
- #only the originator of the job can delete a dataset to cause
- #cancellation of the job, no need to loop through history_associations
- if not dataset.deleted:
- return False
- return True
-
- def mark_deleted( self, job_id ):
- job = model.Job.get( job_id )
- job.refresh()
- job.state = job.states.DELETED
- job.info = "Job output deleted by user before job completed."
- job.flush()
- for dataset_assoc in job.output_datasets:
- dataset = dataset_assoc.dataset
- dataset.refresh()
- dataset.deleted = True
- dataset.state = dataset.states.DISCARDED
- dataset.dataset.flush()
- for dataset in dataset.dataset.history_associations:
- #propagate info across shared datasets
- dataset.deleted = True
- dataset.blurb = 'deleted'
- dataset.peek = 'Job deleted'
- dataset.info = 'Job output deleted by user before job completed'
- dataset.flush()
def put( self, job ):
self.queue.put( job )
@@ -652,3 +635,12 @@
self.queue.put( self.STOP_SIGNAL )
self.sleeper.wake()
log.info( "job stopper stopped" )
+
+class NoopQueue( object ):
+ """
+ Implements the JobQueue / JobStopQueue interface but does nothing
+ """
+ def put( self, *args ):
+ return
+ def shutdown( self ):
+ return
\ No newline at end of file
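The JobManager / NoopQueue pair above is what lets a web-only Galaxy process run without a job runner: controllers keep calling app.job_queue.put() and app.job_stop_queue.put(), but the calls do nothing when enable_job_running is off. A condensed sketch of the pattern (the constructor arguments here are illustrative, not Galaxy's, which takes the app object):

class NoopQueue(object):
    """Implements the queue interface but silently drops everything."""
    def put(self, *args):
        return
    def shutdown(self):
        return

class JobManager(object):
    def __init__(self, enable_job_running, real_queue_factory):
        if enable_job_running:
            self.job_queue = real_queue_factory()
            self.job_stop_queue = real_queue_factory()
        else:
            self.job_queue = self.job_stop_queue = NoopQueue()
    def shutdown(self):
        self.job_queue.shutdown()
        self.job_stop_queue.shutdown()

# A web-only process can queue and shut down safely even though nothing runs.
manager = JobManager(enable_job_running=False, real_queue_factory=None)
manager.job_queue.put("some job")
manager.shutdown()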
diff -r 7e94f40ee9e8 -r c4644668afff lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py Thu Oct 30 17:34:46 2008 -0400
+++ b/lib/galaxy/model/__init__.py Fri Oct 31 10:22:46 2008 -0400
@@ -85,7 +85,35 @@
tool = app.toolbox.tools_by_id[self.tool_id]
param_dict = tool.params_from_strings( param_dict, app )
return param_dict
-
+ def check_if_output_datasets_deleted( self ):
+ """
+ Return true if all of the output datasets associated with this job are
+ in the deleted state
+ """
+ for dataset_assoc in self.output_datasets:
+ dataset = dataset_assoc.dataset
+ # only the originator of the job can delete a dataset to cause
+ # cancellation of the job, no need to loop through history_associations
+ if not dataset.deleted:
+ return False
+ return True
+ def mark_deleted( self ):
+ """
+ Mark this job as deleted, and mark any output datasets as discarded.
+ """
+ self.state = Job.states.DELETED
+ self.info = "Job output deleted by user before job completed."
+ for dataset_assoc in self.output_datasets:
+ dataset = dataset_assoc.dataset
+ dataset.deleted = True
+ dataset.state = dataset.states.DISCARDED
+ for dataset in dataset.dataset.history_associations:
+ # propagate info across shared datasets
+ dataset.deleted = True
+ dataset.blurb = 'deleted'
+ dataset.peek = 'Job deleted'
+ dataset.info = 'Job output deleted by user before job completed'
+
class JobParameter( object ):
def __init__( self, name, value ):
self.name = name
diff -r 7e94f40ee9e8 -r c4644668afff lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py Thu Oct 30 17:34:46 2008 -0400
+++ b/lib/galaxy/tools/__init__.py Fri Oct 31 10:22:46 2008 -0400
@@ -797,7 +797,7 @@
# Deal with the 'test' element and see if it's value changed
test_param_key = group_prefix + input.test_param.name
test_param_error = None
- test_incoming = incoming.get( test_param_key, None )
+ test_incoming = get_incoming_value( incoming, test_param_key, None )
if test_param_key not in incoming \
and "__force_update__" + test_param_key not in incoming \
and update_only:
@@ -878,7 +878,7 @@
except:
pass
if not incoming_value_generated:
- incoming_value = incoming.get( key, None )
+ incoming_value = get_incoming_value( incoming, key, None )
value, error = check_param( trans, input, incoming_value, context )
if input.dependent_params and state[ input.name ] != value:
# We need to keep track of changed dependency parametrs ( parameters
@@ -1362,3 +1362,12 @@
else:
return val
+def get_incoming_value( incoming, key, default ):
+ if "__" + key + "__is_composite" in incoming:
+ composite_keys = incoming["__" + key + "__keys"].split()
+ value = dict()
+ for composite_key in composite_keys:
+ value[composite_key] = incoming[key + "_" + composite_key]
+ return value
+ else:
+ return incoming.get( key, default )
\ No newline at end of file
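get_incoming_value() above reassembles a form field that an upload proxy (such as nginx_upload_module) has split into several "<key>_<part>" fields, and otherwise behaves like incoming.get(). A standalone copy of the helper with a request dict made up for illustration:

def get_incoming_value(incoming, key, default):
    if "__" + key + "__is_composite" in incoming:
        composite_keys = incoming["__" + key + "__keys"].split()
        value = dict()
        for composite_key in composite_keys:
            value[composite_key] = incoming[key + "_" + composite_key]
        return value
    else:
        return incoming.get(key, default)

# Composite case: the proxy posted name/path parts instead of the file itself.
incoming = {
    "__file_data__is_composite": "true",
    "__file_data__keys": "name path",
    "file_data_name": "reads.fastq",
    "file_data_path": "/tmp/upload_store/0000000001",
}
print get_incoming_value(incoming, "file_data", None)   # dict with 'name' and 'path'

# Plain case: behaves exactly like incoming.get().
print get_incoming_value({"dbkey": "hg18"}, "dbkey", None)   # hg18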
diff -r 7e94f40ee9e8 -r c4644668afff lib/galaxy/tools/actions/upload.py
--- a/lib/galaxy/tools/actions/upload.py Thu Oct 30 17:34:46 2008 -0400
+++ b/lib/galaxy/tools/actions/upload.py Fri Oct 31 10:22:46 2008 -0400
@@ -26,12 +26,22 @@
temp_name = ""
data_list = []
- if 'filename' in dir( data_file ):
+ if 'local_filename' in dir( data_file ):
+ # Use the existing file
try:
file_name = data_file.filename
file_name = file_name.split( '\\' )[-1]
file_name = file_name.split( '/' )[-1]
- data_list.append( self.add_file( trans, data_file.file, file_name, file_type, dbkey, space_to_tab=space_to_tab ) )
+ data_list.append( self.add_file( trans, data_file.local_filename, file_name, file_type, dbkey, space_to_tab=space_to_tab ) )
+ except Exception, e:
+ return self.upload_empty( trans, "Error:", str( e ) )
+ elif 'filename' in dir( data_file ):
+ try:
+ file_name = data_file.filename
+ file_name = file_name.split( '\\' )[-1]
+ file_name = file_name.split( '/' )[-1]
+ temp_name = sniff.stream_to_file( data_file.file )
+ data_list.append( self.add_file( trans, temp_name, file_name, file_type, dbkey, space_to_tab=space_to_tab ) )
except Exception, e:
return self.upload_empty( trans, "Error:", str( e ) )
if url_paste not in [ None, "" ]:
@@ -41,7 +51,8 @@
line = line.rstrip( '\r\n' )
if line:
try:
- data_list.append( self.add_file( trans, urllib.urlopen( line ), line, file_type, dbkey, info="uploaded url", space_to_tab=space_to_tab ) )
+ temp_name = sniff.stream_to_file( urllib.urlopen( line ) )
+ data_list.append( self.add_file( trans, temp_name, line, file_type, dbkey, info="uploaded url", space_to_tab=space_to_tab ) )
except Exception, e:
return self.upload_empty( trans, "Error:", str( e ) )
else:
@@ -53,7 +64,8 @@
break
if is_valid:
try:
- data_list.append( self.add_file( trans, StringIO.StringIO( url_paste ), 'Pasted Entry', file_type, dbkey, info="pasted entry", space_to_tab=space_to_tab ) )
+ temp_name = sniff.stream_to_file( StringIO.StringIO( url_paste ) )
+ data_list.append( self.add_file( trans, temp_name, 'Pasted Entry', file_type, dbkey, info="pasted entry", space_to_tab=space_to_tab ) )
except Exception, e:
return self.upload_empty( trans, "Error:", str( e ) )
else:
@@ -77,9 +89,8 @@
trans.app.model.flush()
return dict( output=data )
- def add_file( self, trans, file_obj, file_name, file_type, dbkey, info=None, space_to_tab=False ):
+ def add_file( self, trans, temp_name, file_name, file_type, dbkey, info=None, space_to_tab=False ):
data_type = None
- temp_name = sniff.stream_to_file( file_obj )
# See if we have an empty file
if not os.path.getsize( temp_name ) > 0:
diff -r 7e94f40ee9e8 -r c4644668afff lib/galaxy/tools/parameters/__init__.py
--- a/lib/galaxy/tools/parameters/__init__.py Thu Oct 30 17:34:46 2008 -0400
+++ b/lib/galaxy/tools/parameters/__init__.py Fri Oct 31 10:22:46 2008 -0400
@@ -16,9 +16,7 @@
value = incoming_value
error = None
try:
- if param.name == 'file_data':
- pass
- elif value is not None or isinstance(param, DataToolParameter):
+ if value is not None or isinstance(param, DataToolParameter):
# Convert value from HTML representation
value = param.from_html( value, trans, param_values )
# Allow the value to be converted if neccesary
diff -r 7e94f40ee9e8 -r c4644668afff lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py Thu Oct 30 17:34:46 2008 -0400
+++ b/lib/galaxy/tools/parameters/basic.py Fri Oct 31 10:22:46 2008 -0400
@@ -2,12 +2,13 @@
Basic tool parameters.
"""
-import logging, string, sys, os
+import logging, string, sys, os, os.path
from elementtree.ElementTree import XML, Element
from galaxy import config, datatypes, util
from galaxy.web import form_builder
+from galaxy.util.bunch import Bunch
import validation, dynamic_options
@@ -294,6 +295,23 @@
self.name = elem.get( 'name' )
def get_html_field( self, trans=None, value=None, other_values={} ):
return form_builder.FileField( self.name )
+ def from_html( self, value, trans=None, other_values={} ):
+ # Middleware or proxies may encode files in special ways (TODO: this
+ # should be pluggable)
+ if type( value ) == dict:
+ upload_location = self.tool.app.config.nginx_upload_location
+ assert upload_location, \
+ "Request appears to have been processed by nginx_upload_module \
+ but Galaxy is not configured to recognize it"
+ # Check that the file is in the right location
+ local_filename = os.path.abspath( value['path'] )
+ assert local_filename.startswith( upload_location ), \
+ "Filename provided by nginx is not in correct directory"
+ value = Bunch(
+ filename = value["name"],
+ local_filename = local_filename
+ )
+ return value
def get_required_enctype( self ):
"""
File upload elements require the multipart/form-data encoding
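When nginx_upload_module has already written the upload to disk, from_html() above receives a dict rather than a cgi file object, and the supplied path must be confirmed to live under the configured upload store before it is trusted. A hedged sketch of that check with made-up paths and values (Bunch is re-declared here so the snippet stands alone):

import os.path

class Bunch(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

upload_location = os.path.abspath("/var/galaxy/nginx_upload_store")   # illustrative
value = {"name": "reads.fastq", "path": "/var/galaxy/nginx_upload_store/0000000001"}

local_filename = os.path.abspath(value["path"])
# Refuse paths outside the upload store; otherwise a crafted request could make
# Galaxy treat any server-side file as an "upload".
assert local_filename.startswith(upload_location), \
    "Filename provided by nginx is not in correct directory"
value = Bunch(filename=value["name"], local_filename=local_filename)
print value.local_filename

The resulting object carries local_filename, which is why the upload action above now prefers 'local_filename' over 'filename' and skips the extra stream_to_file() copy.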
diff -r 7e94f40ee9e8 -r c4644668afff lib/galaxy/web/controllers/root.py
--- a/lib/galaxy/web/controllers/root.py Thu Oct 30 17:34:46 2008 -0400
+++ b/lib/galaxy/web/controllers/root.py Fri Oct 31 10:22:46 2008 -0400
@@ -266,6 +266,28 @@
return trans.fill_template( "/dataset/edit_attributes.mako", data=data,
datatypes=ldatatypes, err=None )
+ def __delete_dataset( self, trans, id ):
+ data = self.app.model.HistoryDatasetAssociation.get( id )
+ if data:
+ # Walk up parent datasets to find the containing history
+ topmost_parent = data
+ while topmost_parent.parent:
+ topmost_parent = topmost_parent.parent
+ assert topmost_parent in trans.history.datasets, "Data does not belong to current history"
+ # Mark deleted and cleanup
+ data.mark_deleted()
+ data.clear_associated_files()
+ trans.log_event( "Dataset id %s marked as deleted" % str(id) )
+ if data.parent_id is None and len( data.creating_job_associations ) > 0:
+ # Mark associated job for deletion
+ job = data.creating_job_associations[0].job
+ if job.state not in [ model.Job.states.QUEUED, model.Job.states.RUNNING, model.Job.states.NEW ]:
+ return
+ # Are *all* of the job's other output datasets deleted?
+ if job.check_if_output_datasets_deleted():
+ job.mark_deleted()
+ self.app.model.flush()
+
@web.expose
def delete( self, trans, id = None, show_deleted_on_refresh = False, **kwd):
if id:
@@ -276,26 +298,10 @@
history = trans.get_history()
for id in dataset_ids:
try:
- int( id )
+ id = int( id )
except:
continue
- data = self.app.model.HistoryDatasetAssociation.get( id )
- if data:
- # Walk up parent datasets to find the containing history
- topmost_parent = data
- while topmost_parent.parent:
- topmost_parent = topmost_parent.parent
- assert topmost_parent in history.datasets, "Data does not belong to current history"
- # Mark deleted and cleanup
- data.mark_deleted()
- data.clear_associated_files()
- self.app.model.flush()
- trans.log_event( "Dataset id %s marked as deleted" % str(id) )
- if data.parent_id is None:
- try:
- self.app.job_stop_queue.put( data.creating_job_associations[0].job )
- except IndexError:
- pass # upload tool will cause this since it doesn't have a job
+ self.__delete_dataset( trans, id )
return self.history( trans, show_deleted = show_deleted_on_refresh )
@web.expose
@@ -305,24 +311,7 @@
int( id )
except:
return "Dataset id '%s' is invalid" %str( id )
- history = trans.get_history()
- data = self.app.model.HistoryDatasetAssociation.get( id )
- if data:
- # Walk up parent datasets to find the containing history
- topmost_parent = data
- while topmost_parent.parent:
- topmost_parent = topmost_parent.parent
- assert topmost_parent in history.datasets, "Data does not belong to current history"
- # Mark deleted and cleanup
- data.mark_deleted()
- data.clear_associated_files()
- self.app.model.flush()
- trans.log_event( "Dataset id %s marked as deleted async" % str(id) )
- if data.parent_id is None:
- try:
- self.app.job_stop_queue.put( data.creating_job_associations[0].job )
- except IndexError:
- pass # upload tool will cause this since it doesn't have a job
+ self.__delete_dataset( trans, id )
return "OK"
## ---- History management -----------------------------------------------
diff -r 7e94f40ee9e8 -r c4644668afff lib/galaxy/web/framework/base.py
--- a/lib/galaxy/web/framework/base.py Thu Oct 30 17:34:46 2008 -0400
+++ b/lib/galaxy/web/framework/base.py Fri Oct 31 10:22:46 2008 -0400
@@ -5,6 +5,7 @@
import socket
import types
import logging
+import os.path
import sys
from Cookie import SimpleCookie
@@ -132,16 +133,16 @@
if callable( body ):
# Assume the callable is another WSGI application to run
return body( environ, start_response )
+ elif isinstance( body, types.FileType ):
+ # Stream the file back to the browser
+ return send_file( start_response, trans, body )
else:
start_response( trans.response.wsgi_status(),
trans.response.wsgi_headeritems() )
return self.make_body_iterable( trans, body )
def make_body_iterable( self, trans, body ):
- if isinstance( body, types.FileType ):
- # Stream the file back to the browser
- return iterate_file( body )
- elif isinstance( body, ( types.GeneratorType, list, tuple ) ):
+ if isinstance( body, ( types.GeneratorType, list, tuple ) ):
# Recursively stream the iterable
return flatten( body )
elif isinstance( body, basestring ):
@@ -302,6 +303,20 @@
CHUNK_SIZE = 2**16
+def send_file( start_response, trans, body ):
+ # If configured use X-Accel-Redirect header for nginx
+ base = trans.app.config.nginx_x_accel_redirect_base
+ if base:
+ trans.response.headers['X-Accel-Redirect'] = \
+ base + os.path.abspath( body.name )
+ body = [ "" ]
+ # Fall back on sending the file in chunks
+ else:
+ body = iterate_file( body )
+ start_response( trans.response.wsgi_status(),
+ trans.response.wsgi_headeritems() )
+ return body
+
def iterate_file( file ):
"""
Progressively return chunks from `file`.
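send_file() above offloads downloads to nginx when an X-Accel-Redirect base is configured and falls back to Python-side chunked streaming otherwise. A simplified standalone version of that decision (the signature is condensed for illustration; no WSGI plumbing):

import os.path

CHUNK_SIZE = 2 ** 16

def iterate_file(f):
    # Progressively yield chunks so large files never sit in memory at once.
    while True:
        chunk = f.read(CHUNK_SIZE)
        if not chunk:
            break
        yield chunk

def send_file(headers, body_file, x_accel_redirect_base=None):
    if x_accel_redirect_base:
        # Hand the actual transfer off to nginx; the WSGI body stays empty.
        headers["X-Accel-Redirect"] = x_accel_redirect_base + os.path.abspath(body_file.name)
        return [""]
    # No nginx support configured: stream the file from Python in chunks.
    return iterate_file(body_file)

headers = {}
body = send_file(headers, open(__file__), x_accel_redirect_base="/_x_accel_redirect")
print headers["X-Accel-Redirect"]   # /_x_accel_redirect/<absolute path of this script>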
diff -r 7e94f40ee9e8 -r c4644668afff universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample Thu Oct 30 17:34:46 2008 -0400
+++ b/universe_wsgi.ini.sample Fri Oct 31 10:22:46 2008 -0400
@@ -34,7 +34,7 @@
# Database connection
database_file = database/universe.sqlite
# You may use a SQLAlchemy connection string to specify an external database instead
-## database_connection = postgres:///galaxy_test
+## database_connection = postgres:///galaxy
## database_engine_option_echo = true
## database_engine_option_echo_pool = true
## database_engine_option_pool_size = 10
@@ -89,12 +89,15 @@
# Write thread status periodically to 'heartbeat.log' (careful, uses disk space rapidly!)
## use_heartbeat = True
+# Enable the memory debugging interface (careful, negatively impacts server performance)
+## use_memdump = True
+
# Profiling middleware (cProfile based)
## use_profile = True
-# Mail
-smtp_server = coltrane.bx.psu.edu
-error_email_to = galaxy-bugs@bx.psu.edu
+# For use by 'report this error' link on error-state datasets
+#smtp_server = smtp.example.org
+#error_email_to = galaxy-bugs@example.org
# Use the new iframe / javascript based layout
use_new_layout = true
@@ -120,29 +123,34 @@
## wiki_url: replaces the default galaxy main wiki
## bugs_email: replaces the default galaxy bugs email list
#brand = Private local mirror
-#wiki_url=/path/to/my/local/wiki
-#bugs_email=mailto:bugmaster@this.site.com
+#wiki_url = /path/to/my/local/wiki
+#bugs_email = mailto:galaxy-bugs@example.org
# ---- Job Runners ----------------------------------------------------------
# Clustering Galaxy is not a straightforward process and requires a lot of
-# pre-configuration. See the ClusteringGalaxy Wiki before attempting to set any
-# of these options. If running normally (without a cluster), do not change
-# anything in this section.
+# pre-configuration. See the ClusteringGalaxy Wiki before attempting to set
+# any of these options:
+#
+# http://g2.trac.bx.psu.edu/wiki/ClusteringGalaxy
+#
+# If running normally (without a cluster), do not change anything in this
+# section.
# start_job_runners: Comma-separated list of job runners to start. local is
# always started. If left commented, no jobs will be run on the cluster, even
# if a cluster URL is explicitly defined in the [galaxy:tool_runners] section
-# below. The only runner currently available is 'pbs'.
+# below. The runners currently available are 'pbs' and 'sge'.
#start_job_runners = pbs
# default_cluster_job_runner: The URL for the default runner to use when a tool
# doesn't explicity define a runner below. For help on the cluster URL format,
-# see the ClusteringGalaxy Wiki. Leave commented if not using a cluster job runner.
+# see the ClusteringGalaxy Wiki. Leave commented if not using a cluster job
+# runner.
#default_cluster_job_runner = pbs:///
# The PBS options are described in detail in the Galaxy Configuration section of
-# the ClusteringGalaxy Wiki
+# the ClusteringGalaxy Wiki, and are only necessary when using file staging.
#pbs_application_server =
#pbs_stage_path =
#pbs_dataset_server =
@@ -152,8 +160,6 @@
[galaxy:tool_runners]
biomart = local:///
-blat2wig = pbs:///blast
-blat_wrapper = pbs:///blast
encode_db1 = local:///
encode_import_all_latest_datasets1 = local:///
encode_import_chromatin_and_chromosomes1 = local:///
@@ -161,13 +167,8 @@
encode_import_genes_and_transcripts1 = local:///
encode_import_multi-species_sequence_analysis1 = local:///
encode_import_transcription_regulation1 = local:///
-generate_coverage_report = pbs:///blast
hbvar = local:///
-hist_high_quality_score = pbs:///blast
-megablast_wrapper = pbs:///blast
-megablast_xml_parser = pbs:///blast
microbial_import1 = local:///
-quality_score_distribution = pbs:///blast
ucsc_table_direct1 = local:///
ucsc_table_direct_archaea1 = local:///
ucsc_table_direct_test1 = local:///
details: http://www.bx.psu.edu/hg/galaxy/rev/56bc2f789894
changeset: 1595:56bc2f789894
user: Greg Von Kuster <greg@bx.psu.edu>
date: Sat Nov 01 15:30:13 2008 -0400
description:
Fix History.add_dataset() to be compatible with SQLAlchemy 0.4.
1 file(s) affected in this change:
lib/galaxy/model/__init__.py
diffs (26 lines):
diff -r 881ac57dcc81 -r 56bc2f789894 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py Fri Oct 31 15:21:30 2008 -0400
+++ b/lib/galaxy/model/__init__.py Sat Nov 01 15:30:13 2008 -0400
@@ -353,17 +353,18 @@
dataset = HistoryDatasetAssociation( dataset = dataset )
dataset.flush()
elif not isinstance( dataset, HistoryDatasetAssociation ):
- raise TypeError, "You can only add Dataset and HistoryDatasetAssociation instances to a history."
+ raise TypeError, "You can only add Dataset and HistoryDatasetAssociation instances to a history ( you tried to add %s )." % str( dataset )
if parent_id:
for data in self.datasets:
if data.id == parent_id:
dataset.hid = data.hid
break
else:
- if set_hid: dataset.hid = self._next_hid()
+ if set_hid:
+ dataset.hid = self._next_hid()
else:
- if set_hid: dataset.hid = self._next_hid()
- dataset.history = self
+ if set_hid:
+ dataset.hid = self._next_hid()
if genome_build not in [None, '?']:
self.genome_build = genome_build
self.datasets.append( dataset )
details: http://www.bx.psu.edu/hg/galaxy/rev/6ddbf5c83efc
changeset: 1592:6ddbf5c83efc
user: Dan Blankenberg <dan@bx.psu.edu>
date: Fri Oct 31 11:00:50 2008 -0400
description:
Fixes for dealing with UnvalidatedValues (i.e., when running workflows) and for passing a None trans to dynamic options.
4 file(s) affected in this change:
lib/galaxy/jobs/__init__.py
lib/galaxy/tools/__init__.py
lib/galaxy/tools/parameters/dynamic_options.py
tools/data_source/microbial_import_code.py
diffs (77 lines):
diff -r c4644668afff -r 6ddbf5c83efc lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Fri Oct 31 10:22:46 2008 -0400
+++ b/lib/galaxy/jobs/__init__.py Fri Oct 31 11:00:50 2008 -0400
@@ -464,7 +464,7 @@
# custom post process setup
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
- param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here?
+ param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? We probably should; as written, tools.parameters.basic.UnvalidatedValue objects (rather than validated, transformed values) reach the following methods when, e.g., running workflows
param_dict = self.tool.params_from_strings( param_dict, self.app )
# Check for and move associated_files
self.tool.collect_associated_files(out_data)
diff -r c4644668afff -r 6ddbf5c83efc lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py Fri Oct 31 10:22:46 2008 -0400
+++ b/lib/galaxy/tools/__init__.py Fri Oct 31 11:00:50 2008 -0400
@@ -948,7 +948,10 @@
# Regular tool parameter
value = input_values[ input.name ]
if isinstance( value, UnvalidatedValue ):
- value = input.from_html( value.value, None, context )
+ if value.value is None: # if value.value is None, it could not have been submitted via an HTML form, so .from_html can't be guaranteed to work
+ value = None
+ else:
+ value = input.from_html( value.value, None, context )
# Then do any further validation on the value
input.validate( value, None )
input_values[ input.name ] = value
diff -r c4644668afff -r 6ddbf5c83efc lib/galaxy/tools/parameters/dynamic_options.py
--- a/lib/galaxy/tools/parameters/dynamic_options.py Fri Oct 31 10:22:46 2008 -0400
+++ b/lib/galaxy/tools/parameters/dynamic_options.py Fri Oct 31 11:00:50 2008 -0400
@@ -102,7 +102,7 @@
if self.multiple:
return dataset_value in file_value.split( self.separator )
return file_value == dataset_value
- assert self.ref_name in other_values or trans.workflow_building_mode, "Required dependency '%s' not found in incoming values" % self.ref_name
+ assert self.ref_name in other_values or ( trans is not None and trans.workflow_building_mode), "Required dependency '%s' not found in incoming values" % self.ref_name
ref = other_values.get( self.ref_name, None )
if not isinstance( ref, self.dynamic_option.tool_param.tool.app.model.HistoryDatasetAssociation ):
return [] #not a valid dataset
@@ -146,9 +146,9 @@
def get_dependency_name( self ):
return self.ref_name
def filter_options( self, options, trans, other_values ):
- if trans.workflow_building_mode: return []
+ if trans is not None and trans.workflow_building_mode: return []
+ assert self.ref_name in other_values, "Required dependency '%s' not found in incoming values" % self.ref_name
ref = str( other_values.get( self.ref_name, None ) )
- assert ref is not None, "Required dependency '%s' not found in incoming values" % self.ref_name
rval = []
for fields in options:
if ( self.keep and fields[self.column] == ref ) or ( not self.keep and fields[self.column] != ref ):
diff -r c4644668afff -r 6ddbf5c83efc tools/data_source/microbial_import_code.py
--- a/tools/data_source/microbial_import_code.py Fri Oct 31 10:22:46 2008 -0400
+++ b/tools/data_source/microbial_import_code.py Fri Oct 31 11:00:50 2008 -0400
@@ -80,7 +80,7 @@
return microbe_info
#post processing, set build for data and add additional data to history
-from galaxy import datatypes, config, jobs
+from galaxy import datatypes, config, jobs, tools
from shutil import copyfile
def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
@@ -95,7 +95,12 @@
#if not (kingdom or group or org):
if not (kingdom or org):
print "Parameters are not available."
-
+ #workflow passes galaxy.tools.parameters.basic.UnvalidatedValue instead of values
+ if isinstance( kingdom, tools.parameters.basic.UnvalidatedValue ):
+ kingdom = kingdom.value
+ if isinstance( org, tools.parameters.basic.UnvalidatedValue ):
+ org = org.value
+
GALAXY_DATA_INDEX_DIR = app.config.tool_data_path
microbe_info = load_microbial_data( GALAXY_DATA_INDEX_DIR, sep='\t' )
new_stdout = ""
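The workflow fixes above all come down to unwrapping UnvalidatedValue before treating a parameter as a plain value. A toy illustration of the unwrap step (the class is redefined here so the snippet stands alone; it is not the Galaxy implementation):

class UnvalidatedValue(object):
    # Stand-in for galaxy.tools.parameters.basic.UnvalidatedValue
    def __init__(self, value):
        self.value = value

def unwrap(param):
    # Workflow execution may hand the hook a wrapped value; interactive runs
    # hand it the validated value directly, so both cases must be accepted.
    if isinstance(param, UnvalidatedValue):
        return param.value
    return param

print unwrap(UnvalidatedValue("Bacteria"))   # Bacteria
print unwrap("Bacteria")                     # Bacteria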