[hg] galaxy 2912: Some missed fixes for sqlalchemy 0.5 form of querying.
details: http://www.bx.psu.edu/hg/galaxy/rev/6dcf496ad316 changeset: 2912:6dcf496ad316 user: Greg Von Kuster <greg@bx.psu.edu> date: Fri Oct 23 13:31:11 2009 -0400 description: Some missed fixes for sqlalchemy 0.5 form of querying. 6 file(s) affected in this change: lib/galaxy/datatypes/metadata.py lib/galaxy/jobs/__init__.py lib/galaxy/jobs/runners/local.py lib/galaxy/jobs/runners/pbs.py lib/galaxy/tools/actions/upload_common.py lib/galaxy/tools/parameters/basic.py diffs (112 lines): diff -r 4448bc8ba587 -r 6dcf496ad316 lib/galaxy/datatypes/metadata.py --- a/lib/galaxy/datatypes/metadata.py Fri Oct 23 12:53:25 2009 -0400 +++ b/lib/galaxy/datatypes/metadata.py Fri Oct 23 13:31:11 2009 -0400 @@ -391,7 +391,9 @@ return value if DATABASE_CONNECTION_AVAILABLE: try: - return galaxy.model.MetadataFile.get( value ) + # FIXME: GVK ( 10/23/09 ) Can we get a valid db session without this import? + from galaxy.model.mapping import context as sa_session + return sa_session.query( galaxy.model.MetadataFile ).get( value ) except: #value was not a valid id return None @@ -569,9 +571,9 @@ log.debug( 'setting metadata externally failed for %s %s: %s' % ( dataset.__class__.__name__, dataset.id, rstring ) ) return rval - def cleanup_external_metadata( self ): + def cleanup_external_metadata( self, sa_session ): log.debug( 'Cleaning up external metadata files' ) - for metadata_files in galaxy.model.Job.get( self.job_id ).external_output_metadata: + for metadata_files in sa_session.query( galaxy.model.Job ).get( self.job_id ).external_output_metadata: #we need to confirm that any MetadataTempFile files were removed, if not we need to remove them #can occur if the job was stopped before completion, but a MetadataTempFile is used in the set_meta MetadataTempFile.cleanup_from_JSON_dict_filename( metadata_files.filename_out ) @@ -581,7 +583,7 @@ os.remove( fname ) except Exception, e: log.debug( 'Failed to cleanup external metadata file (%s) for %s: %s' % ( key, dataset_key, e ) ) - def 
set_job_runner_external_pid( self, pid ): - for metadata_files in galaxy.model.Job.get( self.job_id ).external_output_metadata: + def set_job_runner_external_pid( self, pid, sa_session ): + for metadata_files in sa_session.query( galaxy.model.Job ).get( self.job_id ).external_output_metadata: metadata_files.job_runner_external_pid = pid metadata_files.flush() diff -r 4448bc8ba587 -r 6dcf496ad316 lib/galaxy/jobs/__init__.py --- a/lib/galaxy/jobs/__init__.py Fri Oct 23 12:53:25 2009 -0400 +++ b/lib/galaxy/jobs/__init__.py Fri Oct 23 13:31:11 2009 -0400 @@ -609,7 +609,7 @@ if self.working_directory is not None: shutil.rmtree( self.working_directory ) if self.app.config.set_metadata_externally: - self.external_output_metadata.cleanup_external_metadata() + self.external_output_metadata.cleanup_external_metadata( self.sa_session ) except: log.exception( "Unable to cleanup job %d" % self.job_id ) diff -r 4448bc8ba587 -r 6dcf496ad316 lib/galaxy/jobs/runners/local.py --- a/lib/galaxy/jobs/runners/local.py Fri Oct 23 12:53:25 2009 -0400 +++ b/lib/galaxy/jobs/runners/local.py Fri Oct 23 13:31:11 2009 -0400 @@ -19,6 +19,7 @@ def __init__( self, app ): """Start the job runner with 'nworkers' worker threads""" self.app = app + self.sa_session = app.model.context self.queue = Queue() self.threads = [] nworkers = app.config.local_job_queue_workers @@ -111,7 +112,7 @@ shell = True, env = env, preexec_fn = os.setpgrp ) - job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid ) + job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session ) external_metadata_proc.wait() log.debug( 'execution of external set_meta finished for job %d' % job_wrapper.job_id ) diff -r 4448bc8ba587 -r 6dcf496ad316 lib/galaxy/jobs/runners/pbs.py --- a/lib/galaxy/jobs/runners/pbs.py Fri Oct 23 12:53:25 2009 -0400 +++ b/lib/galaxy/jobs/runners/pbs.py Fri Oct 23 13:31:11 2009 -0400 @@ -80,6 +80,7 @@ if 
app.config.pbs_application_server and app.config.outputs_to_working_directory: raise Exception( "pbs_application_server (file staging) and outputs_to_working_directory options are mutually exclusive" ) self.app = app + self.sa_session = app.model.context # 'watched' and 'queue' are both used to keep track of jobs to watch. # 'queue' is used to add new watched jobs, and can be called from # any thread (usually by the 'queue_job' method). 'watched' must only @@ -457,7 +458,7 @@ """ Seperated out so we can use the worker threads for it. """ - self.stop_job( self.app.model.Job.get( pbs_job_state.job_wrapper.job_id ) ) + self.stop_job( self.sa_session.query( self.app.model.Job ).get( pbs_job_state.job_wrapper.job_id ) ) pbs_job_state.job_wrapper.fail( pbs_job_state.fail_message ) self.cleanup( ( pbs_job_state.ofile, pbs_job_state.efile, pbs_job_state.job_file ) ) diff -r 4448bc8ba587 -r 6dcf496ad316 lib/galaxy/tools/actions/upload_common.py --- a/lib/galaxy/tools/actions/upload_common.py Fri Oct 23 12:53:25 2009 -0400 +++ b/lib/galaxy/tools/actions/upload_common.py Fri Oct 23 13:31:11 2009 -0400 @@ -71,7 +71,7 @@ user, roles = trans.get_user_and_roles() for id in async_datasets: try: - data = data_obj.get( int( id ) ) + data = trans.sa_session.query( data_obj ).get( int( id ) ) except: log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id ) continue diff -r 4448bc8ba587 -r 6dcf496ad316 lib/galaxy/tools/parameters/basic.py --- a/lib/galaxy/tools/parameters/basic.py Fri Oct 23 12:53:25 2009 -0400 +++ b/lib/galaxy/tools/parameters/basic.py Fri Oct 23 13:31:11 2009 -0400 @@ -1235,7 +1235,7 @@ if value in [None, "None"]: return None if isinstance( value, list ): - return [ trans.app.model.HistoryDatasetAssociation.get( v ) for v in value ] + return [ trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( v ) for v in value ] elif isinstance( value, trans.app.model.HistoryDatasetAssociation ): return value else:
participants (1)
-
Greg Von Kuster