commit/galaxy-central: dannon: Require pexpect -- previously not explicitly required
by Bitbucket
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/8155a8ff8dd4/
changeset: 8155a8ff8dd4
user: dannon
date: 2012-11-19 19:02:12
summary: Require pexpect -- previously not explicitly required
affected #: 1 file
diff -r 886d8ada4a2215aa4b69244908cb4859352a2f03 -r 8155a8ff8dd4f8aaabe4cee31ca0415c39302ed3 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -5,9 +5,10 @@
the relationship cardinalities are obvious (e.g. prefer Dataset to Data)
"""
-import pkg_resources, os, errno, codecs, operator, socket, pexpect, logging, time
-pkg_resources.require( "simplejson" )
-import simplejson
+import pkg_resources
+pkg_resources.require("simplejson")
+pkg_resources.require("pexpect")
+import simplejson, os, errno, codecs, operator, socket, pexpect, logging, time
from sqlalchemy.orm import object_session
from sqlalchemy.sql.expression import func
import galaxy.datatypes
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this email because you are the addressed recipient and have the
commit notification service enabled for this repository.
10 years, 2 months
commit/galaxy-central: dannon: Refactor imports to remove 'import *s', remove unused variables/dead code, strip whitespace.
by Bitbucket
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/886d8ada4a22/
changeset: 886d8ada4a22
user: dannon
date: 2012-11-19 18:54:46
summary: Refactor imports to remove 'import *s', remove unused variables/dead code, strip whitespace.
affected #: 1 file
diff -r 950544a9b858d86bd1f3ff51c7433a90d2e89bfb -r 886d8ada4a2215aa4b69244908cb4859352a2f03 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -4,24 +4,22 @@
Naming: try to use class names that have a distinct plural form so that
the relationship cardinalities are obvious (e.g. prefer Dataset to Data)
"""
-import pkg_resources
+
+import pkg_resources, os, errno, codecs, operator, socket, pexpect, logging, time
pkg_resources.require( "simplejson" )
import simplejson
+from sqlalchemy.orm import object_session
+from sqlalchemy.sql.expression import func
import galaxy.datatypes
-from galaxy.util.bunch import Bunch
-from galaxy import util
import galaxy.datatypes.registry
from galaxy.datatypes.metadata import MetadataCollection
-from galaxy.security import RBACAgent, get_permitted_actions
-from galaxy.util.hash_util import *
-from galaxy.web.form_builder import *
+from galaxy.security import get_permitted_actions
+from galaxy import util
+from galaxy.util.bunch import Bunch
+from galaxy.util.hash_util import new_secure_hash
+from galaxy.web.form_builder import (AddressField, CheckboxField, PasswordField, SelectField, TextArea, TextField,
+ WorkflowField, WorkflowMappingField, HistoryField)
from galaxy.model.item_attrs import UsesAnnotations, APIItem
-from sqlalchemy.orm import object_session
-from sqlalchemy.sql.expression import func
-import os.path, os, errno, codecs, operator, socket, pexpect, logging, time, shutil
-
-if sys.version_info[:2] < ( 2, 5 ):
- from sets import Set as set
log = logging.getLogger( __name__ )
@@ -138,13 +136,13 @@
self.exit_code = None
# TODO: Add accessors for members defined in SQL Alchemy for the Job table and
- # for the mapper defined to the Job table.
+ # for the mapper defined to the Job table.
def get_external_output_metadata( self ):
"""
- The external_output_metadata is currently a reference from Job to
+ The external_output_metadata is currently a reference from Job to
JobExternalOutputMetadata. It exists for a job but not a task.
"""
- return self.external_output_metadata
+ return self.external_output_metadata
def get_session_id( self ):
return self.session_id
def get_user_id( self ):
@@ -177,7 +175,7 @@
# runner_name is not the same thing.
return self.job_runner_name
def get_job_runner_external_id( self ):
- # This is different from the Task just in the member accessed:
+ # This is different from the Task just in the member accessed:
return self.job_runner_external_id
def get_post_job_actions( self ):
return self.post_job_actions
@@ -197,10 +195,10 @@
# The tasks member is pert of a reference in the SQL Alchemy schema:
return self.tasks
def get_id_tag( self ):
- """
- Return a tag that can be useful in identifying a Job.
+ """
+ Return a tag that can be useful in identifying a Job.
This returns the Job's get_id
- """
+ """
return "%s" % self.id;
def set_session_id( self, session_id ):
@@ -324,8 +322,8 @@
self.task_runner_name = None
self.task_runner_external_id = None
self.job = job
- self.stdout = ""
- self.stderr = ""
+ self.stdout = ""
+ self.stderr = ""
self.exit_code = None
self.prepare_input_files_cmd = prepare_files_cmd
@@ -340,8 +338,8 @@
return param_dict
def get_id( self ):
- # This is defined in the SQL Alchemy schema:
- return self.id
+ # This is defined in the SQL Alchemy schema:
+ return self.id
def get_id_tag( self ):
"""
Return an id tag suitable for identifying the task.
@@ -378,7 +376,7 @@
# metdata). These can be filled in as needed.
def get_external_output_metadata( self ):
"""
- The external_output_metadata is currently a backref to
+ The external_output_metadata is currently a backref to
JobExternalOutputMetadata. It exists for a job but not a task,
and when a task is cancelled its corresponding parent Job will
be cancelled. So None is returned now, but that could be changed
@@ -395,13 +393,13 @@
"""
Runners will use the same methods to get information about the Task
class as they will about the Job class, so this method just returns
- the task's external id.
+ the task's external id.
"""
# TODO: Merge into get_runner_external_id.
return self.task_runner_external_id
def get_session_id( self ):
# The Job's galaxy session is equal to the Job's session, so the
- # Job's session is the same as the Task's session.
+ # Job's session is the same as the Task's session.
return self.get_job().get_session_id()
def set_id( self, id ):
@@ -424,7 +422,7 @@
# This method is available for runners that do not want/need to
# differentiate between the kinds of Runnable things (Jobs and Tasks)
# that they're using.
- log.debug( "Task %d: Set external id to %s"
+ log.debug( "Task %d: Set external id to %s"
% ( self.id, task_runner_external_id ) )
self.task_runner_external_id = task_runner_external_id
def set_task_runner_external_id( self, task_runner_external_id ):
@@ -954,7 +952,7 @@
return False
try:
return util.is_multi_byte( codecs.open( self.file_name, 'r', 'utf-8' ).read( 100 ) )
- except UnicodeDecodeError, e:
+ except UnicodeDecodeError:
return False
# FIXME: sqlalchemy will replace this
def _delete(self):
@@ -1136,7 +1134,6 @@
"""
Returns dict of { "dependency" => HDA }
"""
- converted_dataset = self.get_converted_files_by_type( target_ext )
# List of string of dependencies
try:
depends_list = trans.app.datatypes_registry.converter_deps[self.extension][target_ext]
@@ -1307,7 +1304,7 @@
"""
Returns datasources for dataset; if datasources are not available
due to indexing, indexing is started. Return value is a dictionary
- with entries of type
+ with entries of type
(<datasource_type> : {<datasource_name>, <indexing_message>}).
"""
track_type, data_sources = self.datatype.get_track_type()
@@ -1320,17 +1317,17 @@
else:
# Convert.
msg = self.convert_dataset( trans, data_source )
-
+
# Store msg.
data_sources_dict[ source_type ] = { "name" : data_source, "message": msg }
-
+
return data_sources_dict
def convert_dataset( self, trans, target_type ):
"""
- Converts a dataset to the target_type and returns a message indicating
+ Converts a dataset to the target_type and returns a message indicating
status of the conversion. None is returned to indicate that dataset
- was converted successfully.
+ was converted successfully.
"""
# FIXME: copied from controller.py
@@ -1402,7 +1399,7 @@
hda.metadata = self.metadata
if copy_children:
for child in self.children:
- child_copy = child.copy( copy_children = copy_children, parent_id = hda.id )
+ child.copy( copy_children = copy_children, parent_id = hda.id )
if not self.datatype.copy_safe_peek:
# In some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
hda.set_peek()
@@ -1454,11 +1451,11 @@
object_session( self ).add( library_dataset )
object_session( self ).flush()
for child in self.children:
- child_copy = child.to_library_dataset_dataset_association( trans,
- target_folder=target_folder,
- replace_dataset=replace_dataset,
- parent_id=ldda.id,
- user=ldda.user )
+ child.to_library_dataset_dataset_association( trans,
+ target_folder=target_folder,
+ replace_dataset=replace_dataset,
+ parent_id=ldda.id,
+ user=ldda.user )
if not self.datatype.copy_safe_peek:
# In some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
ldda.set_peek()
@@ -1808,7 +1805,7 @@
if add_to_history and target_history:
target_history.add_dataset( hda )
for child in self.children:
- child_copy = child.to_history_dataset_association( target_history = target_history, parent_id = hda.id, add_to_history = False )
+ child.to_history_dataset_association( target_history = target_history, parent_id = hda.id, add_to_history = False )
if not self.datatype.copy_safe_peek:
hda.set_peek() #in some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
object_session( self ).flush()
@@ -1833,7 +1830,7 @@
ldda.metadata = self.metadata
if copy_children:
for child in self.children:
- child_copy = child.copy( copy_children = copy_children, parent_id = ldda.id )
+ child.copy( copy_children = copy_children, parent_id = ldda.id )
if not self.datatype.copy_safe_peek:
# In some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
ldda.set_peek()
@@ -2640,7 +2637,7 @@
events={ '.ssword:*': scp_configs['password']+'\r\n',
pexpect.TIMEOUT:print_ticks},
timeout=10 )
- except Exception, e:
+ except Exception:
return error_msg
# cleanup the output to get just the file size
return output.replace( filepath, '' )\
@@ -3280,7 +3277,6 @@
.first()
return None
def get_versions( self, app ):
- sa_session = app.model.context.current
tool_versions = []
# Prepend ancestors.
def __ancestors( app, tool_version ):
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this email because you are the addressed recipient and have the
commit notification service enabled for this repository.
10 years, 2 months
commit/galaxy-central: carlfeberhard: history panel: noop on ajax error fetching history updates when interrupted by iframe refresh.
by Bitbucket
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/950544a9b858/
changeset: 950544a9b858
user: carlfeberhard
date: 2012-11-19 18:18:10
summary: history panel: noop on ajax error fetching history updates when interrupted by iframe refresh.
affected #: 2 files
diff -r 9b27168c94423a91e0dc0249bb9453adc9532610 -r 950544a9b858d86bd1f3ff51c7433a90d2e89bfb static/scripts/mvc/history/history-model.js
--- a/static/scripts/mvc/history/history-model.js
+++ b/static/scripts/mvc/history/history-model.js
@@ -184,10 +184,14 @@
}
}).error( function( xhr, status, error ){
- if( console && console.warn ){
- console.warn( 'Error getting history updates from the server:', xhr, status, error );
+ // if not interruption by iframe reload
+ //TODO: remove when iframes are removed
+ if( !( ( xhr.readyState === 0 ) && ( xhr.status === 0 ) ) ){
+ if( console && console.warn ){
+ console.warn( 'Error getting history updates from the server:', xhr, status, error );
+ }
+ alert( _l( 'Error getting history updates from the server.' ) + '\n' + error );
}
- alert( _l( 'Error getting history updates from the server.' ) + '\n' + error );
});
},
diff -r 9b27168c94423a91e0dc0249bb9453adc9532610 -r 950544a9b858d86bd1f3ff51c7433a90d2e89bfb static/scripts/packed/mvc/history/history-model.js
--- a/static/scripts/packed/mvc/history/history-model.js
+++ b/static/scripts/packed/mvc/history/history-model.js
@@ -1,1 +1,1 @@
-var History=BaseModel.extend(LoggableMixin).extend({defaults:{id:"",name:"",state:"",diskSize:0,deleted:false,annotation:null,message:null},urlRoot:"api/histories/",url:function(){return"api/histories/"+this.get("id")},initialize:function(a,b){this.log(this+".initialize:",a,b);this.hdas=new HDACollection();if(b&&b.length){this.hdas.reset(b);this.checkForUpdates()}},loadFromApi:function(a,c){var b=this;b.attributes.id=a;jQuery.when(jQuery.ajax("api/users/current"),b.fetch()).then(function(e,d){b.attributes.user=e[0];b.trigger("loaded:user",e[0]);b.trigger("loaded",d[0])}).then(function(){jQuery.ajax(b.url()+"/contents?"+jQuery.param({ids:b.hdaIdsFromStateIds().join(",")})).success(function(d){b.hdas.reset(d);b.checkForUpdates();b.trigger("loaded:hdas",d);if(c){callback(b)}})})},hdaIdsFromStateIds:function(){return _.reduce(_.values(this.get("state_ids")),function(b,a){return b.concat(a)})},checkForUpdates:function(a){if(this.hdas.running().length){this.stateUpdater()}else{this.trigger("ready")}return this},stateUpdater:function(){var c=this,a=this.get("state"),b=this.get("state_ids");jQuery.ajax("api/histories/"+this.get("id")).success(function(d){c.set(d);c.log("current history state:",c.get("state"),"(was)",a,"new size:",c.get("nice_size"));var e=[];_.each(_.keys(d.state_ids),function(g){var f=_.difference(d.state_ids[g],b[g]);e=e.concat(f)});if(e.length){c.hdas.update(e)}if((c.get("state")===HistoryDatasetAssociation.STATES.RUNNING)||(c.get("state")===HistoryDatasetAssociation.STATES.QUEUED)){setTimeout(function(){c.stateUpdater()},4000)}else{c.trigger("ready")}}).error(function(f,d,e){if(console&&console.warn){console.warn("Error getting history updates from the server:",f,d,e)}alert(_l("Error getting history updates from the server.")+"\n"+e)})},toString:function(){var a=(this.get("name"))?(","+this.get("name")):("");return"History("+this.get("id")+a+")"}});var 
HistoryCollection=Backbone.Collection.extend(LoggableMixin).extend({model:History,urlRoot:"api/histories"});
\ No newline at end of file
+var History=BaseModel.extend(LoggableMixin).extend({defaults:{id:"",name:"",state:"",diskSize:0,deleted:false,annotation:null,message:null},urlRoot:"api/histories/",url:function(){return"api/histories/"+this.get("id")},initialize:function(a,b){this.log(this+".initialize:",a,b);this.hdas=new HDACollection();if(b&&b.length){this.hdas.reset(b);this.checkForUpdates()}},loadFromApi:function(a,c){var b=this;b.attributes.id=a;jQuery.when(jQuery.ajax("api/users/current"),b.fetch()).then(function(e,d){b.attributes.user=e[0];b.trigger("loaded:user",e[0]);b.trigger("loaded",d[0])}).then(function(){jQuery.ajax(b.url()+"/contents?"+jQuery.param({ids:b.hdaIdsFromStateIds().join(",")})).success(function(d){b.hdas.reset(d);b.checkForUpdates();b.trigger("loaded:hdas",d);if(c){callback(b)}})})},hdaIdsFromStateIds:function(){return _.reduce(_.values(this.get("state_ids")),function(b,a){return b.concat(a)})},checkForUpdates:function(a){if(this.hdas.running().length){this.stateUpdater()}else{this.trigger("ready")}return this},stateUpdater:function(){var c=this,a=this.get("state"),b=this.get("state_ids");jQuery.ajax("api/histories/"+this.get("id")).success(function(d){c.set(d);c.log("current history state:",c.get("state"),"(was)",a,"new size:",c.get("nice_size"));var e=[];_.each(_.keys(d.state_ids),function(g){var f=_.difference(d.state_ids[g],b[g]);e=e.concat(f)});if(e.length){c.hdas.update(e)}if((c.get("state")===HistoryDatasetAssociation.STATES.RUNNING)||(c.get("state")===HistoryDatasetAssociation.STATES.QUEUED)){setTimeout(function(){c.stateUpdater()},4000)}else{c.trigger("ready")}}).error(function(f,d,e){if(!((f.readyState===0)&&(f.status===0))){if(console&&console.warn){console.warn("Error getting history updates from the server:",f,d,e)}alert(_l("Error getting history updates from the server.")+"\n"+e)}})},toString:function(){var a=(this.get("name"))?(","+this.get("name")):("");return"History("+this.get("id")+a+")"}});var 
HistoryCollection=Backbone.Collection.extend(LoggableMixin).extend({model:History,urlRoot:"api/histories"});
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this email because you are the addressed recipient and have the
commit notification service enabled for this repository.
10 years, 2 months
commit/galaxy-central: carlfeberhard: sphinx-autodoc: better formatting for metadata_spec
by Bitbucket
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/9b27168c9442/
changeset: 9b27168c9442
user: carlfeberhard
date: 2012-11-19 17:30:35
summary: sphinx-autodoc: better formatting for metadata_spec
affected #: 3 files
diff -r 19cbbaf566216cb46ecc6a6d17e0f1e0ab52978e -r 9b27168c94423a91e0dc0249bb9453adc9532610 lib/galaxy/datatypes/assembly.py
--- a/lib/galaxy/datatypes/assembly.py
+++ b/lib/galaxy/datatypes/assembly.py
@@ -225,4 +225,3 @@
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
-
diff -r 19cbbaf566216cb46ecc6a6d17e0f1e0ab52978e -r 9b27168c94423a91e0dc0249bb9453adc9532610 lib/galaxy/datatypes/data.py
--- a/lib/galaxy/datatypes/data.py
+++ b/lib/galaxy/datatypes/data.py
@@ -69,6 +69,9 @@
<class 'galaxy.datatypes.metadata.MetadataParameter'>
"""
+ #: dictionary of metadata fields for this datatype::
+ metadata_spec = None
+
__metaclass__ = DataMeta
# Add metadata elements
MetadataElement( name="dbkey", desc="Database/Build", default="?", param=metadata.DBKeyParameter, multiple=False, no_value="?" )
@@ -849,4 +852,3 @@
except UnicodeDecodeError:
text = "binary/unknown file"
return text
-
diff -r 19cbbaf566216cb46ecc6a6d17e0f1e0ab52978e -r 9b27168c94423a91e0dc0249bb9453adc9532610 lib/galaxy/datatypes/metadata.py
--- a/lib/galaxy/datatypes/metadata.py
+++ b/lib/galaxy/datatypes/metadata.py
@@ -123,6 +123,7 @@
def __getstate__( self ):
return None #cannot pickle a weakref item (self._parent), when data._metadata_collection is None, it will be recreated on demand
+
class MetadataSpecCollection( odict ):
"""
A simple extension of dict which allows cleaner access to items
@@ -132,13 +133,21 @@
"""
def __init__( self, dict = None ):
odict.__init__( self, dict = None )
+
def append( self, item ):
self[item.name] = item
+
def iter( self ):
return self.itervalues()
+
def __getattr__( self, name ):
return self.get( name )
+ def __repr__( self ):
+ # force elements to draw with __str__ for sphinx-apidoc
+ return ', '.join([ item.__str__() for item in self.iter() ])
+
+
class MetadataParameter( object ):
def __init__( self, spec ):
self.spec = spec
@@ -185,7 +194,6 @@
"""
pass
-
def unwrap( self, form_value ):
"""
Turns a value into its storable form.
@@ -205,19 +213,22 @@
Turns a value read from an external dict into its value to be pushed directly into the metadata dict.
"""
return value
+
def to_external_value( self, value ):
"""
Turns a value read from a metadata into its value to be pushed directly into the external dict.
"""
return value
+
class MetadataElementSpec( object ):
"""
Defines a metadata element and adds it to the metadata_spec (which
is a MetadataSpecCollection) of datatype.
"""
-
- def __init__( self, datatype, name=None, desc=None, param=MetadataParameter, default=None, no_value = None, visible=True, set_in_upload = False, **kwargs ):
+ def __init__( self, datatype,
+ name=None, desc=None, param=MetadataParameter, default=None, no_value = None,
+ visible=True, set_in_upload = False, **kwargs ):
self.name = name
self.desc = desc or name
self.default = default
@@ -226,24 +237,37 @@
self.set_in_upload = set_in_upload
# Catch-all, allows for extra attributes to be set
self.__dict__.update(kwargs)
- #set up param last, as it uses values set above
+ # set up param last, as it uses values set above
self.param = param( self )
- datatype.metadata_spec.append( self ) #add spec element to the spec
+ # add spec element to the spec
+ datatype.metadata_spec.append( self )
+
def get( self, name, default=None ):
return self.__dict__.get(name, default)
+
def wrap( self, value ):
"""
Turns a stored value into its usable form.
"""
return self.param.wrap( value )
+
def unwrap( self, value ):
"""
Turns an incoming value into its storable form.
"""
return self.param.unwrap( value )
+ def __str__( self ):
+ #TODO??: assuming param is the class of this MetadataElementSpec - add the plain class name for that
+ spec_dict = dict( param_class=self.param.__class__.__name__ )
+ spec_dict.update( self.__dict__ )
+ return ( "{name} ({param_class}): {desc}, defaults to '{default}'".format( **spec_dict ) )
+
+# create a statement class that, when called,
+# will add a new MetadataElementSpec to a class's metadata_spec
MetadataElement = Statement( MetadataElementSpec )
+
"""
MetadataParameter sub-classes.
"""
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this email because you are the addressed recipient and have the
commit notification service enabled for this repository.
10 years, 2 months
commit/galaxy-central: inithello: Fix for TwillTestCase.wait() with the new client-side histories.
by Bitbucket
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/243ff2bfbf28/
changeset: 243ff2bfbf28
user: inithello
date: 2012-11-16 21:39:50
summary: Fix for TwillTestCase.wait() with the new client-side histories.
affected #: 1 file
diff -r 8bd9f729fb6a92077b9652690b11877f203902ec -r 243ff2bfbf28fb92f94eca408d063fb5257a6481 test/base/twilltestcase.py
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -1199,15 +1199,25 @@
tc.fv( 2, "hgta_doGalaxyQuery", "Send query to Galaxy" )
self.submit_form( button="Send query to Galaxy" )#, **output_params ) #AssertionError: Attempting to set field 'fbQual' to value '['whole']' in form 'None' threw exception: no matching forms! control: <RadioControl(fbQual=[whole, upstreamAll, endAll])>
+ def get_running_datasets( self ):
+ self.visit_url( '/api/histories' )
+ history_id = from_json_string( self.last_page() )[0][ 'id' ]
+ self.visit_url( '/api/histories/%s/contents' % history_id )
+ jsondata = from_json_string( self.last_page() )
+ for history_item in jsondata:
+ self.visit_url( history_item[ 'url' ] )
+ item_json = from_json_string( self.last_page() )
+ if item_json[ 'state' ] in [ 'queued', 'running', 'paused' ]:
+ return True
+ return False
+
def wait( self, maxseconds=120 ):
"""Waits for the tools to finish"""
sleep_amount = 0.1
slept = 0
self.home()
while slept <= maxseconds:
- self.visit_page( "history" )
- page = tc.browser.get_html()
- if page.find( '<!-- running: do not change this comment, used by TwillTestCase.wait -->' ) > -1:
+ if self.get_running_datasets():
time.sleep( sleep_amount )
slept += sleep_amount
sleep_amount *= 2
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this email because you are the addressed recipient and have the
commit notification service enabled for this repository.
10 years, 2 months
commit/galaxy-central: 2 new changesets
by Bitbucket
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/430ea49ff3c2/
changeset: 430ea49ff3c2
user: natefoo
date: 2012-11-16 21:36:31
summary: Jobs now have their runner name set prior to actually being submitted to the runner, meaning that runners' recover() methods must handle the case where the job was never actually submitted.
affected #: 4 files
diff -r 7f7171c52b1d922fb6471c8ce373fa3e50901206 -r 430ea49ff3c2ab00af9e5d602216ac37c489156e lib/galaxy/jobs/runners/cli.py
--- a/lib/galaxy/jobs/runners/cli.py
+++ b/lib/galaxy/jobs/runners/cli.py
@@ -359,12 +359,16 @@
def recover( self, job, job_wrapper ):
"""Recovers jobs stuck in the queued/running state when Galaxy started"""
+ job_id = job.get_job_runner_external_id()
+ if job_id is None:
+ self.put( job_wrapper )
+ return
runner_job_state = RunnerJobState()
runner_job_state.ofile = "%s.gjout" % os.path.join(job_wrapper.working_directory, job_wrapper.get_id_tag())
runner_job_state.efile = "%s.gjerr" % os.path.join(job_wrapper.working_directory, job_wrapper.get_id_tag())
runner_job_state.ecfile = "%s.gjec" % os.path.join(job_wrapper.working_directory, job_wrapper.get_id_tag())
runner_job_state.job_file = "%s/galaxy_%s.sh" % (self.app.config.cluster_files_directory, job_wrapper.get_id_tag())
- runner_job_state.external_job_id = str( job.job_runner_external_id )
+ runner_job_state.external_job_id = str( job_id )
job_wrapper.command_line = job.command_line
runner_job_state.job_wrapper = job_wrapper
runner_job_state.runner_url = job.job_runner_name
diff -r 7f7171c52b1d922fb6471c8ce373fa3e50901206 -r 430ea49ff3c2ab00af9e5d602216ac37c489156e lib/galaxy/jobs/runners/condor.py
--- a/lib/galaxy/jobs/runners/condor.py
+++ b/lib/galaxy/jobs/runners/condor.py
@@ -368,11 +368,15 @@
def recover( self, job, job_wrapper ):
"""Recovers jobs stuck in the queued/running state when Galaxy started"""
# TODO Check if we need any changes here
+ job_id = job.get_job_runner_external_id()
+ if job_id is None:
+ self.put( job_wrapper )
+ return
drm_job_state = CondorJobState()
drm_job_state.ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job.id)
drm_job_state.efile = "%s/database/pbs/%s.e" % (os.getcwd(), job.id)
drm_job_state.job_file = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job.id)
- drm_job_state.job_id = str( job.job_runner_external_id )
+ drm_job_state.job_id = str( job_id )
drm_job_state.runner_url = job_wrapper.get_job_runner()
job_wrapper.command_line = job.command_line
drm_job_state.job_wrapper = job_wrapper
diff -r 7f7171c52b1d922fb6471c8ce373fa3e50901206 -r 430ea49ff3c2ab00af9e5d602216ac37c489156e lib/galaxy/jobs/runners/drmaa.py
--- a/lib/galaxy/jobs/runners/drmaa.py
+++ b/lib/galaxy/jobs/runners/drmaa.py
@@ -411,12 +411,16 @@
def recover( self, job, job_wrapper ):
"""Recovers jobs stuck in the queued/running state when Galaxy started"""
+ job_id = job.get_job_runner_external_id()
+ if job_id is None:
+ self.put( job_wrapper )
+ return
drm_job_state = DRMAAJobState()
drm_job_state.ofile = "%s.drmout" % os.path.join(os.getcwd(), job_wrapper.working_directory, job_wrapper.get_id_tag())
drm_job_state.efile = "%s.drmerr" % os.path.join(os.getcwd(), job_wrapper.working_directory, job_wrapper.get_id_tag())
drm_job_state.ecfile = "%s.drmec" % os.path.join(os.getcwd(), job_wrapper.working_directory, job_wrapper.get_id_tag())
drm_job_state.job_file = "%s/galaxy_%s.sh" % (self.app.config.cluster_files_directory, job.get_id())
- drm_job_state.job_id = str( job.get_job_runner_external_id() )
+ drm_job_state.job_id = str( job_id )
drm_job_state.runner_url = job_wrapper.get_job_runner_url()
job_wrapper.command_line = job.get_command_line()
drm_job_state.job_wrapper = job_wrapper
diff -r 7f7171c52b1d922fb6471c8ce373fa3e50901206 -r 430ea49ff3c2ab00af9e5d602216ac37c489156e lib/galaxy/jobs/runners/pbs.py
--- a/lib/galaxy/jobs/runners/pbs.py
+++ b/lib/galaxy/jobs/runners/pbs.py
@@ -640,12 +640,16 @@
def recover( self, job, job_wrapper ):
"""Recovers jobs stuck in the queued/running state when Galaxy started"""
+ job_id = job.get_job_runner_external_id()
+ if job_id is None:
+ self.put( job_wrapper )
+ return
pbs_job_state = PBSJobState()
pbs_job_state.ofile = "%s/%s.o" % (self.app.config.cluster_files_directory, job.id)
pbs_job_state.efile = "%s/%s.e" % (self.app.config.cluster_files_directory, job.id)
pbs_job_state.ecfile = "%s/%s.ec" % (self.app.config.cluster_files_directory, job.id)
pbs_job_state.job_file = "%s/%s.sh" % (self.app.config.cluster_files_directory, job.id)
- pbs_job_state.job_id = str( job.get_job_runner_external_id() )
+ pbs_job_state.job_id = str( job_id )
pbs_job_state.runner_url = job_wrapper.get_job_runner_url()
job_wrapper.command_line = job.command_line
pbs_job_state.job_wrapper = job_wrapper
https://bitbucket.org/galaxy/galaxy-central/changeset/8bd9f729fb6a/
changeset: 8bd9f729fb6a
user: natefoo
date: 2012-11-16 21:36:56
summary: Remove the long-deprecated SGE runner. Use the DRMAA runner.
affected #: 1 file
diff -r 430ea49ff3c2ab00af9e5d602216ac37c489156e -r 8bd9f729fb6a92077b9652690b11877f203902ec lib/galaxy/jobs/runners/sge.py
--- a/lib/galaxy/jobs/runners/sge.py
+++ /dev/null
@@ -1,392 +0,0 @@
-import os, logging, threading, time
-from Queue import Queue, Empty
-
-from galaxy import model
-from galaxy.jobs.runners import BaseJobRunner
-
-from paste.deploy.converters import asbool
-
-import pkg_resources
-
-egg_message = """
-
-The 'sge' runner depends on 'DRMAA_python' which is not installed. Galaxy's
-"scramble" system should make this installation simple, please follow the
-instructions found at:
-
- http://wiki.g2.bx.psu.edu/Admin/Config/Performance/Cluster
-
-Additional errors may follow:
-%s
-"""
-
-
-try:
- pkg_resources.require( "DRMAA_python" )
- import DRMAA
-except Exception, e:
- raise Exception( egg_message % str( e ) )
-
-
-log = logging.getLogger( __name__ )
-
-__all__ = [ 'SGEJobRunner' ]
-
-DRMAA_state = {
- DRMAA.Session.UNDETERMINED: 'process status cannot be determined',
- DRMAA.Session.QUEUED_ACTIVE: 'job is queued and waiting to be scheduled',
- DRMAA.Session.SYSTEM_ON_HOLD: 'job is queued and in system hold',
- DRMAA.Session.USER_ON_HOLD: 'job is queued and in user hold',
- DRMAA.Session.USER_SYSTEM_ON_HOLD: 'job is queued and in user and system hold',
- DRMAA.Session.RUNNING: 'job is running',
- DRMAA.Session.SYSTEM_SUSPENDED: 'job is system suspended',
- DRMAA.Session.USER_SUSPENDED: 'job is user suspended',
- DRMAA.Session.DONE: 'job finished normally',
- DRMAA.Session.FAILED: 'job finished, but failed',
-}
-
-sge_template = """#!/bin/sh
-#$ -S /bin/sh
-GALAXY_LIB="%s"
-if [ "$GALAXY_LIB" != "None" ]; then
- if [ -n "$PYTHONPATH" ]; then
- PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"
- else
- PYTHONPATH="$GALAXY_LIB"
- fi
- export PYTHONPATH
-fi
-cd %s
-%s
-"""
-
-class SGEJobState( object ):
- def __init__( self ):
- """
- Encapsulates state related to a job that is being run via SGE and
- that we need to monitor.
- """
- self.job_wrapper = None
- self.job_id = None
- self.old_state = None
- self.running = False
- self.job_file = None
- self.ofile = None
- self.efile = None
- self.runner_url = None
-
-class SGEJobRunner( BaseJobRunner ):
- """
- Job runner backed by a finite pool of worker threads. FIFO scheduling
- """
- STOP_SIGNAL = object()
- def __init__( self, app ):
- """Initialize this job runner and start the monitor thread"""
- self.app = app
- self.sa_session = app.model.context
- # 'watched' and 'queue' are both used to keep track of jobs to watch.
- # 'queue' is used to add new watched jobs, and can be called from
- # any thread (usually by the 'queue_job' method). 'watched' must only
- # be modified by the monitor thread, which will move items from 'queue'
- # to 'watched' and then manage the watched jobs.
- self.watched = []
- self.monitor_queue = Queue()
- self.default_cell = self.determine_sge_cell( self.app.config.default_cluster_job_runner )
- self.ds = DRMAA.Session()
- self.ds.init( self.default_cell )
- self.monitor_thread = threading.Thread( target=self.monitor )
- self.monitor_thread.start()
- self.work_queue = Queue()
- self.work_threads = []
- nworkers = app.config.cluster_job_queue_workers
- for i in range( nworkers ):
- worker = threading.Thread( target=self.run_next )
- worker.start()
- self.work_threads.append( worker )
- log.debug( "%d workers ready" % nworkers )
-
- def determine_sge_cell( self, url ):
- """Determine what SGE cell we are using"""
- url_split = url.split("/")
- if url_split[0] == 'sge:':
- return url_split[2]
- # this could happen if sge is started, but is not the default runner
- else:
- return ''
-
- def determine_sge_queue( self, url ):
- """Determine what SGE queue we are submitting to"""
- try:
- return url.split('/')[3] or None
- except:
- return None
-
- def determine_sge_project( self, url ):
- """Determine what SGE project we are submitting to"""
- try:
- return url.split('/')[4] or None
- except:
- return None
-
- def determine_sge_tool_parameters( self, url ):
- """Determine what are the tool's specific parameters"""
- try:
- return url.split('/')[5] or None
- except:
- return None
-
- def run_next( self ):
- """
- Run the next item in the queue (a job waiting to run or finish )
- """
- while 1:
- ( op, obj ) = self.work_queue.get()
- if op is self.STOP_SIGNAL:
- return
- try:
- if op == 'queue':
- self.queue_job( obj )
- elif op == 'finish':
- self.finish_job( obj )
- elif op == 'fail':
- self.fail_job( obj )
- except:
- log.exception( "Uncaught exception %sing job" % op )
-
- def queue_job( self, job_wrapper ):
- """Create SGE script for a job and submit it to the SGE queue"""
-
- try:
- job_wrapper.prepare()
- command_line = self.build_command_line( job_wrapper, include_metadata = True )
- except:
- job_wrapper.fail( "failure preparing job", exception=True )
- log.exception("failure running job %d" % job_wrapper.job_id)
- return
-
- runner_url = job_wrapper.get_job_runner_url()
-
- # This is silly, why would we queue a job with no command line?
- if not command_line:
- job_wrapper.finish( '', '' )
- return
-
- # Check for deletion before we change state
- if job_wrapper.get_state() == model.Job.states.DELETED:
- log.debug( "Job %s deleted by user before it entered the SGE queue" % job_wrapper.job_id )
- job_wrapper.cleanup()
- return
-
- # Change to queued state immediately
- job_wrapper.change_state( model.Job.states.QUEUED )
-
- if self.determine_sge_cell( runner_url ) != self.default_cell:
- # TODO: support multiple cells
- log.warning( "(%s) Using multiple SGE cells is not supported. This job will be submitted to the default cell." % job_wrapper.job_id )
- sge_queue_name = self.determine_sge_queue( runner_url )
- sge_project_name = self.determine_sge_project( runner_url )
- sge_extra_params = self.determine_sge_tool_parameters ( runner_url )
-
- # define job attributes
- ofile = "%s/%s.o" % (self.app.config.cluster_files_directory, job_wrapper.job_id)
- efile = "%s/%s.e" % (self.app.config.cluster_files_directory, job_wrapper.job_id)
- jt = self.ds.createJobTemplate()
- jt.remoteCommand = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job_wrapper.job_id)
- jt.outputPath = ":%s" % ofile
- jt.errorPath = ":%s" % efile
- nativeSpec = []
- if sge_queue_name is not None:
- nativeSpec.append( "-q '%s'" % sge_queue_name )
- if sge_project_name is not None:
- nativeSpec.append( "-P '%s'" % sge_project_name)
- if sge_extra_params is not None:
- nativeSpec.append( sge_extra_params )
- if len(nativeSpec)>0:
- jt.nativeSpecification = ' '.join(nativeSpec)
-
- script = sge_template % (job_wrapper.galaxy_lib_dir, os.path.abspath( job_wrapper.working_directory ), command_line)
-
- fh = file( jt.remoteCommand, "w" )
- fh.write( script )
- fh.close()
- os.chmod( jt.remoteCommand, 0750 )
-
- # job was deleted while we were preparing it
- if job_wrapper.get_state() == model.Job.states.DELETED:
- log.debug( "Job %s deleted by user before it entered the SGE queue" % job_wrapper.job_id )
- self.cleanup( ( ofile, efile, jt.remoteCommand ) )
- job_wrapper.cleanup()
- return
-
- galaxy_job_id = job_wrapper.job_id
- log.debug("(%s) submitting file %s" % ( galaxy_job_id, jt.remoteCommand ) )
- log.debug("(%s) command is: %s" % ( galaxy_job_id, command_line ) )
- # runJob will raise if there's a submit problem
- job_id = self.ds.runJob(jt)
- if sge_queue_name is None:
- log.debug("(%s) queued in default queue as %s" % (galaxy_job_id, job_id) )
- else:
- log.debug("(%s) queued in %s queue as %s" % (galaxy_job_id, sge_queue_name, job_id) )
-
- # store runner information for tracking if Galaxy restarts
- job_wrapper.set_runner( runner_url, job_id )
-
- # Store SGE related state information for job
- sge_job_state = SGEJobState()
- sge_job_state.job_wrapper = job_wrapper
- sge_job_state.job_id = job_id
- sge_job_state.ofile = ofile
- sge_job_state.efile = efile
- sge_job_state.job_file = jt.remoteCommand
- sge_job_state.old_state = 'new'
- sge_job_state.running = False
- sge_job_state.runner_url = runner_url
-
- # delete the job template
- self.ds.deleteJobTemplate( jt )
-
- # Add to our 'queue' of jobs to monitor
- self.monitor_queue.put( sge_job_state )
-
- def monitor( self ):
- """
- Watches jobs currently in the PBS queue and deals with state changes
- (queued to running) and job completion
- """
- while 1:
- # Take any new watched jobs and put them on the monitor list
- try:
- while 1:
- sge_job_state = self.monitor_queue.get_nowait()
- if sge_job_state is self.STOP_SIGNAL:
- # TODO: This is where any cleanup would occur
- self.ds.exit()
- return
- self.watched.append( sge_job_state )
- except Empty:
- pass
- # Iterate over the list of watched jobs and check state
- self.check_watched_items()
- # Sleep a bit before the next state check
- time.sleep( 1 )
-
- def check_watched_items( self ):
- """
- Called by the monitor thread to look at each watched job and deal
- with state changes.
- """
- new_watched = []
- for sge_job_state in self.watched:
- job_id = sge_job_state.job_id
- galaxy_job_id = sge_job_state.job_wrapper.job_id
- old_state = sge_job_state.old_state
- try:
- state = self.ds.getJobProgramStatus( job_id )
- except DRMAA.InvalidJobError:
- # we should only get here if an orphaned job was put into the queue at app startup
- log.debug("(%s/%s) job left SGE queue" % ( galaxy_job_id, job_id ) )
- self.work_queue.put( ( 'finish', sge_job_state ) )
- continue
- except Exception, e:
- # so we don't kill the monitor thread
- log.exception("(%s/%s) Unable to check job status" % ( galaxy_job_id, job_id ) )
- log.warning("(%s/%s) job will now be errored" % ( galaxy_job_id, job_id ) )
- sge_job_state.fail_message = "Cluster could not complete job"
- self.work_queue.put( ( 'fail', sge_job_state ) )
- continue
- if state != old_state:
- log.debug("(%s/%s) state change: %s" % ( galaxy_job_id, job_id, DRMAA_state[state] ) )
- if state == DRMAA.Session.RUNNING and not sge_job_state.running:
- sge_job_state.running = True
- sge_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
- if state in ( DRMAA.Session.DONE, DRMAA.Session.FAILED ):
- self.work_queue.put( ( 'finish', sge_job_state ) )
- continue
- sge_job_state.old_state = state
- new_watched.append( sge_job_state )
- # Replace the watch list with the updated version
- self.watched = new_watched
-
- def finish_job( self, sge_job_state ):
- """
- Get the output/error for a finished job, pass to `job_wrapper.finish`
- and cleanup all the SGE temporary files.
- """
- ofile = sge_job_state.ofile
- efile = sge_job_state.efile
- job_file = sge_job_state.job_file
- # collect the output
- try:
- ofh = file(ofile, "r")
- efh = file(efile, "r")
- stdout = ofh.read( 32768 )
- stderr = efh.read( 32768 )
- except:
- stdout = ''
- stderr = 'Job output not returned from cluster'
- log.debug(stderr)
-
- try:
- sge_job_state.job_wrapper.finish( stdout, stderr )
- except:
- log.exception("Job wrapper finish method failed")
-
- # clean up the sge files
- self.cleanup( ( ofile, efile, job_file ) )
-
- def fail_job( self, sge_job_state ):
- """
- Separated out so we can use the worker threads for it.
- """
- self.stop_job( self.sa_session.query( self.app.model.Job ).get( sge_job_state.job_wrapper.job_id ) )
- sge_job_state.job_wrapper.fail( sge_job_state.fail_message )
- self.cleanup( ( sge_job_state.ofile, sge_job_state.efile, sge_job_state.job_file ) )
-
- def cleanup( self, files ):
- if not asbool( self.app.config.get( 'debug', False ) ):
- for file in files:
- if os.access( file, os.R_OK ):
- os.unlink( file )
-
- def put( self, job_wrapper ):
- """Add a job to the queue (by job identifier)"""
- # Change to queued state before handing to worker thread so the runner won't pick it up again
- job_wrapper.change_state( model.Job.states.QUEUED )
- self.work_queue.put( ( 'queue', job_wrapper ) )
-
- def shutdown( self ):
- """Attempts to gracefully shut down the monitor thread"""
- log.info( "sending stop signal to worker threads" )
- self.monitor_queue.put( self.STOP_SIGNAL )
- for i in range( len( self.work_threads ) ):
- self.work_queue.put( ( self.STOP_SIGNAL, None ) )
- log.info( "sge job runner stopped" )
-
- def stop_job( self, job ):
- """Attempts to delete a job from the SGE queue"""
- try:
- self.ds.control( job.get_job_runner_external_id(), DRMAA.Session.TERMINATE )
- log.debug( "(%s/%s) Removed from SGE queue at user's request" % ( job.get_id(), job.get_job_runner_external_id() ) )
- except DRMAA.InvalidJobError:
- log.debug( "(%s/%s) User killed running job, but it was already dead" % ( job.get_id(), job.get_job_runner_external_id() ) )
-
- def recover( self, job, job_wrapper ):
- """Recovers jobs stuck in the queued/running state when Galaxy started"""
- sge_job_state = SGEJobState()
- sge_job_state.ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job.get_id())
- sge_job_state.efile = "%s/database/pbs/%s.e" % (os.getcwd(), job.get_id())
- sge_job_state.job_file = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job.get_id())
- sge_job_state.job_id = str( job.get_job_runner_external_id() )
- sge_job_state.runner_url = job_wrapper.get_job_runner_url()
- job_wrapper.command_line = job.get_command_line()
- sge_job_state.job_wrapper = job_wrapper
- if job.get_state() == model.Job.states.RUNNING:
- log.debug( "(%s/%s) is still in running state, adding to the SGE queue" % ( job.get_id(), job.get_job_runner_external_id() ) )
- sge_job_state.old_state = DRMAA.Session.RUNNING
- sge_job_state.running = True
- self.monitor_queue.put( sge_job_state )
- elif job.get_state() == model.Job.states.QUEUED:
- log.debug( "(%s/%s) is still in SGE queued state, adding to the SGE queue" % ( job.get_id(), job.get_job_runner_external_id() ) )
- sge_job_state.old_state = DRMAA.Session.QUEUED_ACTIVE
- sge_job_state.running = False
- self.monitor_queue.put( sge_job_state )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
10 years, 2 months
commit/galaxy-central: greg: Yet more shed_util refactoring.
by Bitbucket
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/c7bc628577af/
changeset: c7bc628577af
user: greg
date: 2012-11-16 19:36:36
summary: Yet more shed_util refactoring.
affected #: 2 files
diff -r 9e1d821b5c12f56bbfe1923ea1092608054c9d08 -r c7bc628577af7f57b5511b2cc8e1423d5faf31df lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -784,24 +784,6 @@
# Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
reset_tool_data_tables( app )
return repository_tools_tups
-def handle_existing_tool_dependencies_that_changed_in_update( app, repository, original_dependency_dict, new_dependency_dict ):
- """
- This method is called when a Galaxy admin is getting updates for an installed tool shed repository in order to cover the case where an
- existing tool dependency was changed (e.g., the version of the dependency was changed) but the tool version for which it is a dependency
- was not changed. In this case, we only want to determine if any of the dependency information defined in original_dependency_dict was
- changed in new_dependency_dict. We don't care if new dependencies were added in new_dependency_dict since they will just be treated as
- missing dependencies for the tool.
- """
- updated_tool_dependency_names = []
- deleted_tool_dependency_names = []
- for original_dependency_key, original_dependency_val_dict in original_dependency_dict.items():
- if original_dependency_key not in new_dependency_dict:
- updated_tool_dependency = update_existing_tool_dependency( app, repository, original_dependency_val_dict, new_dependency_dict )
- if updated_tool_dependency:
- updated_tool_dependency_names.append( updated_tool_dependency.name )
- else:
- deleted_tool_dependency_names.append( original_dependency_val_dict[ 'name' ] )
- return updated_tool_dependency_names, deleted_tool_dependency_names
def handle_missing_index_file( app, tool_path, sample_files, repository_tools_tups, sample_files_copied ):
"""
Inspect each tool to see if it has any input parameters that are dynamically generated select lists that depend on a .loc file.
@@ -1136,21 +1118,6 @@
trans.sa_session.add( tool_dependency )
trans.sa_session.flush()
return removed, error_message
-def remove_tool_dependency_installation_directory( dependency_install_dir ):
- if os.path.exists( dependency_install_dir ):
- try:
- shutil.rmtree( dependency_install_dir )
- removed = True
- error_message = ''
- log.debug( "Removed tool dependency installation directory: %s" % str( dependency_install_dir ) )
- except Exception, e:
- removed = False
- error_message = "Error removing tool dependency installation directory %s: %s" % ( str( dependency_install_dir ), str( e ) )
- log.debug( error_message )
- else:
- removed = True
- error_message = ''
- return removed, error_message
def to_html_str( text ):
"""Translates the characters in text to an html string"""
translated = []
@@ -1183,65 +1150,6 @@
else:
translated_string = ''
return translated_string
-def update_existing_tool_dependency( app, repository, original_dependency_dict, new_dependencies_dict ):
- """
- Update an existing tool dependency whose definition was updated in a change set pulled by a Galaxy administrator when getting updates
- to an installed tool shed repository. The original_dependency_dict is a single tool dependency definition, an example of which is::
-
- {"name": "bwa",
- "readme": "\\nCompiling BWA requires zlib and libpthread to be present on your system.\\n ",
- "type": "package",
- "version": "0.6.2"}
-
- The new_dependencies_dict is the dictionary generated by the generate_tool_dependency_metadata method.
- """
- new_tool_dependency = None
- original_name = original_dependency_dict[ 'name' ]
- original_type = original_dependency_dict[ 'type' ]
- original_version = original_dependency_dict[ 'version' ]
- # Locate the appropriate tool_dependency associated with the repository.
- tool_dependency = None
- for tool_dependency in repository.tool_dependencies:
- if tool_dependency.name == original_name and tool_dependency.type == original_type and tool_dependency.version == original_version:
- break
- if tool_dependency and tool_dependency.can_update:
- dependency_install_dir = tool_dependency.installation_directory( app )
- removed_from_disk, error_message = remove_tool_dependency_installation_directory( dependency_install_dir )
- if removed_from_disk:
- sa_session = app.model.context.current
- new_dependency_name = None
- new_dependency_type = None
- new_dependency_version = None
- for new_dependency_key, new_dependency_val_dict in new_dependencies_dict.items():
- # Match on name only, hopefully this will be enough!
- if original_name == new_dependency_val_dict[ 'name' ]:
- new_dependency_name = new_dependency_val_dict[ 'name' ]
- new_dependency_type = new_dependency_val_dict[ 'type' ]
- new_dependency_version = new_dependency_val_dict[ 'version' ]
- break
- if new_dependency_name and new_dependency_type and new_dependency_version:
- # Update all attributes of the tool_dependency record in the database.
- log.debug( "Updating tool dependency '%s' with type '%s' and version '%s' to have new type '%s' and version '%s'." % \
- ( str( tool_dependency.name ),
- str( tool_dependency.type ),
- str( tool_dependency.version ),
- str( new_dependency_type ),
- str( new_dependency_version ) ) )
- tool_dependency.type = new_dependency_type
- tool_dependency.version = new_dependency_version
- tool_dependency.status = app.model.ToolDependency.installation_status.UNINSTALLED
- tool_dependency.error_message = None
- sa_session.add( tool_dependency )
- sa_session.flush()
- new_tool_dependency = tool_dependency
- else:
- # We have no new tool dependency definition based on a matching dependency name, so remove the existing tool dependency record
- # from the database.
- log.debug( "Deleting tool dependency with name '%s', type '%s' and version '%s' from the database since it is no longer defined." % \
- ( str( tool_dependency.name ), str( tool_dependency.type ), str( tool_dependency.version ) ) )
- sa_session.delete( tool_dependency )
- sa_session.flush()
- return new_tool_dependency
def update_in_shed_tool_config( app, repository ):
# A tool shed repository is being updated so change the shed_tool_conf file. Parse the config file to generate the entire list
# of config_elems instead of using the in-memory list.
diff -r 9e1d821b5c12f56bbfe1923ea1092608054c9d08 -r c7bc628577af7f57b5511b2cc8e1423d5faf31df lib/galaxy/util/shed_util_common.py
--- a/lib/galaxy/util/shed_util_common.py
+++ b/lib/galaxy/util/shed_util_common.py
@@ -884,6 +884,24 @@
relative_path_to_file.startswith( os.path.join( shed_config_dict.get( 'tool_path' ), relative_install_dir ) ):
relative_path_to_file = relative_path_to_file[ len( shed_config_dict.get( 'tool_path' ) ) + 1: ]
return relative_path_to_file
+def handle_existing_tool_dependencies_that_changed_in_update( app, repository, original_dependency_dict, new_dependency_dict ):
+ """
+ This method is called when a Galaxy admin is getting updates for an installed tool shed repository in order to cover the case where an
+ existing tool dependency was changed (e.g., the version of the dependency was changed) but the tool version for which it is a dependency
+ was not changed. In this case, we only want to determine if any of the dependency information defined in original_dependency_dict was
+ changed in new_dependency_dict. We don't care if new dependencies were added in new_dependency_dict since they will just be treated as
+ missing dependencies for the tool.
+ """
+ updated_tool_dependency_names = []
+ deleted_tool_dependency_names = []
+ for original_dependency_key, original_dependency_val_dict in original_dependency_dict.items():
+ if original_dependency_key not in new_dependency_dict:
+ updated_tool_dependency = update_existing_tool_dependency( app, repository, original_dependency_val_dict, new_dependency_dict )
+ if updated_tool_dependency:
+ updated_tool_dependency_names.append( updated_tool_dependency.name )
+ else:
+ deleted_tool_dependency_names.append( original_dependency_val_dict[ 'name' ] )
+ return updated_tool_dependency_names, deleted_tool_dependency_names
def handle_sample_files_and_load_tool_from_disk( trans, repo_files_dir, tool_config_filepath, work_dir ):
# Copy all sample files from disk to a temporary directory since the sample files may be in multiple directories.
message = ''
@@ -978,6 +996,21 @@
shutil.rmtree( dir )
except:
pass
+def remove_tool_dependency_installation_directory( dependency_install_dir ):
+ if os.path.exists( dependency_install_dir ):
+ try:
+ shutil.rmtree( dependency_install_dir )
+ removed = True
+ error_message = ''
+ log.debug( "Removed tool dependency installation directory: %s" % str( dependency_install_dir ) )
+ except Exception, e:
+ removed = False
+ error_message = "Error removing tool dependency installation directory %s: %s" % ( str( dependency_install_dir ), str( e ) )
+ log.debug( error_message )
+ else:
+ removed = True
+ error_message = ''
+ return removed, error_message
def reset_all_metadata_on_installed_repository( trans, id ):
"""Reset all metadata on a single tool shed repository installed into a Galaxy instance."""
repository = get_installed_tool_shed_repository( trans, id )
@@ -1198,6 +1231,65 @@
except:
file_name = fpath
return file_name
+def update_existing_tool_dependency( app, repository, original_dependency_dict, new_dependencies_dict ):
+ """
+ Update an existing tool dependency whose definition was updated in a change set pulled by a Galaxy administrator when getting updates
+ to an installed tool shed repository. The original_dependency_dict is a single tool dependency definition, an example of which is::
+
+ {"name": "bwa",
+ "readme": "\\nCompiling BWA requires zlib and libpthread to be present on your system.\\n ",
+ "type": "package",
+ "version": "0.6.2"}
+
+ The new_dependencies_dict is the dictionary generated by the generate_tool_dependency_metadata method.
+ """
+ new_tool_dependency = None
+ original_name = original_dependency_dict[ 'name' ]
+ original_type = original_dependency_dict[ 'type' ]
+ original_version = original_dependency_dict[ 'version' ]
+ # Locate the appropriate tool_dependency associated with the repository.
+ tool_dependency = None
+ for tool_dependency in repository.tool_dependencies:
+ if tool_dependency.name == original_name and tool_dependency.type == original_type and tool_dependency.version == original_version:
+ break
+ if tool_dependency and tool_dependency.can_update:
+ dependency_install_dir = tool_dependency.installation_directory( app )
+ removed_from_disk, error_message = remove_tool_dependency_installation_directory( dependency_install_dir )
+ if removed_from_disk:
+ sa_session = app.model.context.current
+ new_dependency_name = None
+ new_dependency_type = None
+ new_dependency_version = None
+ for new_dependency_key, new_dependency_val_dict in new_dependencies_dict.items():
+ # Match on name only, hopefully this will be enough!
+ if original_name == new_dependency_val_dict[ 'name' ]:
+ new_dependency_name = new_dependency_val_dict[ 'name' ]
+ new_dependency_type = new_dependency_val_dict[ 'type' ]
+ new_dependency_version = new_dependency_val_dict[ 'version' ]
+ break
+ if new_dependency_name and new_dependency_type and new_dependency_version:
+ # Update all attributes of the tool_dependency record in the database.
+ log.debug( "Updating tool dependency '%s' with type '%s' and version '%s' to have new type '%s' and version '%s'." % \
+ ( str( tool_dependency.name ),
+ str( tool_dependency.type ),
+ str( tool_dependency.version ),
+ str( new_dependency_type ),
+ str( new_dependency_version ) ) )
+ tool_dependency.type = new_dependency_type
+ tool_dependency.version = new_dependency_version
+ tool_dependency.status = app.model.ToolDependency.installation_status.UNINSTALLED
+ tool_dependency.error_message = None
+ sa_session.add( tool_dependency )
+ sa_session.flush()
+ new_tool_dependency = tool_dependency
+ else:
+ # We have no new tool dependency definition based on a matching dependency name, so remove the existing tool dependency record
+ # from the database.
+ log.debug( "Deleting tool dependency with name '%s', type '%s' and version '%s' from the database since it is no longer defined." % \
+ ( str( tool_dependency.name ), str( tool_dependency.type ), str( tool_dependency.version ) ) )
+ sa_session.delete( tool_dependency )
+ sa_session.flush()
+ return new_tool_dependency
def update_repository( repo, ctx_rev=None ):
"""
Update the cloned repository to changeset_revision. It is critical that the installed repository is updated to the desired
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
10 years, 2 months