galaxy-dev
23 Nov '09
details: http://www.bx.psu.edu/hg/galaxy/rev/555afd0bf457
changeset: 3097:555afd0bf457
user: Enis Afgan <afgane@gmail.com>
date: Tue Nov 17 16:14:54 2009 -0500
description:
Moved cloud control flag in universe.ini file to appropriate location.
diffstat:
universe_wsgi.ini.sample | 10 +++++-----
1 files changed, 5 insertions(+), 5 deletions(-)
diffs (27 lines):
diff -r 5e2fd6248f71 -r 555afd0bf457 universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample Tue Nov 17 15:51:39 2009 -0500
+++ b/universe_wsgi.ini.sample Tue Nov 17 16:14:54 2009 -0500
@@ -18,11 +18,6 @@
# [filter:gzip]
# use = egg:Paste#gzip
-# ---- Cloud Management --------------------------------------------------------
-
-# Uncomment following line to enable cloud management mode (only leaves the Cloud, Help and User tabs on)
-# cloud_controller_instance = True
-
# ---- Galaxy Web Interface -------------------------------------------------
[app:main]
@@ -197,6 +192,11 @@
# Should default dataset access permissions be private for new users; default is False (datasets are public)
new_user_dataset_access_role_default_private = False
+# ---- Cloud Management --------------------------------------------------------
+
+# Uncomment following line to enable cloud management mode (only leaves the Cloud, Help and User tabs on)
+# cloud_controller_instance = True
+
# ---- Job Execution --------------------------------------------------------
# Number of concurrent jobs to run (local job runner)
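For reference, here is a minimal standalone sketch of reading that flag from an [app:main] section. It is illustrative only: Galaxy's actual startup parses universe_wsgi.ini through Paste Deploy rather than ConfigParser, and the option name below simply mirrors the sample config in the diff above.

import configparser

# Illustrative only: Galaxy loads universe_wsgi.ini through Paste Deploy at startup.
# This sketch just shows where the option now lives ([app:main]) and that leaving
# the line commented out keeps cloud management mode off.
SAMPLE_CONFIG = """
[app:main]
# Uncomment following line to enable cloud management mode
# (only leaves the Cloud, Help and User tabs on)
cloud_controller_instance = True
"""

parser = configparser.ConfigParser()
parser.read_string(SAMPLE_CONFIG)

# getboolean() falls back to False when the option is absent or commented out.
cloud_mode = parser.getboolean("app:main", "cloud_controller_instance", fallback=False)
print("cloud management mode enabled:", cloud_mode)  # True here; False if the line stays commented

With the line left commented out, as shipped in the sample, the instance keeps the full web interface; uncommenting it restricts the UI to the Cloud, Help and User tabs, per the comment in the sample file.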
23 Nov '09
details: http://www.bx.psu.edu/hg/galaxy/rev/5e2fd6248f71
changeset: 3096:5e2fd6248f71
user: Enis Afgan <afgane@gmail.com>
date: Tue Nov 17 15:51:39 2009 -0500
description:
Completed port to SQLalchemy 0.5. Also, fixed couple of bugs, cleaned up cloud provider code, tested w/ EC2.
diffstat:
lib/galaxy/cloud/providers/ec2.py | 42 ++++++++++----------
lib/galaxy/cloud/providers/eucalyptus.py | 44 +++++++++++-----------
lib/galaxy/web/controllers/cloud.py | 14 ++++--
templates/cloud/configure_cloud.mako | 24 ++---------
4 files changed, 57 insertions(+), 67 deletions(-)
diffs (414 lines):
diff -r 39502dd3fd23 -r 5e2fd6248f71 lib/galaxy/cloud/providers/ec2.py
--- a/lib/galaxy/cloud/providers/ec2.py Mon Nov 16 20:37:47 2009 -0500
+++ b/lib/galaxy/cloud/providers/ec2.py Tue Nov 17 15:51:39 2009 -0500
@@ -109,15 +109,15 @@
return
try:
if uci_state==uci_states.NEW:
- self.createUCI( uci_wrapper )
+ self.create_uci( uci_wrapper )
elif uci_state==uci_states.DELETING:
- self.deleteUCI( uci_wrapper )
+ self.delete_uci( uci_wrapper )
elif uci_state==uci_states.SUBMITTED:
- self.startUCI( uci_wrapper )
+ self.start_uci( uci_wrapper )
elif uci_state==uci_states.SHUTTING_DOWN:
- self.stopUCI( uci_wrapper )
+ self.stop_uci( uci_wrapper )
elif uci_state==uci_states.SNAPSHOT:
- self.snapshotUCI( uci_wrapper )
+ self.snapshot_uci( uci_wrapper )
except:
log.exception( "Uncaught exception executing cloud request." )
cnt += 1
@@ -223,7 +223,7 @@
uci_wrapper.set_error( err+". Contact site administrator to ensure needed machine image is registered.", True )
return None
- def createUCI( self, uci_wrapper ):
+ def create_uci( self, uci_wrapper ):
"""
Creates User Configured Instance (UCI). Essentially, creates storage volume on cloud provider
and registers relevant information in Galaxy database.
@@ -276,7 +276,7 @@
uci_wrapper.set_store_status( vol.id, uci_states.ERROR )
uci_wrapper.set_error( err, True )
- def deleteUCI( self, uci_wrapper ):
+ def delete_uci( self, uci_wrapper ):
"""
Deletes UCI. NOTE that this implies deletion of any and all data associated
with this UCI from the cloud. All data will be deleted.
@@ -315,7 +315,7 @@
log.error( err )
uci_wrapper.set_error( err, True )
- def snapshotUCI( self, uci_wrapper ):
+ def snapshot_uci( self, uci_wrapper ):
"""
Creates snapshot of all storage volumes associated with this UCI.
"""
@@ -346,11 +346,11 @@
uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
- def addStorageToUCI( self, name ):
+ def add_storage_to_uci( self, name ):
""" Adds more storage to specified UCI
TODO"""
- def dummyStartUCI( self, uci_wrapper ):
+ def dummy_start_uci( self, uci_wrapper ):
uci = uci_wrapper.get_uci()
log.debug( "Would be starting instance '%s'" % uci.name )
@@ -359,7 +359,7 @@
# time.sleep(20)
# log.debug( "Woke up! (%s)" % uci.name )
- def startUCI( self, uci_wrapper ):
+ def start_uci( self, uci_wrapper ):
"""
Starts instance(s) of given UCI on the cloud.
"""
@@ -456,7 +456,7 @@
else:
log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
- def stopUCI( self, uci_wrapper):
+ def stop_uci( self, uci_wrapper):
"""
Stops all of cloud instances associated with given UCI.
"""
@@ -467,7 +467,7 @@
# Process list of instances and remove any references to empty instance id's
for i in il:
if i is None:
- l.remove( i )
+ il.remove( i )
log.debug( 'List of instances being terminated: %s' % il )
rl = conn.get_all_instances( il ) # Reservation list associated with given instances
@@ -554,7 +554,7 @@
for inst in instances:
if self.type == inst.uci.credentials.provider.type:
log.debug( "[%s] Running general status update on instance '%s'" % ( inst.uci.credentials.provider.type, inst.instance_id ) )
- self.updateInstance( inst )
+ self.update_instance( inst )
# Update storage volume(s)
stores = self.sa_session.query( model.CloudStore ) \
@@ -565,7 +565,7 @@
for store in stores:
if self.type == store.uci.credentials.provider.type: # and store.volume_id != None:
log.debug( "[%s] Running general status update on store with local database ID: '%s'" % ( store.uci.credentials.provider.type, store.id ) )
- self.updateStore( store )
+ self.update_store( store )
# else:
# log.error( "[%s] There exists an entry for UCI (%s) storage volume without an ID. Storage volume might have been created with "
# "cloud provider though. Manual check is recommended." % ( store.uci.credentials.provider.type, store.uci.name ) )
@@ -579,7 +579,7 @@
# Update pending snapshots or delete ones marked for deletion
snapshots = self.sa_session.query( model.CloudSnapshot ) \
- .filter_by( status=snapshot_status.PENDING, status=snapshot_status.DELETE ) \
+ .filter( or_( model.CloudSnapshot.table.c.status == snapshot_status.PENDING, model.CloudSnapshot.table.c.status == snapshot_status.DELETE ) ) \
.all()
for snapshot in snapshots:
if self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.PENDING:
@@ -603,9 +603,9 @@
td = datetime.utcnow() - z_inst.update_time
if td.seconds > 180: # if instance has been in SUBMITTED state for more than 3 minutes
log.debug( "[%s] Running zombie repair update on instance with DB id '%s'" % ( z_inst.uci.credentials.provider.type, z_inst.id ) )
- self.processZombie( z_inst )
+ self.process_zombie( z_inst )
- def updateInstance( self, inst ):
+ def update_instance( self, inst ):
# Get credentials associated wit this instance
uci_id = inst.uci_id
@@ -678,7 +678,7 @@
self.sa_session.flush()
return None
- def updateStore( self, store ):
+ def update_store( self, store ):
# Get credentials associated wit this store
uci_id = store.uci_id
uci = self.sa_session.query( model.UCI ).get( uci_id )
@@ -748,7 +748,7 @@
self.sa_session.add( store )
self.sa_session.flush()
- def updateSnapshot( self, snapshot ):
+ def update_snapshot( self, snapshot ):
# Get credentials associated wit this store
uci_id = snapshot.uci_id
uci = self.sa_session.query( model.UCI ).get( uci_id )
@@ -839,7 +839,7 @@
self.sa_session.add( snapshot )
self.sa_session.flush()
- def processZombie( self, inst ):
+ def process_zombie( self, inst ):
"""
Attempt at discovering if starting an instance was successful but local database was not updated
accordingly or if something else failed and instance was never started. Currently, no automatic
diff -r 39502dd3fd23 -r 5e2fd6248f71 lib/galaxy/cloud/providers/eucalyptus.py
--- a/lib/galaxy/cloud/providers/eucalyptus.py Mon Nov 16 20:37:47 2009 -0500
+++ b/lib/galaxy/cloud/providers/eucalyptus.py Tue Nov 17 15:51:39 2009 -0500
@@ -110,16 +110,16 @@
return
try:
if uci_state==uci_states.NEW:
- self.createUCI( uci_wrapper )
+ self.create_uci( uci_wrapper )
elif uci_state==uci_states.DELETING:
- self.deleteUCI( uci_wrapper )
+ self.delete_uci( uci_wrapper )
elif uci_state==uci_states.SUBMITTED:
- self.startUCI( uci_wrapper )
- #self.dummyStartUCI( uci_wrapper )
+ self.start_uci( uci_wrapper )
+ #self.dummy_start_uci( uci_wrapper )
elif uci_state==uci_states.SHUTTING_DOWN:
- self.stopUCI( uci_wrapper )
+ self.stop_uci( uci_wrapper )
elif uci_state==uci_states.SNAPSHOT:
- self.snapshotUCI( uci_wrapper )
+ self.snapshot_uci( uci_wrapper )
except:
log.exception( "Uncaught exception executing cloud request." )
cnt += 1
@@ -230,7 +230,7 @@
uci_wrapper.set_error( err+". Contact site administrator to ensure needed machine image is registered.", True )
return None
- def createUCI( self, uci_wrapper ):
+ def create_uci( self, uci_wrapper ):
"""
Create User Configured Instance (UCI) - i.e., create storage volume on cloud provider
and register relevant information in local Galaxy database.
@@ -276,7 +276,7 @@
uci_wrapper.set_store_status( vol.id, uci_states.ERROR )
uci_wrapper.set_error( err, True )
- def deleteUCI( self, uci_wrapper ):
+ def delete_uci( self, uci_wrapper ):
"""
Delete UCI - i.e., delete all storage volumes associated with this UCI.
NOTE that this implies deletion of any and all data associated
@@ -318,7 +318,7 @@
log.error( err )
uci_wrapper.set_error( err, True )
- def snapshotUCI( self, uci_wrapper ):
+ def snapshot_uci( self, uci_wrapper ):
"""
Initiate creation of a snapshot by cloud provider for all storage volumes
associated with this UCI.
@@ -361,10 +361,10 @@
# Feel free to resent state of this instance and use it normally.", True )
- def addStorageToUCI( self, uci_wrapper ):
+ def add_storage_to_uci( self, uci_wrapper ):
""" Adds more storage to specified UCI """
- def dummyStartUCI( self, uci_wrapper ):
+ def dummy_start_uci( self, uci_wrapper ):
uci = uci_wrapper.get_uci()
log.debug( "Would be starting instance '%s'" % uci.name )
@@ -374,7 +374,7 @@
time.sleep(10)
log.debug( "Woke up! (%s)" % uci.name )
- def startUCI( self, uci_wrapper ):
+ def start_uci( self, uci_wrapper ):
"""
Start instance(s) of given UCI on the cloud.
"""
@@ -443,7 +443,7 @@
else:
log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
- def stopUCI( self, uci_wrapper):
+ def stop_uci( self, uci_wrapper):
"""
Stop all cloud instances associated with given UCI.
"""
@@ -454,7 +454,7 @@
# Process list of instances and remove any references to empty instance id's
for i in il:
if i is None:
- l.remove( i )
+ il.remove( i )
log.debug( 'List of instances being terminated: %s' % il )
rl = conn.get_all_instances( il ) # Reservation list associated with given instances
@@ -543,7 +543,7 @@
for inst in instances:
if self.type == inst.uci.credentials.provider.type:
log.debug( "[%s] Running general status update on instance '%s'" % ( inst.uci.credentials.provider.type, inst.instance_id ) )
- self.updateInstance( inst )
+ self.update_instance( inst )
# Update storage volume(s)
stores = self.sa_session.query( model.CloudStore ) \
@@ -554,11 +554,11 @@
for store in stores:
if self.type == store.uci.credentials.provider.type: # and store.volume_id != None:
log.debug( "[%s] Running general status update on store with local database ID: '%s'" % ( store.uci.credentials.provider.type, store.id ) )
- self.updateStore( store )
+ self.update_store( store )
# Update pending snapshots or delete ones marked for deletion
snapshots = self.sa_session.query( model.CloudSnapshot ) \
- .filter_by( status=snapshot_status.PENDING, status=snapshot_status.DELETE ) \
+ .filter( or_( model.CloudSnapshot.table.c.status == snapshot_status.PENDING, model.CloudSnapshot.table.c.status == snapshot_status.DELETE ) ) \
.all()
for snapshot in snapshots:
if self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.PENDING:
@@ -583,9 +583,9 @@
# log.debug( "z_inst.id: %s, time delta is %s sec" % ( z_inst.id, td.seconds ) )
if td.seconds > 180: # if instance has been in SUBMITTED state for more than 3 minutes
log.debug( "[%s](td=%s) Running zombie repair update on instance with DB id '%s'" % ( z_inst.uci.credentials.provider.type, td.seconds, z_inst.id ) )
- self.processZombie( z_inst )
+ self.process_zombie( z_inst )
- def updateInstance( self, inst ):
+ def update_instance( self, inst ):
"""
Update information in local database for given instance as it is obtained from cloud provider.
Along with updating information about given instance, information about the UCI controlling
@@ -662,7 +662,7 @@
self.sa_session.flush()
return None
- def updateStore( self, store ):
+ def update_store( self, store ):
"""
Update information in local database for given storage volume as it is obtained from cloud provider.
Along with updating information about given storage volume, information about the UCI controlling
@@ -756,7 +756,7 @@
self.sa_session.add( store )
self.sa_session.flush()
- def updateSnapshot( self, snapshot ):
+ def update_snapshot( self, snapshot ):
"""
Update information in local database for given snapshot as it is obtained from cloud provider.
Along with updating information about given snapshot, information about the UCI controlling
@@ -855,7 +855,7 @@
self.sa_session.add( snapshot )
self.sa_session.flush()
- def processZombie( self, inst ):
+ def process_zombie( self, inst ):
"""
Attempt at discovering if starting a cloud instance was successful but local database was not updated
accordingly or if something else failed and instance was never started. Currently, no automatic
diff -r 39502dd3fd23 -r 5e2fd6248f71 lib/galaxy/web/controllers/cloud.py
--- a/lib/galaxy/web/controllers/cloud.py Mon Nov 16 20:37:47 2009 -0500
+++ b/lib/galaxy/web/controllers/cloud.py Tue Nov 17 15:51:39 2009 -0500
@@ -515,7 +515,7 @@
error['provider_error'] = "You must select cloud provider type for this machine image."
elif image_id=='' or len( image_id ) > 255:
error['id_error'] = "Image ID must be between 1 and 255 characters long."
- elif trans.sa_session.query( model.CloudUserCredentials ) \
+ elif trans.sa_session.query( model.CloudImage ) \
.filter_by( deleted=False ) \
.filter( model.CloudImage.table.c.image_id == image_id ) \
.first():
@@ -558,7 +558,7 @@
@web.expose
@web.require_login( "use Galaxy cloud" )
def list_machine_images( self, trans ):
- images = trans.sa_session.query( model.CloudImage ).filter( model.CloudImage.table.c.deleted != True ).all()
+ images = trans.sa_session.query( model.CloudImage ).filter_by( deleted=False ).all()
return trans.fill_template( '/cloud/list_images.mako', images=images )
@web.expose
@@ -1028,15 +1028,19 @@
@web.json
def json_update( self, trans ):
user = trans.get_user()
- UCIs = trans.sa_session.query( model.UCI ).filter_by( user=user ).filter( model.UCI.table.c.deleted != True ).all()
+ UCIs = trans.sa_session.query( model.UCI ).filter_by( user=user, deleted=False ).all()
insd = {} # instance name-state dict
for uci in UCIs:
dict = {}
dict['id'] = uci.id
dict['state'] = uci.state
+ if uci.error != None:
+ dict['error'] = str( uci.error )
+ else:
+ dict['error'] = None
if uci.launch_time != None:
- dict['launch_time'] = str(uci.launch_time)
- dict['time_ago'] = str(date.distance_of_time_in_words(uci.launch_time, date.datetime.utcnow() ) )
+ dict['launch_time'] = str( uci.launch_time )
+ dict['time_ago'] = str( date.distance_of_time_in_words( uci.launch_time, date.datetime.utcnow() ) )
else:
dict['launch_time'] = None
dict['time_ago'] = None
diff -r 39502dd3fd23 -r 5e2fd6248f71 templates/cloud/configure_cloud.mako
--- a/templates/cloud/configure_cloud.mako Mon Nov 16 20:37:47 2009 -0500
+++ b/templates/cloud/configure_cloud.mako Tue Nov 17 15:51:39 2009 -0500
@@ -36,10 +36,11 @@
old_state = $(elem + "-state").text();
prev_old_state = trim19( $(elem + "-state-p").text() );
new_state = data[i].state;
+ error_msg = data[i].error;
//console.log( "old_state[%d] = %s", i, old_state );
//console.log( "prev_old_state[%d] = %s", i, prev_old_state );
//console.log( "new_state[%d] = %s", i, new_state );
- //console.log( trim19(prev_old_state) );
+ //console.log( "error_msg[%d] = %s", i, error_msg );
if ( ( old_state=='pending' && new_state=='running' ) || ( old_state=='shutting-down' && new_state=='available' ) || \
( old_state=='running' && new_state=='available' ) || ( old_state=='running' && new_state=='error' ) || \
( old_state=='pending' && new_state=='available' ) || ( old_state=='submitted' && new_state=='available' ) || \
@@ -47,21 +48,11 @@
var url = "${h.url_for( controller='cloud', action='list')}";
location.replace( url );
}
- else if ( ( old_state=='running' && new_state=='error' ) || ( old_state=='pending' && new_state=='error' ) || \
- ( old_state=='submitted' && new_state=='error' ) || ( old_state=='submittedUCI' && new_state=='error' ) || \
- ( old_state=='shutting-down' && new_state=='error' ) || ( prev_old_state.match('newUCI') && new_state=='error' ) || \
- ( prev_old_state.match('new') && new_state=='error' ) || ( prev_old_state.match('deletingUCI') && new_state=='error' ) ) {
- // TODO: Following clause causes constant page refresh for an exception thrown as a result of instance not starting correctly - need alternative method!
- //( prev_old_state.match('available') && new_state=='error' ) || ( prev_old_state.match('deleting') && new_state=='error' ) \
-
+ else if ( ( ( old_state != 'error' && old_state != '' ) && new_state == 'error' ) || ( !prev_old_state.match('error') && new_state == 'error' ) ) {
var url = "${h.url_for( controller='cloud', action='list')}";
location.replace( url );
}
- if ( prev_old_state.match('deletingUCI') || prev_old_state.match('deleting') ) {
- setTimeout("update_state()", 3000);
- }
-
if ( new_state=='shutting-down' || new_state=='shutting-downUCI' ) {
$(elem + "-link").text( "" );
}
@@ -90,15 +81,10 @@
// Update 'state' and 'time alive' fields
$(elem + "-state").text( data[i].state );
- if ( ( prev_old_state.match('newUCI') && new_state=='new' ) || ( prev_old_state.match('newUCI') && new_state=='available' ) || \
- ( prev_old_state.match('newUCI') && new_state=='creating' ) || ( prev_old_state.match('new') && new_state=='creating' ) || \
- ( prev_old_state.match('new') && new_state=='available' ) || \
- ( prev_old_state.match('deletingUCI') && new_state=='deleted' ) || ( prev_old_state.match('deleting') && new_state=='deleted' ) || \
- ( prev_old_state.match('available') && new_state=='error' ) || ( prev_old_state.match('deleting') && new_state=='error' ) ) {
- // TODO: on state change from available->error and deleting->error page should be refreshed but that causes problems with
- // constant refreshings depending on what error message is so at least do it here...
+ if ( new_state != 'error' ) { // Because 'error' state is handled as a JS link, don't include it in update
$(elem + "-state-p").text( data[i].state );
}
+
if (data[i].launch_time) {
$(elem + "-launch_time").text( data[i].launch_time.substring(0, 16 ) + " UTC (" + data[i].time_ago + ")" );
}
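One detail worth calling out in the changeset above: the old .filter_by( status=snapshot_status.PENDING, status=snapshot_status.DELETE ) call repeats a keyword argument, which Python rejects as a syntax error, and a single keyword could not express "status is PENDING or DELETE" anyway. The port replaces it with an explicit or_() filter. The sketch below reproduces that query pattern in isolation; CloudSnapshot and snapshot_status are simplified stand-ins for the real Galaxy models, and it targets a current SQLAlchemy release rather than 0.5.

from sqlalchemy import Column, Integer, String, create_engine, or_
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

# Simplified stand-in for Galaxy's model.CloudSnapshot; only the status column matters here.
class CloudSnapshot(Base):
    __tablename__ = "cloud_snapshot"
    id = Column(Integer, primary_key=True)
    status = Column(String(32))

# Stand-in for the snapshot_status values used by the cloud providers.
class snapshot_status:
    PENDING = "pending"
    DELETE = "delete"
    COMPLETED = "completed"

engine = create_engine("sqlite://")              # throwaway in-memory database
Base.metadata.create_all(engine)
sa_session = sessionmaker(bind=engine)()

sa_session.add_all([CloudSnapshot(status=s) for s in ("pending", "delete", "completed")])
sa_session.commit()

# Equivalent of the corrected query: select snapshots that are either PENDING or DELETE.
snapshots = (
    sa_session.query(CloudSnapshot)
    .filter(or_(CloudSnapshot.status == snapshot_status.PENDING,
                CloudSnapshot.status == snapshot_status.DELETE))
    .all()
)
print(sorted(s.status for s in snapshots))       # ['delete', 'pending']

The same changeset also renames the provider methods from camelCase to the snake_case used elsewhere in Galaxy and fixes the list-removal bug where l.remove( i ) should have been il.remove( i ) in both providers' stop_uci().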
details: http://www.bx.psu.edu/hg/galaxy/rev/37406d8ad116
changeset: 3098:37406d8ad116
user: Enis Afgan <afgane@gmail.com>
date: Tue Nov 17 16:16:26 2009 -0500
description:
merge
diffstat:
datatypes_conf.xml.sample | 429 +++++++------
dist-eggs.ini | 9 +-
eggs.ini | 13 +-
lib/galaxy/datatypes/binary.py | 156 +++++
lib/galaxy/datatypes/data.py | 93 +-
lib/galaxy/datatypes/genetics.py | 180 ++---
lib/galaxy/datatypes/images.py | 120 ---
lib/galaxy/datatypes/metadata.py | 4 +-
lib/galaxy/datatypes/registry.py | 16 +-
lib/galaxy/datatypes/tracks.py | 9 +-
lib/galaxy/jobs/__init__.py | 22 +-
lib/galaxy/jobs/runners/local.py | 4 +-
lib/galaxy/jobs/runners/pbs.py | 17 +-
lib/galaxy/tools/actions/metadata.py | 1 +
lib/galaxy/web/controllers/dataset.py | 61 +-
lib/galaxy/web/controllers/forms.py | 252 +++++---
lib/galaxy/web/controllers/history.py | 34 +-
lib/galaxy/web/controllers/library_common.py | 2 +-
lib/galaxy/web/controllers/page.py | 60 +-
lib/galaxy/web/controllers/requests.py | 302 +++++----
lib/galaxy/web/controllers/requests_admin.py | 663 +++++++++++++-------
lib/galaxy/web/controllers/user.py | 2 -
lib/galaxy/web/framework/__init__.py | 10 +-
lib/galaxy/web/framework/helpers/grids.py | 8 +-
run.sh | 19 +
scripts/check_eggs.py | 14 +-
scripts/check_python.py | 19 +-
scripts/cleanup_datasets/cleanup_datasets.py | 4 +-
scripts/scramble/scripts/pysqlite.py | 44 +-
scripts/set_metadata.py | 15 +-
static/scripts/galaxy.base.js | 20 +-
templates/admin/forms/edit_form.mako | 2 +-
templates/admin/forms/grid.mako | 1 +
templates/admin/forms/manage_forms.mako | 76 --
templates/admin/forms/show_form_read_only.mako | 4 +-
templates/admin/requests/create_request_type.mako | 92 +-
templates/admin/requests/grid.mako | 218 +-------
templates/admin/requests/manage_request_types.mako | 69 +--
templates/admin/requests/show_request.mako | 2 +-
templates/admin/requests/view_request_type.mako | 70 +-
templates/dataset/errors.mako | 17 +-
templates/grid_base.mako | 4 +-
templates/grid_base_async.mako | 698 ++++++++++++++++++++++
templates/grid_body_async.mako | 5 +
templates/grid_common_async.mako | 155 +++++
templates/mobile/history/detail.mako | 2 +-
templates/requests/grid.mako | 218 +-------
templates/requests/show_request.mako | 2 +-
templates/root/history_common.mako | 4 +-
templates/tagging_common.mako | 13 -
templates/tool_form.mako | 16 +-
test/base/twilltestcase.py | 43 +-
test/functional/test_forms_and_requests.py | 44 +-
test/functional/test_get_data.py | 620 ++++++++++++++++---
test/functional/test_history_functions.py | 6 +
test/functional/test_sniffing_and_metadata_settings.py | 262 --------
test/functional/test_user_info.py | 9 +-
tools/data_source/upload.py | 45 +-
tools/data_source/upload.xml | 6 +
tools/extract/extract_genomic_dna.xml | 4 +-
tools/extract/liftOver_wrapper.xml | 2 +-
61 files changed, 3055 insertions(+), 2256 deletions(-)
diffs (truncated from 7153 to 3000 lines):
diff -r 555afd0bf457 -r 37406d8ad116 datatypes_conf.xml.sample
--- a/datatypes_conf.xml.sample Tue Nov 17 16:14:54 2009 -0500
+++ b/datatypes_conf.xml.sample Tue Nov 17 16:16:26 2009 -0500
@@ -1,211 +1,224 @@
<?xml version="1.0"?>
<datatypes>
- <registration converters_path="lib/galaxy/datatypes/converters">
- <datatype extension="ab1" type="galaxy.datatypes.images:Ab1" mimetype="application/octet-stream" display_in_upload="true"/>
- <datatype extension="axt" type="galaxy.datatypes.sequence:Axt" display_in_upload="true"/>
- <datatype extension="bam" type="galaxy.datatypes.images:Bam" mimetype="application/octet-stream"/>
- <datatype extension="bed" type="galaxy.datatypes.interval:Bed" display_in_upload="true">
- <converter file="bed_to_gff_converter.xml" target_datatype="gff"/>
- <converter file="interval_to_coverage.xml" target_datatype="coverage"/>
- <converter file="bed_to_interval_index_converter.xml" target_datatype="interval_index"/>
- </datatype>
- <datatype extension="binseq.zip" type="galaxy.datatypes.images:Binseq" mimetype="application/zip" display_in_upload="true"/>
- <datatype extension="len" type="galaxy.datatypes.chrominfo:ChromInfo" display_in_upload="true">
- <!-- no converters yet -->
- </datatype>
- <datatype extension="coverage" type="galaxy.datatypes.coverage:LastzCoverage" display_in_upload="true">
- <indexer file="coverage.xml" />
- </datatype>
- <datatype extension="customtrack" type="galaxy.datatypes.interval:CustomTrack"/>
- <datatype extension="csfasta" type="galaxy.datatypes.sequence:csFasta" display_in_upload="true"/>
- <datatype extension="data" type="galaxy.datatypes.data:Data" mimetype="application/octet-stream"/>
- <datatype extension="fasta" type="galaxy.datatypes.sequence:Fasta" display_in_upload="true">
- <converter file="fasta_to_tabular_converter.xml" target_datatype="tabular"/>
- </datatype>
- <datatype extension="fastq" type="galaxy.datatypes.sequence:Fastq" display_in_upload="true"/>
- <datatype extension="fastqsanger" type="galaxy.datatypes.sequence:FastqSanger" display_in_upload="true"/>
- <datatype extension="genetrack" type="galaxy.datatypes.tracks:GeneTrack"/>
- <datatype extension="gff" type="galaxy.datatypes.interval:Gff" display_in_upload="true">
- <converter file="gff_to_bed_converter.xml" target_datatype="bed"/>
- </datatype>
- <datatype extension="gff3" type="galaxy.datatypes.interval:Gff3" display_in_upload="true"/>
- <datatype extension="gif" type="galaxy.datatypes.images:Image" mimetype="image/gif"/>
- <datatype extension="gmaj.zip" type="galaxy.datatypes.images:Gmaj" mimetype="application/zip"/>
- <datatype extension="html" type="galaxy.datatypes.images:Html" mimetype="text/html"/>
- <datatype extension="interval" type="galaxy.datatypes.interval:Interval" display_in_upload="true">
- <converter file="interval_to_bed_converter.xml" target_datatype="bed"/>
- <indexer file="interval_awk.xml" />
- </datatype>
- <datatype extension="jpg" type="galaxy.datatypes.images:Image" mimetype="image/jpeg"/>
- <datatype extension="laj" type="galaxy.datatypes.images:Laj"/>
- <datatype extension="lav" type="galaxy.datatypes.sequence:Lav" display_in_upload="true"/>
- <datatype extension="maf" type="galaxy.datatypes.sequence:Maf" display_in_upload="true">
- <converter file="maf_to_fasta_converter.xml" target_datatype="fasta"/>
- <converter file="maf_to_interval_converter.xml" target_datatype="interval"/>
- </datatype>
- <datatype extension="pdf" type="galaxy.datatypes.images:Image" mimetype="application/pdf"/>
- <datatype extension="png" type="galaxy.datatypes.images:Image" mimetype="image/png"/>
- <datatype extension="qualsolexa" type="galaxy.datatypes.qualityscore:QualityScoreSolexa" display_in_upload="true"/>
- <datatype extension="qualsolid" type="galaxy.datatypes.qualityscore:QualityScoreSOLiD" display_in_upload="true"/>
- <datatype extension="qual454" type="galaxy.datatypes.qualityscore:QualityScore454" display_in_upload="true"/>
- <datatype extension="sam" type="galaxy.datatypes.tabular:Sam" display_in_upload="true"/>
- <datatype extension="scf" type="galaxy.datatypes.images:Scf" mimetype="application/octet-stream" display_in_upload="true"/>
- <datatype extension="taxonomy" type="galaxy.datatypes.tabular:Taxonomy" display_in_upload="true"/>
- <datatype extension="tabular" type="galaxy.datatypes.tabular:Tabular" display_in_upload="true"/>
- <datatype extension="txt" type="galaxy.datatypes.data:Text" display_in_upload="true"/>
- <datatype extension="blastxml" type="galaxy.datatypes.xml:BlastXml" display_in_upload="true"/>
- <datatype extension="txtseq.zip" type="galaxy.datatypes.images:Txtseq" mimetype="application/zip" display_in_upload="true"/>
- <datatype extension="wig" type="galaxy.datatypes.interval:Wiggle" display_in_upload="true">
- <converter file="wiggle_to_array_tree_converter.xml" target_datatype="array_tree"/>
- </datatype>
- <datatype extension="array_tree" type="galaxy.datatypes.data:Data" />
- <datatype extension="interval_index" type="galaxy.datatypes.data:Data" />
- <!-- EMBOSS TOOLS -->
- <datatype extension="acedb" type="galaxy.datatypes.data:Text"/>
- <datatype extension="asn1" type="galaxy.datatypes.data:Text"/>
- <datatype extension="btwisted" type="galaxy.datatypes.data:Text"/>
- <datatype extension="cai" type="galaxy.datatypes.data:Text"/>
- <datatype extension="charge" type="galaxy.datatypes.data:Text"/>
- <datatype extension="checktrans" type="galaxy.datatypes.data:Text"/>
- <datatype extension="chips" type="galaxy.datatypes.data:Text"/>
- <datatype extension="clustal" type="galaxy.datatypes.data:Text"/>
- <datatype extension="codata" type="galaxy.datatypes.data:Text"/>
- <datatype extension="codcmp" type="galaxy.datatypes.data:Text"/>
- <datatype extension="coderet" type="galaxy.datatypes.data:Text"/>
- <datatype extension="compseq" type="galaxy.datatypes.data:Text"/>
- <datatype extension="cpgplot" type="galaxy.datatypes.data:Text"/>
- <datatype extension="cpgreport" type="galaxy.datatypes.data:Text"/>
- <datatype extension="cusp" type="galaxy.datatypes.data:Text"/>
- <datatype extension="cut" type="galaxy.datatypes.data:Text"/>
- <datatype extension="dan" type="galaxy.datatypes.data:Text"/>
- <datatype extension="dbmotif" type="galaxy.datatypes.data:Text"/>
- <datatype extension="diffseq" type="galaxy.datatypes.data:Text"/>
- <datatype extension="digest" type="galaxy.datatypes.data:Text"/>
- <datatype extension="dreg" type="galaxy.datatypes.data:Text"/>
- <datatype extension="einverted" type="galaxy.datatypes.data:Text"/>
- <datatype extension="embl" type="galaxy.datatypes.data:Text"/>
- <datatype extension="epestfind" type="galaxy.datatypes.data:Text"/>
- <datatype extension="equicktandem" type="galaxy.datatypes.data:Text"/>
- <datatype extension="est2genome" type="galaxy.datatypes.data:Text"/>
- <datatype extension="etandem" type="galaxy.datatypes.data:Text"/>
- <datatype extension="excel" type="galaxy.datatypes.data:Text"/>
- <datatype extension="feattable" type="galaxy.datatypes.data:Text"/>
- <datatype extension="fitch" type="galaxy.datatypes.data:Text"/>
- <datatype extension="freak" type="galaxy.datatypes.data:Text"/>
- <datatype extension="fuzznuc" type="galaxy.datatypes.data:Text"/>
- <datatype extension="fuzzpro" type="galaxy.datatypes.data:Text"/>
- <datatype extension="fuzztran" type="galaxy.datatypes.data:Text"/>
- <datatype extension="garnier" type="galaxy.datatypes.data:Text"/>
- <datatype extension="gcg" type="galaxy.datatypes.data:Text"/>
- <datatype extension="geecee" type="galaxy.datatypes.data:Text"/>
- <datatype extension="genbank" type="galaxy.datatypes.data:Text"/>
- <datatype extension="helixturnhelix" type="galaxy.datatypes.data:Text"/>
- <datatype extension="hennig86" type="galaxy.datatypes.data:Text"/>
- <datatype extension="hmoment" type="galaxy.datatypes.data:Text"/>
- <datatype extension="ig" type="galaxy.datatypes.data:Text"/>
- <datatype extension="isochore" type="galaxy.datatypes.data:Text"/>
- <datatype extension="jackknifer" type="galaxy.datatypes.data:Text"/>
- <datatype extension="jackknifernon" type="galaxy.datatypes.data:Text"/>
- <datatype extension="markx10" type="galaxy.datatypes.data:Text"/>
- <datatype extension="markx1" type="galaxy.datatypes.data:Text"/>
- <datatype extension="markx0" type="galaxy.datatypes.data:Text"/>
- <datatype extension="markx3" type="galaxy.datatypes.data:Text"/>
- <datatype extension="markx2" type="galaxy.datatypes.data:Text"/>
- <datatype extension="match" type="galaxy.datatypes.data:Text"/>
- <datatype extension="mega" type="galaxy.datatypes.data:Text"/>
- <datatype extension="meganon" type="galaxy.datatypes.data:Text"/>
- <datatype extension="motif" type="galaxy.datatypes.data:Text"/>
- <datatype extension="msf" type="galaxy.datatypes.data:Text"/>
- <datatype extension="nametable" type="galaxy.datatypes.data:Text"/>
- <datatype extension="ncbi" type="galaxy.datatypes.data:Text"/>
- <datatype extension="needle" type="galaxy.datatypes.data:Text"/>
- <datatype extension="newcpgreport" type="galaxy.datatypes.data:Text"/>
- <datatype extension="newcpgseek" type="galaxy.datatypes.data:Text"/>
- <datatype extension="nexus" type="galaxy.datatypes.data:Text"/>
- <datatype extension="nexusnon" type="galaxy.datatypes.data:Text"/>
- <datatype extension="noreturn" type="galaxy.datatypes.data:Text"/>
- <datatype extension="pair" type="galaxy.datatypes.data:Text"/>
- <datatype extension="palindrome" type="galaxy.datatypes.data:Text"/>
- <datatype extension="pepcoil" type="galaxy.datatypes.data:Text"/>
- <datatype extension="pepinfo" type="galaxy.datatypes.data:Text"/>
- <datatype extension="pepstats" type="galaxy.datatypes.data:Text"/>
- <datatype extension="phylip" type="galaxy.datatypes.data:Text"/>
- <datatype extension="phylipnon" type="galaxy.datatypes.data:Text"/>
- <datatype extension="pir" type="galaxy.datatypes.data:Text"/>
- <datatype extension="polydot" type="galaxy.datatypes.data:Text"/>
- <datatype extension="preg" type="galaxy.datatypes.data:Text"/>
- <datatype extension="prettyseq" type="galaxy.datatypes.data:Text"/>
- <datatype extension="primersearch" type="galaxy.datatypes.data:Text"/>
- <datatype extension="regions" type="galaxy.datatypes.data:Text"/>
- <datatype extension="score" type="galaxy.datatypes.data:Text"/>
- <datatype extension="selex" type="galaxy.datatypes.data:Text"/>
- <datatype extension="seqtable" type="galaxy.datatypes.data:Text"/>
- <datatype extension="showfeat" type="galaxy.datatypes.data:Text"/>
- <datatype extension="showorf" type="galaxy.datatypes.data:Text"/>
- <datatype extension="simple" type="galaxy.datatypes.data:Text"/>
- <datatype extension="sixpack" type="galaxy.datatypes.data:Text"/>
- <datatype extension="srs" type="galaxy.datatypes.data:Text"/>
- <datatype extension="srspair" type="galaxy.datatypes.data:Text"/>
- <datatype extension="staden" type="galaxy.datatypes.data:Text"/>
- <datatype extension="strider" type="galaxy.datatypes.data:Text"/>
- <datatype extension="supermatcher" type="galaxy.datatypes.data:Text"/>
- <datatype extension="swiss" type="galaxy.datatypes.data:Text"/>
- <datatype extension="syco" type="galaxy.datatypes.data:Text"/>
- <datatype extension="table" type="galaxy.datatypes.data:Text"/>
- <datatype extension="textsearch" type="galaxy.datatypes.data:Text"/>
- <datatype extension="vectorstrip" type="galaxy.datatypes.data:Text"/>
- <datatype extension="wobble" type="galaxy.datatypes.data:Text"/>
- <datatype extension="wordcount" type="galaxy.datatypes.data:Text"/>
- <datatype extension="tagseq" type="galaxy.datatypes.data:Text"/>
- <!-- Start RGenetics Datatypes -->
- <!-- genome graphs ucsc file - first col is always marker then numeric values to plot -->
- <datatype extension="gg" type="galaxy.datatypes.genetics:GenomeGraphs"/>
- <datatype extension="rgenetics" type="galaxy.datatypes.genetics:Rgenetics"/>
- <!-- linkage format pedigree (separate .map file) -->
- <datatype extension="lped" type="galaxy.datatypes.genetics:Lped" display_in_upload="true"/>
- <!-- plink compressed file - has bed extension unfortunately -->
- <datatype extension="pbed" type="galaxy.datatypes.genetics:Pbed" display_in_upload="true"/>
- <!-- eigenstrat pedigree input file -->
- <datatype extension="eigenstratgeno" type="galaxy.datatypes.genetics:Eigenstratgeno"/>
- <!-- eigenstrat pca output file for adjusted eigenQTL eg -->
- <datatype extension="eigenstratpca" type="galaxy.datatypes.genetics:Eigenstratpca"/>
- <!-- fbat/pbat format pedigree (header row of marker names) -->
- <datatype extension="fped" type="galaxy.datatypes.genetics:Fped"/>
- <!-- part of linkage format pedigree -->
- <datatype extension="lmap" type="galaxy.datatypes.genetics:Lmap"/>
- <!-- phenotype file - fbat format -->
- <datatype extension="fphe" type="galaxy.datatypes.genetics:Fphe"/>
- <!-- phenotype file - plink format -->
- <datatype extension="pphe" type="galaxy.datatypes.genetics:Pphe"/>
- <datatype extension="snptest" type="galaxy.datatypes.genetics:Snptest"/>
- <datatype extension="snpmatrix" type="galaxy.datatypes.genetics:SNPMatrix"/>
- <datatype extension="xls" type="galaxy.datatypes.tabular:Tabular"/>
- <!-- End RGenetics Datatypes -->
- </registration>
- <sniffers>
- <!--
- The order in which Galaxy attempts to determine data types is
- important because some formats are much more loosely defined
- than others. The following list should be the most rigidly
- defined format first, followed by next-most rigidly defined,
- and so on.
- -->
- <sniffer type="galaxy.datatypes.xml:BlastXml"/>
- <sniffer type="galaxy.datatypes.sequence:Maf"/>
- <sniffer type="galaxy.datatypes.sequence:Lav"/>
- <sniffer type="galaxy.datatypes.sequence:csFasta"/>
- <sniffer type="galaxy.datatypes.qualityscore:QualityScoreSOLiD"/>
- <sniffer type="galaxy.datatypes.qualityscore:QualityScore454"/>
- <sniffer type="galaxy.datatypes.sequence:Fasta"/>
- <sniffer type="galaxy.datatypes.sequence:Fastq"/>
- <sniffer type="galaxy.datatypes.interval:Wiggle"/>
- <sniffer type="galaxy.datatypes.images:Html"/>
- <sniffer type="galaxy.datatypes.sequence:Axt"/>
- <sniffer type="galaxy.datatypes.interval:Bed"/>
- <sniffer type="galaxy.datatypes.interval:CustomTrack"/>
- <sniffer type="galaxy.datatypes.interval:Gff"/>
- <sniffer type="galaxy.datatypes.interval:Gff3"/>
- <sniffer type="galaxy.datatypes.interval:Interval"/>
- <sniffer type="galaxy.datatypes.tabular:Sam"/>
- </sniffers>
+ <registration converters_path="lib/galaxy/datatypes/converters">
+ <datatype extension="ab1" type="galaxy.datatypes.binary:Ab1" mimetype="application/octet-stream" display_in_upload="true"/>
+ <datatype extension="axt" type="galaxy.datatypes.sequence:Axt" display_in_upload="true"/>
+ <datatype extension="bam" type="galaxy.datatypes.binary:Bam" mimetype="application/octet-stream"/>
+ <datatype extension="bed" type="galaxy.datatypes.interval:Bed" display_in_upload="true">
+ <converter file="bed_to_gff_converter.xml" target_datatype="gff"/>
+ <converter file="interval_to_coverage.xml" target_datatype="coverage"/>
+ <converter file="bed_to_interval_index_converter.xml" target_datatype="interval_index"/>
+ </datatype>
+ <datatype extension="binseq.zip" type="galaxy.datatypes.binary:Binseq" mimetype="application/zip" display_in_upload="true"/>
+ <datatype extension="len" type="galaxy.datatypes.chrominfo:ChromInfo" display_in_upload="true">
+ <!-- no converters yet -->
+ </datatype>
+ <datatype extension="coverage" type="galaxy.datatypes.coverage:LastzCoverage" display_in_upload="true">
+ <indexer file="coverage.xml" />
+ </datatype>
+ <datatype extension="customtrack" type="galaxy.datatypes.interval:CustomTrack"/>
+ <datatype extension="csfasta" type="galaxy.datatypes.sequence:csFasta" display_in_upload="true"/>
+ <datatype extension="data" type="galaxy.datatypes.data:Data" mimetype="application/octet-stream"/>
+ <datatype extension="fasta" type="galaxy.datatypes.sequence:Fasta" display_in_upload="true">
+ <converter file="fasta_to_tabular_converter.xml" target_datatype="tabular"/>
+ </datatype>
+ <datatype extension="fastq" type="galaxy.datatypes.sequence:Fastq" display_in_upload="true"/>
+ <datatype extension="fastqsanger" type="galaxy.datatypes.sequence:FastqSanger" display_in_upload="true"/>
+ <datatype extension="genetrack" type="galaxy.datatypes.tracks:GeneTrack"/>
+ <datatype extension="gff" type="galaxy.datatypes.interval:Gff" display_in_upload="true">
+ <converter file="gff_to_bed_converter.xml" target_datatype="bed"/>
+ </datatype>
+ <datatype extension="gff3" type="galaxy.datatypes.interval:Gff3" display_in_upload="true"/>
+ <datatype extension="gif" type="galaxy.datatypes.images:Image" mimetype="image/gif"/>
+ <datatype extension="gmaj.zip" type="galaxy.datatypes.images:Gmaj" mimetype="application/zip"/>
+ <datatype extension="html" type="galaxy.datatypes.images:Html" mimetype="text/html"/>
+ <datatype extension="interval" type="galaxy.datatypes.interval:Interval" display_in_upload="true">
+ <converter file="interval_to_bed_converter.xml" target_datatype="bed"/>
+ <indexer file="interval_awk.xml" />
+ </datatype>
+ <datatype extension="jpg" type="galaxy.datatypes.images:Image" mimetype="image/jpeg"/>
+ <datatype extension="laj" type="galaxy.datatypes.images:Laj"/>
+ <datatype extension="lav" type="galaxy.datatypes.sequence:Lav" display_in_upload="true"/>
+ <datatype extension="maf" type="galaxy.datatypes.sequence:Maf" display_in_upload="true">
+ <converter file="maf_to_fasta_converter.xml" target_datatype="fasta"/>
+ <converter file="maf_to_interval_converter.xml" target_datatype="interval"/>
+ </datatype>
+ <datatype extension="pdf" type="galaxy.datatypes.images:Image" mimetype="application/pdf"/>
+ <datatype extension="png" type="galaxy.datatypes.images:Image" mimetype="image/png"/>
+ <datatype extension="qualsolexa" type="galaxy.datatypes.qualityscore:QualityScoreSolexa" display_in_upload="true"/>
+ <datatype extension="qualsolid" type="galaxy.datatypes.qualityscore:QualityScoreSOLiD" display_in_upload="true"/>
+ <datatype extension="qual454" type="galaxy.datatypes.qualityscore:QualityScore454" display_in_upload="true"/>
+ <datatype extension="sam" type="galaxy.datatypes.tabular:Sam" display_in_upload="true"/>
+ <datatype extension="scf" type="galaxy.datatypes.binary:Scf" mimetype="application/octet-stream" display_in_upload="true"/>
+ <datatype extension="sff" type="galaxy.datatypes.binary:Sff" mimetype="application/octet-stream" display_in_upload="true"/>
+ <datatype extension="taxonomy" type="galaxy.datatypes.tabular:Taxonomy" display_in_upload="true"/>
+ <datatype extension="tabular" type="galaxy.datatypes.tabular:Tabular" display_in_upload="true"/>
+ <datatype extension="txt" type="galaxy.datatypes.data:Text" display_in_upload="true"/>
+ <datatype extension="blastxml" type="galaxy.datatypes.xml:BlastXml" display_in_upload="true"/>
+ <datatype extension="txtseq.zip" type="galaxy.datatypes.data:Txtseq" mimetype="application/zip" display_in_upload="true"/>
+ <datatype extension="wig" type="galaxy.datatypes.interval:Wiggle" display_in_upload="true">
+ <converter file="wiggle_to_array_tree_converter.xml" target_datatype="array_tree"/>
+ </datatype>
+ <datatype extension="array_tree" type="galaxy.datatypes.data:Data" />
+ <datatype extension="interval_index" type="galaxy.datatypes.data:Data" />
+ <!-- Start EMBOSS tools -->
+ <datatype extension="acedb" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="asn1" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="btwisted" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="cai" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="charge" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="checktrans" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="chips" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="clustal" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="codata" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="codcmp" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="coderet" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="compseq" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="cpgplot" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="cpgreport" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="cusp" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="cut" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="dan" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="dbmotif" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="diffseq" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="digest" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="dreg" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="einverted" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="embl" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="epestfind" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="equicktandem" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="est2genome" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="etandem" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="excel" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="feattable" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="fitch" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="freak" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="fuzznuc" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="fuzzpro" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="fuzztran" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="garnier" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="gcg" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="geecee" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="genbank" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="helixturnhelix" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="hennig86" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="hmoment" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="ig" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="isochore" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="jackknifer" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="jackknifernon" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="markx10" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="markx1" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="markx0" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="markx3" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="markx2" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="match" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="mega" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="meganon" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="motif" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="msf" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="nametable" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="ncbi" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="needle" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="newcpgreport" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="newcpgseek" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="nexus" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="nexusnon" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="noreturn" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="pair" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="palindrome" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="pepcoil" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="pepinfo" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="pepstats" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="phylip" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="phylipnon" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="pir" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="polydot" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="preg" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="prettyseq" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="primersearch" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="regions" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="score" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="selex" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="seqtable" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="showfeat" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="showorf" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="simple" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="sixpack" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="srs" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="srspair" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="staden" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="strider" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="supermatcher" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="swiss" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="syco" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="table" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="textsearch" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="vectorstrip" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="wobble" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="wordcount" type="galaxy.datatypes.data:Text"/>
+ <datatype extension="tagseq" type="galaxy.datatypes.data:Text"/>
+ <!-- End EMBOSS tools -->
+ <!-- Start RGenetics Datatypes -->
+ <datatype extension="affybatch" type="galaxy.datatypes.genetics:Affybatch" display_in_upload="true"/>
+ <!-- eigenstrat pedigree input file -->
+ <datatype extension="eigenstratgeno" type="galaxy.datatypes.genetics:Eigenstratgeno"/>
+ <!-- eigenstrat pca output file for adjusted eigenQTL eg -->
+ <datatype extension="eigenstratpca" type="galaxy.datatypes.genetics:Eigenstratpca"/>
+ <datatype extension="eset" type="galaxy.datatypes.genetics:Eset" display_in_upload="true" />
+ <!-- fbat/pbat format pedigree (header row of marker names) -->
+ <datatype extension="fped" type="galaxy.datatypes.genetics:Fped" display_in_upload="true"/>
+ <!-- phenotype file - fbat format -->
+ <datatype extension="fphe" type="galaxy.datatypes.genetics:Fphe" display_in_upload="true" mimetype="text/html"/>
+ <!-- genome graphs ucsc file - first col is always marker then numeric values to plot -->
+ <datatype extension="gg" type="galaxy.datatypes.genetics:GenomeGraphs"/>
+ <!-- part of linkage format pedigree -->
+ <datatype extension="lmap" type="galaxy.datatypes.genetics:Lmap" display_in_upload="true"/>
+ <datatype extension="malist" type="galaxy.datatypes.genetics:MAlist" display_in_upload="true"/>
+ <!-- linkage format pedigree (separate .map file) -->
+ <datatype extension="lped" type="galaxy.datatypes.genetics:Lped" display_in_upload="true">
+ <converter file="lped_to_fped_converter.xml" target_datatype="fped"/>
+ <converter file="lped_to_pbed_converter.xml" target_datatype="pbed"/>
+ </datatype>
+ <!-- plink compressed file - has bed extension unfortunately -->
+ <datatype extension="pbed" type="galaxy.datatypes.genetics:Pbed" display_in_upload="true">
+ <converter file="pbed_to_lped_converter.xml" target_datatype="lped"/>
+ </datatype>
+ <datatype extension="pheno" type="galaxy.datatypes.genetics:Pheno"/>
+ <!-- phenotype file - plink format -->
+ <datatype extension="pphe" type="galaxy.datatypes.genetics:Pphe" display_in_upload="true" mimetype="text/html"/>
+ <datatype extension="rexpbase" type="galaxy.datatypes.genetics:RexpBase"/>
+ <datatype extension="rgenetics" type="galaxy.datatypes.genetics:Rgenetics"/>
+ <datatype extension="snptest" type="galaxy.datatypes.genetics:Snptest" display_in_upload="true"/>
+ <datatype extension="snpmatrix" type="galaxy.datatypes.genetics:SNPMatrix" display_in_upload="true"/>
+ <datatype extension="xls" type="galaxy.datatypes.tabular:Tabular"/>
+ <!-- End RGenetics Datatypes -->
+ </registration>
+ <sniffers>
+ <!--
+ The order in which Galaxy attempts to determine data types is
+ important because some formats are much more loosely defined
+ than others. The following list should be the most rigidly
+ defined format first, followed by next-most rigidly defined,
+ and so on.
+ -->
+ <sniffer type="galaxy.datatypes.binary:Sff"/>
+ <sniffer type="galaxy.datatypes.xml:BlastXml"/>
+ <sniffer type="galaxy.datatypes.sequence:Maf"/>
+ <sniffer type="galaxy.datatypes.sequence:Lav"/>
+ <sniffer type="galaxy.datatypes.sequence:csFasta"/>
+ <sniffer type="galaxy.datatypes.qualityscore:QualityScoreSOLiD"/>
+ <sniffer type="galaxy.datatypes.qualityscore:QualityScore454"/>
+ <sniffer type="galaxy.datatypes.sequence:Fasta"/>
+ <sniffer type="galaxy.datatypes.sequence:Fastq"/>
+ <sniffer type="galaxy.datatypes.interval:Wiggle"/>
+ <sniffer type="galaxy.datatypes.images:Html"/>
+ <sniffer type="galaxy.datatypes.sequence:Axt"/>
+ <sniffer type="galaxy.datatypes.interval:Bed"/>
+ <sniffer type="galaxy.datatypes.interval:CustomTrack"/>
+ <sniffer type="galaxy.datatypes.interval:Gff"/>
+ <sniffer type="galaxy.datatypes.interval:Gff3"/>
+ <sniffer type="galaxy.datatypes.interval:Interval"/>
+ <sniffer type="galaxy.datatypes.tabular:Sam"/>
+ </sniffers>
</datatypes>
diff -r 555afd0bf457 -r 37406d8ad116 dist-eggs.ini
--- a/dist-eggs.ini Tue Nov 17 16:14:54 2009 -0500
+++ b/dist-eggs.ini Tue Nov 17 16:16:26 2009 -0500
@@ -23,6 +23,9 @@
py2.5-macosx-10.3-fat-ucs2 = medeski.bx.psu.edu /usr/local/bin/python2.5
py2.6-macosx-10.3-fat-ucs2 = medeski.bx.psu.edu /usr/local/bin/python2.6
py2.5-macosx-10.5-i386-ucs2 = lion.bx.psu.edu /usr/bin/python2.5
+py2.4-solaris-2.10-i86pc-ucs2 = thumper.bx.psu.edu /depot/projects/pythons/solaris-2.10-i86pc-ucs2/bin/python2.4
+py2.5-solaris-2.10-i86pc-ucs2 = thumper.bx.psu.edu /depot/projects/pythons/solaris-2.10-i86pc-ucs2/bin/python2.5
+py2.6-solaris-2.10-i86pc-ucs2 = thumper.bx.psu.edu /depot/projects/pythons/solaris-2.10-i86pc-ucs2/bin/python2.6
py2.4-solaris-2.11-i86pc-ucs2 = victory.bx.psu.edu /depot/projects/pythons/solaris-2.11-i86pc-ucs2/bin/python2.4
py2.5-solaris-2.11-i86pc-ucs2 = victory.bx.psu.edu /depot/projects/pythons/solaris-2.11-i86pc-ucs2/bin/python2.5
py2.6-solaris-2.11-i86pc-ucs2 = victory.bx.psu.edu /depot/projects/pythons/solaris-2.11-i86pc-ucs2/bin/python2.6
@@ -47,9 +50,9 @@
py2.5-macosx = py2.5-macosx-10.3-fat-ucs2 py2.5-macosx-10.5-i386-ucs2
py2.6-macosx = py2.6-macosx-10.3-fat-ucs2
macosx = py2.4-macosx py2.5-macosx py2.6-macosx
-py2.4-solaris = py2.4-solaris-2.11-i86pc-ucs2 py2.4-solaris-2.10-sun4u-ucs2
-py2.5-solaris = py2.5-solaris-2.11-i86pc-ucs2 py2.5-solaris-2.10-sun4u-ucs2
-py2.6-solaris = py2.6-solaris-2.11-i86pc-ucs2 py2.6-solaris-2.10-sun4u-ucs2
+py2.4-solaris = py2.4-solaris-2.10-i86pc-ucs2 py2.4-solaris-2.11-i86pc-ucs2 py2.4-solaris-2.10-sun4u-ucs2
+py2.5-solaris = py2.5-solaris-2.10-i86pc-ucs2 py2.5-solaris-2.11-i86pc-ucs2 py2.5-solaris-2.10-sun4u-ucs2
+py2.6-solaris = py2.6-solaris-2.10-i86pc-ucs2 py2.6-solaris-2.11-i86pc-ucs2 py2.6-solaris-2.10-sun4u-ucs2
solaris = py2.4-solaris py2.5-solaris py2.6-solaris
py2.4-all = py2.4-linux py2.4-macosx py2.4-solaris
py2.5-all = py2.5-linux py2.5-macosx py2.5-solaris
diff -r 555afd0bf457 -r 37406d8ad116 eggs.ini
--- a/eggs.ini Tue Nov 17 16:14:54 2009 -0500
+++ b/eggs.ini Tue Nov 17 16:16:26 2009 -0500
@@ -19,7 +19,7 @@
pbs_python = 2.9.4
psycopg2 = 2.0.6
pycrypto = 2.0.1
-pysqlite = 2.3.5
+pysqlite = 2.5.6
python_lzo = 1.08
threadframe = 0.2
guppy = 0.1.8
@@ -57,11 +57,12 @@
; extra version information
[tags]
psycopg2 = _8.2.6_static
-pysqlite = _3.5.4_static
+pysqlite = _static
MySQL_python = _5.0.67_static
python_lzo = _static
bx_python = _dev_r4bf1f32e6b76
-GeneTrack = _dev_raa786e9fc131d998e532a1aef39d108850c9e93d
+GeneTrack = _dev_e380f21c704218622155b9d230a44b3c9c452524
+SQLAlchemy = _dev_r6498
; nose = .dev_r7156749efc58
; source location, necessary for scrambling
@@ -73,7 +74,7 @@
pbs_python = http://ftp.sara.nl/pub/outgoing/pbs_python-2.9.4.tar.gz
psycopg2 = http://initd.org/pub/software/psycopg/PSYCOPG-2-0/psycopg2-2.0.6.tar.gz ftp://ftp-archives.postgresql.org/pub/source/v8.2.6/postgresql-8.2.6.tar.bz2
pycrypto = http://www.amk.ca/files/python/crypto/pycrypto-2.0.1.tar.gz
-pysqlite = http://initd.org/pub/software/pysqlite/releases/2.3/2.3.5/pysqlite-2.3.5.ta… http://www.sqlite.org/sqlite-source-3_5_4.zip
+pysqlite = http://pypi.python.org/packages/source/p/pysqlite/pysqlite-2.5.6.tar.gz
python_lzo = http://www.oberhumer.com/opensource/lzo/download/LZO-v1/python-lzo-1.08.tar… http://www.oberhumer.com/opensource/lzo/download/LZO-v1/lzo-1.08.tar.gz
threadframe = http://www.majid.info/python/threadframe/threadframe-0.2.tar.gz
guppy = http://pypi.python.org/packages/source/g/guppy/guppy-0.1.8.tar.gz
@@ -82,7 +83,7 @@
decorator = http://pypi.python.org/packages/source/d/decorator/decorator-3.1.2.tar.gz
docutils = http://downloads.sourceforge.net/docutils/docutils-0.4.tar.gz
elementtree = http://effbot.org/downloads/elementtree-1.2.6-20050316.tar.gz
-GeneTrack = http://github.com/ialbert/genetrack-central/tarball/aa786e9fc131d998e532a1a…
+GeneTrack = http://github.com/ialbert/genetrack-central/tarball/e380f21c704218622155b9d…
lrucache = http://evan.prodromou.name/lrucache/lrucache-0.2.tar.gz
Mako = http://www.makotemplates.org/downloads/Mako-0.2.5.tar.gz
nose = http://pypi.python.org/packages/source/n/nose/nose-0.11.1.tar.gz
@@ -93,7 +94,7 @@
PSI = http://pypi.python.org/packages/source/P/PSI/PSI-0.3b1.1.tar.gz
Routes = http://pypi.python.org/packages/source/R/Routes/Routes-1.11.tar.gz
simplejson = http://cheeseshop.python.org/packages/source/s/simplejson/simplejson-1.5.ta…
-SQLAlchemy = http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.5.6.tar.gz
+SQLAlchemy = http://dist.g2.bx.psu.edu/SQLAlchemy-0.5.6_r6498.tar.bz2
sqlalchemy_migrate = http://pypi.python.org/packages/source/s/sqlalchemy-migrate/sqlalchemy-migr…
Tempita = http://pypi.python.org/packages/source/T/Tempita/Tempita-0.1.tar.gz
twill = http://darcs.idyll.org/~t/projects/twill-0.9.tar.gz
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/datatypes/binary.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/datatypes/binary.py Tue Nov 17 16:16:26 2009 -0500
@@ -0,0 +1,156 @@
+"""
+Binary classes
+"""
+
+import data, logging, binascii
+from galaxy.datatypes.metadata import MetadataElement
+from galaxy.datatypes import metadata
+from galaxy.datatypes.sniff import *
+from urllib import urlencode, quote_plus
+import zipfile
+import os, subprocess, tempfile
+
+log = logging.getLogger(__name__)
+
+sniffable_binary_formats = [ 'sff' ]
+# Currently these supported binary data types must be manually set on upload
+unsniffable_binary_formats = [ 'ab1', 'scf' ]
+
+class Binary( data.Data ):
+ """Binary data"""
+ def set_peek( self, dataset ):
+ """Set the peek and blurb text"""
+ if not dataset.dataset.purged:
+ dataset.peek = 'binary data'
+ dataset.blurb = 'data'
+ else:
+ dataset.peek = 'file does not exist'
+ dataset.blurb = 'file purged from disk'
+
+class Ab1( Binary ):
+ """Class describing an ab1 binary sequence file"""
+ file_ext = "ab1"
+ def set_peek( self, dataset ):
+ if not dataset.dataset.purged:
+ export_url = "/history_add_to?" + urlencode( {'history_id':dataset.history_id,'ext':'ab1','name':'ab1 sequence','info':'Sequence file','dbkey':dataset.dbkey} )
+ dataset.peek = "Binary ab1 sequence file"
+ dataset.blurb = data.nice_size( dataset.get_size() )
+ else:
+ dataset.peek = 'file does not exist'
+ dataset.blurb = 'file purged from disk'
+ def display_peek( self, dataset ):
+ try:
+ return dataset.peek
+ except:
+ return "Binary ab1 sequence file (%s)" % ( data.nice_size( dataset.get_size() ) )
+
+class Bam( Binary ):
+ """Class describing a BAM binary file"""
+ file_ext = "bam"
+ MetadataElement( name="bam_index", desc="BAM Index File", param=metadata.FileParameter, readonly=True, no_value=None, visible=False, optional=True )
+ def init_meta( self, dataset, copy_from=None ):
+ Binary.init_meta( self, dataset, copy_from=copy_from )
+ def set_meta( self, dataset, overwrite = True, **kwd ):
+ """
+ Sets index for BAM file.
+ """
+ index_file = dataset.metadata.bam_index
+ if not index_file:
+ index_file = dataset.metadata.spec['bam_index'].param.new_file( dataset = dataset )
+ tmp_dir = tempfile.gettempdir()
+ tmpf1 = tempfile.NamedTemporaryFile( dir=tmp_dir )
+ tmpf1bai = '%s.bai' % tmpf1.name
+ try:
+ os.system( 'cd %s' % tmp_dir )
+ os.system( 'cp %s %s' % ( dataset.file_name, tmpf1.name ) )
+ os.system( 'samtools index %s' % tmpf1.name )
+ os.system( 'cp %s %s' % ( tmpf1bai, index_file.file_name ) )
+ except Exception, ex:
+ sys.stderr.write( 'There was a problem creating the index for the BAM file\n%s\n' + str( ex ) )
+ tmpf1.close()
+ if os.path.exists( tmpf1bai ):
+ os.remove( tmpf1bai )
+ dataset.metadata.bam_index = index_file
+ def set_peek( self, dataset ):
+ if not dataset.dataset.purged:
+ export_url = "/history_add_to?" + urlencode( {'history_id':dataset.history_id,'ext':'bam','name':'bam alignments','info':'Alignments file','dbkey':dataset.dbkey} )
+ dataset.peek = "Binary bam alignments file"
+ dataset.blurb = data.nice_size( dataset.get_size() )
+ else:
+ dataset.peek = 'file does not exist'
+ dataset.blurb = 'file purged from disk'
+ def display_peek( self, dataset ):
+ try:
+ return dataset.peek
+ except:
+ return "Binary bam alignments file (%s)" % ( data.nice_size( dataset.get_size() ) )
+ def get_mime( self ):
+ """Returns the mime type of the datatype"""
+ return 'application/octet-stream'
+
+class Binseq( Binary ):
+ """Class describing a zip archive of binary sequence files"""
+ file_ext = "binseq.zip"
+ def set_peek( self, dataset ):
+ if not dataset.dataset.purged:
+ zip_file = zipfile.ZipFile( dataset.file_name, "r" )
+ num_files = len( zip_file.namelist() )
+ dataset.peek = "Archive of %s binary sequence files" % ( str( num_files ) )
+ dataset.blurb = data.nice_size( dataset.get_size() )
+ else:
+ dataset.peek = 'file does not exist'
+ dataset.blurb = 'file purged from disk'
+ def display_peek( self, dataset ):
+ try:
+ return dataset.peek
+ except:
+ return "Binary sequence file archive (%s)" % ( data.nice_size( dataset.get_size() ) )
+ def get_mime( self ):
+ """Returns the mime type of the datatype"""
+ return 'application/zip'
+
+class Scf( Binary ):
+ """Class describing an scf binary sequence file"""
+ file_ext = "scf"
+ def set_peek( self, dataset ):
+ if not dataset.dataset.purged:
+ export_url = "/history_add_to?" + urlencode({'history_id':dataset.history_id,'ext':'scf','name':'scf sequence','info':'Sequence file','dbkey':dataset.dbkey})
+ dataset.peek = "Binary scf sequence file"
+ dataset.blurb = data.nice_size( dataset.get_size() )
+ else:
+ dataset.peek = 'file does not exist'
+ dataset.blurb = 'file purged from disk'
+ def display_peek( self, dataset ):
+ try:
+ return dataset.peek
+ except:
+ return "Binary scf sequence file (%s)" % ( data.nice_size( dataset.get_size() ) )
+
+class Sff( Binary ):
+ """ Standard Flowgram Format (SFF) """
+ file_ext = "sff"
+ def __init__( self, **kwd ):
+ Binary.__init__( self, **kwd )
+ def sniff( self, filename ):
+ # The first 4 bytes of any sff file is '.sff', and the file is binary. For details
+ # about the format, see http://www.ncbi.nlm.nih.gov/Traces/trace.cgi?cmd=show&f=formats&m=doc&s=for…
+ try:
+ header = open( filename ).read(4)
+ if binascii.b2a_hex( header ) == binascii.hexlify( '.sff' ):
+ return True
+ return False
+ except Exception, e:
+ return False
+ def set_peek( self, dataset ):
+ if not dataset.dataset.purged:
+ export_url = "/history_add_to?" + urlencode( {'history_id':dataset.history_id,'ext':'sff','name':'sff file','info':'sff file','dbkey':dataset.dbkey} )
+ dataset.peek = "Binary sff file"
+ dataset.blurb = data.nice_size( dataset.get_size() )
+ else:
+ dataset.peek = 'file does not exist'
+ dataset.blurb = 'file purged from disk'
+ def display_peek( self, dataset ):
+ try:
+ return dataset.peek
+ except:
+ return "Binary sff file (%s)" % ( data.nice_size( dataset.get_size() ) )
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/datatypes/data.py
--- a/lib/galaxy/datatypes/data.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/datatypes/data.py Tue Nov 17 16:16:26 2009 -0500
@@ -1,4 +1,4 @@
-import logging, os, sys, time, tempfile, binascii
+import logging, os, sys, time, tempfile
from galaxy import util
from galaxy.util.odict import odict
from galaxy.util.bunch import Bunch
@@ -40,20 +40,18 @@
"""
__metaclass__ = DataMeta
-
- """Add metadata elements"""
+ # Add metadata elements
MetadataElement( name="dbkey", desc="Database/Build", default="?", param=metadata.DBKeyParameter, multiple=False, no_value="?" )
-
- """Stores the set of display applications, and viewing methods, supported by this datatype """
+ # Stores the set of display applications, and viewing methods, supported by this datatype
supported_display_apps = {}
-
- """If False, the peek is regenerated whenever a dataset of this type is copied"""
+ # If False, the peek is regenerated whenever a dataset of this type is copied
copy_safe_peek = True
-
- is_binary = True #The dataset contains binary data --> do not space_to_tab or convert newlines, etc. Allow binary file uploads of this type when True.
-
- allow_datatype_change = True #Allow user to change between this datatype and others. If False, this datatype cannot be changed from or into.
-
+ # The dataset contains binary data --> do not space_to_tab or convert newlines, etc.
+ # Allow binary file uploads of this type when True.
+ is_binary = True
+ # Allow user to change between this datatype and others. If False, this datatype
+ # cannot be changed from or into.
+ allow_datatype_change = True
#Composite datatypes
composite_type = None
composite_files = odict()
@@ -162,6 +160,11 @@
info = info.replace( '\r', '<br/>' )
if info.find( '\n' ) >= 0:
info = info.replace( '\n', '<br/>' )
+
+ # Convert to unicode to display non-ascii characters.
+ if type( info ) is not unicode:
+ info = unicode( info, 'utf-8')
+
return info
except:
return "info unavailable"
@@ -270,8 +273,6 @@
def add_composite_file( self, name, **kwds ):
#self.composite_files = self.composite_files.copy()
self.composite_files[ name ] = self.__new_composite_file( name, **kwds )
-
-
def __substitute_composite_key( self, key, composite_file, dataset = None ):
if composite_file.substitute_name_with_metadata:
if dataset:
@@ -303,7 +304,6 @@
return files
def generate_auto_primary_file( self, dataset = None ):
raise Exception( "generate_auto_primary_file is not implemented for this datatype." )
-
@property
def has_resolution(self):
return False
@@ -364,23 +364,37 @@
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
-class Binary( Data ):
- """Binary data"""
+class Txtseq( Data ):
+ """Class describing a zip archive of text sequence files"""
+ file_ext = "txtseq.zip"
def set_peek( self, dataset ):
- """Set the peek and blurb text"""
if not dataset.dataset.purged:
- dataset.peek = 'binary data'
- dataset.blurb = 'data'
+ zip_file = zipfile.ZipFile( dataset.file_name, "r" )
+ num_files = len( zip_file.namelist() )
+ dataset.peek = "Archive of %s text sequence files" % ( str( num_files ) )
+ dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
+ def display_peek(self, dataset):
+ try:
+ return dataset.peek
+ except:
+ return "Text sequence file archive (%s)" % ( data.nice_size( dataset.get_size() ) )
+ def get_mime(self):
+ """Returns the mime type of the datatype"""
+ return 'application/zip'
+
+class Newick( Text ):
+ pass
+
+# ------------- Utility methods --------------
def get_test_fname( fname ):
"""Returns test data filename"""
path, name = os.path.split(__file__)
full_path = os.path.join( path, 'test', fname )
return full_path
-
def nice_size(size):
"""
Returns a readably formatted string with the size
@@ -406,7 +420,6 @@
out = "%.1f %s" % (size, word)
return out
return '??? bytes'
-
def get_file_peek( file_name, is_multi_byte=False, WIDTH=256, LINE_COUNT=5 ):
"""
Returns the first LINE_COUNT lines wrapped to WIDTH
@@ -443,7 +456,6 @@
else:
text = unicode( '\n'.join( lines ), 'utf-8' )
return text
-
def get_line_count(file_name):
"""Returns the number of lines in a file that are neither null nor comments"""
count = 0
@@ -452,38 +464,3 @@
if line and line[0] != '#':
count += 1
return count
-
-class Newick( Text ):
- pass
-
-class Sff( Binary ):
- """ Standard Flowgram Format (SFF) """
- file_ext = "sff"
- def __init__( self, **kwd ):
- Binary.__init__(self, **kwd)
- def init_meta( self, dataset, copy_from=None ):
- Binary.init_meta( self, dataset, copy_from=copy_from )
- def sniff( self, filename ):
- '''
- The first 4 bytes of any sff file is '.sff'
-
- >>> fname = get_test_fname( '1.sff' )
- >>> Sff().sniff( fname )
- True
- '''
- header = open( filename ).read(4)
- if binascii.b2a_hex( header ) == binascii.hexlify( '.sff' ):
- return True
- return False
- def set_peek( self, dataset ):
- if not dataset.dataset.purged:
- dataset.peek = "Binary sff file"
- dataset.blurb = nice_size( dataset.get_size() )
- else:
- dataset.peek = 'file does not exist'
- dataset.blurb = 'file purged from disk'
- def display_peek(self, dataset):
- try:
- return dataset.peek
- except:
- return "sff file (%s)" % ( nice_size( dataset.get_size() ) )
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/datatypes/genetics.py
--- a/lib/galaxy/datatypes/genetics.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/datatypes/genetics.py Tue Nov 17 16:16:26 2009 -0500
@@ -1,6 +1,5 @@
"""
rgenetics datatypes
-Use at your peril
Ross Lazarus
for the rgenetics and galaxy projects
@@ -11,7 +10,6 @@
ross lazarus for rgenetics
august 20 2007
"""
-
import logging, os, sys, time, tempfile, shutil, string, glob
import data
from galaxy import util
@@ -26,8 +24,7 @@
from galaxy.datatypes.interval import Interval
from galaxy.util.hash_util import *
-gal_Log = logging.getLogger(__name__)
-verbose = False
+log = logging.getLogger(__name__)
class GenomeGraphs(Interval):
@@ -48,10 +45,8 @@
"""Initialize datatype, by adding GBrowse display app"""
Interval.__init__(self, **kwd)
self.add_display_app ( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
-
def as_ucsc_display_file( self, dataset, **kwd ):
return open( dataset.file_name )
-
def set_meta( self, dataset, overwrite = True, **kwd ):
i = 0
for i, line in enumerate( file ( dataset.file_name ) ):
@@ -66,7 +61,6 @@
except:
pass
Interval.set_meta( self, dataset, overwrite = overwrite, skip = i )
-
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
@@ -82,7 +76,6 @@
except Exception, exc:
out = "Can't create peek %s" % exc
return out
-
def get_estimated_display_viewport( self, dataset ):
"""
Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3
@@ -118,7 +111,6 @@
return ( seqid, str( start ), str( stop ) )
else:
return ( '', '', '' )
-
def gbrowse_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data:
@@ -132,7 +124,6 @@
link = "%s?start=%s&stop=%s&ref=%s&dbkey=%s" % ( site_url, start, stop, seqid, dataset.dbkey )
ret_val.append( ( site_name, link ) )
return ret_val
-
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data:
@@ -160,10 +151,8 @@
link = '%s?redirect_url=%s&display_url=%s' % ( internal_url, redirect_url, display_url )
ret_val.append( (site_name, link) )
else:
- gal_Log.debug('@@@ gg ucsc_links - no viewport_tuple')
+ log.debug('@@@ gg ucsc_links - no viewport_tuple')
return ret_val
-
-
def sniff( self, filename ):
"""
Determines whether the file is in gff format
@@ -202,20 +191,17 @@
except:
return False
-
-
class rgTabList(Tabular):
- """ for sampleid and for featureid lists of exclusions or inclusions in the clean tool
+ """
+ for sampleid and for featureid lists of exclusions or inclusions in the clean tool
featureid subsets on statistical criteria -> specialized display such as gg
"""
file_ext = "rgTList"
-
def __init__(self, **kwd):
"""Initialize featurelistt datatype"""
Tabular.__init__( self, **kwd )
self.column_names = []
-
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
@@ -236,23 +222,24 @@
out = "Can't create peek %s" % exc
return out
-
class rgSampleList(rgTabList):
- """ for sampleid exclusions or inclusions in the clean tool
- output from QC eg excess het, gender error, ibd pair member,eigen outlier,excess mendel errors,...
- since they can be uploaded, should be flexible
- but they are persistent at least
- same infrastructure for expression?
+ """
+ for sampleid exclusions or inclusions in the clean tool
+ output from QC eg excess het, gender error, ibd pair member,eigen outlier,excess mendel errors,...
+ since they can be uploaded, should be flexible
+ but they are persistent at least
+ same infrastructure for expression?
"""
file_ext = "rgSList"
def __init__(self, **kwd):
- """Initialize samplelist datatype"""
+ """
+ Initialize samplelist datatype
+ """
rgTabList.__init__( self, **kwd )
self.column_names[0] = 'FID'
self.column_names[1] = 'IID'
# this is what Plink wants as at 2009
-
def sniff(self,filename):
"""
"""
@@ -264,10 +251,11 @@
return False
class rgFeatureList( rgTabList ):
- """ for featureid lists of exclusions or inclusions in the clean tool
- output from QC eg low maf, high missingness, bad hwe in controls, excess mendel errors,...
- featureid subsets on statistical criteria -> specialized display such as gg
- same infrastructure for expression?
+ """
+ for featureid lists of exclusions or inclusions in the clean tool
+ output from QC eg low maf, high missingness, bad hwe in controls, excess mendel errors,...
+ featureid subsets on statistical criteria -> specialized display such as gg
+ same infrastructure for expression?
"""
file_ext = "rgFList"
@@ -276,26 +264,23 @@
rgTabList.__init__( self, **kwd )
for i,s in enumerate(['#FeatureId', 'Chr', 'Genpos', 'Mappos']):
self.column_names[i] = s
-
class Rgenetics(Html):
- """class to use for rgenetics"""
-
- MetadataElement( name="base_name", desc="base name for all transformed versions of this genetic dataset", default="rgenetics",
- readonly=True, set_in_upload=True)
+ """
+ class to use for rgenetics
+ """
+ MetadataElement( name="base_name", desc="base name for all transformed versions of this genetic dataset", default="rgenetics", readonly=True, set_in_upload=True)
composite_type = 'auto_primary_file'
allow_datatype_change = False
file_ext = 'rgenetics'
-
def missing_meta( self, dataset=None, **kwargs):
"""Checks for empty meta values"""
for key, value in dataset.metadata.items():
if not value:
return True
return False
-
def generate_primary_file( self, dataset = None ):
rval = ['<html><head><title>Rgenetics Galaxy Composite Dataset </title></head><p/>']
rval.append('<div>This composite dataset is composed of the following files:<p/><ul>')
@@ -306,9 +291,9 @@
rval.append( '<li><a href="%s" type="application/binary">%s</a>%s' % ( composite_name, composite_name, opt_text ) )
rval.append( '</ul></div></html>' )
return "\n".join( rval )
-
def regenerate_primary_file(self,dataset):
- """cannot do this until we are setting metadata
+ """
+ cannot do this until we are setting metadata
"""
def fix(oldpath,newbase):
old,e = os.path.splitext(oldpath)
@@ -332,30 +317,25 @@
f.write("\n".join( rval ))
f.write('\n')
f.close()
-
def set_meta( self, dataset, **kwd ):
-
- """for lped/pbed eg
-
+ """
+ for lped/pbed eg
"""
if kwd.get('overwrite') == False:
- if verbose:
- gal_Log.debug('@@@ rgenetics set_meta called with overwrite = False')
+ #log.debug('@@@ rgenetics set_meta called with overwrite = False')
return True
try:
efp = dataset.extra_files_path
except:
- if verbose:
- gal_Log.debug('@@@rgenetics set_meta failed %s - dataset %s has no efp ?' % (sys.exc_info()[0], dataset.name))
+ #log.debug('@@@rgenetics set_meta failed %s - dataset %s has no efp ?' % (sys.exc_info()[0], dataset.name))
return False
try:
- flist = os.listdir(efp)
- except:
- if verbose: gal_Log.debug('@@@rgenetics set_meta failed %s - dataset %s has no efp ?' % (sys.exc_info()[0],dataset.name))
+ flist = os.listdir(efp)
+ except:
+ #log.debug('@@@rgenetics set_meta failed %s - dataset %s has no efp ?' % (sys.exc_info()[0],dataset.name))
return False
if len(flist) == 0:
- if verbose:
- gal_Log.debug('@@@rgenetics set_meta failed - %s efp %s is empty?' % (dataset.name,efp))
+ #log.debug('@@@rgenetics set_meta failed - %s efp %s is empty?' % (dataset.name,efp))
return False
bn = None
for f in flist:
@@ -372,9 +352,9 @@
dataset.blurb = 'Composite file - Rgenetics Galaxy toolkit'
return True
-
class SNPMatrix(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="snpmatrix"
@@ -385,9 +365,9 @@
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
-
def sniff(self,filename):
- """ need to check the file header hex code
+ """
+ need to check the file header hex code
"""
infile = open(dataset.file_name, "b")
head = infile.read(16)
@@ -397,9 +377,9 @@
else:
return True
-
class Lped(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="lped"
@@ -408,9 +388,9 @@
self.add_composite_file( '%s.ped', description = 'Pedigree File', substitute_name_with_metadata = 'base_name', is_binary = True )
self.add_composite_file( '%s.map', description = 'Map File', substitute_name_with_metadata = 'base_name', is_binary = True )
-
class Pphe(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="pphe"
@@ -418,14 +398,15 @@
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.pphe', description = 'Plink Phenotype File', substitute_name_with_metadata = 'base_name' )
-
class Lmap(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="lmap"
class Fphe(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="fphe"
@@ -434,7 +415,8 @@
self.add_composite_file( '%s.fphe', description = 'FBAT Phenotype File', substitute_name_with_metadata = 'base_name' )
class Phe(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="phe"
@@ -442,10 +424,9 @@
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.phe', description = 'Phenotype File', substitute_name_with_metadata = 'base_name' )
-
-
class Fped(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="fped"
@@ -453,9 +434,9 @@
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.fped', description = 'FBAT format pedfile', substitute_name_with_metadata = 'base_name' )
-
class Pbed(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="pbed"
@@ -466,7 +447,8 @@
self.add_composite_file( '%s.fam', substitute_name_with_metadata = 'base_name', is_binary = True )
class Eigenstratgeno(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="eigenstratgeno"
@@ -475,11 +457,10 @@
self.add_composite_file( '%s.eigenstratgeno', substitute_name_with_metadata = 'base_name', is_binary = True )
self.add_composite_file( '%s.ind', substitute_name_with_metadata = 'base_name', is_binary = True )
self.add_composite_file( '%s.map', substitute_name_with_metadata = 'base_name', is_binary = True )
-
-
class Eigenstratpca(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="eigenstratpca"
@@ -487,22 +468,21 @@
Rgenetics.__init__(self, **kwd)
self.add_composite_file( '%s.eigenstratpca', description = 'Eigenstrat PCA file', substitute_name_with_metadata = 'base_name' )
-
class Snptest(Rgenetics):
- """fake class to distinguish different species of Rgenetics data collections
+ """
+ fake class to distinguish different species of Rgenetics data collections
"""
file_ext="snptest"
-
class Pheno(Tabular):
"""
base class for pheno files
"""
file_ext = 'pheno'
-
class RexpBase( Html ):
- """base class for BioC data structures in Galaxy
+ """
+ base class for BioC data structures in Galaxy
must be constructed with the pheno data in place since that
goes into the metadata for each instance
"""
@@ -518,18 +498,16 @@
composite_type = 'auto_primary_file'
allow_datatype_change = False
-
def __init__( self, **kwd ):
Html.__init__(self,**kwd)
self.add_composite_file( '%s.pheno', description = 'Phenodata tab text file',
substitute_name_with_metadata = 'base_name', is_binary=True)
-
def generate_primary_file( self, dataset = None ):
- """ This is called only at upload to write the html file
+ """
+ This is called only at upload to write the html file
cannot rename the datasets here - they come with the default unfortunately
"""
return '<html><head></head><body>AutoGenerated Primary File for Composite Dataset</body></html>'
-
def get_phecols(self, phenolist=[], maxConc=20):
"""
sept 2009: cannot use whitespace to split - make a more complex structure here
@@ -555,7 +533,7 @@
else:
for col,code in enumerate(row): # keep column order correct
if col >= totcols:
- gal_Log.warning('### get_phecols error in pheno file - row %d col %d (%s) longer than header %s' % (nrows, col, row, head))
+ log.warning('### get_phecols error in pheno file - row %d col %d (%s) longer than header %s' % (nrows, col, row, head))
else:
concordance[col].setdefault(code,0) # first one is zero
concordance[col][code] += 1
@@ -601,10 +579,9 @@
res = [('no usable phenotype columns found',[('?',0),]),]
return res
-
-
def get_pheno(self,dataset):
- """expects a .pheno file in the extra_files_dir - ugh
+ """
+ expects a .pheno file in the extra_files_dir - ugh
note that R is wierd and adds the row.name in
the header so the columns are all wrong - unless you tell it not to.
A file can be written as
@@ -620,11 +597,12 @@
else:
p = []
return '\n'.join(p)
-
def set_peek( self, dataset ):
- """expects a .pheno file in the extra_files_dir - ugh
+ """
+ expects a .pheno file in the extra_files_dir - ugh
note that R is wierd and does not include the row.name in
- the header. why?"""
+ the header. why?
+ """
if not dataset.dataset.purged:
pp = os.path.join(dataset.extra_files_path,'%s.pheno' % dataset.metadata.base_name)
try:
@@ -636,17 +614,14 @@
else:
dataset.peek = 'file does not exist\n'
dataset.blurb = 'file purged from disk'
-
def get_peek( self, dataset ):
- """expects a .pheno file in the extra_files_dir - ugh
- """
+ """expects a .pheno file in the extra_files_dir - ugh"""
pp = os.path.join(dataset.extra_files_path,'%s.pheno' % dataset.metadata.base_name)
try:
p = file(pp,'r').readlines()
except:
p = ['##failed to find %s' % pp]
return ''.join(p[:5])
-
def get_file_peek(self,filename):
"""
can't really peek at a filename - need the extra_files_path and such?
@@ -657,7 +632,6 @@
except:
pass
return ''.join(h[:5])
-
def regenerate_primary_file(self,dataset):
"""cannot do this until we are setting metadata
"""
@@ -672,24 +646,19 @@
f.write("\n".join( rval ))
f.write('\n')
f.close()
-
- """Add metadata elements"""
def init_meta( self, dataset, copy_from=None ):
+ """Add metadata elements"""
if copy_from:
dataset.metadata = copy_from.metadata
-
def set_meta( self, dataset, **kwd ):
-
"""
NOTE we apply the tabular machinary to the phenodata extracted
from a BioC eSet or affybatch.
-
"""
try:
flist = os.listdir(dataset.extra_files_path)
except:
- if verbose:
- gal_Log.debug('@@@rexpression set_meta failed - no dataset?')
+ #log.debug('@@@rexpression set_meta failed - no dataset?')
return False
bn = None
for f in flist:
@@ -727,7 +696,6 @@
if not dataset.blurb:
dataset.blurb = 'R loadable BioC expression object for the Rexpression Galaxy toolkit'
return True
-
def make_html_table( self, pp='nothing supplied from peek\n'):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">',]
@@ -750,20 +718,16 @@
except Exception, exc:
out = "Can't create html table %s" % str( exc )
return out
-
def display_peek( self, dataset ):
"""Returns formatted html of peek"""
out=self.make_html_table(dataset.peek)
return out
-
def get_mime(self):
"""Returns the mime type of the datatype"""
return 'text/html'
-
class Affybatch( RexpBase ):
"""derived class for BioC data structures in Galaxy """
-
file_ext = "affybatch"
def __init__( self, **kwd ):
@@ -780,7 +744,6 @@
self.add_composite_file( '%s.eset', description = 'ESet R object saved to file',
substitute_name_with_metadata = 'base_name', is_binary = True )
-
class MAlist( RexpBase ):
"""derived class for BioC data structures in Galaxy """
file_ext = "malist"
@@ -790,9 +753,6 @@
self.add_composite_file( '%s.malist', description = 'MAlist R object saved to file',
substitute_name_with_metadata = 'base_name', is_binary = True )
-
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
-
-
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/datatypes/images.py
--- a/lib/galaxy/datatypes/images.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/datatypes/images.py Tue Nov 17 16:16:26 2009 -0500
@@ -13,82 +13,6 @@
log = logging.getLogger(__name__)
-class Ab1( data.Data ):
- """Class describing an ab1 binary sequence file"""
- file_ext = "ab1"
- def set_peek( self, dataset ):
- if not dataset.dataset.purged:
- export_url = "/history_add_to?" + urlencode({'history_id':dataset.history_id,'ext':'ab1','name':'ab1 sequence','info':'Sequence file','dbkey':dataset.dbkey})
- dataset.peek = "Binary ab1 sequence file"
- dataset.blurb = data.nice_size( dataset.get_size() )
- else:
- dataset.peek = 'file does not exist'
- dataset.blurb = 'file purged from disk'
- def display_peek(self, dataset):
- try:
- return dataset.peek
- except:
- return "Binary ab1 sequence file (%s)" % ( data.nice_size( dataset.get_size() ) )
-
-class Scf( data.Data ):
- """Class describing an scf binary sequence file"""
- file_ext = "scf"
- def set_peek( self, dataset ):
- if not dataset.dataset.purged:
- export_url = "/history_add_to?" + urlencode({'history_id':dataset.history_id,'ext':'scf','name':'scf sequence','info':'Sequence file','dbkey':dataset.dbkey})
- dataset.peek = "Binary scf sequence file"
- dataset.blurb = data.nice_size( dataset.get_size() )
- else:
- dataset.peek = 'file does not exist'
- dataset.blurb = 'file purged from disk'
- def display_peek(self, dataset):
- try:
- return dataset.peek
- except:
- return "Binary scf sequence file (%s)" % ( data.nice_size( dataset.get_size() ) )
-
-class Binseq( data.Data ):
- """Class describing a zip archive of binary sequence files"""
- file_ext = "binseq.zip"
- def set_peek( self, dataset ):
- if not dataset.dataset.purged:
- zip_file = zipfile.ZipFile( dataset.file_name, "r" )
- num_files = len( zip_file.namelist() )
- dataset.peek = "Archive of %s binary sequence files" % ( str( num_files ) )
- dataset.blurb = data.nice_size( dataset.get_size() )
- else:
- dataset.peek = 'file does not exist'
- dataset.blurb = 'file purged from disk'
- def display_peek(self, dataset):
- try:
- return dataset.peek
- except:
- return "Binary sequence file archive (%s)" % ( data.nice_size( dataset.get_size() ) )
- def get_mime(self):
- """Returns the mime type of the datatype"""
- return 'application/zip'
-
-class Txtseq( data.Data ):
- """Class describing a zip archive of text sequence files"""
- file_ext = "txtseq.zip"
- def set_peek( self, dataset ):
- if not dataset.dataset.purged:
- zip_file = zipfile.ZipFile( dataset.file_name, "r" )
- num_files = len( zip_file.namelist() )
- dataset.peek = "Archive of %s text sequence files" % ( str( num_files ) )
- dataset.blurb = data.nice_size( dataset.get_size() )
- else:
- dataset.peek = 'file does not exist'
- dataset.blurb = 'file purged from disk'
- def display_peek(self, dataset):
- try:
- return dataset.peek
- except:
- return "Text sequence file archive (%s)" % ( data.nice_size( dataset.get_size() ) )
- def get_mime(self):
- """Returns the mime type of the datatype"""
- return 'application/zip'
-
class Image( data.Data ):
"""Class describing an image"""
def set_peek( self, dataset ):
@@ -236,47 +160,3 @@
return dataset.peek
except:
return "peek unavailable"
-
-class Bam( data.Binary ):
- """Class describing a BAM binary file"""
- file_ext = "bam"
- MetadataElement( name="bam_index", desc="BAM Index File", param=metadata.FileParameter, readonly=True, no_value=None, visible=False, optional=True )
- def init_meta( self, dataset, copy_from=None ):
- data.Binary.init_meta( self, dataset, copy_from=copy_from )
- def set_meta( self, dataset, overwrite = True, **kwd ):
- """
- Sets index for BAM file.
- """
- index_file = dataset.metadata.bam_index
- if not index_file:
- index_file = dataset.metadata.spec['bam_index'].param.new_file( dataset = dataset )
- tmp_dir = tempfile.gettempdir()
- tmpf1 = tempfile.NamedTemporaryFile(dir=tmp_dir)
- tmpf1bai = '%s.bai' % tmpf1.name
- try:
- os.system('cd %s' % tmp_dir)
- os.system('cp %s %s' % (dataset.file_name, tmpf1.name))
- os.system('samtools index %s' % tmpf1.name)
- os.system('cp %s %s' % (tmpf1bai, index_file.file_name))
- except Exception, ex:
- sys.stderr.write('There was a problem creating the index for the BAM file\n%s\n' + str(ex))
- tmpf1.close()
- if os.path.exists(tmpf1bai):
- os.remove(tmpf1bai)
- dataset.metadata.bam_index = index_file
- def set_peek( self, dataset ):
- if not dataset.dataset.purged:
- export_url = "/history_add_to?" + urlencode({'history_id':dataset.history_id,'ext':'bam','name':'bam alignments','info':'Alignments file','dbkey':dataset.dbkey})
- dataset.peek = "Binary bam alignments file"
- dataset.blurb = data.nice_size( dataset.get_size() )
- else:
- dataset.peek = 'file does not exist'
- dataset.blurb = 'file purged from disk'
- def display_peek(self, dataset):
- try:
- return dataset.peek
- except:
- return "Binary bam alignments file (%s)" % ( data.nice_size( dataset.get_size() ) )
- def get_mime(self):
- """Returns the mime type of the datatype"""
- return 'application/octet-stream'
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/datatypes/metadata.py
--- a/lib/galaxy/datatypes/metadata.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/datatypes/metadata.py Tue Nov 17 16:16:26 2009 -0500
@@ -509,7 +509,7 @@
# need to make different keys for them, since ids can overlap
return "%s_%d" % ( dataset.__class__.__name__, dataset.id )
def setup_external_metadata( self, datasets, sa_session, exec_dir=None, tmp_dir=None, dataset_files_path=None,
- output_fnames=None, config_root=None, datatypes_config=None, kwds={} ):
+ output_fnames=None, config_root=None, datatypes_config=None, job_metadata=None, kwds={} ):
#fill in metadata_files_dict and return the command with args required to set metadata
def __metadata_files_list_to_cmd_line( metadata_files ):
def __get_filename_override():
@@ -564,7 +564,7 @@
sa_session.flush()
metadata_files_list.append( metadata_files )
#return command required to build
- return "%s %s %s %s %s %s" % ( os.path.join( exec_dir, 'set_metadata.sh' ), dataset_files_path, tmp_dir, config_root, datatypes_config, " ".join( map( __metadata_files_list_to_cmd_line, metadata_files_list ) ) )
+ return "%s %s %s %s %s %s %s" % ( os.path.join( exec_dir, 'set_metadata.sh' ), dataset_files_path, tmp_dir, config_root, datatypes_config, job_metadata, " ".join( map( __metadata_files_list_to_cmd_line, metadata_files_list ) ) )
def external_metadata_set_successfully( self, dataset, sa_session ):
metadata_files = self.get_output_filenames_by_dataset( dataset, sa_session )
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/datatypes/registry.py
--- a/lib/galaxy/datatypes/registry.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/datatypes/registry.py Tue Nov 17 16:16:26 2009 -0500
@@ -3,7 +3,7 @@
"""
import os, tempfile
import logging
-import data, tabular, interval, images, sequence, qualityscore, genetics, xml, coverage, tracks, chrominfo
+import data, tabular, interval, images, sequence, qualityscore, genetics, xml, coverage, tracks, chrominfo, binary
import galaxy.util
from galaxy.util.odict import odict
@@ -109,11 +109,11 @@
#default values
if len(self.datatypes_by_extension) < 1:
self.datatypes_by_extension = {
- 'ab1' : images.Ab1(),
+ 'ab1' : binary.Ab1(),
'axt' : sequence.Axt(),
- 'bam' : images.Bam(),
+ 'bam' : binary.Bam(),
'bed' : interval.Bed(),
- 'binseq.zip' : images.Binseq(),
+ 'binseq.zip' : binary.Binseq(),
'blastxml' : xml.BlastXml(),
'coverage' : coverage.LastzCoverage(),
'customtrack' : interval.CustomTrack(),
@@ -132,12 +132,12 @@
'qualsolexa' : qualityscore.QualityScoreSolexa(),
'qual454' : qualityscore.QualityScore454(),
'sam' : tabular.Sam(),
- 'scf' : images.Scf(),
- 'sff' : data.Sff(),
+ 'scf' : binary.Scf(),
+ 'sff' : binary.Sff(),
'tabular' : tabular.Tabular(),
'taxonomy' : tabular.Taxonomy(),
'txt' : data.Text(),
- 'txtseq.zip' : images.Txtseq(),
+ 'txtseq.zip' : data.Txtseq(),
'wig' : interval.Wiggle()
}
self.mimetypes_by_extension = {
@@ -174,7 +174,7 @@
# because some formats are much more flexibly defined than others.
if len(self.sniff_order) < 1:
self.sniff_order = [
- data.Sff(),
+ binary.Sff(),
xml.BlastXml(),
sequence.Maf(),
sequence.Lav(),
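
The registry edits above only repoint these extensions at the new galaxy.datatypes.binary module. A minimal sketch of how an extension-to-instance mapping like this is typically consulted; the plain-text fallback here is illustrative, not taken from the registry code:

# Sketch: looking up a datatype instance by extension, falling back to text.
from galaxy.datatypes import binary, data

datatypes_by_extension = {
    'ab1' : binary.Ab1(),
    'bam' : binary.Bam(),
    'sff' : binary.Sff(),
    'txt' : data.Text(),
}

def get_datatype_by_extension( ext ):
    return datatypes_by_extension.get( ext, data.Text() )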
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/datatypes/tracks.py
--- a/lib/galaxy/datatypes/tracks.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/datatypes/tracks.py Tue Nov 17 16:16:26 2009 -0500
@@ -3,10 +3,7 @@
"""
import data
-import logging
-import re
-import binascii
-from cgi import escape
+import tabular, binascii, logging
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes import metadata
import galaxy.model
@@ -17,7 +14,7 @@
log = logging.getLogger(__name__)
-class GeneTrack( data.Binary ):
+class GeneTrack( tabular.Tabular ):
file_ext = "genetrack"
MetadataElement( name="genetrack", default="data.genetrack", desc="HDF index", readonly=True, visible=True, no_value=0 )
@@ -27,7 +24,7 @@
super( GeneTrack, self ).__init__( **kwargs )
self.add_display_app( 'genetrack', 'View in', '', 'genetrack_link' )
def get_display_links( self, dataset, type, app, base_url, target_frame='galaxy_main', **kwd ):
- return data.Binary.get_display_links( self, dataset, type, app, base_url, target_frame=target_frame, **kwd )
+ return data.Data.get_display_links( self, dataset, type, app, base_url, target_frame=target_frame, **kwd )
def genetrack_link( self, hda, type, app, base_url ):
ret_val = []
if hda.has_data:
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/jobs/__init__.py Tue Nov 17 16:16:26 2009 -0500
@@ -139,10 +139,15 @@
JobWrapper( job, None, self ).fail( 'This tool was disabled before the job completed. Please contact your Galaxy administrator, or' )
elif job.job_runner_name is None:
log.debug( "no runner: %s is still in queued state, adding to the jobs queue" %job.id )
- self.queue.put( ( job.id, job.tool_id ) )
+ if self.track_jobs_in_database:
+ job.state = model.Job.states.NEW
+ else:
+ self.queue.put( ( job.id, job.tool_id ) )
else:
job_wrapper = JobWrapper( job, self.app.toolbox.tools_by_id[ job.tool_id ], self )
self.dispatcher.recover( job, job_wrapper )
+ if self.sa_session.dirty:
+ self.sa_session.flush()
def __monitor( self ):
"""
@@ -526,6 +531,7 @@
# If the tool was expected to set the extension, attempt to retrieve it
if dataset.ext == 'auto':
dataset.extension = context.get( 'ext', 'data' )
+ dataset.init_meta( copy_from=dataset )
#if a dataset was copied, it won't appear in our dictionary:
#either use the metadata from originating output dataset, or call set_meta on the copies
#it would be quicker to just copy the metadata from the originating output dataset,
@@ -715,14 +721,15 @@
for outfile in [ str( o ) for o in output_paths ]:
sizes.append( ( outfile, os.stat( outfile ).st_size ) )
return sizes
- def setup_external_metadata( self, exec_dir = None, tmp_dir = None, dataset_files_path = None, config_root = None, datatypes_config = None, **kwds ):
+ def setup_external_metadata( self, exec_dir = None, tmp_dir = None, dataset_files_path = None, config_root = None, datatypes_config = None, set_extension = True, **kwds ):
# extension could still be 'auto' if this is the upload tool.
job = self.sa_session.query( model.Job ).get( self.job_id )
- for output_dataset_assoc in job.output_datasets:
- if output_dataset_assoc.dataset.ext == 'auto':
- context = self.get_dataset_finish_context( dict(), output_dataset_assoc.dataset.dataset )
- output_dataset_assoc.dataset.extension = context.get( 'ext', 'data' )
- self.sa_session.flush()
+ if set_extension:
+ for output_dataset_assoc in job.output_datasets:
+ if output_dataset_assoc.dataset.ext == 'auto':
+ context = self.get_dataset_finish_context( dict(), output_dataset_assoc.dataset.dataset )
+ output_dataset_assoc.dataset.extension = context.get( 'ext', 'data' )
+ self.sa_session.flush()
if tmp_dir is None:
#this dir should should relative to the exec_dir
tmp_dir = self.app.config.new_file_path
@@ -739,6 +746,7 @@
dataset_files_path = dataset_files_path,
config_root = config_root,
datatypes_config = datatypes_config,
+ job_metadata = os.path.join( self.working_directory, TOOL_PROVIDED_JOB_METADATA_FILE ),
**kwds )
class DefaultJobDispatcher( object ):
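
The recovery branch added above either resets the job to the NEW state (so the database-driven monitor picks it up) or re-enqueues it in memory. A compact sketch of that decision in isolation, with stand-in objects rather than the real Galaxy model:

# Sketch of the recovery branch above; job/queue/NEW are stand-ins.
NEW = 'new'

def recover_unrunnable_job( job, queue, track_jobs_in_database ):
    if track_jobs_in_database:
        # The database monitor thread will see the NEW state on its next pass.
        job.state = NEW
    else:
        # In-memory tracking: push the job back onto the local queue.
        queue.put( ( job.id, job.tool_id ) )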
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/jobs/runners/local.py
--- a/lib/galaxy/jobs/runners/local.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/jobs/runners/local.py Tue Nov 17 16:16:26 2009 -0500
@@ -106,7 +106,9 @@
#this is terminatable when output dataset/job is deleted
#so that long running set_meta()s can be cancelled without having to reboot the server
if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and self.app.config.set_metadata_externally and job_wrapper.output_paths:
- external_metadata_script = job_wrapper.setup_external_metadata( output_fnames = job_wrapper.get_output_fnames(), kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
+ external_metadata_script = job_wrapper.setup_external_metadata( output_fnames = job_wrapper.get_output_fnames(),
+ set_extension = True,
+ kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
external_metadata_proc = subprocess.Popen( args = external_metadata_script,
shell = True,
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/jobs/runners/pbs.py
--- a/lib/galaxy/jobs/runners/pbs.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/jobs/runners/pbs.py Tue Nov 17 16:16:26 2009 -0500
@@ -29,7 +29,6 @@
fi
cd %s
%s
-%s
"""
pbs_symlink_template = """#!/bin/sh
@@ -178,7 +177,9 @@
pbs_queue_name = self.determine_pbs_queue( runner_url )
c = pbs.pbs_connect( pbs_server_name )
if c <= 0:
- raise Exception( "Connection to PBS server for submit failed" )
+ job_wrapper.fail( "Unable to queue job for execution. Resubmitting the job may succeed." )
+ log.error( "Connection to PBS server for submit failed" )
+ return
# define job attributes
ofile = "%s/%s.o" % (self.app.config.cluster_files_directory, job_wrapper.job_id)
@@ -221,11 +222,15 @@
if self.app.config.pbs_stage_path != '':
script = pbs_symlink_template % (job_wrapper.galaxy_lib_dir, " ".join(job_wrapper.get_input_fnames() + output_files), self.app.config.pbs_stage_path, exec_dir, command_line)
else:
+ script = pbs_template % ( job_wrapper.galaxy_lib_dir, exec_dir, command_line )
if self.app.config.set_metadata_externally:
- external_metadata_script = job_wrapper.setup_external_metadata( exec_dir = os.path.abspath( os.getcwd() ), tmp_dir = self.app.config.new_file_path, dataset_files_path = self.app.model.Dataset.file_path, output_fnames = output_fnames, kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
- else:
- external_metadata_script = ""
- script = pbs_template % ( job_wrapper.galaxy_lib_dir, exec_dir, command_line, external_metadata_script )
+ script += "cd %s\n" % os.path.abspath( os.getcwd() )
+ script += "%s\n" % job_wrapper.setup_external_metadata( exec_dir = os.path.abspath( os.getcwd() ),
+ tmp_dir = self.app.config.new_file_path,
+ dataset_files_path = self.app.model.Dataset.file_path,
+ output_fnames = output_fnames,
+ set_extension = False,
+ kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
job_file = "%s/%s.sh" % (self.app.config.cluster_files_directory, job_wrapper.job_id)
fh = file(job_file, "w")
fh.write(script)
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/tools/actions/metadata.py
--- a/lib/galaxy/tools/actions/metadata.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/tools/actions/metadata.py Tue Nov 17 16:16:26 2009 -0500
@@ -41,6 +41,7 @@
output_fnames = None,
config_root = None,
datatypes_config = None,
+ job_metadata = None,
kwds = { 'overwrite' : True } )
incoming[ '__SET_EXTERNAL_METADATA_COMMAND_LINE__' ] = cmd_line
for name, value in tool.params_to_strings( incoming, trans.app ).iteritems():
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/web/controllers/dataset.py
--- a/lib/galaxy/web/controllers/dataset.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/web/controllers/dataset.py Tue Nov 17 16:16:26 2009 -0500
@@ -20,25 +20,38 @@
This error report was sent from the Galaxy instance hosted on the server
"${host}"
-----------------------------------------------------------------------------
-This is in reference to output dataset ${dataset_id}.
+This is in reference to dataset id ${dataset_id} from history id ${history_id}
+-----------------------------------------------------------------------------
+You should be able to view the history containing the related history item
+
+${hid}: ${history_item_name}
+
+by logging in as a Galaxy admin user to the Galaxy instance referenced above
+and pointing your browser to the following link.
+
+${history_view_link}
-----------------------------------------------------------------------------
The user '${email}' provided the following information:
+
${message}
-----------------------------------------------------------------------------
job id: ${job_id}
-tool id: ${tool_id}
+tool id: ${job_tool_id}
+-----------------------------------------------------------------------------
+job command line:
+${job_command_line}
-----------------------------------------------------------------------------
job stderr:
-${stderr}
+${job_stderr}
-----------------------------------------------------------------------------
job stdout:
-${stdout}
+${job_stdout}
-----------------------------------------------------------------------------
job info:
-${info}
+${job_info}
-----------------------------------------------------------------------------
job traceback:
-${traceback}
+${job_traceback}
-----------------------------------------------------------------------------
(This is an automated message).
"""
@@ -103,41 +116,45 @@
@web.expose
def errors( self, trans, id ):
- dataset = trans.sa_session.query( model.HistoryDatasetAssociation ).get( id )
- return trans.fill_template( "dataset/errors.mako", dataset=dataset )
-
+ hda = trans.sa_session.query( model.HistoryDatasetAssociation ).get( id )
+ return trans.fill_template( "dataset/errors.mako", hda=hda )
@web.expose
def stderr( self, trans, id ):
dataset = trans.sa_session.query( model.HistoryDatasetAssociation ).get( id )
job = dataset.creating_job_associations[0].job
trans.response.set_content_type( 'text/plain' )
return job.stderr
-
@web.expose
def report_error( self, trans, id, email='', message="" ):
smtp_server = trans.app.config.smtp_server
if smtp_server is None:
- return trans.show_error_message( "Sorry, mail is not configured for this galaxy instance" )
+ return trans.show_error_message( "Mail is not configured for this galaxy instance" )
to_address = trans.app.config.error_email_to
if to_address is None:
- return trans.show_error_message( "Sorry, error reporting has been disabled for this galaxy instance" )
+ return trans.show_error_message( "Error reporting has been disabled for this galaxy instance" )
# Get the dataset and associated job
- dataset = trans.sa_session.query( model.HistoryDatasetAssociation ).get( id )
- job = dataset.creating_job_associations[0].job
+ hda = trans.sa_session.query( model.HistoryDatasetAssociation ).get( id )
+ job = hda.creating_job_associations[0].job
# Get the name of the server hosting the Galaxy instance from which this report originated
host = trans.request.host
+ history_view_link = "%s/history/view?id=%s" % ( str( host ), trans.security.encode_id( hda.history_id ) )
# Build the email message
msg = MIMEText( string.Template( error_report_template )
.safe_substitute( host=host,
- dataset_id=dataset.id,
+ dataset_id=hda.dataset_id,
+ history_id=hda.history_id,
+ hid=hda.hid,
+ history_item_name=hda.get_display_name(),
+ history_view_link=history_view_link,
+ job_id=job.id,
+ job_tool_id=job.tool_id,
+ job_command_line=job.command_line,
+ job_stderr=job.stderr,
+ job_stdout=job.stdout,
+ job_info=job.info,
+ job_traceback=job.traceback,
email=email,
- message=message,
- job_id=job.id,
- tool_id=job.tool_id,
- stderr=job.stderr,
- stdout=job.stdout,
- traceback=job.traceback,
- info=job.info ) )
+ message=message ) )
frm = to_address
# Check email a bit
email = email.strip()
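
report_error() above fills the report with string.Template.safe_substitute(); a short illustration of how safe_substitute behaves when a name is missing, using placeholder values:

# Illustration of the template fill used in report_error() above.
import string

template = """
job id: ${job_id}
tool id: ${job_tool_id}
user message:
${message}
"""

body = string.Template( template ).safe_substitute( job_id=42,
                                                    job_tool_id='upload1' )
# ${message} is left in place because safe_substitute never raises KeyError
# for missing names; substitute() would.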
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/web/controllers/forms.py
--- a/lib/galaxy/web/controllers/forms.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/web/controllers/forms.py Tue Nov 17 16:16:26 2009 -0500
@@ -8,9 +8,71 @@
from elementtree.ElementTree import XML, Element
from galaxy.util.odict import odict
import copy
+from galaxy.web.framework.helpers import time_ago, iff, grids
log = logging.getLogger( __name__ )
+class FormsGrid( grids.Grid ):
+ # Custom column types
+ class NameColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, form):
+ return form.latest_form.name
+ class DescriptionColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, form):
+ return form.latest_form.desc
+ class TypeColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, form):
+ return form.latest_form.type
+ class DeletedColumn( grids.GridColumn ):
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filter_labels_and_vals = { "active" : "False", "deleted" : "True", "all": "All" }
+ accepted_filters = []
+ for label, val in accepted_filter_labels_and_vals.items():
+ args = { self.key: val }
+ accepted_filters.append( grids.GridColumnFilter( label, args) )
+ return accepted_filters
+ # Grid definition
+ title = "Forms"
+ template = "admin/forms/grid.mako"
+ model_class = model.FormDefinitionCurrent
+ default_sort_key = "-create_time"
+ num_rows_per_page = 50
+ preserve_state = True
+ use_paging = True
+ default_filter = dict( deleted="False" )
+ columns = [
+ NameColumn( "Name",
+ key="name",
+ model_class=model.FormDefinition,
+ link=( lambda item: iff( item.deleted, None, dict( operation="view", id=item.id ) ) ),
+ attach_popup=True,
+ filterable="advanced" ),
+ DescriptionColumn( "Description",
+ key='desc',
+ model_class=model.FormDefinition,
+ filterable="advanced" ),
+ TypeColumn( "Type" ),
+ DeletedColumn( "Deleted",
+ key="deleted",
+ visible=False,
+ filterable="advanced" )
+ ]
+ columns.append( grids.MulticolFilterColumn( "Search",
+ cols_to_filter=[ columns[0], columns[1] ],
+ key="free-text-search",
+ visible=False,
+ filterable="standard" ) )
+ operations = [
+ grids.GridOperation( "Edit", allow_multiple=False, condition=( lambda item: not item.deleted ) ),
+ grids.GridOperation( "Delete", allow_multiple=True, condition=( lambda item: not item.deleted ) ),
+ grids.GridOperation( "Undelete", condition=( lambda item: item.deleted ) ),
+ ]
+ global_actions = [
+ grids.GridAction( "Create new form", dict( controller='forms',
+ action='new' ) )
+ ]
+
class Forms( BaseController ):
# Empty form field
empty_field = { 'label': '',
@@ -20,38 +82,38 @@
'type': BaseField.form_field_types()[0],
'selectlist': [],
'layout': 'none' }
+ forms_grid = FormsGrid()
+
@web.expose
@web.require_admin
- def index( self, trans, **kwd ):
- params = util.Params( kwd )
- msg = util.restore_text( params.get( 'msg', '' ) )
- messagetype = params.get( 'messagetype', 'done' )
- return trans.fill_template( "/sample/index.mako",
- default_action=params.get( 'default_action', None ),
- msg=msg,
- messagetype=messagetype )
- @web.expose
- @web.require_admin
- def manage( self, trans, **kwd ):
- params = util.Params( kwd )
- msg = util.restore_text( params.get( 'msg', '' ) )
- messagetype = params.get( 'messagetype', 'done' )
- show_filter = params.get( 'show_filter', 'Active' )
- return self._show_forms_list(trans, msg, messagetype, show_filter)
- def _show_forms_list(self, trans, msg, messagetype, show_filter='Active'):
- all_forms = trans.sa_session.query( trans.app.model.FormDefinitionCurrent )
- if show_filter == 'All':
- forms_list = all_forms
- elif show_filter == 'Deleted':
- forms_list = [form for form in all_forms if form.deleted]
- else:
- forms_list = [form for form in all_forms if not form.deleted]
- return trans.fill_template( '/admin/forms/manage_forms.mako',
- fdc_list=forms_list,
- all_forms=all_forms,
- show_filter=show_filter,
- msg=msg,
- messagetype=messagetype )
+ def manage( self, trans, **kwd ):
+ if 'operation' in kwd:
+ operation = kwd['operation'].lower()
+ if not kwd.get( 'id', None ):
+ return trans.response.send_redirect( web.url_for( controller='forms',
+ action='manage',
+ status='error',
+ message="Invalid form ID") )
+ if operation == "view":
+ return self.__view( trans, **kwd )
+ elif operation == "delete":
+ return self.__delete( trans, **kwd )
+ elif operation == "undelete":
+ return self.__undelete( trans, **kwd )
+ elif operation == "edit":
+ return self.__edit( trans, **kwd )
+ return self.forms_grid( trans, **kwd )
+ def __view(self, trans, **kwd):
+ try:
+ fdc = trans.sa_session.query( trans.app.model.FormDefinitionCurrent )\
+ .get( trans.security.decode_id(kwd['id']) )
+ except:
+ return trans.response.send_redirect( web.url_for( controller='forms',
+ action='manage',
+ msg='Invalid form',
+ messagetype='error' ) )
+ return trans.fill_template( '/admin/forms/show_form_read_only.mako',
+ form=fdc.latest_form )
def __form_types_widget(self, trans, selected='none'):
form_type_selectbox = SelectField( 'form_type_selectbox',
refresh_on_change=True,
@@ -86,13 +148,14 @@
self.__get_saved_form( fd )
if self.__imported_from_file:
return trans.response.send_redirect( web.url_for( controller='forms',
- action='edit',
- show_form=True,
- form_id=fd.id) )
+ action='manage',
+ operation='edit',
+ id=trans.security.encode_id(fd.current.id)) )
else:
return trans.response.send_redirect( web.url_for( controller='forms',
- action='edit',
- form_id=fd.id,
+ action='manage',
+ operation='edit',
+ id=trans.security.encode_id(fd.current.id),
add_field_button='Add field',
name=fd.name,
description=fd.desc,
@@ -105,35 +168,43 @@
inputs=inputs,
msg=msg,
messagetype=messagetype )
- @web.expose
- @web.require_admin
- def delete( self, trans, **kwd ):
- params = util.Params( kwd )
- msg = util.restore_text( params.get( 'msg', '' ) )
- messagetype = params.get( 'messagetype', 'done' )
- fd = trans.sa_session.query( trans.app.model.FormDefinition ).get( int( util.restore_text( params.form_id ) ) )
- fd.form_definition_current.deleted = True
- trans.sa_session.add( fd.form_definition_current )
- trans.sa_session.flush()
- return self._show_forms_list(trans,
- msg='The form definition named %s is deleted.' % fd.name,
- messagetype='done')
- @web.expose
- @web.require_admin
- def undelete( self, trans, **kwd ):
- params = util.Params( kwd )
- msg = util.restore_text( params.get( 'msg', '' ) )
- messagetype = params.get( 'messagetype', 'done' )
- fd = trans.sa_session.query( trans.app.model.FormDefinition ).get( int( util.restore_text( params.form_id ) ) )
- fd.form_definition_current.deleted = False
- trans.sa_session.add( fd.form_definition_current )
- trans.sa_session.flush()
- return self._show_forms_list(trans,
- msg='The form definition named %s is undeleted.' % fd.name,
- messagetype='done')
- @web.expose
- @web.require_admin
- def edit( self, trans, **kwd ):
+ def __delete( self, trans, **kwd ):
+ id_list = util.listify( kwd['id'] )
+ delete_failed = []
+ for id in id_list:
+ try:
+ fdc = trans.sa_session.query( trans.app.model.FormDefinitionCurrent ).get( trans.security.decode_id(id) )
+ except:
+ return trans.response.send_redirect( web.url_for( controller='forms',
+ action='manage',
+ message='Invalid form',
+ status='error' ) )
+ fdc.deleted = True
+ trans.sa_session.add( fdc )
+ trans.sa_session.flush()
+ return trans.response.send_redirect( web.url_for( controller='forms',
+ action='manage',
+ message='%i form(s) is deleted.' % len(id_list),
+ status='done') )
+ def __undelete( self, trans, **kwd ):
+ id_list = util.listify( kwd['id'] )
+ delete_failed = []
+ for id in id_list:
+ try:
+ fdc = trans.sa_session.query( trans.app.model.FormDefinitionCurrent ).get( trans.security.decode_id(id) )
+ except:
+ return trans.response.send_redirect( web.url_for( controller='forms',
+ action='manage',
+ message='Invalid form',
+ status='error' ) )
+ fdc.deleted = False
+ trans.sa_session.add( fdc )
+ trans.sa_session.flush()
+ return trans.response.send_redirect( web.url_for( controller='forms',
+ action='manage',
+ message='%i form(s) is undeleted.' % len(id_list),
+ status='done') )
+ def __edit( self, trans, **kwd ):
'''
This callback method is for handling all the editing functions like
renaming fields, adding/deleting fields, changing fields attributes.
@@ -142,17 +213,28 @@
msg = util.restore_text( params.get( 'msg', '' ) )
messagetype = params.get( 'messagetype', 'done' )
try:
- fd = trans.sa_session.query( trans.app.model.FormDefinition ).get( int( params.get( 'form_id', None ) ) )
+ fdc = trans.sa_session.query( trans.app.model.FormDefinitionCurrent ).get( trans.security.decode_id(kwd['id']) )
except:
return trans.response.send_redirect( web.url_for( controller='forms',
action='manage',
- msg='Invalid form',
- messagetype='error' ) )
+ message='Invalid form',
+ status='error' ) )
+ fd = fdc.latest_form
#
- # Show the form for editing
+ # Save changes
#
- if params.get( 'show_form', False ):
+ if params.get( 'save_changes_button', False ):
+ fd_new, msg = self.__save_form( trans, fdc_id=fd.form_definition_current.id, **kwd )
+ # if validation error encountered while saving the form, show the
+ # unsaved form, with the error message
+ if not fd_new:
+ current_form = self.__get_form( trans, **kwd )
+ return self.__show( trans=trans, form=fd, current_form=current_form,
+ msg=msg, messagetype='error', **kwd )
+ # everything went fine. form saved successfully. Show the saved form
+ fd = fd_new
current_form = self.__get_saved_form( fd )
+ msg = "The form '%s' has been updated with the changes." % fd.name
return self.__show( trans=trans, form=fd, current_form=current_form,
msg=msg, messagetype=messagetype, **kwd )
#
@@ -193,31 +275,6 @@
return self.__show( trans=trans, form=fd, current_form=current_form,
msg=msg, messagetype=messagetype, **kwd )
#
- # Save changes
- #
- elif params.get( 'save_changes_button', False ):
- fd_new, msg = self.__save_form( trans, fdc_id=fd.form_definition_current.id, **kwd )
- # if validation error encountered while saving the form, show the
- # unsaved form, with the error message
- if not fd_new:
- current_form = self.__get_form( trans, **kwd )
- return self.__show( trans=trans, form=fd, current_form=current_form,
- msg=msg, messagetype='error', **kwd )
- # everything went fine. form saved successfully. Show the saved form
- fd = fd_new
- current_form = self.__get_saved_form( fd )
- msg = "The form '%s' has been updated with the changes." % fd.name
- return self.__show( trans=trans, form=fd, current_form=current_form,
- msg=msg, messagetype=messagetype, **kwd )
- #
- # Show form read-only
- #
- elif params.get( 'read_only', False ):
- return trans.fill_template( '/admin/forms/show_form_read_only.mako',
- form=fd,
- msg=msg,
- messagetype=messagetype )
- #
# Add SelectField option
#
elif 'Add' in kwd.values():
@@ -234,6 +291,13 @@
current_form = self.__get_form( trans, **kwd )
return self.__show( trans=trans, form=fd, current_form=current_form,
msg=msg, messagetype=messagetype, **kwd )
+ #
+ # Show the form for editing
+ #
+ else:
+ current_form = self.__get_saved_form( fd )
+ return self.__show( trans=trans, form=fd, current_form=current_form,
+ msg=msg, messagetype=messagetype, **kwd )
def __add_selectbox_option( self, trans, fd, msg, messagetype, **kwd ):
'''
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/web/controllers/history.py
--- a/lib/galaxy/web/controllers/history.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/web/controllers/history.py Tue Nov 17 16:16:26 2009 -0500
@@ -87,7 +87,7 @@
# Grid definition
title = "Saved Histories"
model_class = model.History
- template='/history/grid.mako'
+ template='/grid_base.mako'
default_sort_key = "-create_time"
columns = [
NameColumn( "Name", key="name", model_class=model.History,
@@ -110,14 +110,14 @@
)
operations = [
- grids.GridOperation( "Switch", allow_multiple=False, condition=( lambda item: not item.deleted ) ),
- grids.GridOperation( "Share", condition=( lambda item: not item.deleted ) ),
- grids.GridOperation( "Unshare", condition=( lambda item: not item.deleted ) ),
- grids.GridOperation( "Rename", condition=( lambda item: not item.deleted ) ),
- grids.GridOperation( "Delete", condition=( lambda item: not item.deleted ) ),
- grids.GridOperation( "Undelete", condition=( lambda item: item.deleted ) ),
- grids.GridOperation( "Enable import via link", condition=( lambda item: item.deleted ) ),
- grids.GridOperation( "Disable import via link", condition=( lambda item: item.deleted ) )
+ grids.GridOperation( "Switch", allow_multiple=False, condition=( lambda item: not item.deleted ), async_compatible=True ),
+ grids.GridOperation( "Share", condition=( lambda item: not item.deleted ), async_compatible=False ),
+ grids.GridOperation( "Unshare", condition=( lambda item: not item.deleted ), async_compatible=False ),
+ grids.GridOperation( "Rename", condition=( lambda item: not item.deleted ), async_compatible=False ),
+ grids.GridOperation( "Delete", condition=( lambda item: not item.deleted ), async_compatible=True ),
+ grids.GridOperation( "Undelete", condition=( lambda item: item.deleted ), async_compatible=True ),
+ grids.GridOperation( "Enable import via link", condition=( lambda item: item.deleted ), async_compatible=True ),
+ grids.GridOperation( "Disable import via link", condition=( lambda item: item.deleted ), async_compatible=True )
]
standard_filters = [
grids.GridColumnFilter( "Active", args=dict( deleted=False ) ),
@@ -262,7 +262,7 @@
n_deleted += 1
status = SUCCESS
if n_deleted:
- message_parts.append( "Deleted %d histories. " % n_deleted )
+ message_parts.append( "Deleted %d %s. " % ( n_deleted, iff( n_deleted != 1, "histories", "history" ) ) )
if deleted_current:
message_parts.append( "Your active history was deleted, a new empty history is now active. " )
status = INFO
@@ -290,7 +290,7 @@
status = SUCCESS
message_parts = []
if n_undeleted:
- message_parts.append( "Undeleted %d histories." % n_undeleted )
+ message_parts.append( "Undeleted %d %s. " % ( n_undeleted, iff( n_undeleted != 1, "histories", "history" ) ) )
if n_already_purged:
message_parts.append( "%d histories have already been purged and cannot be undeleted." % n_already_purged )
status = WARNING
@@ -438,23 +438,20 @@
Warning! If you import this history, you will lose your current
history. Click <a href="%s">here</a> to confirm.
""" % web.url_for( id=id, confirm=True ) )
-
@web.expose
def view( self, trans, id=None ):
"""View a history. If a history is importable, then it is viewable by any user."""
-
# Get history to view.
if not id:
return trans.show_error_message( "You must specify a history you want to view." )
history_to_view = get_history( trans, id, False)
-
# Integrity checks.
if not history_to_view:
- return trans.show_error_message( "The specified history does not exist.")
+ return trans.show_error_message( "The specified history does not exist." )
+ # Admin users can view any history
# TODO: Use a new flag to determine if history is viewable?
- if not history_to_view.importable:
- error( "The owner of this history has not published this history." )
-
+ if not trans.user_is_admin and not history_to_view.importable:
+ error( "Either you are not allowed to view this history or the owner of this history has not published it." )
# View history.
query = trans.sa_session.query( model.HistoryDatasetAssociation ) \
.filter( model.HistoryDatasetAssociation.history == history_to_view ) \
@@ -469,7 +466,6 @@
datasets = query.all(),
user_owns_history = user_owns_history,
show_deleted = False )
-
@web.expose
@web.require_login( "share histories with other users" )
def share( self, trans, id=None, email="", **kwd ):
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/web/controllers/library_common.py
--- a/lib/galaxy/web/controllers/library_common.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/web/controllers/library_common.py Tue Nov 17 16:16:26 2009 -0500
@@ -117,7 +117,7 @@
uploaded_dataset.link_data_only = True
uploaded_dataset.data.file_name = os.path.abspath( path )
trans.sa_session.add( uploaded_dataset.data )
- trans.sa_session.data.flush()
+ trans.sa_session.flush()
return uploaded_dataset
def get_server_dir_uploaded_datasets( self, trans, params, full_dir, import_dir_desc, library_bunch, err_redirect, msg ):
files = []
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/web/controllers/page.py
--- a/lib/galaxy/web/controllers/page.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/web/controllers/page.py Tue Nov 17 16:16:26 2009 -0500
@@ -1,6 +1,7 @@
from galaxy.web.base.controller import *
from galaxy.web.framework.helpers import time_ago, grids
from galaxy.util.sanitize_html import sanitize_html
+from galaxy.util.odict import odict
import re
@@ -69,21 +70,66 @@
]
def apply_default_filter( self, trans, query, **kwargs ):
return query.filter_by( deleted=False, published=True )
-
-
-class NameColumn( grids.TextColumn ):
- def get_value(self, trans, grid, history):
- return history.get_display_name()
class HistorySelectionGrid( grids.Grid ):
+ # Custom columns.
+ class NameColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, history):
+ return history.get_display_name()
+
+ class DeletedColumn( grids.GridColumn ):
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filter_labels_and_vals = { "active" : "False", "deleted" : "True", "all": "All" }
+ accepted_filters = []
+ for label, val in accepted_filter_labels_and_vals.items():
+ args = { self.key: val }
+ accepted_filters.append( grids.GridColumnFilter( label, args) )
+ return accepted_filters
+
+ class SharingColumn( grids.GridColumn ):
+ def filter( self, db_session, query, column_filter ):
+ """ Modify query to filter histories by sharing status. """
+ if column_filter == "All":
+ pass
+ elif column_filter:
+ if column_filter == "private":
+ query = query.filter( model.History.users_shared_with == None )
+ query = query.filter( model.History.importable == False )
+ elif column_filter == "shared":
+ query = query.filter( model.History.users_shared_with != None )
+ elif column_filter == "importable":
+ query = query.filter( model.History.importable == True )
+ return query
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filter_labels_and_vals = odict()
+ accepted_filter_labels_and_vals["private"] = "private"
+ accepted_filter_labels_and_vals["shared"] = "shared"
+ accepted_filter_labels_and_vals["importable"] = "importable"
+ accepted_filter_labels_and_vals["all"] = "All"
+ accepted_filters = []
+ for label, val in accepted_filter_labels_and_vals.items():
+ args = { self.key: val }
+ accepted_filters.append( grids.GridColumnFilter( label, args) )
+ return accepted_filters
+
# Grid definition.
title = "Saved Histories"
+ template = "grid_base_async.mako"
+ async_template = "grid_body_async.mako"
model_class = model.History
+ default_filter = { "deleted" : "False" , "shared" : "All" }
default_sort_key = "-update_time"
+ use_paging = True
+ num_rows_per_page = 5
columns = [
NameColumn( "Name", key="name", model_class=model.History, filterable="advanced" ),
grids.TagsColumn( "Tags", "tags", model.History, model.HistoryTagAssociation, filterable="advanced"),
grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
+ # Columns that are valid for filtering but are not visible.
+ DeletedColumn( "Deleted", key="deleted", visible=False, filterable="advanced" ),
+ SharingColumn( "Shared", key="shared", visible=False, filterable="advanced" ),
]
columns.append(
grids.MulticolFilterColumn(
@@ -91,6 +137,8 @@
cols_to_filter=[ columns[0], columns[1] ],
key="free-text-search", visible=False, filterable="standard" )
)
+ def apply_default_filter( self, trans, query, **kwargs ):
+ return query.filter_by( user=trans.user, purged=False )
class PageController( BaseController ):
@@ -268,4 +316,4 @@
@web.require_login("select a history from saved histories")
def list_histories_for_selection( self, trans, **kwargs ):
# Render the list view
- return self._history_selection_grid( trans, status=status, message=message, **kwargs )
\ No newline at end of file
+ return self._history_selection_grid( trans, **kwargs )
\ No newline at end of file
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/web/controllers/requests.py
--- a/lib/galaxy/web/controllers/requests.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/web/controllers/requests.py Tue Nov 17 16:16:26 2009 -0500
@@ -12,57 +12,109 @@
log = logging.getLogger( __name__ )
-class RequestsListGrid( grids.Grid ):
+class RequestsGrid( grids.Grid ):
+ # Custom column types
+ class NameColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request):
+ return request.name
+ class DescriptionColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request):
+ return request.desc
+ class SamplesColumn( grids.GridColumn ):
+ def get_value(self, trans, grid, request):
+ return str(len(request.samples))
+ class TypeColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request):
+ return request.type.name
+ class LastUpdateColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request):
+ return request.update_time
+ class StateColumn( grids.GridColumn ):
+ def filter( self, db_session, query, column_filter ):
+ """ Modify query to filter request by state. """
+ if column_filter == "All":
+ return query
+ if column_filter:
+ query = query.filter( model.Request.state == column_filter )
+ return query
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filter_labels_and_vals = [ model.Request.states.UNSUBMITTED,
+ model.Request.states.SUBMITTED,
+ model.Request.states.COMPLETE,
+ "All"]
+ accepted_filters = []
+ for val in accepted_filter_labels_and_vals:
+ label = val.lower()
+ args = { self.key: val }
+ accepted_filters.append( grids.GridColumnFilter( label, args) )
+ return accepted_filters
+ class DeletedColumn( grids.GridColumn ):
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filter_labels_and_vals = { "active" : "False", "deleted" : "True", "all": "All" }
+ accepted_filters = []
+ for label, val in accepted_filter_labels_and_vals.items():
+ args = { self.key: val }
+ accepted_filters.append( grids.GridColumnFilter( label, args) )
+ return accepted_filters
+ # Grid definition
title = "Sequencing Requests"
- template = '/requests/grid.mako'
+ template = 'requests/grid.mako'
model_class = model.Request
default_sort_key = "-create_time"
- show_filter = model.Request.states.UNSUBMITTED
+ num_rows_per_page = 50
+ preserve_state = True
+ use_paging = True
+ default_filter = dict( deleted="False", state=model.Request.states.UNSUBMITTED)
columns = [
- grids.GridColumn( "Name", key="name",
- link=( lambda item: iff( item.deleted, None, dict( operation="show_request", id=item.id ) ) ),
- attach_popup=True ),
- grids.GridColumn( "Description", key='desc'),
- grids.GridColumn( "Sample(s)", method='number_of_samples',
- link=( lambda item: iff( item.deleted, None, dict( operation="show_request", id=item.id ) ) ), ),
- grids.GridColumn( "Type", key="request_type_id", method='get_request_type'),
- grids.GridColumn( "Last update", key="update_time", format=time_ago ),
- grids.GridColumn( "State", key='state'),
+ NameColumn( "Name",
+ key="name",
+ model_class=model.Request,
+ link=( lambda item: iff( item.deleted, None, dict( operation="show_request", id=item.id ) ) ),
+ attach_popup=True,
+ filterable="advanced" ),
+ DescriptionColumn( "Description",
+ key='desc',
+ model_class=model.Request,
+ filterable="advanced" ),
+ SamplesColumn( "Sample(s)",
+ link=( lambda item: iff( item.deleted, None, dict( operation="show_request", id=item.id ) ) ), ),
+ TypeColumn( "Type" ),
+ LastUpdateColumn( "Last update",
+ format=time_ago ),
+ StateColumn( "State",
+ key='state',
+ filterable="advanced"),
+ DeletedColumn( "Deleted",
+ key="deleted",
+ visible=True,
+ filterable="advanced" )
]
+ columns.append( grids.MulticolFilterColumn( "Search",
+ cols_to_filter=[ columns[0], columns[1] ],
+ key="free-text-search",
+ visible=False,
+ filterable="standard" ) )
operations = [
grids.GridOperation( "Submit", allow_multiple=False, condition=( lambda item: not item.deleted and item.unsubmitted() and item.samples ) ),
grids.GridOperation( "Edit", allow_multiple=False, condition=( lambda item: not item.deleted and item.unsubmitted() ) ),
- grids.GridOperation( "Delete", allow_multiple=False, condition=( lambda item: not item.deleted and item.unsubmitted() ) ),
- grids.GridOperation( "Undelete", allow_multiple=False, condition=( lambda item: item.deleted ) )
+ grids.GridOperation( "Delete", allow_multiple=True, condition=( lambda item: not item.deleted and item.unsubmitted() ) ),
+ grids.GridOperation( "Undelete", allow_multiple=True, condition=( lambda item: item.deleted ) )
]
- standard_filters = [
- grids.GridColumnFilter( model.Request.states.UNSUBMITTED,
- args=dict( state=model.Request.states.UNSUBMITTED, deleted=False ) ),
- grids.GridColumnFilter( model.Request.states.SUBMITTED,
- args=dict( state=model.Request.states.SUBMITTED, deleted=False ) ),
- grids.GridColumnFilter( model.Request.states.COMPLETE, args=dict( state=model.Request.states.COMPLETE, deleted=False ) ),
- grids.GridColumnFilter( "Deleted", args=dict( deleted=True ) ),
- grids.GridColumnFilter( "All", args={} )
+ global_actions = [
+ grids.GridAction( "Create new request", dict( controller='requests',
+ action='new',
+ select_request_type='True' ) )
]
- #default_filter = dict( deleted=False )
- def get_current_item( self, trans ):
- return None
- def get_request_type(self, trans, request):
- return request.type.name
- def apply_default_filter( self, trans, query, **kwargs ):
- query = query.filter_by( user=trans.user )
- if self.default_filter:
- return query.filter_by( **self.default_filter )
- else:
- return query
- def number_of_samples(self, trans, request):
- return str(len(request.samples))
- def get_state(self, trans, request):
- return request.state
+ def apply_default_filter( self, trans, query, **kwd ):
+ return query.filter_by( user=trans.user )
+ def build_initial_query( self, session ):
+ return session.query( self.model_class )
class Requests( BaseController ):
- request_grid = RequestsListGrid()
+ request_grid = RequestsGrid()
@web.expose
@web.require_login( "create/submit sequencing requests" )
@@ -71,50 +123,43 @@
@web.expose
@web.require_login( "create/submit sequencing requests" )
- def list( self, trans, **kwargs ):
+ def list( self, trans, **kwd ):
'''
List all request made by the current user
'''
- status = message = None
- self.request_grid.default_filter = dict(state=trans.app.model.Request.states.UNSUBMITTED,
- deleted=False)
- if 'operation' in kwargs:
- operation = kwargs['operation'].lower()
+
+ if 'operation' in kwd:
+ operation = kwd['operation'].lower()
+ if not kwd.get( 'id', None ):
+ return trans.response.send_redirect( web.url_for( controller='requests',
+ action='list',
+ status='error',
+ message="Invalid request ID") )
if operation == "show_request":
- id = trans.security.decode_id(kwargs['id'])
- return self.__show_request(trans, id, kwargs.get('add_sample', False))
+ return self.__show_request( trans, **kwd )
elif operation == "submit":
- id = trans.security.decode_id(kwargs['id'])
- return self.__submit_request(trans, id)
+ return self.__submit_request( trans, **kwd )
elif operation == "delete":
- id = trans.security.decode_id(kwargs['id'])
- return self.__delete_request(trans, id)
+ return self.__delete_request( trans, **kwd )
elif operation == "undelete":
- id = trans.security.decode_id(kwargs['id'])
- return self.__undelete_request(trans, id)
+ return self.__undelete_request( trans, **kwd )
elif operation == "edit":
- id = trans.security.decode_id(kwargs['id'])
- return self.__edit_request(trans, id)
- if 'show_filter' in kwargs.keys():
- if kwargs['show_filter'] == 'All':
- self.request_grid.default_filter = {}
- elif kwargs['show_filter'] == 'Deleted':
- self.request_grid.default_filter = dict(deleted=True)
- else:
- self.request_grid.default_filter = dict(state=kwargs['show_filter'], deleted=False)
- self.request_grid.show_filter = kwargs.get('show_filter', trans.app.model.Request.states.UNSUBMITTED)
+ return self.__edit_request( trans, **kwd )
# Render the list view
- return self.request_grid( trans, **kwargs )
+ return self.request_grid( trans, **kwd )
- def __show_request(self, trans, id, add_sample=False):
+ def __show_request(self, trans, **kwd):
+ params = util.Params( kwd )
+ msg = util.restore_text( params.get( 'msg', '' ) )
+ messagetype = params.get( 'messagetype', 'done' )
+ add_sample = params.get('add_sample', False)
try:
- request = trans.sa_session.query( trans.app.model.Request ).get( id )
+ request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(kwd['id']) )
except:
return trans.response.send_redirect( web.url_for( controller='requests',
action='list',
status='error',
- message="Invalid request ID",
- **kwd) )
+ message="Invalid request ID" ) )
current_samples = []
for s in request.samples:
current_samples.append([s.name, s.values.content])
@@ -122,10 +167,11 @@
current_samples.append(['Sample_%i' % (len(current_samples)+1),['' for field in request.type.sample_form.fields]])
return trans.fill_template( '/requests/show_request.mako',
request=request,
- request_details=self.request_details(trans, id),
+ request_details=self.request_details(trans, request.id),
current_samples = current_samples,
sample_copy=self.__copy_sample(current_samples),
- details='hide', edit_mode='False')
+ details='hide', edit_mode='False',
+ msg=msg, messagetype=messagetype )
def request_details(self, trans, id):
'''
Shows the request details
@@ -685,7 +731,7 @@
message="Invalid request ID",
**kwd) )
if params.get('show', False) == 'True':
- return self.__edit_request(trans, request.id, **kwd)
+ return self.__edit_request(trans, **kwd)
elif params.get('save_changes_request_button', False) == 'Save changes' \
or params.get('edit_samples_button', False) == 'Edit samples':
request_type = trans.sa_session.query( trans.app.model.RequestType ).get( int( params.select_request_type ) )
@@ -714,11 +760,11 @@
messagetype='done',
**new_kwd) )
elif params.get('refresh', False) == 'true':
- return self.__edit_request(trans, request.id, **kwd)
+ return self.__edit_request(trans, **kwd)
- def __edit_request(self, trans, id, **kwd):
+ def __edit_request(self, trans, **kwd):
try:
- request = trans.sa_session.query( trans.app.model.Request ).get( id )
+ request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(kwd['id']) )
except:
msg = "Invalid request ID"
log.warn( msg )
@@ -758,59 +804,61 @@
msg=msg,
messagetype=messagetype)
return self.__show_request_form(trans)
- def __delete_request(self, trans, id):
- try:
- request = trans.sa_session.query( trans.app.model.Request ).get( id )
- except:
- msg = "Invalid request ID"
- log.warn( msg )
- return trans.response.send_redirect( web.url_for( controller='requests',
- action='list',
- status='error',
- message=msg,
- **kwd) )
- # change request's submitted field
- if not request.unsubmitted():
- return trans.response.send_redirect( web.url_for( controller='requests',
- action='list',
- status='error',
- message='This request cannot be deleted as it is already been submitted',
- **kwd) )
- request.deleted = True
- trans.sa_session.add( request )
- trans.sa_session.flush()
- kwd = {}
- kwd['id'] = trans.security.encode_id(request.id)
+ def __delete_request(self, trans, **kwd):
+ id_list = util.listify( kwd['id'] )
+ delete_failed = []
+ for id in id_list:
+ try:
+ request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(id) )
+ except:
+ msg = "Invalid request ID"
+ log.warn( msg )
+ return trans.response.send_redirect( web.url_for( controller='requests',
+ action='list',
+ status='error',
+ message=msg,
+ **kwd) )
+ # a request cannot be deleted once its submitted
+ if not request.unsubmitted():
+ delete_failed.append(request.name)
+ else:
+ request.deleted = True
+ trans.sa_session.add( request )
+ trans.sa_session.flush()
+ if not len(delete_failed):
+ msg = '%i request(s) has been deleted.' % len(id_list)
+ status = 'done'
+ else:
+ msg = '%i request(s) has been deleted. %i request %s could not be deleted as they have been submitted.' % (len(id_list)-len(delete_failed),
+ len(delete_failed), str(delete_failed))
+ status = 'warning'
+ return trans.response.send_redirect( web.url_for( controller='requests',
+ action='list',
+ status=status,
+ message=msg) )
+ def __undelete_request(self, trans, **kwd):
+ id_list = util.listify( kwd['id'] )
+ for id in id_list:
+ try:
+ request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(id) )
+ except:
+ msg = "Invalid request ID"
+ log.warn( msg )
+ return trans.response.send_redirect( web.url_for( controller='requests',
+ action='list',
+ status='error',
+ message=msg,
+ **kwd) )
+ request.deleted = False
+ trans.sa_session.add( request )
+ trans.sa_session.flush()
return trans.response.send_redirect( web.url_for( controller='requests',
action='list',
status='done',
- message='The request <b>%s</b> has been deleted.' % request.name,
- **kwd) )
- def __undelete_request(self, trans, id):
+ message='%i request(s) has been undeleted.' % len(id_list) ) )
+ def __submit_request(self, trans, **kwd):
try:
- request = trans.sa_session.query( trans.app.model.Request ).get( id )
- except:
- msg = "Invalid request ID"
- log.warn( msg )
- return trans.response.send_redirect( web.url_for( controller='requests',
- action='list',
- status='error',
- message=msg,
- **kwd) )
- # change request's submitted field
- request.deleted = False
- trans.sa_session.add( request )
- trans.sa_session.flush()
- kwd = {}
- kwd['id'] = trans.security.encode_id(request.id)
- return trans.response.send_redirect( web.url_for( controller='requests',
- action='list',
- status='done',
- message='The request <b>%s</b> has been undeleted.' % request.name,
- **kwd) )
- def __submit_request(self, trans, id):
- try:
- request = trans.sa_session.query( trans.app.model.Request ).get( id )
+ request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(kwd['id']) )
except:
msg = "Invalid request ID"
log.warn( msg )
@@ -837,14 +885,12 @@
request.state = request.states.SUBMITTED
trans.sa_session.add( request )
trans.sa_session.flush()
- kwd = {}
- kwd['id'] = trans.security.encode_id(request.id)
- kwd['status'] = 'done'
- kwd['message'] = 'The request <b>%s</b> has been submitted.' % request.name
return trans.response.send_redirect( web.url_for( controller='requests',
action='list',
- show_filter=trans.app.model.Request.states.SUBMITTED,
- **kwd) )
+ id=trans.security.encode_id(request.id),
+ status='done',
+ message='The request <b>%s</b> has been submitted.' % request.name
+ ) )
@web.expose
@web.require_login( "create/submit sequencing requests" )
def show_events(self, trans, **kwd):
diff -r 555afd0bf457 -r 37406d8ad116 lib/galaxy/web/controllers/requests_admin.py
--- a/lib/galaxy/web/controllers/requests_admin.py Tue Nov 17 16:14:54 2009 -0500
+++ b/lib/galaxy/web/controllers/requests_admin.py Tue Nov 17 16:16:26 2009 -0500
@@ -15,63 +15,187 @@
# ---- Request Grid ------------------------------------------------------------
#
-class RequestsListGrid( grids.Grid ):
+class RequestsGrid( grids.Grid ):
+ # Custom column types
+ class NameColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request):
+ return request.name
+ class DescriptionColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request):
+ return request.desc
+ class SamplesColumn( grids.GridColumn ):
+ def get_value(self, trans, grid, request):
+ return str(len(request.samples))
+ class TypeColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request):
+ return request.type.name
+ class LastUpdateColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request):
+ return request.update_time
+ class StateColumn( grids.GridColumn ):
+ def filter( self, db_session, query, column_filter ):
+ """ Modify query to filter request by state. """
+ if column_filter == "All":
+ return query
+ if column_filter:
+ query = query.filter( model.Request.state == column_filter )
+ return query
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filter_labels_and_vals = [ model.Request.states.UNSUBMITTED,
+ model.Request.states.SUBMITTED,
+ model.Request.states.COMPLETE,
+ "All"]
+ accepted_filters = []
+ for val in accepted_filter_labels_and_vals:
+ label = val.lower()
+ args = { self.key: val }
+ accepted_filters.append( grids.GridColumnFilter( label, args) )
+ return accepted_filters
+ class UserColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request):
+ return request.user.email
+ class DeletedColumn( grids.GridColumn ):
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filter_labels_and_vals = { "active" : "False", "deleted" : "True", "all": "All" }
+ accepted_filters = []
+ for label, val in accepted_filter_labels_and_vals.items():
+ args = { self.key: val }
+ accepted_filters.append( grids.GridColumnFilter( label, args) )
+ return accepted_filters
+ # Grid definition
title = "Sequencing Requests"
template = "admin/requests/grid.mako"
model_class = model.Request
default_sort_key = "-create_time"
- show_filter = model.Request.states.SUBMITTED
+ num_rows_per_page = 50
+ preserve_state = True
+ use_paging = True
+ default_filter = dict( deleted="False", state=model.Request.states.SUBMITTED)
columns = [
- grids.GridColumn( "Name", key="name",
- link=( lambda item: iff( item.deleted, None, dict( operation="show_request", id=item.id ) ) ),
- attach_popup=True ),
- grids.GridColumn( "Description", key="desc"),
- grids.GridColumn( "Sample(s)", method='number_of_samples',
- link=( lambda item: iff( item.deleted, None, dict( operation="show_request", id=item.id ) ) ), ),
- grids.GridColumn( "Type", key="request_type_id", method='get_request_type'),
- grids.GridColumn( "Last update", key="update_time", format=time_ago ),
- grids.GridColumn( "State", key='state'),
- grids.GridColumn( "User", key="user_id", method='get_user')
-
+ NameColumn( "Name",
+ key="name",
+ model_class=model.Request,
+ link=( lambda item: iff( item.deleted, None, dict( operation="show_request", id=item.id ) ) ),
+ attach_popup=True,
+ filterable="advanced" ),
+ DescriptionColumn( "Description",
+ key='desc',
+ model_class=model.Request,
+ filterable="advanced" ),
+ SamplesColumn( "Sample(s)",
+ link=( lambda item: iff( item.deleted, None, dict( operation="show_request", id=item.id ) ) ), ),
+ TypeColumn( "Type" ),
+ LastUpdateColumn( "Last update",
+ format=time_ago ),
+ StateColumn( "State",
+ key='state',
+ filterable="advanced"),
+ UserColumn( "User",
+ key='user.email',
+ model_class=model.Request,
+ filterable="advanced" ),
+ DeletedColumn( "Deleted",
+ key="deleted",
+ visible=True,
+ filterable="advanced" )
]
+ columns.append( grids.MulticolFilterColumn( "Search",
+ cols_to_filter=[ columns[0], columns[1], columns[6] ],
+ key="free-text-search",
+ visible=False,
+ filterable="standard" ) )
operations = [
grids.GridOperation( "Submit", allow_multiple=False, condition=( lambda item: not item.deleted and item.unsubmitted() and item.samples ) ),
grids.GridOperation( "Edit", allow_multiple=False, condition=( lambda item: not item.deleted ) ),
grids.GridOperation( "Reject", allow_multiple=False, condition=( lambda item: not item.deleted and item.submitted() ) ),
- grids.GridOperation( "Delete", allow_multiple=False, condition=( lambda item: not item.deleted and item.unsubmitted() ) ),
+ grids.GridOperation( "Delete", allow_multiple=True, condition=( lambda item: not item.deleted and item.unsubmitted() ) ),
grids.GridOperation( "Undelete", condition=( lambda item: item.deleted ) ),
]
- standard_filters = [
- grids.GridColumnFilter( model.Request.states.UNSUBMITTED,
- args=dict( state=model.Request.states.UNSUBMITTED, deleted=False ) ),
- grids.GridColumnFilter( model.Request.states.SUBMITTED,
- args=dict( state=model.Request.states.SUBMITTED, deleted=False ) ),
- grids.GridColumnFilter( model.Request.states.COMPLETE, args=dict( state=model.Request.states.COMPLETE, deleted=False ) ),
- grids.GridColumnFilter( "Deleted", args=dict( deleted=True ) ),
- grids.GridColumnFilter( "All", args=dict( deleted=False ) )
+ global_actions = [
+ grids.GridAction( "Create new request", dict( controller='requests_admin',
+ action='new',
+ select_request_type='True' ) )
]
- def get_user(self, trans, request):
- return trans.sa_session.query( trans.app.model.User ).get( request.user_id ).email
- def get_current_item( self, trans ):
- return None
- def get_request_type(self, trans, request):
- request_type = trans.sa_session.query( trans.app.model.RequestType ).get( request.request_type_id )
- return request_type.name
- def number_of_samples(self, trans, request):
- return str(len(request.samples))
- def apply_default_filter( self, trans, query, **kwargs ):
- if self.default_filter:
- return query.filter_by( **self.default_filter )
- else:
- return query
-
+
+#
+# ---- Request Type Gridr ------------------------------------------------------
+#
+class RequestTypeGrid( grids.Grid ):
+ # Custom column types
+ class NameColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request_type):
+ return request_type.name
+ class DescriptionColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request_type):
+ return request_type.desc
+ class RequestFormColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request_type):
+ return request_type.request_form.name
+ class SampleFormColumn( grids.TextColumn ):
+ def get_value(self, trans, grid, request_type):
+ return request_type.sample_form.name
+ class DeletedColumn( grids.GridColumn ):
+ def get_accepted_filters( self ):
+ """ Returns a list of accepted filters for this column. """
+ accepted_filter_labels_and_vals = { "active" : "False", "deleted" : "True", "all": "All" }
+ accepted_filters = []
+ for label, val in accepted_filter_labels_and_vals.items():
+ args = { self.key: val }
+ accepted_filters.append( grids.GridColumnFilter( label, args) )
+ return accepted_filters
+ # Grid definition
+ title = "Requests Types"
+ template = "admin/requests/manage_request_types.mako"
+ model_class = model.RequestType
+ default_sort_key = "-create_time"
+ num_rows_per_page = 50
+ preserve_state = True
+ use_paging = True
+ default_filter = dict( deleted="False" )
+ columns = [
+ NameColumn( "Name",
+ key="name",
+ model_class=model.RequestType,
+ link=( lambda item: iff( item.deleted, None, dict( operation="view", id=item.id ) ) ),
+ attach_popup=True,
+ filterable="advanced" ),
+ DescriptionColumn( "Description",
+ key='desc',
+ model_class=model.Request,
+ filterable="advanced" ),
+ RequestFormColumn( "Request Form",
+ link=( lambda item: iff( item.deleted, None, dict( operation="view_form", id=item.request_form.id ) ) ), ),
+ SampleFormColumn( "Sample Form",
+ link=( lambda item: iff( item.deleted, None, dict( operation="view_form", id=item.sample_form.id ) ) ), ),
+ DeletedColumn( "Deleted",
+ key="deleted",
+ visible=False,
+ filterable="advanced" )
+ ]
+ columns.append( grids.MulticolFilterColumn( "Search",
+ cols_to_filter=[ columns[0], columns[1] ],
+ key="free-text-search",
+ visible=False,
+ filterable="standard" ) )
+ operations = [
+ #grids.GridOperation( "Update", allow_multiple=False, condition=( lambda item: not item.deleted ) ),
+ grids.GridOperation( "Delete", allow_multiple=True, condition=( lambda item: not item.deleted ) ),
+ grids.GridOperation( "Undelete", condition=( lambda item: item.deleted ) ),
+ ]
+ global_actions = [
+ grids.GridAction( "Create new request type", dict( controller='requests_admin',
+ action='create_request_type' ) )
+ ]
#
# ---- Request Controller ------------------------------------------------------
#
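The changeset above moves the forms, history, requests and requests_admin controllers onto the shared grids framework: each controller defines a grids.Grid subclass with custom column classes and filters, and a single entry point dispatches grid operations on encoded item IDs instead of exposing one action per operation. A minimal sketch of that dispatch pattern follows; ItemsGrid, __view and __toggle_deleted are hypothetical names introduced only for illustration, while trans, web, util and the callable grid mirror what the diff itself uses.

    class ItemsController( BaseController ):
        items_grid = ItemsGrid()   # a grids.Grid subclass like the ones defined above

        @web.expose
        @web.require_admin
        def manage( self, trans, **kwd ):
            if 'operation' in kwd:
                operation = kwd['operation'].lower()
                # Grid checkboxes can submit a single id or a list; normalize first.
                ids = util.listify( kwd.get( 'id', [] ) )
                if not ids:
                    return trans.response.send_redirect( web.url_for( controller='items',
                                                                      action='manage',
                                                                      status='error',
                                                                      message='No item selected' ) )
                decoded_ids = [ trans.security.decode_id( id ) for id in ids ]
                if operation == 'view':
                    return self.__view( trans, decoded_ids[0] )
                elif operation in ( 'delete', 'undelete' ):
                    return self.__toggle_deleted( trans, decoded_ids, deleted=( operation == 'delete' ) )
            # No operation requested: render the grid itself.
            return self.items_grid( trans, **kwd )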
details: http://www.bx.psu.edu/hg/galaxy/rev/655868f3ef98
changeset: 3099:655868f3ef98
user: Enis Afgan <afgane(a)gmail.com>
date: Tue Nov 17 16:39:05 2009 -0500
description:
Fix cloud execution on startup
diffstat:
lib/galaxy/cloud/__init__.py | 2 +-
lib/galaxy/config.py | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diffs (30 lines):
diff -r 37406d8ad116 -r 655868f3ef98 lib/galaxy/cloud/__init__.py
--- a/lib/galaxy/cloud/__init__.py Tue Nov 17 16:16:26 2009 -0500
+++ b/lib/galaxy/cloud/__init__.py Tue Nov 17 16:39:05 2009 -0500
@@ -61,7 +61,7 @@
def __init__( self, app ):
self.app = app
self.sa_session = app.model.context
- if self.app.config.get_bool( "enable_cloud_execution", True ):
+ if self.app.config.enable_cloud_execution == True:
# The dispatcher manager for underlying cloud instances - implements and contacts individual cloud providers
self.provider = CloudProvider( app )
# Monitor for updating status of cloud instances
diff -r 37406d8ad116 -r 655868f3ef98 lib/galaxy/config.py
--- a/lib/galaxy/config.py Tue Nov 17 16:16:26 2009 -0500
+++ b/lib/galaxy/config.py Tue Nov 17 16:39:05 2009 -0500
@@ -114,11 +114,11 @@
self.tool_runners = []
self.datatypes_config = kwargs.get( 'datatypes_config_file', 'datatypes_conf.xml' )
# Cloud configuration options
- self.cloud_controller_instance = string_as_bool( kwargs.get( 'cloud_controller_instance', False ) )
+ self.cloud_controller_instance = string_as_bool( kwargs.get( 'cloud_controller_instance', 'False' ) )
if self.cloud_controller_instance == True:
- self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', True ) )
+ self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'True' ) )
else:
- self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', False ) )
+ self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'False' ) )
def get( self, key, default ):
return self.config_dict.get( key, default )
def get_bool( self, key, default ):
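The underlying issue is that ini options arrive as strings, so the Config constructor now passes string defaults ('True'/'False') through the same string_as_bool parser for every path, and the cloud controller checks the resulting enable_cloud_execution attribute directly instead of calling get_bool with a hard-coded default. A rough stand-in for what such a parser does (the real helper lives in galaxy.util; this is only a sketch of its behaviour):

    def string_as_bool( value ):
        # Treat the usual "truthy" spellings as True; anything else, including
        # the string 'False' and None, parses as False.
        if str( value ).lower() in ( 'true', 'yes', 'on', '1' ):
            return True
        return False

    # With string defaults, an unset option and an explicit "False" in the ini
    # file both go through the same code path:
    enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'False' ) )

Here kwargs stands for the keyword arguments the Config constructor receives from the parsed ini file.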
23 Nov '09
details: http://www.bx.psu.edu/hg/galaxy/rev/39502dd3fd23
changeset: 3095:39502dd3fd23
user: Enis Afgan <afgane(a)gmail.com>
date: Mon Nov 16 20:37:47 2009 -0500
description:
Ported DB interaction to be compatible with Sqlalchemy 0.5.6
diffstat:
lib/galaxy/cloud/__init__.py | 425 +++++++++++++++++++----------------
lib/galaxy/cloud/providers/ec2.py | 341 ++++++++++++++++-----------
lib/galaxy/cloud/providers/eucalyptus.py | 432 ++++++++++++++++++++++--------------
lib/galaxy/config.py | 9 +-
lib/galaxy/web/controllers/cloud.py | 151 +++++------
templates/cloud/configure_cloud.mako | 3 +-
6 files changed, 768 insertions(+), 593 deletions(-)
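Most of the 2,608-line diff below is a mechanical translation from the SQLAlchemy 0.4-era class-level helpers (Model.get(), Model.filter_by(), obj.flush()) to explicit session calls. A sketch of the before/after shape, where app, uci_id and new_state are placeholders for whatever the calling code has in scope:

    # 0.4-style access that 0.5 no longer provides:
    #     uci = model.UCI.get( uci_id )
    #     uci.refresh()
    #     uci.state = new_state
    #     uci.flush()

    # 0.5.6-style access used throughout the changeset: every read and write
    # goes through an explicit (scoped) session.
    sa_session = app.model.context
    uci = sa_session.query( model.UCI ).get( uci_id )
    sa_session.refresh( uci )
    uci.state = new_state
    sa_session.add( uci )    # make sure the object is attached before flushing
    sa_session.flush()       # write the pending change to the database

Column references move the same way, from model.UCI.c.state to model.UCI.table.c.state, as the diff shows.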
diffs (2608 lines):
diff -r 7d013eb98022 -r 39502dd3fd23 lib/galaxy/cloud/__init__.py
--- a/lib/galaxy/cloud/__init__.py Thu Nov 12 16:36:07 2009 -0500
+++ b/lib/galaxy/cloud/__init__.py Mon Nov 16 20:37:47 2009 -0500
@@ -60,6 +60,7 @@
"""
def __init__( self, app ):
self.app = app
+ self.sa_session = app.model.context
if self.app.config.get_bool( "enable_cloud_execution", True ):
# The dispatcher manager for underlying cloud instances - implements and contacts individual cloud providers
self.provider = CloudProvider( app )
@@ -99,6 +100,7 @@
# Keep track of the pid that started the cloud manager, only it
# has valid threads
self.parent_pid = os.getpid()
+ self.sa_session = app.model.context
# Contains requests that are waiting (only use from monitor thread)
self.waiting = []
@@ -122,7 +124,6 @@
cnt = 0 # Run global update only periodically so keep counter variable
while self.running:
try:
-# log.debug( "Calling monitor_step" )
self.__monitor_step()
if cnt%30 == 0: # Run global update every 30 iterations (1 minute)
self.provider.update()
@@ -144,27 +145,23 @@
it is marked as having errors and removed from the queue. Otherwise,
the job is dispatched.
"""
- # Get an orm (object relational mapping) session
- session = mapping.Session()
+ model = self.app.model
new_requests = []
- for r in session.query( model.UCI ) \
- .filter( or_( model.UCI.c.state==uci_states.NEW_UCI,
- model.UCI.c.state==uci_states.SUBMITTED_UCI,
- model.UCI.c.state==uci_states.SHUTTING_DOWN_UCI,
- model.UCI.c.state==uci_states.DELETING_UCI,
- model.UCI.c.state==uci_states.SNAPSHOT_UCI ) ) \
+ for r in self.sa_session.query( model.UCI ) \
+ .filter( or_( model.UCI.table.c.state==uci_states.NEW_UCI,
+ model.UCI.table.c.state==uci_states.SUBMITTED_UCI,
+ model.UCI.table.c.state==uci_states.SHUTTING_DOWN_UCI,
+ model.UCI.table.c.state==uci_states.DELETING_UCI,
+ model.UCI.table.c.state==uci_states.SNAPSHOT_UCI ) ) \
.all():
- uci_wrapper = UCIwrapper( r )
+ uci_wrapper = UCIwrapper( r, self.app )
new_requests.append( uci_wrapper )
for uci_wrapper in new_requests:
- session.clear()
+ self.sa_session.expunge_all()
self.put( uci_wrapper )
-
- # Done with the session
- mapping.Session.remove()
-
+
def put( self, uci_wrapper ):
"""Add a request to the queue."""
self.provider.put( uci_wrapper )
@@ -186,8 +183,10 @@
"""
Wraps 'model.UCI' with convenience methods for state management
"""
- def __init__( self, uci ):
+ def __init__( self, uci, app ):
self.uci_id = uci.id
+ self.app = app
+ self.sa_session = self.app.model.context
# --------- Setter methods -----------------
@@ -199,56 +198,61 @@
"""
# log.debug( "Changing state - new uci_state: %s, instance_id: %s, i_state: %s" % ( uci_state, instance_id, i_state ) )
if uci_state is not None:
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
uci.state = uci_state
- uci.flush()
+ self.sa_session.flush()
if ( instance_id is not None ) and ( i_state is not None ):
- instance = model.CloudInstance.filter_by( uci_id=self.uci_id, instance_id=instance_id).first()
+ instance = self.sa_session.query( model.CloudInstance ).filter_by( uci_id=self.uci_id, instance_id=instance_id).first()
instance.state = i_state
- instance.flush()
+ self.sa_session.add( instance )
+ self.sa_session.flush()
def set_mi( self, i_index, mi_id ):
"""
Sets Machine Image (MI), e.g., 'ami-66fa190f', for UCI's instance with given index as it
is stored in local Galaxy database.
"""
- mi = model.CloudImage.filter( model.CloudImage.c.image_id==mi_id ).first()
- instance = model.CloudInstance.get( i_index )
+ mi = self.sa_session.query( model.CloudImage ).filter( model.CloudImage.table.c.image_id==mi_id ).first()
+ instance = self.sa_session.query( model.CloudInstance ).get( i_index )
instance.image = mi
- instance.flush()
+ self.sa_session.add( instance )
+ self.sa_session.flush()
def set_key_pair( self, key_name, key_material=None ):
"""
Sets key pair value for current UCI.
"""
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
uci.key_pair_name = key_name
if key_material is not None:
uci.key_pair_material = key_material
- uci.flush()
+ self.sa_session.flush()
- def set_launch_time( self, launch_time, i_index=None, i_id=None ):
+ def set_instance_launch_time( self, launch_time, i_index=None, i_id=None ):
"""
Stores launch time in local database for instance with specified index - i_index (as it is stored in local
Galaxy database) or with specified instance ID - i_id (as obtained from the cloud provider AND stored
in local Galaxy Database). Either 'i_index' or 'i_id' needs to be provided.
"""
if i_index != None:
- instance = model.CloudInstance.get( i_index )
- instance.launch_time = launch_time
- instance.flush()
+ instance = self.sa_session.query( model.CloudInstance ).get( i_index )
elif i_id != None:
- instance = model.CloudInstance.filter_by( uci_id=self.uci_id, instance_id=i_id).first()
- instance.launch_time = launch_time
- instance.flush()
+ instance = self.sa_session.query( model.CloudInstance ).filter_by( uci_id=self.uci_id, instance_id=i_id).first()
+ else:
+ return None
+
+ instance.launch_time = launch_time
+ self.sa_session.add( instance )
+ self.sa_session.flush()
def set_uci_launch_time( self, launch_time ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
uci.launch_time = launch_time
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
def set_stop_time( self, stop_time, i_index=None, i_id=None ):
"""
@@ -257,20 +261,16 @@
in local Galaxy Database). Either 'i_index' or 'i_id' needs to be provided.
"""
if i_index != None:
- instance = model.CloudInstance.get( i_index )
- instance.stop_time = stop_time
- instance.flush()
+ instance = self.sa_session.query( model.CloudInstance ).get( i_index )
elif i_id != None:
- instance = model.CloudInstance.filter_by( uci_id=self.uci_id, instance_id=i_id).first()
- instance.stop_time = stop_time
- instance.flush()
+ instance = self.sa_session.query( model.CloudInstance ).filter_by( uci_id=self.uci_id, instance_id=i_id).first()
+ else:
+ return None
+
+ instance.stop_time = stop_time
+ self.sa_session.add( instance )
+ self.sa_session.flush()
- def reset_uci_launch_time( self ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
- uci.launch_time = None
- uci.flush()
-
def set_security_group_name( self, security_group_name, i_index=None, i_id=None ):
"""
Stores security group name in local database for instance with specified index - i_index (as it is stored in local
@@ -278,90 +278,107 @@
in local Galaxy Database). Either 'i_index' or 'i_id' needs to be provided.
"""
if i_index != None:
- instance = model.CloudInstance.get( i_index )
- instance.security_group = security_group_name
- instance.flush()
+ instance = self.sa_session.query( model.CloudInstance ).get( i_index )
elif i_id != None:
- instance = model.CloudInstance.filter_by( uci_id=self.uci_id, instance_id=i_id).first()
- instance.security_group = security_group_name
- instance.flush()
+ instance = self.sa_session.query( model.CloudInstance ).filter_by( uci_id=self.uci_id, instance_id=i_id).first()
+ else:
+ return None
+
+ instance.security_group = security_group_name
+ self.sa_session.add( instance )
+ self.sa_session.flush()
def set_reservation_id( self, i_index, reservation_id ):
- instance = model.CloudInstance.get( i_index )
+ instance = self.sa_session.query( model.CloudInstance ).get( i_index )
instance.reservation_id = reservation_id
- instance.flush()
+ self.sa_session.add( instance )
+ self.sa_session.flush()
def set_instance_id( self, i_index, instance_id ):
"""
i_index refers to UCI's instance ID as stored in local database
instance_id refers to real-world, cloud resource ID (e.g., 'i-78hd823a')
"""
- instance = model.CloudInstance.get( i_index )
+ instance = self.sa_session.query( model.CloudInstance ).get( i_index )
instance.instance_id = instance_id
- instance.flush()
+ self.sa_session.add( instance )
+ self.sa_session.flush()
- def set_public_dns( self, instance_id, public_dns ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
- uci.instance[instance_id].public_dns = public_dns
- uci.instance[instance_id].flush()
+# def set_public_dns( self, instance_id, public_dns ):
+# uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+# self.sa_session.refresh( uci )
+# uci.instance[instance_id].public_dns = public_dns
+# uci.instance[instance_id].flush()
+#
+# def set_private_dns( self, instance_id, private_dns ):
+# uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+# self.sa_session.refresh( uci )
+# uci.instance[instance_id].private_dns = private_dns
+# uci.instance[instance_id].flush()
- def set_private_dns( self, instance_id, private_dns ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
- uci.instance[instance_id].private_dns = private_dns
- uci.instance[instance_id].flush()
-
+ def reset_uci_launch_time( self ):
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
+ uci.launch_time = None
+ self.sa_session.add( uci )
+ self.sa_session.flush()
+
+ def set_error( self, error, set_state=False ):
+ """
+ Sets error field of given UCI in local Galaxy database as well as any instances associated with
+ this UCI whose state is 'None' or 'SUBMITTED'. If set_state is set to 'true',
+ method also sets state of give UCI and corresponding instances to 'error'
+ """
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
+ uci.error = error
+ if set_state:
+ uci.state = uci_states.ERROR
+ # Process all instances associated with this UCI
+ instances = self.sa_session.query( model.CloudInstance ) \
+ .filter_by( uci=uci ) \
+ .filter( or_( model.CloudInstance.table.c.state==None, model.CloudInstance.table.c.state==instance_states.SUBMITTED ) ) \
+ .all()
+ for i in instances:
+ i.error = error
+ i.state = instance_states.ERROR
+ self.sa_session.add( i )
+ self.sa_session.flush()
+
+ self.sa_session.add( uci )
+ self.sa_session.flush()
+
+ def set_deleted( self ):
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
+ uci.state = uci_states.DELETED # for bookkeeping reasons, mark as deleted but don't actually delete.
+ uci.deleted = True
+ self.sa_session.add( uci )
+ self.sa_session.flush()
+
def set_store_device( self, store_id, device ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
uci.store[store_id].device = device
uci.store[store_id].flush()
def set_store_error( self, error, store_index=None, store_id=None ):
if store_index != None:
- store = model.CloudStore.get( store_index )
+ store = self.sa_session.query( model.CloudStore ).get( store_index )
elif store_id != None:
- store = model.CloudStore.filter_by( volume_id = store_id ).first()
+ store = self.sa_session.query( model.CloudStore ).filter_by( volume_id = store_id ).first()
else:
return None
store.error = error
- store.flush()
+ self.sa_session.add( store )
+ self.sa_session.flush()
def set_store_status( self, vol_id, status ):
- vol = model.CloudStore.filter( model.CloudStore.c.volume_id == vol_id ).first()
+ vol = self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.volume_id == vol_id ).first()
vol.status = status
- vol.flush()
-
- def set_snapshot_id( self, snap_index, id ):
- snap = model.CloudSnapshot.get( snap_index )
- snap.snapshot_id = id
- snap.flush()
-
- def set_snapshot_status( self, status, snap_index=None, snap_id=None ):
- if snap_index != None:
- snap = model.CloudSnapshot.get( snap_index )
- elif snap_id != None:
- snap = model.CloudSnapshot.filter_by( snapshot_id = snap_id).first()
- else:
- return
- snap.status = status
- snap.flush()
-
- def set_snapshot_error( self, error, snap_index=None, snap_id=None, set_status=False ):
- if snap_index != None:
- snap = model.CloudSnapshot.get( snap_index )
- elif snap_id != None:
- snap = model.CloudSnapshot.filter_by( snapshot_id = snap_id).first()
- else:
- return
- snap.error = error
-
- if set_status:
- snap.status = snapshot_status.ERROR
-
- snap.flush()
+ self.sa_session.add( vol )
+ self.sa_session.flush()
def set_store_availability_zone( self, availability_zone, vol_id=None ):
"""
@@ -369,78 +386,91 @@
UCI or for the volume whose volume ID (e.g., 'vol-39F80512') is provided as argument.
"""
if vol_id is not None:
- vol = model.CloudStore.filter( model.CloudStore.c.volume_id == vol_id ).all()
+ vol = self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.volume_id == vol_id ).all()
else:
- vol = model.CloudStore.filter( model.CloudStore.c.uci_id == self.uci_id ).all()
+ vol = self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.uci_id == self.uci_id ).all()
for v in vol:
v.availability_zone = availability_zone
- v.flush()
+ self.sa_session.add( v )
+ self.sa_session.flush()
- def set_store_volume_id( self, store_id, volume_id ):
+ def set_store_volume_id( self, store_index, volume_id ):
"""
- Given store ID associated with this UCI, set volume ID as it is registered
+ Given store index associated with this UCI in local database, set volume ID as it is registered
on the cloud provider (e.g., vol-39890501)
"""
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
- uci.store[store_id].volume_id = volume_id
- uci.store[store_id].flush()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
+ uci.store[store_index].volume_id = volume_id
+ #uci.store[store_index].flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
def set_store_instance( self, vol_id, instance_id ):
"""
Stores instance ID that given store volume is attached to. Store volume ID should
be given in following format: 'vol-78943248'
"""
- vol = model.CloudStore.filter( model.CloudStore.c.volume_id == vol_id ).first()
+ vol = self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.volume_id == vol_id ).first()
vol.i_id = instance_id
- vol.flush()
+ self.sa_session.add( vol )
+ self.sa_session.flush()
+
+ def set_snapshot_id( self, snap_index, id ):
+ snap = model.CloudSnapshot.get( snap_index )
- def set_error( self, error, set_state=False ):
- """
- Sets error field of given UCI in local Galaxy database as well as any instances associated with
- this UCI whose state is 'None' or 'SUBMITTED'. If set_state is set to 'true',
- method also sets state of give UCI and corresponding instances to 'error'
- """
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
- uci.error = error
- if set_state:
- uci.state = uci_states.ERROR
- instances = model.CloudInstance \
- .filter_by( uci=uci ) \
- .filter( or_( model.CloudInstance.c.state==None, model.CloudInstance.c.state==instance_states.SUBMITTED ) ) \
- .all()
- for i in instances:
- i.error = error
- i.state = instance_states.ERROR
- i.flush()
- uci.flush()
+ snap.snapshot_id = id
+ self.sa_session.add( snap )
+ self.sa_session.flush()
+
+ def set_snapshot_status( self, status, snap_index=None, snap_id=None ):
+ if snap_index != None:
+ snap = self.sa_session.query( model.CloudSnapshot ).get( snap_index )
+ elif snap_id != None:
+ snap = self.sa_session.query( model.CloudSnapshot ).filter_by( snapshot_id = snap_id).first()
+ else:
+ return
+ snap.status = status
+ self.sa_session.add( snap )
+ self.sa_session.flush()
- def set_deleted( self ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
- uci.state = uci_states.DELETED # for bookkeeping reasons, mark as deleted but don't actually delete.
- uci.deleted = True
- uci.flush()
+ def set_snapshot_error( self, error, snap_index=None, snap_id=None, set_status=False ):
+ if snap_index != None:
+ snap = self.sa_session.query( model.CloudSnapshot ).get( snap_index )
+ elif snap_id != None:
+ snap = self.sa_session.query( model.CloudSnapshot ).filter_by( snapshot_id = snap_id).first()
+ else:
+ return
+ snap.error = error
+ if set_status:
+ snap.status = snapshot_status.ERROR
+
+ self.sa_session.add( snap )
+ self.sa_session.flush()
# --------- Getter methods -----------------
def get_provider_type( self ):
""" Returns type of cloud provider associated with given UCI. """
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
-# cred_id = uci.credentials_id
-# cred = model.CloudUserCredentials.get( cred_id )
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.credentials.provider.type
- def get_type( self, i_index ):
- instance = model.CloudInstance.get( i_index )
+ def get_provider( self ):
+ """ Returns database object of cloud provider associated with credentials of given UCI. """
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
+ return uci.credentials.provider
+
+ def get_instance_type( self, i_index ):
+ instance = self.sa_session.query( model.CloudInstance ).get( i_index )
+ self.sa_session.refresh( instance )
return instance.type
- def get_state( self ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ def get_uci_state( self ):
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.state
def get_instances_indexes( self, state=None ):
@@ -448,9 +478,12 @@
Returns the indexes of instances associated with the given UCI whose state matches the passed
argument. The returned indexes can be used to look the instances up in the local Galaxy database.
"""
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
- instances = model.CloudInstance.filter_by( uci=uci ).filter( model.CloudInstance.c.state==state ).all()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
+ instances = self.sa_session.query( model.CloudInstance ) \
+ .filter_by( uci=uci ) \
+ .filter( model.CloudInstance.table.c.state==state ) \
+ .all()
il = []
for i in instances:
il.append( i.id )
@@ -458,40 +491,46 @@
return il
def get_instance_state( self, instance_id ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.instance[instance_id].state
def get_instances_ids( self ):
"""
- Returns list IDs of all instances' associated with this UCI that are not in 'terminated' state
- (e.g., ['i-402906D2', 'i-q0290dsD2'] ).
+ Returns a list of IDs for all instances associated with this UCI whose state is defined
+ (i.e., not None) and is neither 'terminated' nor 'error'
+ (e.g., return value: ['i-402906D2', 'i-q0290dsD2'] ).
"""
- il = model.CloudInstance.filter_by( uci_id=self.uci_id ).filter( model.CloudInstance.c.state != 'terminated' ).all()
+ il = self.sa_session.query( model.CloudInstance ) \
+ .filter_by( uci_id=self.uci_id ) \
+ .filter( model.CloudInstance.table.c.state != 'terminated' ) \
+ .filter( model.CloudInstance.table.c.state != 'error' ) \
+ .filter( model.CloudInstance.table.c.state != None ) \
+ .all()
instanceList = []
for i in il:
instanceList.append( i.instance_id )
return instanceList
def get_name( self ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.name
def get_key_pair_name( self ):
"""
Returns keypair name associated with given UCI.
"""
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.key_pair_name
def get_key_pair_material( self ):
"""
Returns keypair material (i.e., private key) associated with given UCI.
"""
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.key_pair_material
def get_security_group_name( self, i_index=None, i_id=None ):
@@ -501,35 +540,35 @@
with given instance.
"""
if i_index != None:
- instance = model.CloudInstance.get( i_index )
+ instance = self.sa_session.query( model.CloudInstance ).get( i_index )
return instance.security_group
elif i_id != None:
- instance = model.CloudInstance.filter_by( uci_id=self.uci_id, instance_id=i_id).first()
+ instance = self.sa_session.query( model.CloudInstance ).filter_by( uci_id=self.uci_id, instance_id=i_id).first()
return instance.security_group
def get_access_key( self ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.credentials.access_key
def get_secret_key( self ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.credentials.secret_key
def get_mi_id( self, instance_id=0 ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.instance[instance_id].mi_id
def get_public_dns( self, instance_id=0 ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.instance[instance_id].public_dns
def get_private_dns( self, instance_id=0 ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.instance[instance_id].private_dns
def get_uci_availability_zone( self ):
@@ -539,13 +578,13 @@
availability zone, availability of a UCI is determined by availability zone of
any one storage volume.
"""
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.store[0].availability_zone
def get_store_size( self, store_id=0 ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.store[store_id].size
def get_store_volume_id( self, store_id=0 ):
@@ -553,33 +592,27 @@
Given store ID associated with this UCI, get volume ID as it is registered
on the cloud provider (e.g., 'vol-39890501')
"""
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.store[store_id].volume_id
def get_all_stores( self ):
""" Returns all storage volumes' database objects associated with this UCI. """
- return model.CloudStore.filter( model.CloudStore.c.uci_id == self.uci_id ).all()
+ return self.sa_session.query( model.CloudStore ).filter( model.CloudStore.table.c.uci_id == self.uci_id ).all()
def get_snapshots( self, status=None ):
""" Returns database objects for all snapshots associated with this UCI and in given status."""
- return model.CloudSnapshot.filter_by( uci_id=self.uci_id, status=status ).all()
+ return self.sa_session.query( model.CloudSnapshot ).filter_by( uci_id=self.uci_id, status=status ).all()
def get_uci( self ):
""" Returns database object for given UCI. """
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci
- def get_provider( self ):
- """ Returns database object of cloud provider associated with credentials of given UCI. """
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
- return uci.credentials.provider
-
def uci_launch_time_set( self ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( self.uci_id )
+ self.sa_session.refresh( uci )
return uci.launch_time
class CloudProvider( object ):
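The recurring change across the hunks above is the move from the old bound-mapper helpers (model.UCI.get( id ), obj.flush()) to an explicit SQLAlchemy session: fetch the object through self.sa_session, mutate it, then add() and flush() it. A minimal, self-contained sketch of that pattern, assuming a recent SQLAlchemy (1.4+) and an illustrative one-column model rather than Galaxy's actual schema:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class UCI( Base ):
    __tablename__ = 'uci'
    id = Column( Integer, primary_key=True )
    state = Column( String( 31 ) )

engine = create_engine( 'sqlite://' )            # in-memory database, for the sketch only
Base.metadata.create_all( engine )
sa_session = sessionmaker( bind=engine )()

sa_session.add( UCI( state='new' ) )
sa_session.flush()

uci = sa_session.query( UCI ).get( 1 )           # replaces the old model.UCI.get( id )
uci.state = 'available'
sa_session.add( uci )                            # replaces the old uci.flush()
sa_session.flush()
print( uci.state )                               # available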
diff -r 7d013eb98022 -r 39502dd3fd23 lib/galaxy/cloud/providers/ec2.py
--- a/lib/galaxy/cloud/providers/ec2.py Thu Nov 12 16:36:07 2009 -0500
+++ b/lib/galaxy/cloud/providers/ec2.py Mon Nov 16 20:37:47 2009 -0500
@@ -74,6 +74,7 @@
self.zone = "us-east-1a"
self.security_group = "galaxyWeb"
self.queue = Queue()
+ self.sa_session = app.model.context
self.threads = []
nworkers = 5
@@ -84,13 +85,26 @@
self.threads.append( worker )
log.debug( "%d EC2 cloud workers ready", nworkers )
+ def shutdown( self ):
+ """Attempts to gracefully shut down the monitor thread"""
+ log.info( "sending stop signal to worker threads in EC2 cloud manager" )
+ for i in range( len( self.threads ) ):
+ self.queue.put( self.STOP_SIGNAL )
+ log.info( "EC2 cloud manager stopped" )
+
+ def put( self, uci_wrapper ):
+ # Get rid of UCI from state description
+ state = uci_wrapper.get_uci_state()
+ uci_wrapper.change_state( state.split('U')[0] ) # remove 'UCI' from end of state description (i.e., mark as accepted and ready for processing)
+ self.queue.put( uci_wrapper )
+
def run_next( self ):
"""Run the next job, waiting until one is available if necessary"""
cnt = 0
while 1:
uci_wrapper = self.queue.get()
- uci_state = uci_wrapper.get_state()
+ uci_state = uci_wrapper.get_uci_state()
if uci_state is self.STOP_SIGNAL:
return
try:
@@ -194,13 +208,13 @@
"""
Get appropriate machine image (mi) based on instance size.
"""
- i_type = uci_wrapper.get_type( i_index )
+ i_type = uci_wrapper.get_instance_type( i_index )
if i_type=='m1.small' or i_type=='c1.medium':
arch = 'i386'
else:
arch = 'x86_64'
- mi = model.CloudImage.filter_by( deleted=False, provider_type=self.type, architecture=arch ).first()
+ mi = self.sa_session.query( model.CloudImage ).filter_by( deleted=False, provider_type=self.type, architecture=arch ).first()
if mi:
return mi.image_id
else:
@@ -209,19 +223,6 @@
uci_wrapper.set_error( err+". Contact site administrator to ensure needed machine image is registered.", True )
return None
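get_mi_id() above maps the requested instance type to an architecture before looking up a machine image. A stand-alone sketch of that mapping; the type names come from the code above, while the image registry is a made-up placeholder for the CloudImage table lookup:

def architecture_for_type( i_type ):
    # Mirror the branch above: small/medium instance types are 32-bit, everything else 64-bit.
    if i_type in ( 'm1.small', 'c1.medium' ):
        return 'i386'
    return 'x86_64'

images = { 'i386': 'ami-11111111', 'x86_64': 'ami-22222222' }   # hypothetical image IDs
print( images[ architecture_for_type( 'm1.small' ) ] )          # ami-11111111
print( images[ architecture_for_type( 'm1.large' ) ] )          # ami-22222222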
- def shutdown( self ):
- """Attempts to gracefully shut down the monitor thread"""
- log.info( "sending stop signal to worker threads in EC2 cloud manager" )
- for i in range( len( self.threads ) ):
- self.queue.put( self.STOP_SIGNAL )
- log.info( "EC2 cloud manager stopped" )
-
- def put( self, uci_wrapper ):
- # Get rid of UCI from state description
- state = uci_wrapper.get_state()
- uci_wrapper.change_state( state.split('U')[0] ) # remove 'UCI' from end of state description (i.e., mark as accepted and ready for processing)
- self.queue.put( uci_wrapper )
-
def createUCI( self, uci_wrapper ):
"""
Creates User Configured Instance (UCI). Essentially, creates storage volume on cloud provider
@@ -294,7 +295,8 @@
if conn.delete_volume( v.volume_id ):
deletedList.append( v.volume_id )
v.deleted = True
- v.flush()
+ self.sa_session.add( v )
+ self.sa_session.flush()
count += 1
else:
failedList.append( v.volume_id )
@@ -308,8 +310,8 @@
if count == len( vl ):
uci_wrapper.set_deleted()
else:
- err = "Deleting following volume(s) failed: "+failedList+". However, these volumes were successfully deleted: "+deletedList+". \
- MANUAL intervention and processing needed."
+ err = "Deleting following volume(s) failed: " + str( failedList ) + ". However, these volumes were successfully deleted: " \
+ + str( deletedList ) + ". MANUAL intervention and processing needed."
log.error( err )
uci_wrapper.set_error( err, True )
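The error message above renders the failed and deleted volume lists with str(), so the user-facing text contains Python list notation. If a plainer rendering were preferred, joining the IDs is one option; this is only a sketch of that alternative, not what the changeset does:

failedList = [ 'vol-11111111', 'vol-22222222' ]    # placeholder volume IDs
deletedList = [ 'vol-33333333' ]
err = "Deleting following volume(s) failed: " + ", ".join( failedList ) + \
      ". However, these volumes were successfully deleted: " + ", ".join( deletedList ) + \
      ". MANUAL intervention and processing needed."
print( err )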
@@ -317,7 +319,7 @@
"""
Creates snapshot of all storage volumes associated with this UCI.
"""
- if uci_wrapper.get_state() != uci_states.ERROR:
+ if uci_wrapper.get_uci_state() != uci_states.ERROR:
conn = self.get_connection( uci_wrapper )
snapshots = uci_wrapper.get_snapshots( status = snapshot_status.SUBMITTED )
@@ -361,7 +363,7 @@
"""
Starts instance(s) of given UCI on the cloud.
"""
- if uci_wrapper.get_state() != uci_states.ERROR:
+ if uci_wrapper.get_uci_state() != uci_states.ERROR:
conn = self.get_connection( uci_wrapper )
self.check_key_pair( uci_wrapper, conn )
if uci_wrapper.get_key_pair_name() == None:
@@ -379,70 +381,73 @@
log.debug( "mi_id: %s, uci_wrapper.get_key_pair_name(): %s" % ( mi_id, uci_wrapper.get_key_pair_name() ) )
uci_wrapper.set_mi( i_index, mi_id )
- # Check if galaxy security group exists (and create it if it does not)
- log.debug( "Setting up '%s' security group." % self.security_group )
- try:
- conn.get_all_security_groups( [self.security_group] ) # security groups
- except boto.exception.EC2ResponseError, e:
- if e.code == 'InvalidGroup.NotFound':
- log.info( "No security group found, creating security group '%s'" % self.security_group )
- try:
- gSecurityGroup = conn.create_security_group(self.security_group, 'Security group for Galaxy.')
- gSecurityGroup.authorize( 'tcp', 80, 80, '0.0.0.0/0' ) # Open HTTP port
- gSecurityGroup.authorize( 'tcp', 22, 22, '0.0.0.0/0' ) # Open SSH port
- except boto.exception.EC2ResponseError, ee:
- err = "EC2 response error while creating security group: " + str( ee )
+ if mi_id != None:
+ # Check if galaxy security group exists (and create it if it does not)
+ log.debug( "Setting up '%s' security group." % self.security_group )
+ try:
+ conn.get_all_security_groups( [self.security_group] ) # security groups
+ except boto.exception.EC2ResponseError, e:
+ if e.code == 'InvalidGroup.NotFound':
+ log.info( "No security group found, creating security group '%s'" % self.security_group )
+ try:
+ gSecurityGroup = conn.create_security_group(self.security_group, 'Security group for Galaxy.')
+ gSecurityGroup.authorize( 'tcp', 80, 80, '0.0.0.0/0' ) # Open HTTP port
+ gSecurityGroup.authorize( 'tcp', 22, 22, '0.0.0.0/0' ) # Open SSH port
+ except boto.exception.EC2ResponseError, ee:
+ err = "EC2 response error while creating security group: " + str( ee )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ else:
+ err = "EC2 response error while retrieving security group: " + str( e )
log.error( err )
uci_wrapper.set_error( err, True )
- else:
- err = "EC2 response error while retrieving security group: " + str( e )
- log.error( err )
- uci_wrapper.set_error( err, True )
-
- if uci_wrapper.get_state() != uci_states.ERROR:
- # Start an instance
- log.debug( "Starting instance for UCI '%s'" % uci_wrapper.get_name() )
- #TODO: Once multiple volumes can be attached to a single instance, update 'userdata' composition
- userdata = uci_wrapper.get_store_volume_id()+"|"+uci_wrapper.get_access_key()+"|"+uci_wrapper.get_secret_key()
- log.debug( "Using following command: conn.run_instances( image_id='%s', key_name='%s', security_groups=['%s'], user_data=[OMITTED], instance_type='%s', placement='%s' )"
- % ( mi_id, uci_wrapper.get_key_pair_name(), self.security_group, uci_wrapper.get_type( i_index ), uci_wrapper.get_uci_availability_zone() ) )
- reservation = None
- try:
- reservation = conn.run_instances( image_id=mi_id,
- key_name=uci_wrapper.get_key_pair_name(),
- security_groups=[self.security_group],
- user_data=userdata,
- instance_type=uci_wrapper.get_type( i_index ),
- placement=uci_wrapper.get_uci_availability_zone() )
- except boto.exception.EC2ResponseError, e:
- err = "EC2 response error when starting UCI '"+ uci_wrapper.get_name() +"': " + str( e )
- log.error( err )
- uci_wrapper.set_error( err, True )
- except Exception, ex:
- err = "Error when starting UCI '" + uci_wrapper.get_name() + "': " + str( ex )
- log.error( err )
- uci_wrapper.set_error( err, True )
- # Record newly available instance data into local Galaxy database
- if reservation:
- uci_wrapper.set_launch_time( self.format_time( reservation.instances[0].launch_time ), i_index=i_index )
- if not uci_wrapper.uci_launch_time_set():
- uci_wrapper.set_uci_launch_time( self.format_time( reservation.instances[0].launch_time ) )
+
+ if uci_wrapper.get_uci_state() != uci_states.ERROR:
+ # Start an instance
+ log.debug( "Starting instance for UCI '%s'" % uci_wrapper.get_name() )
+ #TODO: Once multiple volumes can be attached to a single instance, update 'userdata' composition
+ userdata = uci_wrapper.get_store_volume_id()+"|"+uci_wrapper.get_access_key()+"|"+uci_wrapper.get_secret_key()
+ log.debug( "Using following command: conn.run_instances( image_id='%s', key_name='%s', security_groups=['%s'], user_data=[OMITTED], instance_type='%s', placement='%s' )"
+ % ( mi_id, uci_wrapper.get_key_pair_name(), self.security_group, uci_wrapper.get_instance_type( i_index ), uci_wrapper.get_uci_availability_zone() ) )
+ reservation = None
try:
- uci_wrapper.set_reservation_id( i_index, str( reservation ).split(":")[1] )
- # TODO: if more than a single instance will be started through single reservation, change this reference to element [0]
- i_id = str( reservation.instances[0]).split(":")[1]
- uci_wrapper.set_instance_id( i_index, i_id )
- s = reservation.instances[0].state
- uci_wrapper.change_state( s, i_id, s )
- uci_wrapper.set_security_group_name( self.security_group, i_id=i_id )
- log.debug( "Instance of UCI '%s' started, current state: '%s'" % ( uci_wrapper.get_name(), uci_wrapper.get_state() ) )
+ reservation = conn.run_instances( image_id=mi_id,
+ key_name=uci_wrapper.get_key_pair_name(),
+ security_groups=[self.security_group],
+ user_data=userdata,
+ instance_type=uci_wrapper.get_instance_type( i_index ),
+ placement=uci_wrapper.get_uci_availability_zone() )
except boto.exception.EC2ResponseError, e:
- err = "EC2 response error when retrieving instance information for UCI '" + uci_wrapper.get_name() + "': " + str( e )
+ err = "EC2 response error when starting UCI '"+ uci_wrapper.get_name() +"': " + str( e )
log.error( err )
uci_wrapper.set_error( err, True )
- else:
- log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
+ except Exception, ex:
+ err = "Error when starting UCI '" + uci_wrapper.get_name() + "': " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ # Record newly available instance data into local Galaxy database
+ if reservation:
+ l_time = datetime.utcnow()
+ # uci_wrapper.set_instance_launch_time( self.format_time( reservation.instances[0].launch_time ), i_index=i_index )
+ uci_wrapper.set_instance_launch_time( l_time, i_index=i_index )
+ if not uci_wrapper.uci_launch_time_set():
+ uci_wrapper.set_uci_launch_time( l_time )
+ try:
+ uci_wrapper.set_reservation_id( i_index, str( reservation ).split(":")[1] )
+ # TODO: if more than a single instance will be started through single reservation, change this reference to element [0]
+ i_id = str( reservation.instances[0]).split(":")[1]
+ uci_wrapper.set_instance_id( i_index, i_id )
+ s = reservation.instances[0].state
+ uci_wrapper.change_state( s, i_id, s )
+ uci_wrapper.set_security_group_name( self.security_group, i_id=i_id )
+ log.debug( "Instance of UCI '%s' started, current state: '%s'" % ( uci_wrapper.get_name(), uci_wrapper.get_uci_state() ) )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error when retrieving instance information for UCI '" + uci_wrapper.get_name() + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ else:
+ log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
else:
err = "No instances in state '"+ instance_states.SUBMITTED +"' found for UCI '" + uci_wrapper.get_name() + \
"'. Nothing to start."
@@ -459,8 +464,13 @@
# Get all instances associated with given UCI
il = uci_wrapper.get_instances_ids() # instance list
+ # Remove any entries whose instance ID was never recorded (i.e., is None)
+ il = [ i for i in il if i is not None ]
+ log.debug( 'List of instances being terminated: %s' % il )
rl = conn.get_all_instances( il ) # Reservation list associated with given instances
-
+
# Initiate shutdown of all instances under given UCI
cnt = 0
stopped = []
@@ -536,18 +546,22 @@
"""
log.debug( "Running general status update for EC2 UCIs..." )
# Update instances
- instances = model.CloudInstance.filter( or_( model.CloudInstance.c.state==instance_states.RUNNING,
- model.CloudInstance.c.state==instance_states.PENDING,
- model.CloudInstance.c.state==instance_states.SHUTTING_DOWN ) ).all()
+ instances = self.sa_session.query( model.CloudInstance ) \
+ .filter( or_( model.CloudInstance.table.c.state==instance_states.RUNNING,
+ model.CloudInstance.table.c.state==instance_states.PENDING,
+ model.CloudInstance.table.c.state==instance_states.SHUTTING_DOWN ) ) \
+ .all()
for inst in instances:
if self.type == inst.uci.credentials.provider.type:
log.debug( "[%s] Running general status update on instance '%s'" % ( inst.uci.credentials.provider.type, inst.instance_id ) )
self.updateInstance( inst )
# Update storage volume(s)
- stores = model.CloudStore.filter( or_( model.CloudStore.c.status==store_status.IN_USE,
- model.CloudStore.c.status==store_status.CREATING,
- model.CloudStore.c.status==None ) ).all()
+ stores = self.sa_session.query( model.CloudStore ) \
+ .filter( or_( model.CloudStore.table.c.status==store_status.IN_USE,
+ model.CloudStore.table.c.status==store_status.CREATING,
+ model.CloudStore.table.c.status==None ) ) \
+ .all()
for store in stores:
if self.type == store.uci.credentials.provider.type: # and store.volume_id != None:
log.debug( "[%s] Running general status update on store with local database ID: '%s'" % ( store.uci.credentials.provider.type, store.id ) )
@@ -564,7 +578,9 @@
# store.flush()
# Update pending snapshots or delete ones marked for deletion
- snapshots = model.CloudSnapshot.filter_by( status=snapshot_status.PENDING, status=snapshot_status.DELETE ).all()
+ snapshots = self.sa_session.query( model.CloudSnapshot ) \
+ .filter( or_( model.CloudSnapshot.table.c.status == snapshot_status.PENDING,
+ model.CloudSnapshot.table.c.status == snapshot_status.DELETE ) ) \
+ .all()
for snapshot in snapshots:
if self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.PENDING:
log.debug( "[%s] Running general status update on snapshot '%s'" % ( snapshot.uci.credentials.provider.type, snapshot.snapshot_id ) )
@@ -574,11 +590,12 @@
self.delete_snapshot( snapshot )
# Attempt at updating any zombie UCIs (i.e., instances that have been in SUBMITTED state for longer than expected - see below for exact time)
- zombies = model.UCI.filter_by( state=uci_states.SUBMITTED ).all()
+ zombies = self.sa_session.query( model.UCI ).filter_by( state=uci_states.SUBMITTED ).all()
for zombie in zombies:
- z_instances = model.CloudInstance.filter_by( uci_id=zombie.id) \
- .filter( or_( model.CloudInstance.c.state != instance_states.TERMINATED,
- model.CloudInstance.c.state == None ) ) \
+ z_instances = self.sa_session.query( model.CloudInstance ) \
+ .filter_by( uci_id=zombie.id ) \
+ .filter( or_( model.CloudInstance.table.c.state != instance_states.TERMINATED,
+ model.CloudInstance.table.c.state == None ) ) \
.all()
for z_inst in z_instances:
if self.type == z_inst.uci.credentials.provider.type:
@@ -592,8 +609,8 @@
# Get credentials associated with this instance
uci_id = inst.uci_id
- uci = model.UCI.get( uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( uci_id )
+ self.sa_session.refresh( uci )
conn = self.get_connection_from_uci( uci )
# Get reservations handle for given instance
@@ -604,6 +621,8 @@
log.error( err )
uci.error = err
uci.state = uci_states.ERROR
+ self.sa_session.add( uci )
+ self.sa_session.flush()
return None
# Because references to reservations are deleted shortly after instances have been terminated, getting an empty list as a response to a query
@@ -618,8 +637,9 @@
inst.state = instance_states.TERMINATED
uci.state = uci_states.ERROR
uci.launch_time = None
- inst.flush()
- uci.flush()
+ self.sa_session.add( inst )
+ self.sa_session.add( uci )
+ self.sa_session.flush()
# Update instance status in local DB with info from cloud provider
for r in rl:
for i, cInst in enumerate( r.instances ):
@@ -628,45 +648,54 @@
log.debug( "Checking state of cloud instance '%s' associated with UCI '%s' and reservation '%s'. State='%s'" % ( cInst, uci.name, r, s ) )
if s != inst.state:
inst.state = s
- inst.flush()
+ self.sa_session.add( inst )
+ self.sa_session.flush()
# After instance has shut down, ensure UCI is marked as 'available'
if s == instance_states.TERMINATED and uci.state != uci_states.ERROR:
uci.state = uci_states.AVAILABLE
uci.launch_time = None
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
# Making sure state of UCI is updated. Once multiple instances become associated with single UCI, this will need to be changed.
if s != uci.state and s != instance_states.TERMINATED:
uci.state = s
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
if cInst.public_dns_name != inst.public_dns:
inst.public_dns = cInst.public_dns_name
- inst.flush()
+ self.sa_session.add( inst )
+ self.sa_session.flush()
if cInst.private_dns_name != inst.private_dns:
inst.private_dns = cInst.private_dns_name
- inst.flush()
+ self.sa_session.add( inst )
+ self.sa_session.flush()
except boto.exception.EC2ResponseError, e:
err = "Updating instance status from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
log.error( err )
uci.error = err
uci.state = uci_states.ERROR
+ self.sa_session.add( uci )
+ self.sa_session.flush()
return None
def updateStore( self, store ):
# Get credentials associated with this store
uci_id = store.uci_id
- uci = model.UCI.get( uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( uci_id )
+ self.sa_session.refresh( uci )
conn = self.get_connection_from_uci( uci )
# Get reservations handle for given store
try:
+ log.debug( "Updating storage volume command: vl = conn.get_all_volumes( [%s] )" % store.volume_id )
vl = conn.get_all_volumes( [store.volume_id] )
except boto.exception.EC2ResponseError, e:
err = "Retrieving volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
log.error( err )
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
return None
# Update store status in local DB with info from cloud provider
@@ -677,29 +706,36 @@
# UCI state remained as 'new', try to remedy this by updating UCI state here
if ( store.status == None ) and ( store.volume_id != None ):
uci.state = vl[0].status
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
# If UCI was marked in state 'CREATING', update its status to reflect new status
elif ( uci.state == uci_states.CREATING ):
uci.state = vl[0].status
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
store.status = vl[0].status
- store.flush()
+ self.sa_session.add( store )
+ self.sa_session.flush()
if store.i_id != vl[0].instance_id:
store.i_id = vl[0].instance_id
- store.flush()
+ self.sa_session.add( store )
+ self.sa_session.flush()
if store.attach_time != vl[0].attach_time:
store.attach_time = vl[0].attach_time
- store.flush()
+ self.sa_session.add( store )
+ self.sa_session.flush()
if store.device != vl[0].device:
store.device = vl[0].device
- store.flush()
+ self.sa_session.add( store )
+ self.sa_session.flush()
except boto.exception.EC2ResponseError, e:
err = "Updating status of volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
log.error( err )
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
return None
else:
err = "No storage volumes returned by cloud provider on general update"
@@ -708,14 +744,15 @@
store.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- store.flush()
+ self.sa_session.add( uci )
+ self.sa_session.add( store )
+ self.sa_session.flush()
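updateStore() above repeats a compare-and-update pattern: for each field, copy the cloud-reported value onto the local record only when it differs, then persist via the session. A compact generic sketch of that idea; the names are illustrative, and the committed code spells each field out explicitly:

def sync_fields( local, remote, fields ):
    # Copy differing attribute values from remote onto local; return the names that changed.
    changed = []
    for name in fields:
        new_value = getattr( remote, name )
        if getattr( local, name ) != new_value:
            setattr( local, name, new_value )
            changed.append( name )
    return changed

class Rec( object ):
    def __init__( self, **kw ):
        self.__dict__.update( kw )

local = Rec( status='creating', device=None, attach_time=None )
remote = Rec( status='in-use', device='/dev/sdb', attach_time=None )
print( sync_fields( local, remote, [ 'status', 'device', 'attach_time' ] ) )   # ['status', 'device']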
def updateSnapshot( self, snapshot ):
# Get credentials associated with this store
uci_id = snapshot.uci_id
- uci = model.UCI.get( uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( uci_id )
+ self.sa_session.refresh( uci )
conn = self.get_connection_from_uci( uci )
try:
@@ -724,7 +761,8 @@
if len( snap ) > 0:
log.debug( "Snapshot '%s' status: %s" % ( snapshot.snapshot_id, snap[0].status ) )
snapshot.status = snap[0].status
- snapshot.flush()
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
else:
err = "No snapshots returned by EC2 on general update"
log.error( "%s for UCI '%s'" % ( err, uci.name ) )
@@ -732,8 +770,9 @@
snapshot.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
+ self.sa_session.add( uci )
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
except boto.exception.EC2ResponseError, e:
err = "EC2 response error while updating snapshot status: " + str( e )
log.error( err )
@@ -741,8 +780,9 @@
snapshot.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
+ self.sa_session.add( uci )
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
except Exception, ex:
err = "Error while updating snapshot status: " + str( ex )
log.error( err )
@@ -750,15 +790,16 @@
snapshot.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
+ self.sa_session.add( uci )
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
def delete_snapshot( self, snapshot ):
if snapshot.status == snapshot_status.DELETE:
# Get credentials associated with this store
uci_id = snapshot.uci_id
- uci = model.UCI.get( uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( uci_id )
+ self.sa_session.refresh( uci )
conn = self.get_connection_from_uci( uci )
try:
@@ -767,7 +808,8 @@
if snap == True:
snapshot.deleted = True
snapshot.status = snapshot_status.DELETED
- snapshot.flush()
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
return snap
except boto.exception.EC2ResponseError, e:
err = "EC2 response error while deleting snapshot: " + str( e )
@@ -776,8 +818,9 @@
snapshot.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
+ self.sa_session.add( uci )
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
except Exception, ex:
err = "Error while deleting snapshot: " + str( ex )
log.error( err )
@@ -785,14 +828,16 @@
snapshot.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
+ self.sa_session.add( uci )
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
else:
err = "Cannot delete snapshot '"+snapshot.snapshot_id+"' because its status is '"+snapshot.status+"'. Only snapshots with '" + \
snapshot_status.COMPLETED+"' status can be deleted."
log.error( err )
snapshot.error = err
- snapshot.flush()
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
def processZombie( self, inst ):
"""
@@ -800,6 +845,10 @@
accordingly or if something else failed and instance was never started. Currently, no automatic
repairs are being attempted; instead, appropriate error messages are set.
"""
+ uci_id = inst.uci_id
+ uci = self.sa_session.query( model.UCI ).get( uci_id )
+ self.sa_session.refresh( uci )
+
# Check if any instance-specific information was written to local DB; if 'yes', set instance and UCI's error message
# suggesting manual check.
if inst.launch_time != None or inst.reservation_id != None or inst.instance_id != None:
@@ -820,9 +869,10 @@
try:
state = rl[0].instances[0].update()
inst.state = state
- inst.uci.state = state
- inst.flush()
- inst.uci.flush()
+ uci.state = state
+ self.sa_session.add( inst )
+ self.sa_session.add( uci )
+ self.sa_session.flush()
except: # something failed, so skip
pass
@@ -830,10 +880,12 @@
try:
launch_time = self.format_time( rl[0].instances[0].launch_time )
inst.launch_time = launch_time
- inst.flush()
+ self.sa_session.add( inst )
+ self.sa_session.flush()
if inst.uci.launch_time == None:
- inst.uci.launch_time = launch_time
- inst.uci.flush()
+ uci.launch_time = launch_time
+ self.sa_session.add( uci )
+ self.sa_session.flush()
except: # something failed, so skip
pass
else:
@@ -844,8 +896,9 @@
inst.uci.error = err
inst.uci.state = uci_states.ERROR
log.error( err )
- inst.flush()
- inst.uci.flush()
+ self.sa_session.add( inst )
+ self.sa_session.add( uci )
+ self.sa_session.flush()
else: #Instance most likely never got processed, so set error message suggesting user to try starting instance again.
err = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI '" + str(inst.uci.name) + \
@@ -853,11 +906,12 @@
"starting the instance again."
inst.error = err
inst.state = instance_states.ERROR
- inst.uci.error = err
- inst.uci.state = uci_states.ERROR
+ uci.error = err
+ uci.state = uci_states.ERROR
log.error( err )
- inst.flush()
- inst.uci.flush()
+ self.sa_session.add( inst )
+ self.sa_session.add( uci )
+ self.sa_session.flush()
# uw = UCIwrapper( inst.uci )
# log.debug( "Try automatically re-submitting UCI '%s'." % uw.get_name() )
@@ -882,7 +936,8 @@
log.error( err )
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
return None
return conn
@@ -895,7 +950,7 @@
# conn = self.get_connection( uci )
#
# # Update status of storage volumes
-# vl = model.CloudStore.filter( model.CloudInstance.c.uci_id == uci.id ).all()
+# vl = model.CloudStore.filter( model.CloudInstance.table.c.uci_id == uci.id ).all()
# vols = []
# for v in vl:
# vols.append( v.volume_id )
@@ -911,7 +966,7 @@
# pass
#
# # Update status of instances
-# il = model.CloudInstance.filter_by( uci_id=uci.id ).filter( model.CloudInstance.c.state != 'terminated' ).all()
+# il = model.CloudInstance.filter_by( uci_id=uci.id ).filter( model.CloudInstance.table.c.state != 'terminated' ).all()
# instanceList = []
# for i in il:
# instanceList.append( i.instance_id )
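startUCI() in the EC2 provider above packs the volume ID and the user's keys into a single '|'-separated user_data string handed to run_instances(). A short sketch of composing and splitting such a string; the values are placeholders, and the instance-side parsing is an assumption about how the booted image reads the data back:

def compose_userdata( volume_id, access_key, secret_key ):
    return '|'.join( [ volume_id, access_key, secret_key ] )

def parse_userdata( userdata ):
    volume_id, access_key, secret_key = userdata.split( '|' )
    return volume_id, access_key, secret_key

ud = compose_userdata( 'vol-39890501', 'ACCESS-KEY-EXAMPLE', 'SECRET-KEY-EXAMPLE' )
print( parse_userdata( ud ) )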
diff -r 7d013eb98022 -r 39502dd3fd23 lib/galaxy/cloud/providers/eucalyptus.py
--- a/lib/galaxy/cloud/providers/eucalyptus.py Thu Nov 12 16:36:07 2009 -0500
+++ b/lib/galaxy/cloud/providers/eucalyptus.py Mon Nov 16 20:37:47 2009 -0500
@@ -73,6 +73,7 @@
self.type = "eucalyptus" # cloud provider type (e.g., ec2, eucalyptus, opennebula)
self.zone = "epc"
self.queue = Queue()
+ self.sa_session = app.model.context
self.threads = []
nworkers = 5
@@ -83,12 +84,28 @@
self.threads.append( worker )
log.debug( "%d eucalyptus cloud workers ready", nworkers )
+ def shutdown( self ):
+ """Attempts to gracefully shut down the monitor thread"""
+ log.info( "sending stop signal to worker threads in eucalyptus cloud manager" )
+ for i in range( len( self.threads ) ):
+ self.queue.put( self.STOP_SIGNAL )
+ log.info( "eucalyptus cloud manager stopped" )
+
+ def put( self, uci_wrapper ):
+ """
+ Adds uci_wrapper object to the end of the request queue to be handled by
+ this cloud provider.
+ """
+ state = uci_wrapper.get_uci_state()
+ uci_wrapper.change_state( state.split('U')[0] ) # remove 'UCI' from end of state description (i.e., mark as accepted and ready for processing)
+ self.queue.put( uci_wrapper )
+
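put() above strips the trailing 'UCI' marker from the request state before queueing, so a state such as 'newUCI' becomes 'new' once the provider accepts the request. A minimal illustration of that convention; the concrete state names are examples only:

def accept_state( state ):
    # Drop the trailing 'UCI' marker, e.g. 'newUCI' -> 'new'.
    return state.split( 'U' )[0]

for s in [ 'newUCI', 'submittedUCI', 'deletingUCI' ]:
    print( '%s -> %s' % ( s, accept_state( s ) ) )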
def run_next( self ):
- """Run the next job, waiting until one is available if necessary"""
+ """Process next request, waiting until one is available if necessary."""
cnt = 0
while 1:
uci_wrapper = self.queue.get()
- uci_state = uci_wrapper.get_state()
+ uci_state = uci_wrapper.get_uci_state()
if uci_state is self.STOP_SIGNAL:
return
try:
@@ -109,7 +126,7 @@
def get_connection( self, uci_wrapper ):
"""
- Establishes eucalyptus cloud connection using user's credentials associated with given UCI
+ Establishes cloud connection using user's credentials associated with given UCI
"""
log.debug( 'Establishing %s cloud connection.' % self.type )
provider = uci_wrapper.get_provider()
@@ -137,7 +154,10 @@
def check_key_pair( self, uci_wrapper, conn ):
"""
- Generate key pair using user's credentials
+ Check if a key pair associated with this UCI exists with the cloud provider.
+ If so, return the key pair name; otherwise, generate a new key pair with the
+ cloud provider and return its name.
+ The key pair name for a given UCI is derived from the UCI's name plus the suffix '_kp'.
"""
kp = None
kp_name = uci_wrapper.get_name().replace(' ','_') + "_kp"
@@ -185,6 +205,7 @@
return None
def create_key_pair( self, conn, kp_name ):
+ """ Initiate creation of key pair under kp_name by current cloud provider. """
try:
return conn.create_key_pair( kp_name )
except boto.exception.EC2ResponseError, e:
@@ -192,15 +213,15 @@
def get_mi_id( self, uci_wrapper, i_index ):
"""
- Get appropriate machine image (mi) based on instance size.
+ Get appropriate machine image (mi) ID based on instance type.
"""
- i_type = uci_wrapper.get_type( i_index )
+ i_type = uci_wrapper.get_instance_type( i_index )
if i_type=='m1.small' or i_type=='c1.medium':
arch = 'i386'
else:
arch = 'x86_64'
- mi = model.CloudImage.filter_by( deleted=False, provider_type=self.type, architecture=arch ).first()
+ mi = self.sa_session.query( model.CloudImage ).filter_by( deleted=False, provider_type=self.type, architecture=arch ).first()
if mi:
return mi.image_id
else:
@@ -209,23 +230,10 @@
uci_wrapper.set_error( err+". Contact site administrator to ensure needed machine image is registered.", True )
return None
- def shutdown( self ):
- """Attempts to gracefully shut down the monitor thread"""
- log.info( "sending stop signal to worker threads in eucalyptus cloud manager" )
- for i in range( len( self.threads ) ):
- self.queue.put( self.STOP_SIGNAL )
- log.info( "eucalyptus cloud manager stopped" )
-
- def put( self, uci_wrapper ):
- # Get rid of UCI from state description
- state = uci_wrapper.get_state()
- uci_wrapper.change_state( state.split('U')[0] ) # remove 'UCI' from end of state description (i.e., mark as accepted and ready for processing)
- self.queue.put( uci_wrapper )
-
def createUCI( self, uci_wrapper ):
"""
- Creates User Configured Instance (UCI). Essentially, creates storage volume on cloud provider
- and registers relevant information in Galaxy database.
+ Create User Configured Instance (UCI) - i.e., create storage volume on cloud provider
+ and register relevant information in local Galaxy database.
"""
conn = self.get_connection( uci_wrapper )
@@ -270,8 +278,11 @@
def deleteUCI( self, uci_wrapper ):
"""
- Deletes UCI. NOTE that this implies deletion of any and all data associated
+ Delete UCI - i.e., delete all storage volumes associated with this UCI.
+ NOTE that this implies deletion of any and all data associated
with this UCI from the cloud. All data will be deleted.
+ Information in local Galaxy database is marked as deleted but not actually removed
+ from the database.
"""
conn = self.get_connection( uci_wrapper )
vl = [] # volume list
@@ -287,7 +298,8 @@
if conn.delete_volume( v.volume_id ):
deletedList.append( v.volume_id )
v.deleted = True
- v.flush()
+ self.sa_session.add( v )
+ self.sa_session.flush()
count += 1
else:
failedList.append( v.volume_id )
@@ -301,16 +313,17 @@
if count == len( vl ):
uci_wrapper.set_deleted()
else:
- err = "Deleting following volume(s) failed: "+failedList+". However, these volumes were successfully deleted: "+deletedList+". \
- MANUAL intervention and processing needed."
+ err = "Deleting following volume(s) failed: "+ str( failedList )+". However, these volumes were successfully deleted: " \
+ + str( deletedList ) +". MANUAL intervention and processing needed."
log.error( err )
uci_wrapper.set_error( err, True )
def snapshotUCI( self, uci_wrapper ):
"""
- Creates snapshot of all storage volumes associated with this UCI.
+ Initiate creation of a snapshot by cloud provider for all storage volumes
+ associated with this UCI.
"""
- if uci_wrapper.get_state() != uci_states.ERROR:
+ if uci_wrapper.get_uci_state() != uci_states.ERROR:
conn = self.get_connection( uci_wrapper )
snapshots = uci_wrapper.get_snapshots( status = snapshot_status.SUBMITTED )
@@ -337,7 +350,7 @@
uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
-# if uci_wrapper.get_state() != uci_states.ERROR:
+# if uci_wrapper.get_uci_state() != uci_states.ERROR:
#
# snapshots = uci_wrapper.get_snapshots( status = 'submitted' )
# for snapshot in snapshots:
@@ -363,9 +376,9 @@
def startUCI( self, uci_wrapper ):
"""
- Starts instance(s) of given UCI on the cloud.
+ Start instance(s) of given UCI on the cloud.
"""
- if uci_wrapper.get_state() != uci_states.ERROR:
+ if uci_wrapper.get_uci_state() != uci_states.ERROR:
conn = self.get_connection( uci_wrapper )
self.check_key_pair( uci_wrapper, conn )
if uci_wrapper.get_key_pair_name() == None:
@@ -383,16 +396,16 @@
log.debug( "mi_id: %s, uci_wrapper.get_key_pair_name(): %s" % ( mi_id, uci_wrapper.get_key_pair_name() ) )
uci_wrapper.set_mi( i_index, mi_id )
- if uci_wrapper.get_state() != uci_states.ERROR:
+ if uci_wrapper.get_uci_state() != uci_states.ERROR:
# Start an instance
log.debug( "Starting UCI instance '%s'" % uci_wrapper.get_name() )
log.debug( "Using following command: conn.run_instances( image_id='%s', key_name='%s', instance_type='%s' )"
- % ( mi_id, uci_wrapper.get_key_pair_name(), uci_wrapper.get_type( i_index ) ) )
+ % ( mi_id, uci_wrapper.get_key_pair_name(), uci_wrapper.get_instance_type( i_index ) ) )
reservation = None
try:
reservation = conn.run_instances( image_id=mi_id,
key_name=uci_wrapper.get_key_pair_name(),
- instance_type=uci_wrapper.get_type( i_index ) )
+ instance_type=uci_wrapper.get_instance_type( i_index ) )
except boto.exception.EC2ResponseError, e:
err = "EC2 response error when starting UCI '"+ uci_wrapper.get_name() +"': " + str( e )
log.error( err )
@@ -403,9 +416,11 @@
uci_wrapper.set_error( err, True )
# Record newly available instance data into local Galaxy database
if reservation:
- uci_wrapper.set_launch_time( self.format_time( reservation.instances[0].launch_time ), i_index=i_index )
+ l_time = datetime.utcnow()
+# uci_wrapper.set_instance_launch_time( self.format_time( reservation.instances[0].launch_time ), i_index=i_index )
+ uci_wrapper.set_instance_launch_time( l_time, i_index=i_index )
if not uci_wrapper.uci_launch_time_set():
- uci_wrapper.set_uci_launch_time( self.format_time( reservation.instances[0].launch_time ) )
+ uci_wrapper.set_uci_launch_time( l_time )
try:
uci_wrapper.set_reservation_id( i_index, str( reservation ).split(":")[1] )
# TODO: if more than a single instance will be started through single reservation, change this reference from element [0]
@@ -413,7 +428,7 @@
uci_wrapper.set_instance_id( i_index, i_id )
s = reservation.instances[0].state
uci_wrapper.change_state( s, i_id, s )
- log.debug( "Instance of UCI '%s' started, current state: '%s'" % ( uci_wrapper.get_name(), uci_wrapper.get_state() ) )
+ log.debug( "Instance of UCI '%s' started, current state: '%s'" % ( uci_wrapper.get_name(), uci_wrapper.get_uci_state() ) )
except boto.exception.EC2ResponseError, e:
err = "EC2 response error when retrieving instance information for UCI '" + uci_wrapper.get_name() + "': " + str( e )
log.error( err )
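Both providers recover the reservation and instance IDs by splitting the objects' string form on ':' (the str( reservation ).split(":")[1] idiom above). A tiny illustration of that idiom; the literal strings mimic boto's repr ('Reservation:r-...', 'Instance:i-...'), which is an assumption about the library's string form rather than something verified here:

def id_from_repr( obj ):
    # Return the part after the first ':' in the object's string form.
    return str( obj ).split( ':' )[1]

print( id_from_repr( 'Reservation:r-1234abcd' ) )   # r-1234abcd
print( id_from_repr( 'Instance:i-402906D2' ) )      # i-402906D2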
@@ -430,12 +445,16 @@
def stopUCI( self, uci_wrapper):
"""
- Stops all of cloud instances associated with given UCI.
+ Stop all cloud instances associated with given UCI.
"""
conn = self.get_connection( uci_wrapper )
# Get all instances associated with given UCI
il = uci_wrapper.get_instances_ids() # instance list
+ # Remove any entries whose instance ID was never recorded (i.e., is None)
+ il = [ i for i in il if i is not None ]
log.debug( 'List of instances being terminated: %s' % il )
rl = conn.get_all_instances( il ) # Reservation list associated with given instances
@@ -506,41 +525,41 @@
def update( self ):
"""
- Runs a global status update on all instances that are in 'running', 'pending', or 'shutting-down' state.
- Also, runs update on all storage volumes that are in 'in-use', 'creating', or 'None' state.
+ Run status update on all instances that are in 'running', 'pending', or 'shutting-down' state.
+ Run status update on all storage volumes whose status is 'in-use', 'creating', or 'None'.
+ Run status update on all snapshots whose status is 'pending' or 'delete'.
+ Run status update on any zombie UCIs, i.e., UCIs that have been in 'submitted' state for an
+ extended period of time.
+
Reason behind this method is to sync state of local DB and real-world resources
"""
log.debug( "Running general status update for EPC UCIs..." )
# Update instances
- instances = model.CloudInstance.filter( or_( model.CloudInstance.c.state==instance_states.RUNNING,
- model.CloudInstance.c.state==instance_states.PENDING,
- model.CloudInstance.c.state==instance_states.SHUTTING_DOWN ) ).all()
+ instances = self.sa_session.query( model.CloudInstance ) \
+ .filter( or_( model.CloudInstance.table.c.state==instance_states.RUNNING,
+ model.CloudInstance.table.c.state==instance_states.PENDING,
+ model.CloudInstance.table.c.state==instance_states.SHUTTING_DOWN ) ) \
+ .all()
for inst in instances:
if self.type == inst.uci.credentials.provider.type:
log.debug( "[%s] Running general status update on instance '%s'" % ( inst.uci.credentials.provider.type, inst.instance_id ) )
self.updateInstance( inst )
# Update storage volume(s)
- stores = model.CloudStore.filter( or_( model.CloudStore.c.status==store_status.IN_USE,
- model.CloudStore.c.status==store_status.CREATING,
- model.CloudStore.c.status==None ) ).all()
+ stores = self.sa_session.query( model.CloudStore ) \
+ .filter( or_( model.CloudStore.table.c.status==store_status.IN_USE,
+ model.CloudStore.table.c.status==store_status.CREATING,
+ model.CloudStore.table.c.status==None ) ) \
+ .all()
for store in stores:
if self.type == store.uci.credentials.provider.type: # and store.volume_id != None:
log.debug( "[%s] Running general status update on store with local database ID: '%s'" % ( store.uci.credentials.provider.type, store.id ) )
self.updateStore( store )
-# else:
-# log.error( "[%s] There exists an entry for UCI (%s) storage volume without an ID. Storage volume might have been created with "
-# "cloud provider though. Manual check is recommended." % ( store.uci.credentials.provider.type, store.uci.name ) )
-# store.uci.error = "There exists an entry in local database for a storage volume without an ID. Storage volume might have been created " \
-# "with cloud provider though. Manual check is recommended. After understanding what happened, local database entry for given " \
-# "storage volume should be updated."
-# store.status = store_status.ERROR
-# store.uci.state = uci_states.ERROR
-# store.uci.flush()
-# store.flush()
# Update pending snapshots or delete ones marked for deletion
- snapshots = model.CloudSnapshot.filter_by( status=snapshot_status.PENDING, status=snapshot_status.DELETE ).all()
+ snapshots = self.sa_session.query( model.CloudSnapshot ) \
+ .filter( or_( model.CloudSnapshot.table.c.status == snapshot_status.PENDING,
+ model.CloudSnapshot.table.c.status == snapshot_status.DELETE ) ) \
+ .all()
for snapshot in snapshots:
if self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.PENDING:
log.debug( "[%s] Running general status update on snapshot '%s'" % ( snapshot.uci.credentials.provider.type, snapshot.snapshot_id ) )
@@ -550,29 +569,34 @@
self.delete_snapshot( snapshot )
# Attempt at updating any zombie UCIs (i.e., instances that have been in SUBMITTED state for longer than expected - see below for exact time)
- zombies = model.UCI.filter_by( state=uci_states.SUBMITTED ).all()
+ zombies = self.sa_session.query( model.UCI ).filter_by( state=uci_states.SUBMITTED ).all()
for zombie in zombies:
log.debug( "zombie UCI: %s" % zombie.name )
- z_instances = model.CloudInstance \
- .filter_by( uci_id=zombie.id, state=None ) \
+ z_instances = self.sa_session.query( model.CloudInstance ) \
+ .filter_by( uci_id=zombie.id ) \
+ .filter( or_( model.CloudInstance.table.c.state != instance_states.TERMINATED,
+ model.CloudInstance.table.c.state == None ) ) \
.all()
for z_inst in z_instances:
if self.type == z_inst.uci.credentials.provider.type:
# log.debug( "z_inst.id: '%s', state: '%s'" % ( z_inst.id, z_inst.state ) )
td = datetime.utcnow() - z_inst.update_time
- log.debug( "z_inst.id: %s, time delta is %s sec" % ( z_inst.id, td.seconds ) )
+# log.debug( "z_inst.id: %s, time delta is %s sec" % ( z_inst.id, td.seconds ) )
if td.seconds > 180: # if instance has been in SUBMITTED state for more than 3 minutes
log.debug( "[%s](td=%s) Running zombie repair update on instance with DB id '%s'" % ( z_inst.uci.credentials.provider.type, td.seconds, z_inst.id ) )
self.processZombie( z_inst )
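The zombie check above compares an instance's update_time against a 180-second threshold. A small stand-alone sketch of that age test, assuming update_time is a UTC datetime as in the code above:

from datetime import datetime, timedelta

def is_zombie( update_time, threshold_seconds=180, now=None ):
    # True if a submitted instance has sat unchanged for longer than the threshold.
    if now is None:
        now = datetime.utcnow()
    return ( now - update_time ) > timedelta( seconds=threshold_seconds )

print( is_zombie( datetime.utcnow() - timedelta( minutes=5 ) ) )    # True
print( is_zombie( datetime.utcnow() - timedelta( seconds=30 ) ) )   # False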
def updateInstance( self, inst ):
-
+ """
+ Update information in local database for given instance as it is obtained from cloud provider.
+ Along with updating information about given instance, information about the UCI controlling
+ this instance is also updated.
+ """
# Get credentials associated with this instance
uci_id = inst.uci_id
- uci = model.UCI.get( uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( uci_id )
+ self.sa_session.refresh( uci )
conn = self.get_connection_from_uci( uci )
-
+
# Get reservations handle for given instance
try:
rl= conn.get_all_instances( [inst.instance_id] )
@@ -581,10 +605,12 @@
log.error( err )
uci.error = err
uci.state = uci_states.ERROR
+ self.sa_session.add( uci )
+ self.sa_session.flush()
return None
- # Because EPC deletes references to reservations after a short while after instances have terminated, getting an empty list as a response to a query
- # typically means the instance has successfully shut down but the check was not performed in short enough amount of time. Until alternative solution
+ # Because references to reservations are deleted shortly after instances have been terminated, getting an empty list as a response to a query
+ # typically means the instance has successfully shut down but the check was not performed in short enough amount of time. Until an alternative solution
# is found, below code sets state of given UCI to 'error' to indicate to the user something out of ordinary happened.
if len( rl ) == 0:
err = "Instance ID '"+inst.instance_id+"' was not found by the cloud provider. Instance might have crashed or otherwise been terminated."+ \
@@ -595,109 +621,151 @@
inst.state = instance_states.TERMINATED
uci.state = uci_states.ERROR
uci.launch_time = None
- inst.flush()
- uci.flush()
+ self.sa_session.add( inst )
+ self.sa_session.add( uci )
+ self.sa_session.flush()
# Update instance status in local DB with info from cloud provider
for r in rl:
for i, cInst in enumerate( r.instances ):
try:
s = cInst.update()
- log.debug( "Checking state of cloud instance '%s' associated with reservation '%s'. State='%s'" % ( cInst, r, s ) )
+ log.debug( "Checking state of cloud instance '%s' associated with UCI '%s' and reservation '%s'. State='%s'" % ( cInst, uci.name, r, s ) )
if s != inst.state:
inst.state = s
- inst.flush()
- # After instance has shut down, ensure UCI is marked as 'available'
- if s == instance_states.TERMINATED and uci.state != uci_states.ERROR:
+ self.sa_session.add( inst )
+ self.sa_session.flush()
+ # After instance has shut down, ensure UCI is marked as 'available'
+ if s == instance_states.TERMINATED and uci.state != uci_states.ERROR:
uci.state = uci_states.AVAILABLE
uci.launch_time = None
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
# Making sure state of UCI is updated. Once multiple instances become associated with single UCI, this will need to be changed.
if s != uci.state and s != instance_states.TERMINATED:
uci.state = s
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
if cInst.public_dns_name != inst.public_dns:
inst.public_dns = cInst.public_dns_name
- inst.flush()
+ self.sa_session.add( inst )
+ self.sa_session.flush()
if cInst.private_dns_name != inst.private_dns:
inst.private_dns = cInst.private_dns_name
- inst.flush()
+ self.sa_session.add( inst )
+ self.sa_session.flush()
except boto.exception.EC2ResponseError, e:
err = "Updating instance status from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
log.error( err )
uci.error = err
uci.state = uci_states.ERROR
+ self.sa_session.add( uci )
+ self.sa_session.flush()
return None
-
+
def updateStore( self, store ):
+ """
+ Update information in local database for given storage volume as it is obtained from cloud provider.
+ Along with updating information about given storage volume, information about the UCI controlling
+ this storage volume is also updated.
+ """
# Get credentials associated with this store
uci_id = store.uci_id
- uci = model.UCI.get( uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( uci_id )
+ self.sa_session.refresh( uci )
conn = self.get_connection_from_uci( uci )
-
- try:
- vl = conn.get_all_volumes( [store.volume_id] )
- except boto.exception.EC2ResponseError, e:
- err = "Retrieving volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
- log.error( err )
- uci.error = err
- uci.state = uci_states.ERROR
- uci.flush()
- return None
- # Update store status in local DB with info from cloud provider
- if len(vl) > 0:
+ if store.volume_id != None:
+ # Get reservations handle for given store
try:
- if store.status != vl[0].status:
- # In case something failed during creation of UCI but actual storage volume was created and yet
- # UCI state remained as 'new', try to remedy this by updating UCI state here
- if ( store.status == None ) and ( store.volume_id != None ):
- uci.state = vl[0].status
- uci.flush()
- # If UCI was marked in state 'CREATING', update its status to reflect new status
- elif ( uci.state == uci_states.CREATING ):
- # Because Eucalyptus Public Cloud (EPC) deletes volumes immediately after they are created, artificially
- # set status of given UCI to 'available' based on storage volume's availability zone (i.e., it's residing
- # in EPC as opposed to some other Eucalyptus based cloud that allows creation of storage volumes.
- if store.availability_zone == 'epc':
- uci.state = uci_states.AVAILABLE
- else:
- uci.state = vl[0].status
- uci.flush()
-
- store.status = vl[0].status
- store.flush()
- if store.i_id != vl[0].instance_id:
- store.i_id = vl[0].instance_id
- store.flush()
- if store.attach_time != vl[0].attach_time:
- store.attach_time = vl[0].attach_time
- store.flush()
- if store.device != vl[0].device:
- store.device = vl[0].device
- store.flush()
+ log.debug( "Updating storage volume command: vl = conn.get_all_volumes( [%s] )" % store.volume_id )
+ vl = conn.get_all_volumes( [store.volume_id] )
except boto.exception.EC2ResponseError, e:
- err = "Updating status of volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ err = "Retrieving volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
log.error( err )
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
return None
+
+ # Update store status in local DB with info from cloud provider
+ if len(vl) > 0:
+ try:
+ if store.status != vl[0].status:
+ # In case something failed during creation of UCI but actual storage volume was created and yet
+ # UCI state remained as 'new', try to remedy this by updating UCI state here
+ if ( store.status == None ) and ( store.volume_id != None ):
+ uci.state = vl[0].status
+ self.sa_session.add( uci )
+ self.sa_session.flush()
+ # If UCI was marked in state 'CREATING', update its status to reflect new status
+ elif ( uci.state == uci_states.CREATING ):
+ # Because Eucalyptus Public Cloud (EPC) deletes volumes immediately after they are created, artificially
+ # set status of given UCI to 'available' based on storage volume's availability zone (i.e., it's residing
+ # in EPC as opposed to some other Eucalyptus based cloud that allows creation of storage volumes.
+ if store.availability_zone == 'epc':
+ uci.state = uci_states.AVAILABLE
+ else:
+ uci.state = vl[0].status
+
+ self.sa_session.add( uci )
+ self.sa_session.flush()
+
+ store.status = vl[0].status
+ self.sa_session.add( store )
+ self.sa_session.flush()
+ if store.i_id != vl[0].instance_id:
+ store.i_id = vl[0].instance_id
+ self.sa_session.add( store )
+ self.sa_session.flush()
+ if store.attach_time != vl[0].attach_time:
+ store.attach_time = vl[0].attach_time
+ self.sa_session.add( store )
+ self.sa_session.flush()
+ if store.device != vl[0].device:
+ store.device = vl[0].device
+ self.sa_session.add( store )
+ self.sa_session.flush()
+ except boto.exception.EC2ResponseError, e:
+ err = "Updating status of volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ self.sa_session.add( uci )
+ self.sa_session.flush()
+ return None
+ else:
+ err = "No storage volumes returned by cloud provider on general update"
+ log.error( "%s for UCI '%s'" % ( err, uci.name ) )
+ store.status = store_status.ERROR
+ store.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ self.sa_session.add( uci )
+ self.sa_session.add( store )
+ self.sa_session.flush()
else:
- err = "No storage volumes returned by cloud provider on general update"
- log.error( "%s for UCI '%s'" % ( err, uci.name ) )
+ err = "Missing storage volume ID in local database on general update. Manual check is needed to determine " \
+ "whether the storage volume was actually created by the cloud provider."
+ log.error( "%s (for UCI '%s')" % ( err, uci.name ) )
store.status = store_status.ERROR
store.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- store.flush()
-
+ self.sa_session.add( uci )
+ self.sa_session.add( store )
+ self.sa_session.flush()
+
def updateSnapshot( self, snapshot ):
+ """
+ Update information in local database for given snapshot as it is obtained from cloud provider.
+ Along with updating information about given snapshot, information about the UCI controlling
+ this snapshot is also updated.
+ """
# Get credentials associated with this store
uci_id = snapshot.uci_id
- uci = model.UCI.get( uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( uci_id )
+ self.sa_session.refresh( uci )
conn = self.get_connection_from_uci( uci )
try:
@@ -706,25 +774,28 @@
if len( snap ) > 0:
log.debug( "Snapshot '%s' status: %s" % ( snapshot.snapshot_id, snap[0].status ) )
snapshot.status = snap[0].status
- snapshot.flush()
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
else:
- err = "No snapshots returned by cloud provider on general update"
+ err = "No snapshots returned by EC2 on general update"
log.error( "%s for UCI '%s'" % ( err, uci.name ) )
snapshot.status = snapshot_status.ERROR
snapshot.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
+ self.sa_session.add( uci )
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
except boto.exception.EC2ResponseError, e:
- err = "Cloud provider response error while updating snapshot status: " + str( e )
+ err = "EC2 response error while updating snapshot status: " + str( e )
log.error( err )
snapshot.status = snapshot_status.ERROR
snapshot.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
+ self.sa_session.add( uci )
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
except Exception, ex:
err = "Error while updating snapshot status: " + str( ex )
log.error( err )
@@ -732,15 +803,19 @@
snapshot.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
-
+ self.sa_session.add( uci )
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
+
def delete_snapshot( self, snapshot ):
+ """
+ Initiate deletion of given snapshot from cloud provider.
+ """
if snapshot.status == snapshot_status.DELETE:
# Get credentials associated with this store
uci_id = snapshot.uci_id
- uci = model.UCI.get( uci_id )
- uci.refresh()
+ uci = self.sa_session.query( model.UCI ).get( uci_id )
+ self.sa_session.refresh( uci )
conn = self.get_connection_from_uci( uci )
try:
@@ -749,7 +824,8 @@
if snap == True:
snapshot.deleted = True
snapshot.status = snapshot_status.DELETED
- snapshot.flush()
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
return snap
except boto.exception.EC2ResponseError, e:
err = "EC2 response error while deleting snapshot: " + str( e )
@@ -758,8 +834,9 @@
snapshot.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
+ self.sa_session.add( uci )
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
except Exception, ex:
err = "Error while deleting snapshot: " + str( ex )
log.error( err )
@@ -767,21 +844,27 @@
snapshot.error = err
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
+ self.sa_session.add( uci )
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
else:
err = "Cannot delete snapshot '"+snapshot.snapshot_id+"' because its status is '"+snapshot.status+"'. Only snapshots with '" + \
snapshot_status.COMPLETED+"' status can be deleted."
log.error( err )
snapshot.error = err
- snapshot.flush()
-
+ self.sa_session.add( snapshot )
+ self.sa_session.flush()
+
def processZombie( self, inst ):
"""
- Attempt at discovering if starting an instance was successful but local database was not updated
+ Attempt at discovering if starting a cloud instance was successful but local database was not updated
accordingly or if something else failed and instance was never started. Currently, no automatic
repairs are being attempted; instead, appropriate error messages are set.
"""
+ uci_id = inst.uci_id
+ uci = self.sa_session.query( model.UCI ).get( uci_id )
+ self.sa_session.refresh( uci )
+
# Check if any instance-specific information was written to local DB; if 'yes', set instance and UCI's error message
# suggesting manual check.
if inst.launch_time != None or inst.reservation_id != None or inst.instance_id != None:
@@ -790,7 +873,7 @@
# report as error.
# Fields attempting to be recovered are: reservation_id, instance status, and launch_time
if inst.instance_id != None:
- conn = self.get_connection_from_uci( inst.uci )
+ conn = self.get_connection_from_uci( uci )
rl = conn.get_all_instances( [inst.instance_id] ) # reservation list
# Update local DB with relevant data from instance
if inst.reservation_id == None:
@@ -802,9 +885,10 @@
try:
state = rl[0].instances[0].update()
inst.state = state
- inst.uci.state = state
- inst.flush()
- inst.uci.flush()
+ uci.state = state
+ self.sa_session.add( inst )
+ self.sa_session.add( uci )
+ self.sa_session.flush()
except: # something failed, so skip
pass
@@ -812,10 +896,12 @@
try:
launch_time = self.format_time( rl[0].instances[0].launch_time )
inst.launch_time = launch_time
- inst.flush()
+ self.sa_session.add( inst )
+ self.sa_session.flush()
if inst.uci.launch_time == None:
- inst.uci.launch_time = launch_time
- inst.uci.flush()
+ uci.launch_time = launch_time
+ self.sa_session.add( uci )
+ self.sa_session.flush()
except: # something failed, so skip
pass
else:
@@ -826,8 +912,9 @@
inst.uci.error = err
inst.uci.state = uci_states.ERROR
log.error( err )
- inst.flush()
- inst.uci.flush()
+ self.sa_session.add( inst )
+ self.sa_session.add( uci )
+ self.sa_session.flush()
else: #Instance most likely never got processed, so set error message suggesting user to try starting instance again.
err = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI '" + str(inst.uci.name) + \
@@ -835,11 +922,12 @@
"starting the instance again."
inst.error = err
inst.state = instance_states.ERROR
- inst.uci.error = err
- inst.uci.state = uci_states.ERROR
+ uci.error = err
+ uci.state = uci_states.ERROR
log.error( err )
- inst.flush()
- inst.uci.flush()
+ self.sa_session.add( inst )
+ self.sa_session.add( uci )
+ self.sa_session.flush()
# uw = UCIwrapper( inst.uci )
# log.debug( "Try automatically re-submitting UCI '%s'." % uw.get_name() )
@@ -848,16 +936,23 @@
Establishes and returns connection to cloud provider. Information needed to do so is obtained
directly from uci database object.
"""
- log.debug( 'Establishing %s cloud connection.' % self.type )
+ log.debug( 'Establishing %s cloud connection' % self.type )
a_key = uci.credentials.access_key
s_key = uci.credentials.secret_key
# Get connection
try:
region = RegionInfo( None, uci.credentials.provider.region_name, uci.credentials.provider.region_endpoint )
+ log.debug( "[%s] Using following command to connect to cloud provider: "
+ "conn = EC2Connection( aws_access_key_id=%s, "
+ "aws_secret_access_key=%s, "
+ "port=%s, "
+ "is_secure=%s, "
+ "region=region, "
+ "path=%s )" % ( self.type, a_key, s_key, uci.credentials.provider.is_secure, uci.credentials.provider.port, uci.credentials.provider.path ) )
conn = EC2Connection( aws_access_key_id=a_key,
aws_secret_access_key=s_key,
- is_secure=uci.credentials.provider.is_secure,
- port=uci.credentials.provider.port,
+ is_secure=uci.credentials.provider.is_secure,
+ port=uci.credentials.provider.port,
region=region,
path=uci.credentials.provider.path )
except boto.exception.EC2ResponseError, e:
@@ -865,7 +960,8 @@
log.error( err )
uci.error = err
uci.state = uci_states.ERROR
- uci.flush()
+ self.sa_session.add( uci )
+ self.sa_session.flush()
return None
return conn
@@ -878,7 +974,7 @@
# conn = self.get_connection( uci )
#
# # Update status of storage volumes
-# vl = model.CloudStore.filter( model.CloudInstance.c.uci_id == uci.id ).all()
+# vl = model.CloudStore.filter( model.CloudInstance.table.c.uci_id == uci.id ).all()
# vols = []
# for v in vl:
# vols.append( v.volume_id )
@@ -894,7 +990,7 @@
# pass
#
# # Update status of instances
-# il = model.CloudInstance.filter_by( uci_id=uci.id ).filter( model.CloudInstance.c.state != 'terminated' ).all()
+# il = model.CloudInstance.filter_by( uci_id=uci.id ).filter( model.CloudInstance.table.c.state != 'terminated' ).all()
# instanceList = []
# for i in il:
# instanceList.append( i.instance_id )
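
The hunks above consistently replace the old per-object persistence shortcuts (model.UCI.get(), obj.flush()) with an explicit SQLAlchemy session: objects are looked up through self.sa_session.query(...), then added to the session and flushed. A minimal, self-contained sketch of that pattern follows; the UCI class, columns, and sa_session below are stand-ins for the Galaxy objects shown in the diff, not the actual Galaxy code.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker  # on SQLAlchemy < 1.4, declarative_base lives in sqlalchemy.ext.declarative

Base = declarative_base()

class UCI(Base):
    # Stand-in for galaxy.model.UCI; only the columns used here are declared.
    __tablename__ = 'uci'
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    state = Column(String(50))
    error = Column(String(255))

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
sa_session = sessionmaker(bind=engine)()

def mark_uci_error(uci_id, err):
    # Old style: uci = model.UCI.get(uci_id); ...; uci.flush()
    # New style: go through the session for both lookup and persistence.
    uci = sa_session.query(UCI).get(uci_id)
    uci.error = err
    uci.state = 'error'
    sa_session.add(uci)
    sa_session.flush()

sa_session.add(UCI(name='demo', state='new'))
sa_session.flush()
mark_uci_error(1, 'No storage volumes returned by cloud provider on general update')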
diff -r 7d013eb98022 -r 39502dd3fd23 lib/galaxy/config.py
--- a/lib/galaxy/config.py Thu Nov 12 16:36:07 2009 -0500
+++ b/lib/galaxy/config.py Mon Nov 16 20:37:47 2009 -0500
@@ -114,12 +114,11 @@
self.tool_runners = []
self.datatypes_config = kwargs.get( 'datatypes_config_file', 'datatypes_conf.xml' )
# Cloud configuration options
- self.cloud_controller_instance = string_as_bool( kwargs.get( 'cloud_controller_instance', 'False' ) )
- self.cloud_provider = kwargs.get( 'cloud_provider', None )
- if self.cloud_controller_instance:
- self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'True' ) )
+ self.cloud_controller_instance = string_as_bool( kwargs.get( 'cloud_controller_instance', False ) )
+ if self.cloud_controller_instance == True:
+ self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', True ) )
else:
- self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'False' ) )
+ self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', False ) )
def get( self, key, default ):
return self.config_dict.get( key, default )
def get_bool( self, key, default ):
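
The config.py hunk above swaps the quoted defaults ('False', 'True') for plain booleans in the kwargs.get() calls; either form resolves the same way because the helper normalizes its argument to a lower-cased string before comparing. The stand-in below only illustrates that behaviour; the real helper lives in galaxy.util and may differ in detail.

def string_as_bool(value):
    # Rough approximation of galaxy.util.string_as_bool: coerce to str, then
    # compare against the accepted "truthy" spellings.
    if str(value).lower() in ('true', 'yes', 'on'):
        return True
    return False

# A boolean default and its string spelling give the same result:
assert string_as_bool(False) is False
assert string_as_bool('False') is False
assert string_as_bool(True) is True
assert string_as_bool('True') is True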
diff -r 7d013eb98022 -r 39502dd3fd23 lib/galaxy/web/controllers/cloud.py
--- a/lib/galaxy/web/controllers/cloud.py Thu Nov 12 16:36:07 2009 -0500
+++ b/lib/galaxy/web/controllers/cloud.py Mon Nov 16 20:37:47 2009 -0500
@@ -91,48 +91,48 @@
cloudCredentials = trans.sa_session.query( model.CloudUserCredentials ) \
.filter_by( user=user ) \
- .filter( model.CloudUserCredentials.c.deleted != True ) \
- .order_by( model.CloudUserCredentials.c.name ) \
+ .filter( model.CloudUserCredentials.table.c.deleted != True ) \
+ .order_by( model.CloudUserCredentials.table.c.name ) \
.all()
cloudProviders = trans.sa_session.query( model.CloudProvider ) \
.filter_by( user=user ) \
- .filter( model.CloudProvider.c.deleted != True ) \
- .order_by( model.CloudProvider.c.name ) \
+ .filter( model.CloudProvider.table.c.deleted != True ) \
+ .order_by( model.CloudProvider.table.c.name ) \
.all()
liveInstances = trans.sa_session.query( model.UCI ) \
.filter_by( user=user ) \
- .filter( or_( model.UCI.c.state==uci_states.RUNNING,
- model.UCI.c.state==uci_states.PENDING,
- model.UCI.c.state==uci_states.SUBMITTED,
- model.UCI.c.state==uci_states.SUBMITTED_UCI,
- model.UCI.c.state==uci_states.SHUTTING_DOWN,
- model.UCI.c.state==uci_states.SHUTTING_DOWN_UCI ) ) \
- .order_by( desc( model.UCI.c.update_time ) ) \
+ .filter( or_( model.UCI.table.c.state==uci_states.RUNNING,
+ model.UCI.table.c.state==uci_states.PENDING,
+ model.UCI.table.c.state==uci_states.SUBMITTED,
+ model.UCI.table.c.state==uci_states.SUBMITTED_UCI,
+ model.UCI.table.c.state==uci_states.SHUTTING_DOWN,
+ model.UCI.table.c.state==uci_states.SHUTTING_DOWN_UCI ) ) \
+ .order_by( desc( model.UCI.table.c.update_time ) ) \
.all()
prevInstances = trans.sa_session.query( model.UCI ) \
.filter_by( user=user, deleted=False ) \
- .filter( or_( model.UCI.c.state==uci_states.AVAILABLE,
- model.UCI.c.state==uci_states.NEW,
- model.UCI.c.state==uci_states.NEW_UCI,
- model.UCI.c.state==uci_states.CREATING,
- model.UCI.c.state==uci_states.ERROR,
- model.UCI.c.state==uci_states.DELETED,
- model.UCI.c.state==uci_states.DELETING,
- model.UCI.c.state==uci_states.DELETING_UCI,
- model.UCI.c.state==uci_states.SNAPSHOT,
- model.UCI.c.state==uci_states.SNAPSHOT_UCI ) ) \
- .order_by( desc( model.UCI.c.update_time ) ) \
+ .filter( or_( model.UCI.table.c.state==uci_states.AVAILABLE,
+ model.UCI.table.c.state==uci_states.NEW,
+ model.UCI.table.c.state==uci_states.NEW_UCI,
+ model.UCI.table.c.state==uci_states.CREATING,
+ model.UCI.table.c.state==uci_states.ERROR,
+ model.UCI.table.c.state==uci_states.DELETED,
+ model.UCI.table.c.state==uci_states.DELETING,
+ model.UCI.table.c.state==uci_states.DELETING_UCI,
+ model.UCI.table.c.state==uci_states.SNAPSHOT,
+ model.UCI.table.c.state==uci_states.SNAPSHOT_UCI ) ) \
+ .order_by( desc( model.UCI.table.c.update_time ) ) \
.all()
# Check after update there are instances in pending state; if so, display message
pendingInstances = trans.sa_session.query( model.UCI ) \
.filter_by( user=user ) \
- .filter( or_( model.UCI.c.state==uci_states.PENDING,
- model.UCI.c.state==uci_states.SUBMITTED,
- model.UCI.c.state==uci_states.SUBMITTED_UCI ) ) \
+ .filter( or_( model.UCI.table.c.state==uci_states.PENDING,
+ model.UCI.table.c.state==uci_states.SUBMITTED,
+ model.UCI.table.c.state==uci_states.SUBMITTED_UCI ) ) \
.all()
if pendingInstances:
trans.set_message( "Galaxy instance started. NOTE: Please wait about 5 minutes for the instance to "
@@ -173,7 +173,7 @@
# Create new user configured instance
try:
- if trans.app.model.UCI \
+ if trans.sa_session.query( model.UCI ) \
.filter_by (user=user, deleted=False, name=instanceName ) \
.first():
error['inst_error'] = "An instance with that name already exists."
@@ -191,8 +191,8 @@
# Capture user configured instance information
uci = model.UCI()
uci.name = instanceName
- uci.credentials = trans.app.model.CloudUserCredentials.filter(
- trans.app.model.CloudUserCredentials.table.c.name==credName ).first()
+ uci.credentials = trans.sa_session.query( model.CloudUserCredentials ) \
+ .filter( model.CloudUserCredentials.table.c.name==credName ).first()
uci.user= user
uci.total_size = volSize # This is OK now because new instance is being created and only one storage volume can be created at UCI creation time
uci.state = uci_states.NEW_UCI
@@ -204,8 +204,8 @@
storage.availability_zone = zone
# Persist
session = trans.sa_session
- session.save_or_update( uci )
- session.save_or_update( storage )
+ session.add( uci )
+ session.add( storage )
session.flush()
# Log and display the management page
trans.log_event( "User configured new cloud instance: '%s'" % instanceName )
@@ -278,8 +278,8 @@
uci.state = uci_states.SUBMITTED_UCI
# Persist
session = trans.sa_session
- session.save_or_update( instance )
- session.save_or_update( uci )
+ session.add( instance )
+ session.add( uci )
session.flush()
# Log
trans.log_event ("User initiated starting of UCI '%s'." % uci.name )
@@ -309,7 +309,7 @@
( uci.state != uci_states.AVAILABLE ):
uci.state = uci_states.SHUTTING_DOWN_UCI
session = trans.sa_session
- session.save_or_update( uci )
+ session.add( uci )
session.flush()
trans.log_event( "User stopped cloud instance '%s' (id: %s)" % ( uci.name, uci.id ) )
trans.set_message( "Stopping of Galaxy instance '%s' initiated." % uci.name )
@@ -378,7 +378,7 @@
prevInstances = trans.sa_session.query( model.CloudInstance ) \
.filter_by( user=user, state=instance_states.TERMINATED, uci_id=id ) \
- .order_by( desc( model.CloudInstance.c.update_time ) ) \
+ .order_by( desc( model.CloudInstance.table.c.update_time ) ) \
.all()
return trans.fill_template( "cloud/view_usage.mako", prevInstances = prevInstances )
@@ -396,7 +396,7 @@
name = uci.name
uci.state = uci_states.DELETING_UCI
session = trans.sa_session
- session.save_or_update( uci )
+ session.add( uci )
session.flush()
trans.log_event( "User marked cloud instance '%s' for deletion." % name )
trans.set_message( "Galaxy instance '%s' marked for deletion." % name )
@@ -431,8 +431,8 @@
uci.state = uci_states.SNAPSHOT_UCI
# Persist
session = trans.sa_session
- session.save_or_update( snapshot )
- session.save_or_update( uci )
+ session.add( snapshot )
+ session.add( uci )
session.flush()
elif len( stores ) == 0:
error( "No storage volumes found that are associated with this instance." )
@@ -455,7 +455,7 @@
snaps = trans.sa_session.query( model.CloudSnapshot ) \
.filter_by( user=user, uci_id=id, deleted=False ) \
- .order_by( desc( model.CloudSnapshot.c.update_time ) ) \
+ .order_by( desc( model.CloudSnapshot.table.c.update_time ) ) \
.all()
return trans.fill_template( "cloud/view_snapshots.mako",
@@ -474,7 +474,8 @@
if snap.status == snapshot_status.COMPLETED:
snap.status = snapshot_status.DELETE
- snap.flush()
+ trans.sa_session.add( snap )
+ trans.sa_session.flush()
trans.set_message( "Snapshot '%s' is marked for deletion. Once the deletion is complete, it will no longer be visible in this list. "
"Please note that this process may take up to a minute." % snap.snapshot_id )
else:
@@ -485,7 +486,7 @@
uci_id = trans.security.decode_id( uci_id )
snaps = trans.sa_session.query( model.CloudSnapshot ) \
.filter_by( user=user, uci_id=uci_id, deleted=False ) \
- .order_by( desc( model.CloudSnapshot.c.update_time ) ) \
+ .order_by( desc( model.CloudSnapshot.table.c.update_time ) ) \
.all()
return trans.fill_template( "cloud/view_snapshots.mako",
@@ -514,9 +515,9 @@
error['provider_error'] = "You must select cloud provider type for this machine image."
elif image_id=='' or len( image_id ) > 255:
error['id_error'] = "Image ID must be between 1 and 255 characters long."
- elif trans.app.model.CloudUserCredentials \
+ elif trans.sa_session.query( model.CloudUserCredentials ) \
.filter_by( deleted=False ) \
- .filter( trans.app.model.CloudImage.table.c.image_id == image_id ) \
+ .filter( model.CloudImage.table.c.image_id == image_id ) \
.first():
error['id_error'] = "Image with ID '" + image_id + "' is already registered. \
Please choose another ID."
@@ -531,7 +532,7 @@
image.architecture = architecture
# Persist
session = trans.sa_session
- session.save_or_update( image )
+ session.add( image )
session.flush()
# Log and display the management page
trans.log_event( "New cloud image added: '%s'" % image.image_id )
@@ -557,7 +558,7 @@
@web.expose
@web.require_login( "use Galaxy cloud" )
def list_machine_images( self, trans ):
- images = trans.sa_session.query( model.CloudImage ).filter( trans.app.model.CloudImage.table.c.deleted != True ).all()
+ images = trans.sa_session.query( model.CloudImage ).filter( model.CloudImage.table.c.deleted != True ).all()
return trans.fill_template( '/cloud/list_images.mako', images=images )
@web.expose
@@ -568,7 +569,8 @@
image = trans.sa_session.query( model.CloudImage ).get( id )
image.deleted = True
- image.flush()
+ trans.sa_session.add( image )
+ trans.sa_session.flush()
return self.list_machine_images( trans )
@web.expose
@@ -588,9 +590,9 @@
image = trans.sa_session.query( model.CloudImage ).get( id )
if image_id=='' or len( image_id ) > 255:
error['id_error'] = "Image ID must be between 1 and 255 characters in length."
- elif trans.app.model.CloudImage \
+ elif trans.sa_session.query( model.CloudImage ) \
.filter_by( deleted=False ) \
- .filter( and_( trans.app.model.CloudImage.table.c.id != image.id, trans.app.model.CloudImage.table.c.image_id==image_id ) ) \
+ .filter( and_( model.CloudImage.table.c.id != image.id, model.CloudImage.table.c.image_id==image_id ) ) \
.first():
error['id_error'] = "Image with ID '" + image_id + "' already exist. Please choose an alternative name."
elif architecture=='' or len( architecture ) > 255:
@@ -606,7 +608,7 @@
image.architecture = architecture
# Persist
session = trans.sa_session
- session.save_or_update( image )
+ session.add( image )
session.flush()
# Log and display the management page
trans.set_message( "Machine image '%s' edited." % image.image_id )
@@ -626,9 +628,9 @@
if credName or providerName or accessKey or secretKey:
if credName=='' or len( credName ) > 255:
error['cred_error'] = "Credentials name must be between 1 and 255 characters in length."
- elif trans.app.model.CloudUserCredentials \
+ elif trans.sa_session.query( model.CloudUserCredentials ) \
.filter_by( user=user, deleted=False ) \
- .filter( trans.app.model.CloudUserCredentials.table.c.name == credName ) \
+ .filter( model.CloudUserCredentials.table.c.name == credName ) \
.first():
error['cred_error'] = "Credentials with that name already exist."
elif providerName=='':
@@ -648,7 +650,7 @@
credentials.provider = provider
# Persist
session = trans.sa_session
- session.save_or_update( credentials )
+ session.add( credentials )
session.flush()
# Log and display the management page
trans.log_event( "User added new credentials" )
@@ -680,9 +682,9 @@
credentials = get_stored_credentials( trans, id )
if credName=='' or len( credName ) > 255:
error['cred_error'] = "Credentials name must be between 1 and 255 characters in length."
- elif trans.app.model.CloudUserCredentials \
+ elif trans.sa_session.query( model.CloudUserCredentials ) \
.filter_by( user=user ) \
- .filter( and_( trans.app.model.CloudUserCredentials.table.c.id != credentials.id, trans.app.model.CloudUserCredentials.table.c.name==credName ) ) \
+ .filter( and_( model.CloudUserCredentials.table.c.id != credentials.id, model.CloudUserCredentials.table.c.name==credName ) ) \
.first():
error['cred_error'] = "Credentials with name '" + credName + "' already exist. Please choose an alternative name."
elif accessKey=='' or len( accessKey ) > 255:
@@ -702,7 +704,7 @@
credentials.secret_key = secretKey
# Persist
session = trans.sa_session
- session.save_or_update( credentials )
+ session.add( credentials )
session.flush()
# Log and display the management page
trans.set_message( "Credential '%s' edited." % credentials.name )
@@ -745,7 +747,8 @@
if UCI == None:
# Delete and save
stored.deleted = True
- stored.flush()
+ trans.sa_session.add( stored )
+ trans.sa_session.flush()
# Display the management page
trans.set_message( "Credentials '%s' deleted." % stored.name )
return self.list( trans )
@@ -769,9 +772,9 @@
except ValueError:
error['is_secure_error'] = "Field 'is secure' can only take on an integer value '0' or '1'"
- if trans.app.model.CloudProvider \
+ if trans.sa_session.query( model.CloudProvider ) \
.filter_by (user=user, name=name) \
- .filter( model.CloudProvider.c.deleted != True ) \
+ .filter( model.CloudProvider.table.c.deleted != True ) \
.first():
error['name_error'] = "A provider with that name already exist."
elif name=='' or len( name ) > 255:
@@ -843,7 +846,7 @@
provider.path = path
# Persist
session = trans.sa_session
- session.save_or_update( provider )
+ session.add( provider )
session.flush()
# Log and display the management page
trans.log_event( "User configured new cloud provider: '%s'" % name )
@@ -909,9 +912,9 @@
if name=='' or len( name ) > 255:
error['name_error'] = "Cloud provider name must be between 1 and 255 characters in length."
- elif trans.app.model.CloudProvider \
+ elif trans.sa_session.query( model.CloudProvider ) \
.filter_by( user=user ) \
- .filter( and_( trans.app.model.CloudProvider.table.c.id != provider.id, trans.app.model.CloudProvider.table.c.name == name ) ) \
+ .filter( and_( model.CloudProvider.table.c.id != provider.id, model.CloudProvider.table.c.name == name ) ) \
.first():
error['name_error'] = "Cloud provider with name '" + name + "' already exist. Please choose an alternative name."
elif not ( is_secure == 0 or is_secure == 1):
@@ -985,7 +988,7 @@
provider.path = None
# Persist
session = trans.sa_session
- session.save_or_update( provider )
+ session.add( provider )
session.flush()
# Log and display the management page
trans.log_event( "User edited cloud provider: '%s'" % name )
@@ -1003,14 +1006,15 @@
provider = get_provider_by_id( trans, id )
creds = trans.sa_session.query( model.CloudUserCredentials ) \
.filter_by( user=user, provider_id=provider.id ) \
- .filter( model.CloudUserCredentials.c.deleted != True ) \
+ .filter( model.CloudUserCredentials.table.c.deleted != True ) \
.all()
if len( creds ) == 0:
# Delete and save
#sess = trans.sa_session
provider.deleted = True
- provider.flush()
+ trans.sa_session.add( provider )
+ trans.sa_session.flush()
# Display the management page
trans.set_message( "Cloud provider '%s' deleted." % provider.name )
return self.list( trans )
@@ -1024,7 +1028,7 @@
@web.json
def json_update( self, trans ):
user = trans.get_user()
- UCIs = trans.sa_session.query( model.UCI ).filter_by( user=user ).filter( model.UCI.c.deleted != True ).all()
+ UCIs = trans.sa_session.query( model.UCI ).filter_by( user=user ).filter( model.UCI.table.c.deleted != True ).all()
insd = {} # instance name-state dict
for uci in UCIs:
dict = {}
@@ -1062,7 +1066,7 @@
def get_provider( trans, name ):
user = trans.get_user()
- return trans.app.model.CloudProvider \
+ return trans.sa_session.query( model.CloudProvider ) \
.filter_by (user=user, name=name) \
.first()
@@ -1126,19 +1130,6 @@
# Looks good
return live
-def get_mi( trans, uci, size='m1.small' ):
- """
- Get appropriate machine image (mi) based on instance size.
- TODO: Dummy method - need to implement logic
- For valid sizes, see http://aws.amazon.com/ec2/instance-types/
- """
- if uci.credentials.provider.type == 'ec2':
- return trans.app.model.CloudImage.filter(
- trans.app.model.CloudImage.table.c.id==2).first()
- else:
- return trans.app.model.CloudImage.filter(
- trans.app.model.CloudImage.table.c.id==1).first()
-
def get_stores( trans, uci ):
"""
Get stores objects that are connected to uci object
@@ -1146,7 +1137,7 @@
user = trans.get_user()
stores = trans.sa_session.query( model.CloudStore ) \
.filter_by( user=user, uci_id=uci.id ) \
- .filter( model.CloudStore.c.status != store_status.ERROR ) \
+ .filter( model.CloudStore.table.c.status != store_status.ERROR ) \
.all()
return stores
@@ -1173,7 +1164,7 @@
# creds = trans.sa_session.query( model.CloudUserCredentials ) \
# .filter_by( user=user, name=credName ) \
# .first()
- #.filter( model.CloudUserCredentials.c.deleted != True ) \ MOVE TO LINE ABOVE ONCE DELETE COLUMS ARE IMPLEMENTED
+ #.filter( model.CloudUserCredentials.table.c.deleted != True ) \ MOVE TO LINE ABOVE ONCE DELETE COLUMS ARE IMPLEMENTED
if creds:
a_key = creds.access_key
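
Throughout the controller diff above, filters and ordering move from the old class-level shorthand (model.UCI.c.state) to the table object attached to each model class (model.UCI.table.c.state). A small, self-contained sketch of that filter idiom follows; the UCI class and columns here are stand-ins, and with a plain declarative mapping the equivalent handle is UCI.__table__ rather than the .table attribute Galaxy attaches explicitly.

from sqlalchemy import Column, Integer, String, create_engine, desc, or_
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class UCI(Base):
    # Stand-in model; only the columns used by the filters below are declared.
    __tablename__ = 'uci'
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    state = Column(String(50))

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
sa_session = sessionmaker(bind=engine)()

sa_session.add_all([UCI(name='a', state='running'), UCI(name='b', state='available')])
sa_session.flush()

table = UCI.__table__  # Galaxy's mapped classes expose this as UCI.table
live = sa_session.query(UCI) \
                 .filter(or_(table.c.state == 'running',
                             table.c.state == 'pending')) \
                 .order_by(desc(table.c.id)) \
                 .all()
print([u.name for u in live])  # ['a']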
diff -r 7d013eb98022 -r 39502dd3fd23 templates/cloud/configure_cloud.mako
--- a/templates/cloud/configure_cloud.mako Thu Nov 12 16:36:07 2009 -0500
+++ b/templates/cloud/configure_cloud.mako Mon Nov 16 20:37:47 2009 -0500
@@ -90,8 +90,9 @@
// Update 'state' and 'time alive' fields
$(elem + "-state").text( data[i].state );
- if ( ( prev_old_state.match('newUCI') && new_state=='new' ) || \
+ if ( ( prev_old_state.match('newUCI') && new_state=='new' ) || ( prev_old_state.match('newUCI') && new_state=='available' ) || \
( prev_old_state.match('newUCI') && new_state=='creating' ) || ( prev_old_state.match('new') && new_state=='creating' ) || \
+ ( prev_old_state.match('new') && new_state=='available' ) || \
( prev_old_state.match('deletingUCI') && new_state=='deleted' ) || ( prev_old_state.match('deleting') && new_state=='deleted' ) || \
( prev_old_state.match('available') && new_state=='error' ) || ( prev_old_state.match('deleting') && new_state=='error' ) ) {
// TODO: on state change from available->error and deleting->error page should be refreshed but that causes problems with
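
The template hunk above adjusts which state transitions trigger a page reload when the page polls the controller's json_update action (visible earlier in this diff); the JavaScript reads fields such as state, launch_time, and time_ago from the returned per-instance entries. The sketch below is only a rough illustration of the shape of that response: build_json_update and its simplified fields are hypothetical, and the real controller derives the values from model.UCI rows and includes additional bookkeeping.

import datetime

def build_json_update(ucis):
    # Hypothetical stand-in for the name -> state dict ("insd") that
    # json_update assembles for the polling JavaScript.
    insd = {}
    for uci in ucis:
        entry = {'state': uci['state']}
        if uci.get('launch_time') is not None:
            launched = uci['launch_time']
            entry['launch_time'] = launched.strftime('%Y-%m-%dT%H:%M:%S')
            entry['time_ago'] = '%d minutes ago' % (
                (datetime.datetime.utcnow() - launched).seconds // 60)
        insd[uci['name']] = entry
    return insd

sample = build_json_update([
    {'name': 'my-uci', 'state': 'pending',
     'launch_time': datetime.datetime.utcnow() - datetime.timedelta(minutes=3)},
])
print(sample['my-uci']['state'])  # 'pending'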
23 Nov '09
details: http://www.bx.psu.edu/hg/galaxy/rev/b90db227df6d
changeset: 3090:b90db227df6d
user: Enis Afgan <afgane(a)gmail.com>
date: Thu Nov 12 10:28:54 2009 -0500
description:
Fixed a bug in the EC2 controller regarding error reporting during snapshot update. Also cleaned up snapshot error-reporting code a bit.
diffstat:
lib/galaxy/cloud/__init__.py | 11 ++-
lib/galaxy/cloud/providers/ec2.py | 131 ++++++++++++++++++--------------
lib/galaxy/cloud/providers/eucalyptus.py | 83 +++++++++++---------
lib/galaxy/web/controllers/cloud.py | 15 +++-
4 files changed, 140 insertions(+), 100 deletions(-)
diffs (426 lines):
diff -r c1dc30106721 -r b90db227df6d lib/galaxy/cloud/__init__.py
--- a/lib/galaxy/cloud/__init__.py Wed Nov 11 20:11:58 2009 -0500
+++ b/lib/galaxy/cloud/__init__.py Thu Nov 12 10:28:54 2009 -0500
@@ -44,6 +44,15 @@
ERROR = "error"
)
+snapshot_status = Bunch(
+ SUBMITTED = 'submitted',
+ PENDING = 'pending',
+ COMPLETED = 'completed',
+ DELETE = 'delete',
+ DELETED= 'deleted',
+ ERROR = "error"
+)
+
class CloudManager( object ):
"""
Highest level interface to cloud management.
@@ -338,7 +347,7 @@
snap.error = error
if set_status:
- snap.status = 'error'
+ snap.status = snapshot_status.ERROR
snap.flush()
diff -r c1dc30106721 -r b90db227df6d lib/galaxy/cloud/providers/ec2.py
--- a/lib/galaxy/cloud/providers/ec2.py Wed Nov 11 20:11:58 2009 -0500
+++ b/lib/galaxy/cloud/providers/ec2.py Thu Nov 12 10:28:54 2009 -0500
@@ -52,6 +52,15 @@
ERROR = "error"
)
+snapshot_status = Bunch(
+ SUBMITTED = 'submitted',
+ PENDING = 'pending',
+ COMPLETED = 'completed',
+ DELETE = 'delete',
+ DELETED= 'deleted',
+ ERROR = "error"
+)
+
class EC2CloudProvider( object ):
"""
Amazon EC2-based cloud provider implementation for managing instances.
@@ -282,7 +291,7 @@
if uci_wrapper.get_state() != uci_states.ERROR:
conn = self.get_connection( uci_wrapper )
- snapshots = uci_wrapper.get_snapshots( status = 'submitted' )
+ snapshots = uci_wrapper.get_snapshots( status = snapshot_status.SUBMITTED )
for snapshot in snapshots:
log.debug( "Snapshot DB id: '%s', volume id: '%s'" % ( snapshot.id, snapshot.store.volume_id ) )
try:
@@ -292,14 +301,16 @@
sh = conn.get_all_snapshots( snap_id ) # get updated status
uci_wrapper.set_snapshot_status( status=sh[0].status, snap_id=snap_id )
except boto.exception.EC2ResponseError, ex:
- log.error( "EC2 response error while creating snapshot: '%s'" % e )
- uci_wrapper.set_snapshot_error( error="EC2 response error while creating snapshot: " + str( e ), snap_index=snapshot.id, set_status=True )
- uci_wrapper.set_error( "EC2 response error while creating snapshot: " + str( e ), True )
+ err = "Cloud provider response error while creating snapshot: " + str( e )
+ log.error( err )
+ uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
+ uci_wrapper.set_error( error=err, True )
return
except Exception, ex:
- log.error( "Error while creating snapshot: '%s'" % ex )
- uci_wrapper.set_snapshot_error( error="Error while creating snapshot: "+str( ex ), snap_index=snapshot.id, set_status=True )
- uci_wrapper.set_error( "Error while creating snapshot: " + str( ex ), True )
+ err = "Error while creating snapshot: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
+ uci_wrapper.set_error( error=err, True )
return
uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
@@ -509,12 +520,12 @@
# store.flush()
# Update pending snapshots or delete ones marked for deletion
- snapshots = model.CloudSnapshot.filter_by( status='pending', status='delete' ).all()
+ snapshots = model.CloudSnapshot.filter_by( status=snapshot_status.PENDING, status=snapshot_status.DELETE ).all()
for snapshot in snapshots:
- if self.type == snapshot.uci.credentials.provider.type and snapshot.status == 'pending':
+ if self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.PENDING:
log.debug( "[%s] Running general status update on snapshot '%s'" % ( snapshot.uci.credentials.provider.type, snapshot.snapshot_id ) )
self.update_snapshot( snapshot )
- elif self.type == snapshot.uci.credentials.provider.type and snapshot.status == 'delete':
+ elif self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.DELETE:
log.debug( "[%s] Initiating deletion of snapshot '%s'" % ( snapshot.uci.credentials.provider.type, snapshot.snapshot_id ) )
self.delete_snapshot( snapshot )
@@ -640,49 +651,48 @@
def updateSnapshot( self, snapshot ):
# Get credentials associated with this store
- if snapshot.status == 'completed':
- uci_id = snapshot.uci_id
- uci = model.UCI.get( uci_id )
- uci.refresh()
- conn = self.get_connection_from_uci( uci )
-
- try:
- log.debug( "Updating status of snapshot '%s'" % snapshot.snapshot_id )
- snap = conn.get_all_snapshots( [snapshot.snapshot_id] )
- if len( snap ) > 0:
- log.debug( "Snapshot '%s' status: %s" % ( snapshot.snapshot_id, snap[0].status ) )
- snapshot.status = snap[0].status
- snapshot.flush()
- else:
- log.error( "No snapshots returned by EC2 for UCI '%s'" % uci.name )
- snapshot.status = 'No snapshots returned by EC2.'
- uci.error = "No snapshots returned by EC2."
- uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
- except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error while updating snapshot: '%s'" % e )
- snapshot.status = 'error'
- snapshot.error = "EC2 response error while updating snapshot status: " + str( e )
- uci.error = "EC2 response error while updating snapshot status: " + str( e )
+ uci_id = snapshot.uci_id
+ uci = model.UCI.get( uci_id )
+ uci.refresh()
+ conn = self.get_connection_from_uci( uci )
+
+ try:
+ log.debug( "Updating status of snapshot '%s'" % snapshot.snapshot_id )
+ snap = conn.get_all_snapshots( [snapshot.snapshot_id] )
+ if len( snap ) > 0:
+ log.debug( "Snapshot '%s' status: %s" % ( snapshot.snapshot_id, snap[0].status ) )
+ snapshot.status = snap[0].status
+ snapshot.flush()
+ else:
+ err = "No snapshots returned by EC2"
+ log.error( "%s for UCI '%s'" % ( err, uci.name ) )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
uci.state = uci_states.ERROR
uci.flush()
snapshot.flush()
- except Exception, ex:
- log.error( "Error while updating snapshot: '%s'" % ex )
- snapshot.status = 'error'
- snapshot.error = "Error while updating snapshot status: " + str( e )
- uci.error = "Error while updating snapshot status: " + str( ex )
- uci.state = uci_states.ERROR
- uci.flush()
- snapshot.flush()
- else:
- log.error( "Cannot delete snapshot '%s' because its status is '%s'. Only snapshots with 'completed' status can be deleted." % ( snapshot.snapshot_id, snapshot.status ) )
- snapshot.error = "Cannot delete snapshot because its status is '"+snapshot.status+"'. Only snapshots with 'completed' status can be deleted."
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while updating snapshot status: " + str( e )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+ except Exception, ex:
+ err = "Error while updating snapshot status: " + str( ex )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
snapshot.flush()
def delete_snapshot( self, snapshot ):
- if snapshot.status == 'delete':
+ if snapshot.status == snapshot_status.DELETE:
# Get credentials associated with this store
uci_id = snapshot.uci_id
uci = model.UCI.get( uci_id )
@@ -694,29 +704,32 @@
snap = conn.delete_snapshot( snapshot.snapshot_id )
if snap == True:
snapshot.deleted = True
- snapshot.status = 'deleted'
+ snapshot.status = snapshot_status.DELETED
snapshot.flush()
return snap
except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error while deleting snapshot: '%s'" % e )
- snapshot.status = 'error'
- snapshot.error = "EC2 response error while deleting snapshot: " + str( e )
- uci.error = "EC2 response error while deleting snapshot: " + str( e )
+ err = "EC2 response error while deleting snapshot: " + str( e )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
uci.state = uci_states.ERROR
uci.flush()
snapshot.flush()
except Exception, ex:
- log.error( "Error while deleting snapshot: '%s'" % ex )
- snapshot.status = 'error'
- snapshot.error = "Cloud provider error while deleting snapshot: " + str( ex )
- uci.error = "Cloud provider error while deleting snapshot: " + str( ex )
+ err = "Error while deleting snapshot: " + str( ex )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
uci.state = uci_states.ERROR
uci.flush()
snapshot.flush()
else:
- log.error( "Cannot delete snapshot '%s' because its status is '%s'. Only snapshots with 'completed' status can be deleted." % ( snapshot.snapshot_id, snapshot.status ) )
- snapshot.error = "Cannot delete snapshot because its status is '"+snapshot.status+"'. Only snapshots with 'completed' status can be deleted."
- snapshot.status = 'error'
+ err = "Cannot delete snapshot '"+snapshot.snapshot_id+"' because its status is '"+snapshot.status+"'. Only snapshots with '" + \
+ snapshot_status.COMPLETED+"' status can be deleted."
+ log.error( err )
+ snapshot.error = err
snapshot.flush()
def processZombie( self, inst ):
diff -r c1dc30106721 -r b90db227df6d lib/galaxy/cloud/providers/eucalyptus.py
--- a/lib/galaxy/cloud/providers/eucalyptus.py Wed Nov 11 20:11:58 2009 -0500
+++ b/lib/galaxy/cloud/providers/eucalyptus.py Thu Nov 12 10:28:54 2009 -0500
@@ -267,7 +267,7 @@
if uci_wrapper.get_state() != uci_states.ERROR:
conn = self.get_connection( uci_wrapper )
- snapshots = uci_wrapper.get_snapshots( status = 'submitted' )
+ snapshots = uci_wrapper.get_snapshots( status = snapshot_status.SUBMITTED )
for snapshot in snapshots:
log.debug( "Snapshot DB id: '%s', volume id: '%s'" % ( snapshot.id, snapshot.store.volume_id ) )
try:
@@ -277,16 +277,18 @@
sh = conn.get_all_snapshots( snap_id ) # get updated status
uci_wrapper.set_snapshot_status( status=sh[0].status, snap_id=snap_id )
except boto.exception.EC2ResponseError, ex:
- log.error( "EC2 response error while creating snapshot: '%s'" % e )
- uci_wrapper.set_snapshot_error( error="EC2 response error while creating snapshot: " + str( e ), snap_index=snapshot.id, set_status=True )
- uci_wrapper.set_error( "Cloud provider response error while creating snapshot: " + str( e ), True )
+ err = "Cloud provider response error while creating snapshot: " + str( e )
+ log.error( err )
+ uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
+ uci_wrapper.set_error( error=err, True )
return
except Exception, ex:
- log.error( "Error while creating snapshot: '%s'" % ex )
- uci_wrapper.set_snapshot_error( error="Error while creating snapshot: "+str( ex ), snap_index=snapshot.id, set_status=True )
- uci_wrapper.set_error( "Error while creating snapshot: " + str( ex ), True )
+ err = "Error while creating snapshot: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
+ uci_wrapper.set_error( error=err, True )
return
-
+
uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
# if uci_wrapper.get_state() != uci_states.ERROR:
@@ -477,12 +479,12 @@
# store.flush()
# Update pending snapshots or delete ones marked for deletion
- snapshots = model.CloudSnapshot.filter_by( status='pending', status='delete' ).all()
+ snapshots = model.CloudSnapshot.filter_by( status=snapshot_status.PENDING, status=snapshot_status.DELETE ).all()
for snapshot in snapshots:
- if self.type == snapshot.uci.credentials.provider.type and snapshot.status == 'pending':
+ if self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.PENDING:
log.debug( "[%s] Running general status update on snapshot '%s'" % ( snapshot.uci.credentials.provider.type, snapshot.snapshot_id ) )
self.update_snapshot( snapshot )
- elif self.type == snapshot.uci.credentials.provider.type and snapshot.status == 'delete':
+ elif self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.DELETE:
log.debug( "[%s] Initiating deletion of snapshot '%s'" % ( snapshot.uci.credentials.provider.type, snapshot.snapshot_id ) )
self.delete_snapshot( snapshot )
@@ -606,7 +608,7 @@
uci.flush()
return None
- def update_snapshot( self, snapshot ):
+ def updateSnapshot( self, snapshot ):
# Get credentials associated with this store
uci_id = snapshot.uci_id
uci = model.UCI.get( uci_id )
@@ -617,35 +619,39 @@
log.debug( "Updating status of snapshot '%s'" % snapshot.snapshot_id )
snap = conn.get_all_snapshots( [snapshot.snapshot_id] )
if len( snap ) > 0:
+ log.debug( "Snapshot '%s' status: %s" % ( snapshot.snapshot_id, snap[0].status ) )
snapshot.status = snap[0].status
- log.debug( "Snapshot '%s' status: %s" % ( snapshot.snapshot_id, snapshot.status ) )
snapshot.flush()
else:
- log.error( "No snapshots returned by cloud provider for UCI '%s'" % uci.name )
- snapshot.status = 'No snapshots returned by cloud provider.'
- uci.error = "No snapshots returned by cloud provider."
+ err = "No snapshots returned by cloud provider"
+ log.error( "%s for UCI '%s'" % ( err, uci.name ) )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
uci.state = uci_states.ERROR
uci.flush()
snapshot.flush()
except boto.exception.EC2ResponseError, e:
- log.error( "Cloud provider response error while updating snapshot: '%s'" % e )
- snapshot.status = 'error'
- snapshot.error = "Cloud provider response error while updating snapshot status: " + str( e )
- uci.error = "Cloud provider response error while updating snapshot status: " + str( e )
+ err = "Cloud provider response error while updating snapshot status: " + str( e )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
uci.state = uci_states.ERROR
uci.flush()
snapshot.flush()
except Exception, ex:
- log.error( "Error while updating snapshot: '%s'" % ex )
- snapshot.status = 'error'
- snapshot.error = "Error while updating snapshot status: " + str( e )
- uci.error = "Error while updating snapshot status: " + str( ex )
+ err = "Error while updating snapshot status: " + str( ex )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
uci.state = uci_states.ERROR
uci.flush()
snapshot.flush()
def delete_snapshot( self, snapshot ):
- if snapshot.status == 'delete':
+ if snapshot.status == snapshot_status.DELETE:
# Get credentials associated with this store
uci_id = snapshot.uci_id
uci = model.UCI.get( uci_id )
@@ -657,29 +663,32 @@
snap = conn.delete_snapshot( snapshot.snapshot_id )
if snap == True:
snapshot.deleted = True
- snapshot.status = 'deleted'
+ snapshot.status = snapshot_status.DELETED
snapshot.flush()
return snap
except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error while deleting snapshot: '%s'" % e )
- snapshot.status = 'error'
- snapshot.error = "Cloud provider response error while deleting snapshot: " + str( e )
- uci.error = "Cloud provider response error while deleting snapshot: " + str( e )
+ err = "EC2 response error while deleting snapshot: " + str( e )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
uci.state = uci_states.ERROR
uci.flush()
snapshot.flush()
except Exception, ex:
- log.error( "Error while deleting snapshot: '%s'" % ex )
- snapshot.status = 'error'
- snapshot.error = "Cloud provider error while deleting snapshot: " + str( ex )
- uci.error = "Cloud provider error while deleting snapshot: " + str( ex )
+ err = "Error while deleting snapshot: " + str( ex )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
uci.state = uci_states.ERROR
uci.flush()
snapshot.flush()
else:
- log.error( "Cannot delete snapshot '%s' because its status is '%s'. Only snapshots with 'completed' status can be deleted." % ( snapshot.snapshot_id, snapshot.status ) )
- snapshot.error = "Cannot delete snapshot because its status is '"+snapshot.status+"'. Only snapshots with 'completed' status can be deleted."
- snapshot.status = 'error'
+ err = "Cannot delete snapshot '"+snapshot.snapshot_id+"' because its status is '"+snapshot.status+"'. Only snapshots with '" + \
+ snapshot_status.COMPLETED+"' status can be deleted."
+ log.error( err )
+ snapshot.error = err
snapshot.flush()
def processZombie( self, inst ):
diff -r c1dc30106721 -r b90db227df6d lib/galaxy/web/controllers/cloud.py
--- a/lib/galaxy/web/controllers/cloud.py Wed Nov 11 20:11:58 2009 -0500
+++ b/lib/galaxy/web/controllers/cloud.py Thu Nov 12 10:28:54 2009 -0500
@@ -64,6 +64,15 @@
ERROR = "error"
)
+snapshot_status = Bunch(
+ SUBMITTED = 'submitted',
+ PENDING = 'pending',
+ COMPLETED = 'completed',
+ DELETE = 'delete',
+ DELETED= 'deleted',
+ ERROR = "error"
+)
+
class CloudController( BaseController ):
@web.expose
@@ -243,7 +252,7 @@
snapshot.user = user
snapshot.uci = uci
snapshot.store = store
- snapshot.status = 'submitted'
+ snapshot.status = snapshot_status.SUBMITTED
uci.state = uci_states.SNAPSHOT_UCI
# Persist
session = trans.sa_session
@@ -288,8 +297,8 @@
# Set snapshot as 'ready for deletion' to be picked up by general updater
snap = trans.sa_session.query( model.CloudSnapshot ).get( snap_id )
- if snap.status == 'completed':
- snap.status = 'delete'
+ if snap.status == snapshot_status.COMPLETED:
+ snap.status = snapshot_status.DELETE
snap.flush()
trans.set_message( "Snapshot '%s' is marked for deletion. Once the deletion is complete, it will no longer be visible in this list. "
"Please note that this process may take up to a minute." % snap.snapshot_id )
23 Nov '09
details: http://www.bx.psu.edu/hg/galaxy/rev/83551d2b3144
changeset: 3093:83551d2b3144
user: Enis Afgan <afgane(a)gmail.com>
date: Thu Nov 12 15:26:10 2009 -0500
description:
Cleaned up AJAXed UCI state changes on the main UI. A UCI still does not get automatically removed from the displayed list after being deleted by the user.
diffstat:
templates/cloud/configure_cloud.mako | 28 ++++++++++++++++++----------
1 files changed, 18 insertions(+), 10 deletions(-)
diffs (61 lines):
diff -r 118dc385752b -r 83551d2b3144 templates/cloud/configure_cloud.mako
--- a/templates/cloud/configure_cloud.mako Thu Nov 12 14:24:43 2009 -0500
+++ b/templates/cloud/configure_cloud.mako Thu Nov 12 15:26:10 2009 -0500
@@ -36,24 +36,21 @@
old_state = $(elem + "-state").text();
prev_old_state = trim19( $(elem + "-state-p").text() );
new_state = data[i].state;
- console.log( "old_state[%d] = %s", i, old_state );
- console.log( "prev_old_state[%d] = %s", i, prev_old_state );
- console.log( "new_state[%d] = %s", i, new_state );
+ //console.log( "old_state[%d] = %s", i, old_state );
+ //console.log( "prev_old_state[%d] = %s", i, prev_old_state );
+ //console.log( "new_state[%d] = %s", i, new_state );
//console.log( trim19(prev_old_state) );
if ( ( old_state=='pending' && new_state=='running' ) || ( old_state=='shutting-down' && new_state=='available' ) || \
( old_state=='running' && new_state=='available' ) || ( old_state=='running' && new_state=='error' ) || \
- ( old_state=='pending' && new_state=='error' ) || ( old_state=='pending' && new_state=='available' ) || \
- ( old_state=='submitted' && new_state=='available' ) || ( prev_old_state.match('newUCI') && new_state=='available' ) || \
- ( prev_old_state.match('new') && new_state=='available' ) || ( prev_old_state.match('deletingUCI') && new_state=='deleted' ) || \
- ( prev_old_state.match('deleting') && new_state=='deleted' ) ) {
+ ( old_state=='pending' && new_state=='available' ) || ( old_state=='submitted' && new_state=='available' ) || \
+ ( prev_old_state.match('creating') && new_state=='available' ) ) {
var url = "${h.url_for( controller='cloud', action='list')}";
location.replace( url );
}
else if ( ( old_state=='running' && new_state=='error' ) || ( old_state=='pending' && new_state=='error' ) || \
( old_state=='submitted' && new_state=='error' ) || ( old_state=='submittedUCI' && new_state=='error' ) || \
( old_state=='shutting-down' && new_state=='error' ) || ( prev_old_state.match('newUCI') && new_state=='error' ) || \
- ( prev_old_state.match('new') && new_state=='error' ) || \
- ( prev_old_state.match('deletingUCI') && new_state=='error' ) ) {
+ ( prev_old_state.match('new') && new_state=='error' ) || ( prev_old_state.match('deletingUCI') && new_state=='error' ) ) {
// TODO: Following clause causes constant page refresh for an exception thrown as a result of instance not starting correctly - need alternative method!
//( prev_old_state.match('available') && new_state=='error' ) || ( prev_old_state.match('deleting') && new_state=='error' ) \
@@ -61,6 +58,10 @@
location.replace( url );
}
+ if ( prev_old_state.match('deletingUCI') || prev_old_state.match('deleting') ) {
+ setTimeout("update_state()", 3000);
+ }
+
if ( new_state=='shutting-down' || new_state=='shutting-downUCI' ) {
$(elem + "-link").text( "" );
}
@@ -89,7 +90,14 @@
// Update 'state' and 'time alive' fields
$(elem + "-state").text( data[i].state );
- //$(elem + "-state-p").text( data[i].state );
+ if ( ( prev_old_state.match('newUCI') && new_state=='new' ) || \
+ ( prev_old_state.match('newUCI') && new_state=='creating' ) || ( prev_old_state.match('new') && new_state=='creating' ) || \
+ ( prev_old_state.match('deletingUCI') && new_state=='deleted' ) || ( prev_old_state.match('deleting') && new_state=='deleted' ) || \
+ ( prev_old_state.match('available') && new_state=='error' ) || ( prev_old_state.match('deleting') && new_state=='error' ) ) {
+ // TODO: on state change from available->error and deleting->error page should be refreshed but that causes problems with
+ // constant refreshings depending on what error message is so at least do it here...
+ $(elem + "-state-p").text( data[i].state );
+ }
if (data[i].launch_time) {
$(elem + "-launch_time").text( data[i].launch_time.substring(0, 16 ) + " UTC (" + data[i].time_ago + ")" );
}
23 Nov '09
details: http://www.bx.psu.edu/hg/galaxy/rev/118dc385752b
changeset: 3092:118dc385752b
user: Enis Afgan <afgane(a)gmail.com>
date: Thu Nov 12 14:24:43 2009 -0500
description:
Cleaned up code in main cloud controller.
diffstat:
lib/galaxy/cloud/providers/ec2.py | 16 +-
lib/galaxy/cloud/providers/eucalyptus.py | 12 +-
lib/galaxy/web/controllers/cloud.py | 632 +++++++++++++++++-----------------
templates/admin/index.mako | 4 +-
templates/cloud/add_credentials.mako | 2 +-
templates/cloud/add_image.mako | 2 +-
templates/cloud/configure_cloud.mako | 30 +-
templates/cloud/configure_uci.mako | 2 +-
templates/cloud/edit_credentials.mako | 2 +-
templates/cloud/edit_image.mako | 2 +-
templates/cloud/list_images.mako | 6 +-
templates/cloud/view.mako | 157 --------
templates/cloud/viewInstance.mako | 140 -------
templates/cloud/view_credentials.mako | 157 ++++++++
templates/cloud/view_instance.mako | 140 +++++++
templates/cloud/view_usage.mako | 2 +-
16 files changed, 660 insertions(+), 646 deletions(-)
diffs (1644 lines):
diff -r e21f605d2766 -r 118dc385752b lib/galaxy/cloud/providers/ec2.py
--- a/lib/galaxy/cloud/providers/ec2.py Thu Nov 12 12:42:17 2009 -0500
+++ b/lib/galaxy/cloud/providers/ec2.py Thu Nov 12 14:24:43 2009 -0500
@@ -48,7 +48,7 @@
ERROR = "error"
)
-store_states = Bunch(
+store_status = Bunch(
IN_USE = "in-use",
CREATING = "creating",
DELETED = 'deleted',
@@ -105,14 +105,14 @@
elif uci_state==uci_states.SNAPSHOT:
self.snapshotUCI( uci_wrapper )
except:
- log.exception( "Uncaught exception executing request." )
+ log.exception( "Uncaught exception executing cloud request." )
cnt += 1
def get_connection( self, uci_wrapper ):
"""
Establishes EC2 cloud connection using user's credentials associated with given UCI
"""
- log.debug( 'Establishing %s cloud connection' % self.type )
+ log.debug( 'Establishing %s cloud connection.' % self.type )
provider = uci_wrapper.get_provider()
try:
region = RegionInfo( None, provider.region_name, provider.region_endpoint )
@@ -127,8 +127,8 @@
is_secure=provider.is_secure,
region=region,
path=provider.path )
- except Exception, ex:
- err = "Establishing connection with cloud failed: " + str( ex )
+ except boto.exception.EC2ResponseError, e:
+ err = "Establishing connection with cloud failed: " + str( e )
log.error( err )
uci_wrapper.set_error( err, True )
return None
@@ -545,8 +545,8 @@
self.updateInstance( inst )
# Update storage volume(s)
- stores = model.CloudStore.filter( or_( model.CloudStore.c.status==store_states.IN_USE,
- model.CloudStore.c.status==store_states.CREATING,
+ stores = model.CloudStore.filter( or_( model.CloudStore.c.status==store_status.IN_USE,
+ model.CloudStore.c.status==store_status.CREATING,
model.CloudStore.c.status==None ) ).all()
for store in stores:
if self.type == store.uci.credentials.provider.type: # and store.volume_id != None:
@@ -558,7 +558,7 @@
# store.uci.error = "There exists an entry in local database for a storage volume without an ID. Storage volume might have been created " \
# "with cloud provider though. Manual check is recommended. After understanding what happened, local database entry for given " \
# "storage volume should be updated."
-# store.status = store_states.ERROR
+# store.status = store_status.ERROR
# store.uci.state = uci_states.ERROR
# store.uci.flush()
# store.flush()
diff -r e21f605d2766 -r 118dc385752b lib/galaxy/cloud/providers/eucalyptus.py
--- a/lib/galaxy/cloud/providers/eucalyptus.py Thu Nov 12 12:42:17 2009 -0500
+++ b/lib/galaxy/cloud/providers/eucalyptus.py Thu Nov 12 14:24:43 2009 -0500
@@ -114,9 +114,9 @@
log.debug( 'Establishing %s cloud connection.' % self.type )
provider = uci_wrapper.get_provider()
try:
- euca_region = RegionInfo( None, provider.region_name, provider.region_endpoint )
- except Exception, e:
- err = "Selecting region with cloud provider failed: " + str( e )
+ region = RegionInfo( None, provider.region_name, provider.region_endpoint )
+ except Exception, ex:
+ err = "Selecting region with cloud provider failed: " + str( ex )
log.error( err )
uci_wrapper.set_error( err, True )
return None
@@ -125,7 +125,7 @@
aws_secret_access_key=uci_wrapper.get_secret_key(),
is_secure=provider.is_secure,
port=provider.port,
- region=euca_region,
+ region=region,
path=provider.path )
except boto.exception.EC2ResponseError, e:
err = "Establishing connection with cloud failed: " + str( e )
@@ -853,12 +853,12 @@
s_key = uci.credentials.secret_key
# Get connection
try:
- euca_region = RegionInfo( None, uci.credentials.provider.region_name, uci.credentials.provider.region_endpoint )
+ region = RegionInfo( None, uci.credentials.provider.region_name, uci.credentials.provider.region_endpoint )
conn = EC2Connection( aws_access_key_id=a_key,
aws_secret_access_key=s_key,
is_secure=uci.credentials.provider.is_secure,
port=uci.credentials.provider.port,
- region=euca_region,
+ region=region,
path=uci.credentials.provider.path )
except boto.exception.EC2ResponseError, e:
err = "Establishing connection with cloud failed: " + str( e )
diff -r e21f605d2766 -r 118dc385752b lib/galaxy/web/controllers/cloud.py
--- a/lib/galaxy/web/controllers/cloud.py Thu Nov 12 12:42:17 2009 -0500
+++ b/lib/galaxy/web/controllers/cloud.py Thu Nov 12 14:24:43 2009 -0500
@@ -59,7 +59,7 @@
ERROR = "error"
)
-store_states = Bunch(
+store_status = Bunch(
IN_USE = "in-use",
CREATING = "creating",
DELETED = 'deleted',
@@ -135,9 +135,9 @@
model.UCI.c.state==uci_states.SUBMITTED_UCI ) ) \
.all()
if pendingInstances:
- trans.set_message( "Galaxy instance started. Note that it will take several minutes for the instance to start "
- "(typically, 3-5 minutes). Once the instance is running and Galaxy is available, "
- "a button to connect to the instance will then appear alongside instance description." )
+ trans.set_message( "Galaxy instance started. NOTE: Please wait about 5 minutes for the instance to "
+ "start up. A button to connect to the instance will appear alongside "
+ "instance description once cloud instance of Galaxy is ready." )
# log.debug( "provider.is_secure: '%s'" % trans.sa_session.query( model.CloudProvider).filter_by(id=1).first().is_secure )
# trans.sa_session.query( model.CloudProvider).filter_by(id=1).first().is_secure=False
@@ -152,200 +152,11 @@
prevInstances = prevInstances,
cloudProviders = cloudProviders )
- @web.expose
- @web.require_login( "start Galaxy cloud instance" )
- def start( self, trans, id, type='m1.small' ):
- """
- Start a new cloud resource instance
- """
- user = trans.get_user()
- uci = get_uci( trans, id )
- stores = get_stores( trans, uci )
- # Ensure instance is available and then store relevant data
- # into DB to initiate instance startup by cloud manager
- if ( len(stores) is not 0 ) and ( uci.state == uci_states.AVAILABLE ):
- instance = model.CloudInstance()
- instance.user = user
- instance.uci = uci
- instance.state = instance_states.SUBMITTED
- instance.availability_zone = stores[0].availability_zone # Bc. all EBS volumes need to be in the same avail. zone, just check 1st
- instance.type = type
- uci.state = uci_states.SUBMITTED_UCI
- # Persist
- session = trans.sa_session
- session.save_or_update( instance )
- session.save_or_update( uci )
- session.flush()
- # Log
- trans.log_event ("User initiated starting of UCI '%s'." % uci.name )
- trans.set_message( "Galaxy instance started. NOTE: Please wait about 5 minutes for the instance to "
- "start up. A button to connect to the instance will appear alongside "
- "instance description once cloud instance of Galaxy is ready." )
- return self.list( trans )
-
- if len(stores) == 0:
- error( "This instance does not have any storage volumes associated it and thus cannot be started." )
- else:
- error( "Cannot start instance that is in state '%s'." % uci.state )
- return self.list( trans )
-
- @web.expose
- @web.require_login( "stop Galaxy cloud instance" )
- def stop( self, trans, id ):
- """
- Stop a cloud UCI instance.
- """
- uci = get_uci( trans, id )
- if ( uci.state != uci_states.DELETING ) and \
- ( uci.state != uci_states.DELETING_UCI ) and \
- ( uci.state != uci_states.ERROR ) and \
- ( uci.state != uci_states.SHUTTING_DOWN_UCI ) and \
- ( uci.state != uci_states.SHUTTING_DOWN ) and \
- ( uci.state != uci_states.AVAILABLE ):
- uci.state = uci_states.SHUTTING_DOWN_UCI
- session = trans.sa_session
- session.save_or_update( uci )
- session.flush()
- trans.log_event( "User stopped cloud instance '%s' (id: %s)" % ( uci.name, uci.id ) )
- trans.set_message( "Stopping of Galaxy instance '%s' initiated." % uci.name )
-
- return self.list( trans )
-
- trans.show_error_message( "Cannot stop instance that is in state '%s'." % uci.state )
- return self.list( trans )
-
- @web.expose
- @web.require_login( "delete user configured Galaxy cloud instance" )
- def deleteInstance( self, trans, id ):
- """
- Deletes User Configured Instance (UCI) from the cloud and local database. NOTE that this implies deletion of
- any and all storage associated with this UCI!
- """
- uci = get_uci( trans, id )
-
- if ( uci.state != uci_states.DELETING_UCI ) and ( uci.state != uci_states.DELETING ) and ( uci.state != uci_states.ERROR ):
- name = uci.name
- uci.state = uci_states.DELETING_UCI
- session = trans.sa_session
- session.save_or_update( uci )
- session.flush()
- trans.log_event( "User marked cloud instance '%s' for deletion." % name )
- trans.set_message( "Galaxy instance '%s' marked for deletion." % name )
- return self.list( trans )
-
- if uci.state != uci_states.ERROR:
- trans.set_message( "Cannot delete instance in state ERROR." )
- else:
- trans.set_message( "Instance '%s' is already marked for deletion." % uci.name )
- return self.list( trans )
+ # ----- UCI methods -----
@web.expose
@web.require_login( "use Galaxy cloud" )
- def create_snapshot( self, trans, id ):
- user = trans.get_user()
- id = trans.security.decode_id( id )
- uci = get_uci( trans, id )
-
- stores = trans.sa_session.query( model.CloudStore ) \
- .filter_by( user=user, deleted=False, uci_id=id ) \
- .all()
-
- if ( len( stores ) > 0 ) and ( uci.state == uci_states.AVAILABLE ):
- for store in stores:
- snapshot = model.CloudSnapshot()
- snapshot.user = user
- snapshot.uci = uci
- snapshot.store = store
- snapshot.status = snapshot_status.SUBMITTED
- uci.state = uci_states.SNAPSHOT_UCI
- # Persist
- session = trans.sa_session
- session.save_or_update( snapshot )
- session.save_or_update( uci )
- session.flush()
- elif len( stores ) == 0:
- error( "No storage volumes found that are associated with this instance." )
- else:
- error( "Snapshot can be created only for an instance that is in 'available' state." )
-
- # Log and display the management page
- trans.log_event( "User initiated creation of new snapshot." )
- trans.set_message( "Creation of new snapshot initiated. " )
- return self.list( trans )
-
- @web.expose
- @web.require_login( "use Galaxy cloud" )
- def view_snapshots( self, trans, id=None ):
- """
- View details about any snapshots associated with given UCI
- """
- user = trans.get_user()
- id = trans.security.decode_id( id )
-
- snaps = trans.sa_session.query( model.CloudSnapshot ) \
- .filter_by( user=user, uci_id=id, deleted=False ) \
- .order_by( desc( model.CloudSnapshot.c.update_time ) ) \
- .all()
-
- return trans.fill_template( "cloud/view_snapshots.mako",
- snaps = snaps )
-
- @web.expose
- @web.require_login( "use Galaxy cloud" )
- def delete_snapshot( self, trans, uci_id=None, snap_id=None ):
- """
- Initiates deletion of a snapshot
- """
- user = trans.get_user()
- snap_id = trans.security.decode_id( snap_id )
- # Set snapshot as 'ready for deletion' to be picked up by general updater
- snap = trans.sa_session.query( model.CloudSnapshot ).get( snap_id )
-
- if snap.status == snapshot_status.COMPLETED:
- snap.status = snapshot_status.DELETE
- snap.flush()
- trans.set_message( "Snapshot '%s' is marked for deletion. Once the deletion is complete, it will no longer be visible in this list. "
- "Please note that this process may take up to a minute." % snap.snapshot_id )
- else:
- error( "Only snapshots in state 'completed' can be deleted. See the cloud provider directly "
- "if you believe the snapshot is available and can be deleted." )
-
- # Display new list of snapshots
- uci_id = trans.security.decode_id( uci_id )
- snaps = trans.sa_session.query( model.CloudSnapshot ) \
- .filter_by( user=user, uci_id=uci_id, deleted=False ) \
- .order_by( desc( model.CloudSnapshot.c.update_time ) ) \
- .all()
-
- return trans.fill_template( "cloud/view_snapshots.mako",
- snaps = snaps )
-
- @web.expose
- @web.require_login( "add instance storage" )
- def addStorage( self, trans, id ):
- instance = get_uci( trans, id )
-
-
- error( "Adding storage to instance '%s' is not supported yet." % instance.name )
-
- return self.list( trans )
-
- @web.expose
- @web.require_login( "use Galaxy cloud" )
- def usageReport( self, trans, id ):
- user = trans.get_user()
- id = trans.security.decode_id( id )
-
- prevInstances = trans.sa_session.query( model.CloudInstance ) \
- .filter_by( user=user, state=instance_states.TERMINATED, uci_id=id ) \
- .order_by( desc( model.CloudInstance.c.update_time ) ) \
- .all()
-
- return trans.fill_template( "cloud/view_usage.mako", prevInstances = prevInstances )
-
- @web.expose
- @web.require_login( "use Galaxy cloud" )
- def configureNew( self, trans, instanceName='', credName='', volSize='', zone='' ):
+ def configure_new_uci( self, trans, instanceName='', credName='', volSize='', zone='' ):
"""
Configure and add new cloud instance to user's instance pool
"""
@@ -447,8 +258,255 @@
providersToZones = providersToZones )
@web.expose
+ @web.require_login( "start Galaxy cloud instance" )
+ def start( self, trans, id, type='m1.small' ):
+ """
+ Start a new cloud resource instance
+ """
+ user = trans.get_user()
+ uci = get_uci( trans, id )
+ stores = get_stores( trans, uci )
+ # Ensure instance is available and then store relevant data
+ # into DB to initiate instance startup by cloud manager
+ if ( len(stores) is not 0 ) and ( uci.state == uci_states.AVAILABLE ):
+ instance = model.CloudInstance()
+ instance.user = user
+ instance.uci = uci
+ instance.state = instance_states.SUBMITTED
+ instance.availability_zone = stores[0].availability_zone # Because all EBS volumes must be in the same availability zone, checking the first store is sufficient
+ instance.type = type
+ uci.state = uci_states.SUBMITTED_UCI
+ # Persist
+ session = trans.sa_session
+ session.save_or_update( instance )
+ session.save_or_update( uci )
+ session.flush()
+ # Log
+ trans.log_event ("User initiated starting of UCI '%s'." % uci.name )
+ trans.set_message( "Galaxy instance started. NOTE: Please wait about 5 minutes for the instance to "
+ "start up. A button to connect to the instance will appear alongside "
+ "instance description once cloud instance of Galaxy is ready." )
+ return self.list( trans )
+
+ if len(stores) == 0:
+ error( "This instance does not have any storage volumes associated it and thus cannot be started." )
+ else:
+ error( "Cannot start instance that is in state '%s'." % uci.state )
+ return self.list( trans )
+
+ @web.expose
+ @web.require_login( "stop Galaxy cloud instance" )
+ def stop( self, trans, id ):
+ """
+ Stop a cloud UCI instance.
+ """
+ uci = get_uci( trans, id )
+ if ( uci.state != uci_states.DELETING ) and \
+ ( uci.state != uci_states.DELETING_UCI ) and \
+ ( uci.state != uci_states.ERROR ) and \
+ ( uci.state != uci_states.SHUTTING_DOWN_UCI ) and \
+ ( uci.state != uci_states.SHUTTING_DOWN ) and \
+ ( uci.state != uci_states.AVAILABLE ):
+ uci.state = uci_states.SHUTTING_DOWN_UCI
+ session = trans.sa_session
+ session.save_or_update( uci )
+ session.flush()
+ trans.log_event( "User stopped cloud instance '%s' (id: %s)" % ( uci.name, uci.id ) )
+ trans.set_message( "Stopping of Galaxy instance '%s' initiated." % uci.name )
+
+ return self.list( trans )
+
+ trans.show_error_message( "Cannot stop instance that is in state '%s'." % uci.state )
+ return self.list( trans )
+
+ @web.expose
+ @web.require_login( "use Galaxy cloud" )
+ def set_uci_state( self, trans, id, state='available', clear_error=True ):
+ """
+ Sets state of UCI to given state, optionally resets error field, and resets UCI's launch time field to 'None'.
+ """
+ uci = get_uci( trans, id )
+ uci.state = state
+ if clear_error:
+ uci.error = None
+ uci.launch_time = None
+ trans.sa_session.flush()
+ trans.set_message( "Instance '%s' state reset." % uci.name )
+ return self.list( trans )
+
+ @web.expose
+ @web.require_login( "view instance details" )
+ def view_uci_details( self, trans, id=None ):
+ """
+ View details about running instance
+ """
+ uci = get_uci( trans, id )
+ instances = get_instances( trans, uci ) # TODO: Handle list (will probably need to be done in mako template)
+
+ return trans.fill_template( "cloud/view_instance.mako",
+ liveInstance = instances )
+
+ @web.expose
+ @web.require_login( "use Galaxy cloud" )
+ def rename_uci( self, trans, id, new_name=None ):
+ instance = get_uci( trans, id )
+ if new_name is not None:
+ if len(new_name) > 255:
+ error( "Instance name must be less than 255 characters long." )
+ user = trans.get_user()
+ name_exists = trans.sa_session.query( model.UCI ) \
+ .filter_by( user=user, name=new_name ) \
+ .first()
+ if name_exists:
+ error( "Specified name ('%s') is already used by an existing instance. Please choose an alternative name." % new_name )
+
+ # Update name in local DB
+ instance.name = new_name
+ trans.sa_session.flush()
+ trans.set_message( "Instance renamed to '%s'." % new_name )
+ return self.list( trans )
+ else:
+ return trans.show_form(
+ web.FormBuilder( url_for( id=trans.security.encode_id(instance.id) ), "Rename instance", submit_text="Rename" )
+ .add_text( "new_name", "Instance name", value=instance.name ) )
+
+ @web.expose
+ @web.require_login( "use Galaxy cloud" )
+ def uci_usage_report( self, trans, id ):
+ user = trans.get_user()
+ id = trans.security.decode_id( id )
+
+ prevInstances = trans.sa_session.query( model.CloudInstance ) \
+ .filter_by( user=user, state=instance_states.TERMINATED, uci_id=id ) \
+ .order_by( desc( model.CloudInstance.c.update_time ) ) \
+ .all()
+
+ return trans.fill_template( "cloud/view_usage.mako", prevInstances = prevInstances )
+
+ @web.expose
+ @web.require_login( "delete user configured Galaxy cloud instance" )
+ def delete_uci( self, trans, id ):
+ """
+ Deletes User Configured Instance (UCI) from the cloud and local database. NOTE that this implies deletion of
+ any and all storage associated with this UCI!
+ """
+ uci = get_uci( trans, id )
+
+ if ( uci.state != uci_states.DELETING_UCI ) and ( uci.state != uci_states.DELETING ) and ( uci.state != uci_states.ERROR ):
+ name = uci.name
+ uci.state = uci_states.DELETING_UCI
+ session = trans.sa_session
+ session.save_or_update( uci )
+ session.flush()
+ trans.log_event( "User marked cloud instance '%s' for deletion." % name )
+ trans.set_message( "Galaxy instance '%s' marked for deletion." % name )
+ return self.list( trans )
+
+ if uci.state != uci_states.ERROR:
+ trans.set_message( "Cannot delete instance in state ERROR." )
+ else:
+ trans.set_message( "Instance '%s' is already marked for deletion." % uci.name )
+ return self.list( trans )
+
+ # ----- Snapshot methods -----
+
+ @web.expose
+ @web.require_login( "use Galaxy cloud" )
+ def create_snapshot( self, trans, id ):
+ user = trans.get_user()
+ id = trans.security.decode_id( id )
+ uci = get_uci( trans, id )
+
+ stores = trans.sa_session.query( model.CloudStore ) \
+ .filter_by( user=user, deleted=False, uci_id=id ) \
+ .all()
+
+ if ( len( stores ) > 0 ) and ( uci.state == uci_states.AVAILABLE ):
+ for store in stores:
+ snapshot = model.CloudSnapshot()
+ snapshot.user = user
+ snapshot.uci = uci
+ snapshot.store = store
+ snapshot.status = snapshot_status.SUBMITTED
+ uci.state = uci_states.SNAPSHOT_UCI
+ # Persist
+ session = trans.sa_session
+ session.save_or_update( snapshot )
+ session.save_or_update( uci )
+ session.flush()
+ elif len( stores ) == 0:
+ error( "No storage volumes found that are associated with this instance." )
+ else:
+ error( "Snapshot can be created only for an instance that is in 'available' state." )
+
+ # Log and display the management page
+ trans.log_event( "User initiated creation of new snapshot." )
+ trans.set_message( "Creation of new snapshot initiated. " )
+ return self.list( trans )
+
+ @web.expose
+ @web.require_login( "use Galaxy cloud" )
+ def view_snapshots( self, trans, id=None ):
+ """
+ View details about any snapshots associated with given UCI
+ """
+ user = trans.get_user()
+ id = trans.security.decode_id( id )
+
+ snaps = trans.sa_session.query( model.CloudSnapshot ) \
+ .filter_by( user=user, uci_id=id, deleted=False ) \
+ .order_by( desc( model.CloudSnapshot.c.update_time ) ) \
+ .all()
+
+ return trans.fill_template( "cloud/view_snapshots.mako",
+ snaps = snaps )
+
+ @web.expose
+ @web.require_login( "use Galaxy cloud" )
+ def delete_snapshot( self, trans, uci_id=None, snap_id=None ):
+ """
+ Initiates deletion of a snapshot
+ """
+ user = trans.get_user()
+ snap_id = trans.security.decode_id( snap_id )
+ # Set snapshot as 'ready for deletion' to be picked up by general updater
+ snap = trans.sa_session.query( model.CloudSnapshot ).get( snap_id )
+
+ if snap.status == snapshot_status.COMPLETED:
+ snap.status = snapshot_status.DELETE
+ snap.flush()
+ trans.set_message( "Snapshot '%s' is marked for deletion. Once the deletion is complete, it will no longer be visible in this list. "
+ "Please note that this process may take up to a minute." % snap.snapshot_id )
+ else:
+ error( "Only snapshots in state 'completed' can be deleted. See the cloud provider directly "
+ "if you believe the snapshot is available and can be deleted." )
+
+ # Display new list of snapshots
+ uci_id = trans.security.decode_id( uci_id )
+ snaps = trans.sa_session.query( model.CloudSnapshot ) \
+ .filter_by( user=user, uci_id=uci_id, deleted=False ) \
+ .order_by( desc( model.CloudSnapshot.c.update_time ) ) \
+ .all()
+
+ return trans.fill_template( "cloud/view_snapshots.mako",
+ snaps = snaps )
+
+ # ----- Storage methods -----
+
+ @web.expose
+ @web.require_login( "add instance storage" )
+ def add_storage( self, trans, id ):
+ instance = get_uci( trans, id )
+
+
+ error( "Adding storage to instance '%s' is not supported yet." % instance.name )
+
+ return self.list( trans )
+
+ # ----- Image methods -----
+ @web.expose
@web.require_admin
- def addNewImage( self, trans, provider_type='', image_id='', manifest='', architecture='', state=None ):
+ def add_new_image( self, trans, provider_type='', image_id='', manifest='', architecture='', state=None ):
#id_error = arch_error = provider_error = manifest_error = None
error = {}
if provider_type or image_id or manifest or architecture:
@@ -498,24 +556,24 @@
@web.expose
@web.require_login( "use Galaxy cloud" )
- def listMachineImages( self, trans ):
+ def list_machine_images( self, trans ):
images = trans.sa_session.query( model.CloudImage ).filter( trans.app.model.CloudImage.table.c.deleted != True ).all()
return trans.fill_template( '/cloud/list_images.mako', images=images )
@web.expose
@web.require_admin
- def deleteImage( self, trans, id=None ):
+ def delete_image( self, trans, id=None ):
if not isinstance( id, int ):
id = trans.security.decode_id( id )
image = trans.sa_session.query( model.CloudImage ).get( id )
image.deleted = True
image.flush()
- return self.listMachineImages( trans )
+ return self.list_machine_images( trans )
@web.expose
@web.require_admin
- def editImage( self, trans, provider_type='', image_id='', manifest='', architecture='', id='', edited=False ):
+ def edit_image( self, trans, provider_type='', image_id='', manifest='', architecture='', id='', edited=False ):
error = {}
if not isinstance( id, int ):
id = trans.security.decode_id( id )
@@ -552,93 +610,13 @@
session.flush()
# Log and display the management page
trans.set_message( "Machine image '%s' edited." % image.image_id )
- return self.listMachineImages( trans )
-
- @web.expose
- @web.require_login( "use Galaxy cloud" )
- def edit( self, trans, id, credName=None, accessKey=None, secretKey=None, edited=False ):
- error = {}
- if not edited:
- credentials = get_stored_credentials( trans, id )
- return trans.fill_template( "cloud/edit_credentials.mako",
- credential = credentials,
- error = error
- )
- else:
- user = trans.get_user()
- credentials = get_stored_credentials( trans, id )
- if credName=='' or len( credName ) > 255:
- error['cred_error'] = "Credentials name must be between 1 and 255 characters in length."
- elif trans.app.model.CloudUserCredentials \
- .filter_by( user=user ) \
- .filter( and_( trans.app.model.CloudUserCredentials.table.c.id != credentials.id, trans.app.model.CloudUserCredentials.table.c.name==credName ) ) \
- .first():
- error['cred_error'] = "Credentials with name '" + credName + "' already exist. Please choose an alternative name."
- elif accessKey=='' or len( accessKey ) > 255:
- error['access_key_error'] = "Access key must be between 1 and 255 characters long."
- elif secretKey=='' or len( secretKey ) > 255:
- error['secret_key_error'] = "Secret key must be between 1 and 255 characters long."
+ return self.list_machine_images( trans )
- if error:
- return trans.fill_template( "cloud/edit_credentials.mako",
- credential = credentials,
- error = error
- )
- else:
- # Edit user stored credentials
- credentials.name = credName
- credentials.access_key = accessKey
- credentials.secret_key = secretKey
- # Persist
- session = trans.sa_session
- session.save_or_update( credentials )
- session.flush()
- # Log and display the management page
- trans.set_message( "Credential '%s' edited." % credentials.name )
- return self.list( trans )
-
- @web.expose
- @web.require_login( "use Galaxy cloud" )
- def renameInstance( self, trans, id, new_name=None ):
- instance = get_uci( trans, id )
- if new_name is not None:
- if len(new_name) > 255:
- error( "Instance name must be less than 255 characters long." )
- user = trans.get_user()
- name_exists = trans.sa_session.query( model.UCI ) \
- .filter_by( user=user, name=new_name ) \
- .first()
- if name_exists:
- error( "Specified name ('%s') is already used by an existing instance. Please choose an alternative name." % new_name )
-
- # Update name in local DB
- instance.name = new_name
- trans.sa_session.flush()
- trans.set_message( "Instance renamed to '%s'." % new_name )
- return self.list( trans )
- else:
- return trans.show_form(
- web.FormBuilder( url_for( id=trans.security.encode_id(instance.id) ), "Rename instance", submit_text="Rename" )
- .add_text( "new_name", "Instance name", value=instance.name ) )
-
- @web.expose
- @web.require_login( "use Galaxy cloud" )
- def set_uci_state( self, trans, id, state='available', clear_error=True ):
- """
- Sets state of UCI to given state, optionally resets error field, and resets UCI's launch time field to 'None'.
- """
- uci = get_uci( trans, id )
- uci.state = state
- if clear_error:
- uci.error = None
- uci.launch_time = None
- trans.sa_session.flush()
- trans.set_message( "Instance '%s' state reset." % uci.name )
- return self.list( trans )
-
+ # ----- Credentials methods -----
+
@web.expose
@web.require_login( "add credentials" )
- def add( self, trans, credName='', accessKey='', secretKey='', providerName='' ):
+ def add_credentials( self, trans, credName='', accessKey='', secretKey='', providerName='' ):
"""
Add user's cloud credentials stored under name `credName`.
"""
@@ -688,15 +666,58 @@
)
@web.expose
+ @web.require_login( "use Galaxy cloud" )
+ def edit_credentials( self, trans, id, credName=None, accessKey=None, secretKey=None, edited=False ):
+ error = {}
+ if not edited:
+ credentials = get_stored_credentials( trans, id )
+ return trans.fill_template( "cloud/edit_credentials.mako",
+ credential = credentials,
+ error = error
+ )
+ else:
+ user = trans.get_user()
+ credentials = get_stored_credentials( trans, id )
+ if credName=='' or len( credName ) > 255:
+ error['cred_error'] = "Credentials name must be between 1 and 255 characters in length."
+ elif trans.app.model.CloudUserCredentials \
+ .filter_by( user=user ) \
+ .filter( and_( trans.app.model.CloudUserCredentials.table.c.id != credentials.id, trans.app.model.CloudUserCredentials.table.c.name==credName ) ) \
+ .first():
+ error['cred_error'] = "Credentials with name '" + credName + "' already exist. Please choose an alternative name."
+ elif accessKey=='' or len( accessKey ) > 255:
+ error['access_key_error'] = "Access key must be between 1 and 255 characters long."
+ elif secretKey=='' or len( secretKey ) > 255:
+ error['secret_key_error'] = "Secret key must be between 1 and 255 characters long."
+
+ if error:
+ return trans.fill_template( "cloud/edit_credentials.mako",
+ credential = credentials,
+ error = error
+ )
+ else:
+ # Edit user stored credentials
+ credentials.name = credName
+ credentials.access_key = accessKey
+ credentials.secret_key = secretKey
+ # Persist
+ session = trans.sa_session
+ session.save_or_update( credentials )
+ session.flush()
+ # Log and display the management page
+ trans.set_message( "Credential '%s' edited." % credentials.name )
+ return self.list( trans )
+
+ @web.expose
@web.require_login( "view credentials" )
- def view( self, trans, id=None ):
+ def view_credentials( self, trans, id=None ):
"""
View details for user credentials
"""
# Load credentials from database
stored = get_stored_credentials( trans, id )
- return trans.fill_template( "cloud/view.mako",
+ return trans.fill_template( "cloud/view_credentials.mako",
credDetails = stored )
@web.expose
@@ -707,42 +728,33 @@
"""
@web.expose
- @web.require_login( "view instance details" )
- def viewInstance( self, trans, id=None ):
- """
- View details about running instance
- """
- uci = get_uci( trans, id )
- instances = get_instances( trans, uci ) # TODO: Handle list (will probably need to be done in mako template)
-
- return trans.fill_template( "cloud/viewInstance.mako",
- liveInstance = instances )
-
- @web.expose
@web.require_login( "delete credentials" )
- def delete( self, trans, id=None ):
+ def delete_credentials( self, trans, id=None ):
"""
Delete user's cloud credentials checking that no registered instances are tied to given credentials.
"""
# Load credentials from database
user = trans.get_user()
stored = get_stored_credentials( trans, id )
- UCIs = trans.sa_session.query( model.UCI ) \
- .filter_by( user=user, credentials_id=stored.id ) \
- .filter( model.UCI.c.deleted != True ) \
- .all()
+ # Check if there are any UCIs that depend on these credentials
+ UCI = None
+ UCI = trans.sa_session.query( model.UCI ) \
+ .filter_by( user=user, credentials_id=stored.id, deleted=False ) \
+ .first()
- if len(UCIs) == 0:
+ if UCI == None:
# Delete and save
stored.deleted = True
stored.flush()
# Display the management page
trans.set_message( "Credentials '%s' deleted." % stored.name )
return self.list( trans )
-
- error( "Existing instance(s) depend on credentials '%s'. You must delete those instances before being able \
- to delete these credentials." % stored.name )
- return self.list( trans )
+ else:
+ error( "Existing instance(s) depend on credentials '%s'. You must delete those instances before being able \
+ to delete these credentials." % stored.name )
+ return self.list( trans )
+
+ # ----- Provider methods -----
@web.expose
@web.require_login( "add provider" )
@@ -1007,6 +1019,8 @@
to delete this cloud provider." % provider.name )
return self.list( trans )
+ # ----- AJAX methods -----
+
@web.json
def json_update( self, trans ):
user = trans.get_user()
@@ -1132,7 +1146,7 @@
user = trans.get_user()
stores = trans.sa_session.query( model.CloudStore ) \
.filter_by( user=user, uci_id=uci.id ) \
- .filter( model.CloudStore.c.status != store_states.ERROR ) \
+ .filter( model.CloudStore.c.status != store_status.ERROR ) \
.all()
return stores
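
The store_states -> store_status rename in the hunk at @@ -59,7 +59,7 @@ above has to be carried through to every consumer, such as the store query in the hunk just above. For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of the Bunch-of-constants pattern these names rely on; the stand-in Bunch class and live_stores() helper are purely illustrative and are not Galaxy's galaxy.util.bunch module.

# Illustrative sketch only: mimics the constants-as-Bunch idiom used by the
# cloud controller; Galaxy's real Bunch lives in galaxy.util.bunch.
class Bunch( object ):
    def __init__( self, **kwargs ):
        self.__dict__.update( kwargs )

store_status = Bunch(
    IN_USE = "in-use",
    CREATING = "creating",
    DELETED = "deleted",
    ERROR = "error"
)

def live_stores( stores ):
    # Mirrors the query above: keep every store that is not in the error state.
    return [ s for s in stores if s[ "status" ] != store_status.ERROR ]

if __name__ == "__main__":
    print live_stores( [ { "id": 1, "status": store_status.IN_USE },
                         { "id": 2, "status": store_status.ERROR } ] )
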
diff -r e21f605d2766 -r 118dc385752b templates/admin/index.mako
--- a/templates/admin/index.mako Thu Nov 12 12:42:17 2009 -0500
+++ b/templates/admin/index.mako Thu Nov 12 14:24:43 2009 -0500
@@ -127,8 +127,8 @@
</div>
<div class="toolSectionBody">
<div class="toolSectionBg">
- <div class="toolTitle"><a href="${h.url_for( controller='cloud', action='listMachineImages' )}" target="galaxy_main">List machine images</a></div>
- <div class="toolTitle"><a href="${h.url_for( controller='cloud', action='addNewImage' )}" target="galaxy_main">Add machine image</a></div>
+ <div class="toolTitle"><a href="${h.url_for( controller='cloud', action='list_machine_images' )}" target="galaxy_main">List machine images</a></div>
+ <div class="toolTitle"><a href="${h.url_for( controller='cloud', action='add_new_image' )}" target="galaxy_main">Add machine image</a></div>
</div>
</div>
</div>
diff -r e21f605d2766 -r 118dc385752b templates/cloud/add_credentials.mako
--- a/templates/cloud/add_credentials.mako Thu Nov 12 12:42:17 2009 -0500
+++ b/templates/cloud/add_credentials.mako Thu Nov 12 14:24:43 2009 -0500
@@ -20,7 +20,7 @@
<div class="form">
<div class="form-title">Add credentials</div>
<div class="form-body">
- <form name="Add credentials" action="${h.url_for( action='add' )}" method="post" >
+ <form name="add_credentials" action="${h.url_for( action='add_credentials' )}" method="post" >
<%
cls = "form-row"
diff -r e21f605d2766 -r 118dc385752b templates/cloud/add_image.mako
--- a/templates/cloud/add_image.mako Thu Nov 12 12:42:17 2009 -0500
+++ b/templates/cloud/add_image.mako Thu Nov 12 14:24:43 2009 -0500
@@ -18,7 +18,7 @@
<div class="form">
<div class="form-title">Add machine image</div>
<div class="form-body">
- <form name="add_image" action="${h.url_for( action='addNewImage' )}" method="post" >
+ <form name="add_image" action="${h.url_for( action='add_new_image' )}" method="post" >
<%
cls = "form-row"
if error.has_key('provider_error'):
diff -r e21f605d2766 -r 118dc385752b templates/cloud/configure_cloud.mako
--- a/templates/cloud/configure_cloud.mako Thu Nov 12 12:42:17 2009 -0500
+++ b/templates/cloud/configure_cloud.mako Thu Nov 12 14:24:43 2009 -0500
@@ -156,7 +156,7 @@
%if cloudCredentials:
<ul class="manage-table-actions">
<li>
- <a class="action-button" href="${h.url_for( action='add' )}">
+ <a class="action-button" href="${h.url_for( action='add_credentials' )}">
<img src="${h.url_for('/static/images/silk/add.png')}" />
<span>Add credentials</span>
</a>
@@ -181,9 +181,9 @@
</td>
<td>
<div popupmenu="cr-${i}-popup">
- <a class="action-button" href="${h.url_for( action='view', id=trans.security.encode_id(cloudCredential.id) )}">View</a>
- <a class="action-button" href="${h.url_for( action='edit', id=trans.security.encode_id(cloudCredential.id) )}">Edit</a>
- <a class="action-button" confirm="Are you sure you want to delete credentials '${cloudCredential.name}'?" href="${h.url_for( action='delete', id=trans.security.encode_id(cloudCredential.id) )}">Delete</a>
+ <a class="action-button" href="${h.url_for( action='view_credentials', id=trans.security.encode_id(cloudCredential.id) )}">View</a>
+ <a class="action-button" href="${h.url_for( action='edit_credentials', id=trans.security.encode_id(cloudCredential.id) )}">Edit</a>
+ <a class="action-button" confirm="Are you sure you want to delete credentials '${cloudCredential.name}'?" href="${h.url_for( action='delete_credentials', id=trans.security.encode_id(cloudCredential.id) )}">Delete</a>
</div>
</td>
</tr>
@@ -196,7 +196,7 @@
<h3>Manage your cloud instances</h3>
<ul class="manage-table-actions">
<li>
- <a class="action-button" href="${h.url_for( action='configureNew' )}">
+ <a class="action-button" href="${h.url_for( action='configure_new_uci' )}">
<img src="${h.url_for('/static/images/silk/add.png')}" />
<span>Configure new instance</span>
</a>
@@ -253,10 +253,10 @@
<td id="${ liveInstance.id }-link"></td>
<td>
<div popupmenu="li-${i}-popup">
- <a class="action-button" confirm="Are you sure you want to stop instance '${liveInstance.name}'? Please note that this may take up to 1 minute during which time the page will not refresh." href="${h.url_for( action='stop', id=trans.security.encode_id(liveInstance.id) )}">Stop</a>
- <a class="action-button" href="${h.url_for( action='renameInstance', id=trans.security.encode_id(liveInstance.id) )}">Rename</a>
- <a class="action-button" href="${h.url_for( action='viewInstance', id=trans.security.encode_id(liveInstance.id) )}">View details</a>
- <a class="action-button" href="${h.url_for( action='usageReport', id=trans.security.encode_id(liveInstance.id) )}">Usage report</a>
+ <a class="action-button" confirm="Are you sure you want to stop instance '${liveInstance.name}'?" href="${h.url_for( action='stop', id=trans.security.encode_id(liveInstance.id) )}">Stop</a>
+ <a class="action-button" href="${h.url_for( action='rename_uci', id=trans.security.encode_id(liveInstance.id) )}">Rename</a>
+ <a class="action-button" href="${h.url_for( action='view_uci_details', id=trans.security.encode_id(liveInstance.id) )}">View details</a>
+ <a class="action-button" href="${h.url_for( action='uci_usage_report', id=trans.security.encode_id(liveInstance.id) )}">Usage report</a>
</div>
</td>
</tr>
@@ -322,12 +322,12 @@
<div popupmenu="pi-${i}-popup">
<a class="action-button" href="${h.url_for( action='start', id=trans.security.encode_id(prevInstance.id), type='m1.small' )}"> Start m1.small</a>
<a class="action-button" href="${h.url_for( action='start', id=trans.security.encode_id(prevInstance.id), type='c1.medium' )}"> Start c1.medium</a>
- <a class="action-button" href="${h.url_for( action='renameInstance', id=trans.security.encode_id(prevInstance.id) )}">Rename</a>
- <a class="action-button" href="${h.url_for( action='create_snapshot', id=trans.security.encode_id(prevInstance.id) )}">Create snapshot</a>
+ <a class="action-button" href="${h.url_for( action='rename_uci', id=trans.security.encode_id(prevInstance.id) )}">Rename</a>
+ <a class="action-button" href="${h.url_for( action='uci_usage_report', id=trans.security.encode_id(prevInstance.id) )}">Usage report</a>
+ <a class="action-button" href="${h.url_for( action='create_snapshot', id=trans.security.encode_id(prevInstance.id) )}">Create snapshot</a>
<a class="action-button" href="${h.url_for( action='view_snapshots', id=trans.security.encode_id(prevInstance.id) )}">View snapshots</a>
- <a class="action-button" href="${h.url_for( action='addStorage', id=trans.security.encode_id(prevInstance.id) )}" target="_parent">Add storage</a>
- <a class="action-button" href="${h.url_for( action='usageReport', id=trans.security.encode_id(prevInstance.id) )}">Usage report</a>
- <a class="action-button" confirm="Are you sure you want to delete instance '${prevInstance.name}'? This will delete all of your data assocaiated with this instance!" href="${h.url_for( action='deleteInstance', id=trans.security.encode_id(prevInstance.id) )}">Delete</a>
+ <a class="action-button" href="${h.url_for( action='add_storage', id=trans.security.encode_id(prevInstance.id) )}" target="_parent">Add storage</a>
+ <a class="action-button" confirm="Are you sure you want to delete instance '${prevInstance.name}'? This will delete all of your data assocaiated with this instance!" href="${h.url_for( action='delete_uci', id=trans.security.encode_id(prevInstance.id) )}">Delete</a>
</div>
</td>
</tr>
@@ -341,7 +341,7 @@
%else:
You have no credentials associated with your Galaxy account:
- <a class="action-button" href="${h.url_for( action='add' )}">
+ <a class="action-button" href="${h.url_for( action='add_credentials' )}">
<img src="${h.url_for('/static/images/silk/add.png')}" />
<span>add credentials</span>
</a>
diff -r e21f605d2766 -r 118dc385752b templates/cloud/configure_uci.mako
--- a/templates/cloud/configure_uci.mako Thu Nov 12 12:42:17 2009 -0500
+++ b/templates/cloud/configure_uci.mako Thu Nov 12 14:24:43 2009 -0500
@@ -35,7 +35,7 @@
<div class="form">
<div class="form-title">Configure new Galaxy instance</div>
<div class="form-body">
- <form name="Configure new UCI" action="${h.url_for( action='configureNew' )}" method="post" >
+ <form name="Configure new UCI" action="${h.url_for( action='configure_new_uci' )}" method="post" >
<%
cls = "form-row"
diff -r e21f605d2766 -r 118dc385752b templates/cloud/edit_credentials.mako
--- a/templates/cloud/edit_credentials.mako Thu Nov 12 12:42:17 2009 -0500
+++ b/templates/cloud/edit_credentials.mako Thu Nov 12 14:24:43 2009 -0500
@@ -20,7 +20,7 @@
<div class="form">
<div class="form-title">Edit credentials</div>
<div class="form-body">
- <form name="edit_credentials" action="${h.url_for( action='edit', id=trans.security.encode_id(credential.id), edited="true" )}" method="post" >
+ <form name="edit_credentials" action="${h.url_for( action='edit_credentials', id=trans.security.encode_id(credential.id), edited="true" )}" method="post" >
<%
cls = "form-row"
diff -r e21f605d2766 -r 118dc385752b templates/cloud/edit_image.mako
--- a/templates/cloud/edit_image.mako Thu Nov 12 12:42:17 2009 -0500
+++ b/templates/cloud/edit_image.mako Thu Nov 12 14:24:43 2009 -0500
@@ -20,7 +20,7 @@
<div class="form">
<div class="form-title">Edit image</div>
<div class="form-body">
- <form name="edit_image" action="${h.url_for( action='editImage', id=trans.security.encode_id(image.id), edited="true" )}" method="post" >
+ <form name="edit_image" action="${h.url_for( action='edit_image', id=trans.security.encode_id(image.id), edited="true" )}" method="post" >
<%
cls = "form-row"
if error.has_key('provider_error'):
diff -r e21f605d2766 -r 118dc385752b templates/cloud/list_images.mako
--- a/templates/cloud/list_images.mako Thu Nov 12 12:42:17 2009 -0500
+++ b/templates/cloud/list_images.mako Thu Nov 12 14:24:43 2009 -0500
@@ -69,18 +69,18 @@
%endif
</td>
<td>
- <a href="${h.url_for( controller='cloud', action='editImage', image_id=image.image_id, manifest=image.manifest, id=trans.security.encode_id(image.id) )}">e</a>
+ <a href="${h.url_for( controller='cloud', action='edit_image', image_id=image.image_id, manifest=image.manifest, id=trans.security.encode_id(image.id) )}">e</a>
</td>
<td>
<a confirm="Are you sure you want to delete machine image '${image.image_id}'? Note that this may result in users' UCI's not to work any more!"
- href="${h.url_for( controller='cloud', action='deleteImage', id=trans.security.encode_id(image.id) )}">x</a>
+ href="${h.url_for( controller='cloud', action='delete_image', id=trans.security.encode_id(image.id) )}">x</a>
</td>
</tr>
%endfor
</table>
%else:
<h3>There are no registered machine images.</h3><br />
- <a href="${h.url_for( controller='cloud', action='addNewImage' )}" target="galaxy_main">Add machine image now?</a>
+ <a href="${h.url_for( controller='cloud', action='add_new_image' )}" target="galaxy_main">Add machine image now?</a>
%endif
diff -r e21f605d2766 -r 118dc385752b templates/cloud/view.mako
--- a/templates/cloud/view.mako Thu Nov 12 12:42:17 2009 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,157 +0,0 @@
-<%inherit file="/base.mako"/>
-
-<%def name="title()">Cloud credentials</%def>
-
-<h2>Credentials details</h2>
-
-<ul class="manage-table-actions">
- <li>
- <a class="action-button" href="${h.url_for( action='list' )}">
- <img src="${h.url_for('/static/images/silk/resultset_previous.png')}" />
- <span>Return to cloud management console</span>
- </a>
- </li>
-</ul>
-
-%if credDetails:
- ${view_cred( credDetails )}
-%else:
- There are no credentials under that name.
-%endif
-
-
-
-
-<%def name="view_cred( credDetails )">
- <table class="mange-table colored" border="0" cellspacing="0" cellpadding="0" width="100%">
- <tr>
- <td> Credentials name: </td>
- <td>
- ${credDetails.name}
- <a id="wf-popup" class="popup-arrow" style="display: none;">▼</a>
- </td>
- <td>
- <div popupmenu="wf-popup">
- <a class="action-button" href="${h.url_for( action='rename', id=trans.security.encode_id(credDetails.id) )}">Rename</a>
- <a class="action-button" confirm="Are you sure you want to delete credentials '${credDetails.name}'?" href="${h.url_for( action='delete', id=trans.security.encode_id(credDetails.id) )}">Delete</a>
- </div>
- </td>
- </tr>
- <tr>
- <td> Last updated: </td>
- <td> ${str(credDetails.update_time)[:16]}
- <%
- context.write( ' UTC (' )
- context.write( str(h.date.distance_of_time_in_words (credDetails.update_time, h.date.datetime.utcnow() ) ) )
- %> ago)
- </td>
- </tr>
- <tr>
- <td> Cloud provider type: </td>
- <td> ${str(credDetails.provider.type)}</td>
- </tr>
- <tr>
- <td> Cloud provider name: </td>
- <td> ${str(credDetails.provider.name)}</td>
- </tr>
- <tr>
- <td> Access key: </td>
- <td>
- ${credDetails.access_key}
- </td>
- </tr>
- <tr>
- <td> Secret key: </td>
- <td>
- <div id="shortComment2">
- <a onclick="document.getElementById('fullComment2').style.display = 'block';
- document.getElementById('shortComment2').style.display = 'none'; return 0"
- href="javascript:void(0)">
- + Show
- </a>
- </div>
- <div id="fullComment2" style="DISPLAY: none">
- <a onclick="document.getElementById('shortComment2').style.display = 'block';
- document.getElementById('fullComment2').style.display = 'none'; return 0;"
- href="javascript:void(0)">
- - Hide
- </a><br />
- <nobr>${credDetails.secret_key}</nobr><br/>
- </div>
- </td>
- </tr>
- <tr><td id="addl"><b>Additional cloud provider information (if available):</b></td></tr>
- %if credDetails.provider.region_connection != None:
- <tr>
- <td> Region connection: </td>
- <td> ${credDetails.provider.region_connection} </td>
- </tr>
- %endif
- %if credDetails.provider.region_name != None:
- <tr>
- <td> Region name: </td>
- <td> ${credDetails.provider.region_name} </td>
- </tr>
- %endif
- %if credDetails.provider.region_endpoint != None:
- <tr>
- <td> Region endpoint: </td>
- <td> ${credDetails.provider.region_endpoint} </td>
- </tr>
- %endif
- %if credDetails.provider.is_secure != None:
- <tr>
- <td> Is secure: </td>
- <td> ${credDetails.provider.is_secure} </td>
- </tr>
- %endif
- %if credDetails.provider.host != None:
- <tr>
- <td> Host: </td>
- <td> ${credDetails.provider.host} </td>
- </tr>
- %endif
- %if credDetails.provider.port != None:
- <tr>
- <td> Port: </td>
- <td> ${credDetails.provider.port} </td>
- </tr>
- %endif
- %if credDetails.provider.proxy != None:
- <tr>
- <td> Proxy: </td>
- <td> ${credDetails.provider.proxy} </td>
- </tr>
- %endif
- %if credDetails.provider.proxy_port != None:
- <tr>
- <td> Proxy port: </td>
- <td> ${credDetails.provider.proxy_port} </td>
- </tr>
- %endif
- %if credDetails.provider.proxy_pass != None:
- <tr>
- <td> Proxy pass: </td>
- <td> ${credDetails.provider.proxy_pass} </td>
- </tr>
- %endif
- %if credDetails.provider.debug != None:
- <tr>
- <td> Debug: </td>
- <td> ${credDetails.provider.debug} </td>
- </tr>
- %endif
- %if credDetails.provider.https_connection_factory != None:
- <tr>
- <td> HTTPS connection factory: </td>
- <td> ${credDetails.provider.https_connection_factory} </td>
- </tr>
- %endif
- %if credDetails.provider.path != None:
- <tr>
- <td> Path: </td>
- <td> ${credDetails.provider.path} </td>
- </tr>
- %endif
- </table>
-</%def>
diff -r e21f605d2766 -r 118dc385752b templates/cloud/viewInstance.mako
--- a/templates/cloud/viewInstance.mako Thu Nov 12 12:42:17 2009 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,140 +0,0 @@
-<%inherit file="/base.mako"/>
-<%def name="title()">Live instance details</%def>
-
-<%
- # Because of the one-to-many relationship between liveInstance (i.e., UCI) and actual instances, need to know
- # which one is currently active. Because only one instance of UCI can be alive at any point in time, simply
- # select the most recent one.
- # TODO: Once individual UCI's will be able to start more than one instance, this will need to be fixed
- #i_id = len(liveInstance.instance) - 1
-%>
-
-<h2>Live instance details</h2>
-
-%if liveInstance:
- <ul class="manage-table-actions">
- <li>
- <a class="action-button" href="${h.url_for( action='list' )}">
- <img src="${h.url_for('/static/images/silk/resultset_previous.png')}" />
- <span>Return to cloud management console</span>
- </a>
- </li>
- </ul>
-
- <table class="mange-table colored" border="0" cellspacing="0" cellpadding="0" width="100%">
- <tr>
- <td> Instance name: </td>
- <td>
- ${liveInstance.uci.name}
- <a id="li-popup" class="popup-arrow" style="display: none;">▼</a>
- </td>
- <td>
- <div popupmenu="li-popup">
- <a class="action-button" href="${h.url_for( action='renameInstance', id=trans.security.encode_id(liveInstance.uci.id) )}">Rename</a>
- <a class="action-button" confirm="Are you sure you want to stop instance '${liveInstance.uci.name}'?" href="${h.url_for( action='stop', id=trans.security.encode_id(liveInstance.uci.id) )}">Stop</a>
- </div>
- </td>
- </tr>
- <tr>
- <td> Date created: </td>
- <td> ${str(liveInstance.uci.create_time)[:16]}
- <%
- context.write( ' UTC (' )
- context.write( str(h.date.distance_of_time_in_words (liveInstance.uci.create_time, h.date.datetime.utcnow() ) ) )
- %> ago)
- </td>
- </tr>
- <tr>
- <td> Alive since: </td>
- <td> ${str(liveInstance.launch_time)[:16]}
- <%
- context.write( ' UTC (' )
- context.write( str(h.date.distance_of_time_in_words (liveInstance.launch_time, h.date.datetime.utcnow() ) ) )
- %> ago)
- </td>
- </tr>
- %if liveInstance.instance_id != None:
- <tr>
- <td> Instance ID: </td>
- <td> ${liveInstance.instance_id} </td>
- </tr>
- %endif
- %if liveInstance.reservation_id != None:
- <tr>
- <td> Reservation ID: </td>
- <td> ${liveInstance.reservation_id} </td>
- </tr>
- %endif
- <tr>
- <td> AMI: </td>
- <td> ${liveInstance.mi_id} </td>
- </tr>
- <tr>
- <td> State:</td>
- <td> ${liveInstance.state} </td>
- </tr>
- <tr>
- <td> Type:</td>
- <td> ${liveInstance.type} </td>
- </tr>
- <tr>
- <td> Storage size:</td>
- <td> ${liveInstance.uci.total_size} </td>
- </tr>
- <tr>
- <td> Public DNS:</td>
- <%
- lnk="http://"+str(liveInstance.public_dns)
- %>
- <td> <a href="${lnk}" target="_blank">${liveInstance.public_dns}</a></td>
- </tr>
- %if liveInstance.private_dns != None:
- <tr>
- <td> Private DNS:</td>
- <td> ${liveInstance.private_dns}</td>
- </tr>
- %endif
- %if liveInstance.security_group != None:
- <tr>
- <td> Security group zone:</td>
- <td> ${liveInstance.security_group} </td>
- </tr>
- %endif
- %if liveInstance.availability_zone != None:
- <tr>
- <td> Availabilty zone:</td>
- <td> ${liveInstance.availability_zone} </td>
- </tr>
- %endif
- %if liveInstance.uci.key_pair_name != None:
- <tr>
- <td> Keypair file name:</td>
- <td> ${liveInstance.uci.key_pair_name} </td>
- </tr>
- %endif
- %if liveInstance.uci.key_pair_material != None:
- <tr>
- <td> Keypair material:</td>
- <td>
- <div id="short">
- <a onclick="document.getElementById('full').style.display = 'block';
- document.getElementById('short').style.display = 'none'; return 0"
- href="javascript:void(0)">
- + Show
- </a>
- </div>
- <div id="full" style="DISPLAY: none">
- <a onclick="document.getElementById('short').style.display = 'block';
- document.getElementById('full').style.display = 'none'; return 0;"
- href="javascript:void(0)">
- - Hide</a>
- ${liveInstance.uci.key_pair_material}<br/>
- </div>
- </td>
- </tr>
- %endif
-
- </table>
-%else:
- There is no live instance under that name.
-%endif
diff -r e21f605d2766 -r 118dc385752b templates/cloud/view_credentials.mako
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/templates/cloud/view_credentials.mako Thu Nov 12 14:24:43 2009 -0500
@@ -0,0 +1,157 @@
+<%inherit file="/base.mako"/>
+
+<%def name="title()">Cloud credentials</%def>
+
+<h2>Credentials details</h2>
+
+<ul class="manage-table-actions">
+ <li>
+ <a class="action-button" href="${h.url_for( action='list' )}">
+ <img src="${h.url_for('/static/images/silk/resultset_previous.png')}" />
+ <span>Return to cloud management console</span>
+ </a>
+ </li>
+</ul>
+
+%if credDetails:
+ ${view_cred( credDetails )}
+%else:
+ There are no credentials under that name.
+%endif
+
+
+
+
+<%def name="view_cred( credDetails )">
+ <table class="mange-table colored" border="0" cellspacing="0" cellpadding="0" width="100%">
+ <tr>
+ <td> Credentials name: </td>
+ <td>
+ ${credDetails.name}
+ <a id="wf-popup" class="popup-arrow" style="display: none;">▼</a>
+ </td>
+ <td>
+ <div popupmenu="wf-popup">
+ <a class="action-button" href="${h.url_for( action='edit_credentials', id=trans.security.encode_id(credDetails.id) )}">Edit</a>
+ <a class="action-button" confirm="Are you sure you want to delete credentials '${credDetails.name}'?" href="${h.url_for( action='delete_credentials', id=trans.security.encode_id(credDetails.id) )}">Delete</a>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <td> Last updated: </td>
+ <td> ${str(credDetails.update_time)[:16]}
+ <%
+ context.write( ' UTC (' )
+ context.write( str(h.date.distance_of_time_in_words (credDetails.update_time, h.date.datetime.utcnow() ) ) )
+ %> ago)
+ </td>
+ </tr>
+ <tr>
+ <td> Cloud provider type: </td>
+ <td> ${str(credDetails.provider.type)}</td>
+ </tr>
+ <tr>
+ <td> Cloud provider name: </td>
+ <td> ${str(credDetails.provider.name)}</td>
+ </tr>
+ <tr>
+ <td> Access key: </td>
+ <td>
+ ${credDetails.access_key}
+ </td>
+ </tr>
+ <tr>
+ <td> Secret key: </td>
+ <td>
+ <div id="shortComment2">
+ <a onclick="document.getElementById('fullComment2').style.display = 'block';
+ document.getElementById('shortComment2').style.display = 'none'; return 0"
+ href="javascript:void(0)">
+ + Show
+ </a>
+ </div>
+ <div id="fullComment2" style="DISPLAY: none">
+ <a onclick="document.getElementById('shortComment2').style.display = 'block';
+ document.getElementById('fullComment2').style.display = 'none'; return 0;"
+ href="javascript:void(0)">
+ - Hide
+ </a><br />
+ <nobr>${credDetails.secret_key}</nobr><br/>
+ </div>
+ </td>
+ </tr>
+ <tr><td id="addl"><b>Additional cloud provider information (if available):</b></td></tr>
+ %if credDetails.provider.region_connection != None:
+ <tr>
+ <td> Region connection: </td>
+ <td> ${credDetails.provider.region_connection} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.region_name != None:
+ <tr>
+ <td> Region name: </td>
+ <td> ${credDetails.provider.region_name} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.region_endpoint != None:
+ <tr>
+ <td> Region endpoint: </td>
+ <td> ${credDetails.provider.region_endpoint} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.is_secure != None:
+ <tr>
+ <td> Is secure: </td>
+ <td> ${credDetails.provider.is_secure} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.host != None:
+ <tr>
+ <td> Host: </td>
+ <td> ${credDetails.provider.host} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.port != None:
+ <tr>
+ <td> Port: </td>
+ <td> ${credDetails.provider.port} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.proxy != None:
+ <tr>
+ <td> Proxy: </td>
+ <td> ${credDetails.provider.proxy} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.proxy_port != None:
+ <tr>
+ <td> Proxy port: </td>
+ <td> ${credDetails.provider.proxy_port} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.proxy_pass != None:
+ <tr>
+ <td> Proxy pass: </td>
+ <td> ${credDetails.provider.proxy_pass} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.debug != None:
+ <tr>
+ <td> Debug: </td>
+ <td> ${credDetails.provider.debug} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.https_connection_factory != None:
+ <tr>
+ <td> HTTPS connection factory: </td>
+ <td> ${credDetails.provider.https_connection_factory} </td>
+ </tr>
+ %endif
+ %if credDetails.provider.path != None:
+ <tr>
+ <td> Path: </td>
+ <td> ${credDetails.provider.path} </td>
+ </tr>
+ %endif
+ </table>
+</%def>
diff -r e21f605d2766 -r 118dc385752b templates/cloud/view_instance.mako
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/templates/cloud/view_instance.mako Thu Nov 12 14:24:43 2009 -0500
@@ -0,0 +1,140 @@
+<%inherit file="/base.mako"/>
+<%def name="title()">Live instance details</%def>
+
+<%
+ # Because of the one-to-many relationship between liveInstance (i.e., a UCI) and actual instances, we need to
+ # know which one is currently active. Because only one instance of a UCI can be alive at any point in time,
+ # simply select the most recent one.
+ # TODO: Once an individual UCI is able to start more than one instance, this will need to be fixed.
+ #i_id = len(liveInstance.instance) - 1
+%>
+
+<h2>Live instance details</h2>
+
+%if liveInstance:
+ <ul class="manage-table-actions">
+ <li>
+ <a class="action-button" href="${h.url_for( action='list' )}">
+ <img src="${h.url_for('/static/images/silk/resultset_previous.png')}" />
+ <span>Return to cloud management console</span>
+ </a>
+ </li>
+ </ul>
+
+ <table class="mange-table colored" border="0" cellspacing="0" cellpadding="0" width="100%">
+ <tr>
+ <td> Instance name: </td>
+ <td>
+ ${liveInstance.uci.name}
+ <a id="li-popup" class="popup-arrow" style="display: none;">▼</a>
+ </td>
+ <td>
+ <div popupmenu="li-popup">
+ <a class="action-button" href="${h.url_for( action='rename_uci', id=trans.security.encode_id(liveInstance.uci.id) )}">Rename</a>
+ <a class="action-button" confirm="Are you sure you want to stop instance '${liveInstance.uci.name}'?" href="${h.url_for( action='stop', id=trans.security.encode_id(liveInstance.uci.id) )}">Stop</a>
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <td> Date created: </td>
+ <td> ${str(liveInstance.uci.create_time)[:16]}
+ <%
+ context.write( ' UTC (' )
+ context.write( str(h.date.distance_of_time_in_words (liveInstance.uci.create_time, h.date.datetime.utcnow() ) ) )
+ %> ago)
+ </td>
+ </tr>
+ <tr>
+ <td> Alive since: </td>
+ <td> ${str(liveInstance.launch_time)[:16]}
+ <%
+ context.write( ' UTC (' )
+ context.write( str(h.date.distance_of_time_in_words (liveInstance.launch_time, h.date.datetime.utcnow() ) ) )
+ %> ago)
+ </td>
+ </tr>
+ %if liveInstance.instance_id != None:
+ <tr>
+ <td> Instance ID: </td>
+ <td> ${liveInstance.instance_id} </td>
+ </tr>
+ %endif
+ %if liveInstance.reservation_id != None:
+ <tr>
+ <td> Reservation ID: </td>
+ <td> ${liveInstance.reservation_id} </td>
+ </tr>
+ %endif
+ <tr>
+ <td> AMI: </td>
+ <td> ${liveInstance.mi_id} </td>
+ </tr>
+ <tr>
+ <td> State:</td>
+ <td> ${liveInstance.state} </td>
+ </tr>
+ <tr>
+ <td> Type:</td>
+ <td> ${liveInstance.type} </td>
+ </tr>
+ <tr>
+ <td> Storage size:</td>
+ <td> ${liveInstance.uci.total_size} </td>
+ </tr>
+ <tr>
+ <td> Public DNS:</td>
+ <%
+ lnk="http://"+str(liveInstance.public_dns)
+ %>
+ <td> <a href="${lnk}" target="_blank">${liveInstance.public_dns}</a></td>
+ </tr>
+ %if liveInstance.private_dns != None:
+ <tr>
+ <td> Private DNS:</td>
+ <td> ${liveInstance.private_dns}</td>
+ </tr>
+ %endif
+ %if liveInstance.security_group != None:
+ <tr>
+ <td> Security group zone:</td>
+ <td> ${liveInstance.security_group} </td>
+ </tr>
+ %endif
+ %if liveInstance.availability_zone != None:
+ <tr>
+ <td> Availability zone:</td>
+ <td> ${liveInstance.availability_zone} </td>
+ </tr>
+ %endif
+ %if liveInstance.uci.key_pair_name != None:
+ <tr>
+ <td> Keypair file name:</td>
+ <td> ${liveInstance.uci.key_pair_name} </td>
+ </tr>
+ %endif
+ %if liveInstance.uci.key_pair_material != None:
+ <tr>
+ <td> Keypair material:</td>
+ <td>
+ <div id="short">
+ <a onclick="document.getElementById('full').style.display = 'block';
+ document.getElementById('short').style.display = 'none'; return 0"
+ href="javascript:void(0)">
+ + Show
+ </a>
+ </div>
+ <div id="full" style="DISPLAY: none">
+ <a onclick="document.getElementById('short').style.display = 'block';
+ document.getElementById('full').style.display = 'none'; return 0;"
+ href="javascript:void(0)">
+ - Hide</a>
+ ${liveInstance.uci.key_pair_material}<br/>
+ </div>
+ </td>
+ </tr>
+ %endif
+
+ </table>
+%else:
+ There is no live instance under that name.
+%endif
diff -r e21f605d2766 -r 118dc385752b templates/cloud/view_usage.mako
--- a/templates/cloud/view_usage.mako Thu Nov 12 12:42:17 2009 -0500
+++ b/templates/cloud/view_usage.mako Thu Nov 12 14:24:43 2009 -0500
@@ -88,7 +88,7 @@
</table>
<br/>Total number of hours instance was alive: ${total_hours} <br />
Note that these are just best effort estimates - true usage should be obtained from respective cloud provider. <br />
- <%namespace name="view_cred" file="view.mako" />
+ <%namespace name="view_cred" file="view_credentials.mako" />
<div id="hide_cred_details">
This instance uses credentials:
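
Taken together, the diff above renames the cloud controller's exposed actions to snake_case and updates every h.url_for() call in the templates to match, because Galaxy resolves the action argument to a controller method of the same name. Below is a minimal, self-contained sketch of that convention; the toy dispatch() function is only an illustration and is not Galaxy's actual URL routing code.

# Toy illustration of why template links and controller method names must be
# renamed together: the 'action' in a URL resolves to a method of that name.
class CloudController( object ):
    def view_credentials( self, id=None ):
        return "render cloud/view_credentials.mako for credentials %s" % id
    def rename_uci( self, id, new_name=None ):
        return "rename UCI %s to '%s'" % ( id, new_name )

def dispatch( controller, action, **kwargs ):
    method = getattr( controller, action, None )
    if method is None:
        # An old link such as action='renameInstance' would fail like this
        # once the method has been renamed.
        raise ValueError( "No such action: %s" % action )
    return method( **kwargs )

if __name__ == "__main__":
    cloud = CloudController()
    print dispatch( cloud, "view_credentials", id=3 )
    print dispatch( cloud, "rename_uci", id=3, new_name="my-instance" )
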
23 Nov '09
details: http://www.bx.psu.edu/hg/galaxy/rev/e21f605d2766
changeset: 3091:e21f605d2766
user: Enis Afgan <afgane(a)gmail.com>
date: Thu Nov 12 12:42:17 2009 -0500
description:
Cleaned up code dealing with error reporting in cloud providers (EC2 and Eucalyptus).
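The recurring pattern in this cleanup is to build each error message once, log it, and record the same text on the UCI via uci_wrapper.set_error() (plus set_store_status()/set_store_error() for volume problems), instead of formatting slightly different strings for the log and the database. A condensed sketch of that pattern follows; conn, uci_wrapper and vol stand in for the objects used in ec2.py, and the function below is an illustration rather than code lifted verbatim from the provider.

import logging
log = logging.getLogger( __name__ )

def refresh_volume_status( conn, uci_wrapper, vol ):
    # Sketch of the error-reporting pattern used throughout the providers:
    # compose the message once, then reuse it for the log and the UCI record.
    try:
        vl = conn.get_all_volumes( [ vol.id ] )
    except Exception, ex:
        err = "Error while retrieving status of just created storage volume '" + vol.id + "': " + str( ex )
        log.error( err )
        uci_wrapper.set_store_status( vol.id, "error" )
        uci_wrapper.set_error( err, True )
        return
    if len( vl ) > 0:
        uci_wrapper.set_store_status( vol.id, vl[0].status )
    else:
        err = "Volume '" + vol.id + "' not found by the cloud provider after being created."
        log.error( err )
        uci_wrapper.set_store_status( vol.id, "error" )
        uci_wrapper.set_error( err, True )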
diffstat:
lib/galaxy/cloud/__init__.py | 26 +-
lib/galaxy/cloud/providers/ec2.py | 264 +++++++++++++--------
lib/galaxy/cloud/providers/eucalyptus.py | 267 ++++++++++++++-------
lib/galaxy/model/mapping.py | 14 +-
lib/galaxy/model/migrate/versions/0014_cloud_tables.py | 25 +-
lib/galaxy/web/controllers/cloud.py | 6 +-
6 files changed, 376 insertions(+), 226 deletions(-)
diffs (1173 lines):
diff -r b90db227df6d -r e21f605d2766 lib/galaxy/cloud/__init__.py
--- a/lib/galaxy/cloud/__init__.py Thu Nov 12 10:28:54 2009 -0500
+++ b/lib/galaxy/cloud/__init__.py Thu Nov 12 12:42:17 2009 -0500
@@ -21,6 +21,7 @@
uci_states = Bunch(
NEW_UCI = "newUCI",
NEW = "new",
+ CREATING = "creating",
DELETING_UCI = "deletingUCI",
DELETING = "deleting",
DELETED = "deleted",
@@ -317,6 +318,17 @@
uci.store[store_id].device = device
uci.store[store_id].flush()
+ def set_store_error( self, error, store_index=None, store_id=None ):
+ if store_index != None:
+ store = model.CloudStore.get( store_index )
+ elif store_id != None:
+ store = model.CloudStore.filter_by( volume_id = store_id ).first()
+ else:
+ return None
+
+ store.error = error
+ store.flush()
+
def set_store_status( self, vol_id, status ):
vol = model.CloudStore.filter( model.CloudStore.c.volume_id == vol_id ).first()
vol.status = status
@@ -404,6 +416,13 @@
i.state = instance_states.ERROR
i.flush()
uci.flush()
+
+ def set_deleted( self ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.state = uci_states.DELETED # for bookkeeping reasons, mark as deleted but don't actually delete.
+ uci.deleted = True
+ uci.flush()
# --------- Getter methods -----------------
@@ -563,13 +582,6 @@
uci.refresh()
return uci.launch_time
- def delete( self ):
- uci = model.UCI.get( self.uci_id )
- uci.refresh()
- uci.state = uci_states.DELETED # for bookkeeping reasons, mark as deleted but don't actually delete.
- uci.deleted = True
- uci.flush()
-
class CloudProvider( object ):
def __init__( self, app ):
import providers.eucalyptus
diff -r b90db227df6d -r e21f605d2766 lib/galaxy/cloud/providers/ec2.py
--- a/lib/galaxy/cloud/providers/ec2.py Thu Nov 12 10:28:54 2009 -0500
+++ b/lib/galaxy/cloud/providers/ec2.py Thu Nov 12 12:42:17 2009 -0500
@@ -6,8 +6,9 @@
from galaxy.model import mapping
from galaxy.datatypes.data import nice_size
from galaxy.util.bunch import Bunch
+from galaxy.cloud import UCIwrapper
from Queue import Queue
-from sqlalchemy import or_
+from sqlalchemy import or_, and_
import galaxy.eggs
galaxy.eggs.require("boto")
@@ -22,6 +23,7 @@
uci_states = Bunch(
NEW_UCI = "newUCI",
NEW = "new",
+ CREATING = "creating",
DELETING_UCI = "deletingUCI",
DELETING = "deleting",
SUBMITTED_UCI = "submittedUCI",
@@ -49,6 +51,7 @@
store_states = Bunch(
IN_USE = "in-use",
CREATING = "creating",
+ DELETED = 'deleted',
ERROR = "error"
)
@@ -69,7 +72,6 @@
def __init__( self, app ):
self.type = "ec2" # cloud provider type (e.g., ec2, eucalyptus, opennebula)
self.zone = "us-east-1a"
- self.key_pair = "galaxy-keypair"
self.security_group = "galaxyWeb"
self.queue = Queue()
@@ -114,9 +116,10 @@
provider = uci_wrapper.get_provider()
try:
region = RegionInfo( None, provider.region_name, provider.region_endpoint )
- except Exception, e:
- log.error( "Selecting region with cloud provider failed: %s" % str(e) )
- uci_wrapper.set_error( "Selecting region with cloud provider failed: " + str(e), True )
+ except Exception, ex:
+ err = "Selecting region with cloud provider failed: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
return None
try:
conn = EC2Connection( aws_access_key_id=uci_wrapper.get_access_key(),
@@ -124,9 +127,10 @@
is_secure=provider.is_secure,
region=region,
path=provider.path )
- except Exception, e:
- log.error( "Establishing connection with cloud failed: %s" % str(e) )
- uci_wrapper.set_error( "Establishing connection with cloud failed: " + str(e), True )
+ except Exception, ex:
+ err = "Establishing connection with cloud failed: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
return None
return conn
@@ -150,27 +154,30 @@
kp = self.create_key_pair( conn, kp_name )
uci_wrapper.set_key_pair( kp.name, kp.material )
except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error when deleting key pair: '%s'" % e )
- uci_wrapper.set_error( "EC2 response error while deleting key pair: " + str( e ), True )
+ err = "EC2 response error while deleting key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
else:
try:
kp = self.create_key_pair( conn, kp_name )
uci_wrapper.set_key_pair( kp.name, kp.material )
except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error when creating key pair: '%s'" % e )
- uci_wrapper.set_error( "EC2 response error while creating key pair: " + str( e ), True )
+ err = "EC2 response error while creating key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
except Exception, ex:
- log.error( "Exception when creating key pair: '%s'" % e )
- uci_wrapper.set_error( "Error while creating key pair: " + str( e ), True )
-
+ err = "Exception while creating key pair: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
except boto.exception.EC2ResponseError, e: # No keypair under this name exists so create it
if e.code == 'InvalidKeyPair.NotFound':
log.info( "No keypair found, creating keypair '%s'" % kp_name )
kp = self.create_key_pair( conn, kp_name )
uci_wrapper.set_key_pair( kp.name, kp.material )
else:
- log.error( "EC2 response error while retrieving key pair: '%s'" % e )
- uci_wrapper.set_error( "Cloud provider response error while retrieving key pair: " + str( e ), True )
+ err = "EC2 response error while retrieving key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
if kp != None:
return kp.name
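
The check_key_pair() hunk above hinges on the provider's 'InvalidKeyPair.NotFound' error code. A condensed sketch of that get-or-create pattern, assuming a boto 1.8d-era EC2Connection 'conn' (the real method additionally recreates the pair when the stored key material is missing):

import boto.exception

def get_or_create_key_pair( conn, kp_name ):
    try:
        kp = conn.get_all_key_pairs( [kp_name] )[0] # key pair already registered with the provider
    except boto.exception.EC2ResponseError, e:
        if e.code == 'InvalidKeyPair.NotFound': # no key pair under this name exists, so create it
            kp = conn.create_key_pair( kp_name )
        else:
            raise # any other provider error is handled by the caller
    return kp
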
@@ -197,8 +204,9 @@
if mi:
return mi.image_id
else:
- log.error( "Machine image could not be retrieved for UCI '%s'." % uci_wrapper.get_name() )
- uci_wrapper.set_error( "Machine image could not be retrieved. Contact site administrator to ensure needed machine image is registered.", True )
+ err = "Machine image could not be retrieved"
+ log.error( "%s for UCI '%s'." % (err, uci_wrapper.get_name() ) )
+ uci_wrapper.set_error( err+". Contact site administrator to ensure needed machine image is registered.", True )
return None
def shutdown( self ):
@@ -242,14 +250,30 @@
# conn.delete_volume( vol.id )
# uci_wrapper.change_state( uci_state='error' )
# return
- vl = conn.get_all_volumes( [vol.id] )
+
+ # Retrieve created volume again to get updated status
+ try:
+ vl = conn.get_all_volumes( [vol.id] )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while retrieving (i.e., updating status) of just created storage volume '" + vol.id + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_store_status( vol.id, uci_states.ERROR )
+ uci_wrapper.set_error( err, True )
+ return
+ except Exception, ex:
+ err = "Error while retrieving (i.e., updating status) of just created storage volume '" + vol.id + "': " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ return
+
if len( vl ) > 0:
uci_wrapper.change_state( uci_state=vl[0].status )
uci_wrapper.set_store_status( vol.id, vl[0].status )
else:
- uci_wrapper.change_state( uci_state=uci_states.ERROR )
+ err = "Volume '" + vol.id +"' not found by EC2 after being created."
+ log.error( err )
uci_wrapper.set_store_status( vol.id, uci_states.ERROR )
- uci_wrapper.set_error( "Volume '%s' not found by EC2 after being created" % vol.id )
+ uci_wrapper.set_error( err, True )
def deleteUCI( self, uci_wrapper ):
"""
@@ -266,23 +290,28 @@
failedList = []
for v in vl:
log.debug( "Deleting volume with id='%s'" % v.volume_id )
- if conn.delete_volume( v.volume_id ):
- deletedList.append( v.volume_id )
- v.deleted = True
- v.flush()
- count += 1
- else:
- failedList.append( v.volume_id )
+ try:
+ if conn.delete_volume( v.volume_id ):
+ deletedList.append( v.volume_id )
+ v.deleted = True
+ v.flush()
+ count += 1
+ else:
+ failedList.append( v.volume_id )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while deleting storage volume '" + v.volume_id + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_store_error( err, store_id = v.volume_id )
+ uci_wrapper.set_error( err, True )
# Delete UCI if all of associated
if count == len( vl ):
- uci_wrapper.delete()
+ uci_wrapper.set_deleted()
else:
- log.error( "Deleting following volume(s) failed: %s. However, these volumes were successfully deleted: %s. \
- MANUAL intervention and processing needed." % ( failedList, deletedList ) )
- uci_wrapper.change_state( uci_state=uci_state.ERROR )
- uci_wrapper.set_error( "Deleting following volume(s) failed: "+failedList+". However, these volumes were successfully deleted: "+deletedList+". \
- MANUAL intervention and processing needed." )
+                err = "Deleting the following volume(s) failed: "+str( failedList )+". However, these volumes were successfully deleted: "+str( deletedList )+". \
+                    MANUAL intervention and processing needed."
+ log.error( err )
+ uci_wrapper.set_error( err, True )
def snapshotUCI( self, uci_wrapper ):
"""
@@ -300,17 +329,17 @@
uci_wrapper.set_snapshot_id( snapshot.id, snap_id )
sh = conn.get_all_snapshots( snap_id ) # get updated status
uci_wrapper.set_snapshot_status( status=sh[0].status, snap_id=snap_id )
- except boto.exception.EC2ResponseError, ex:
- err = "Cloud provider response error while creating snapshot: " + str( e )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while creating snapshot: " + str( e )
log.error( err )
uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
- uci_wrapper.set_error( error=err, True )
+ uci_wrapper.set_error( err, True )
return
except Exception, ex:
err = "Error while creating snapshot: " + str( ex )
log.error( err )
uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
- uci_wrapper.set_error( error=err, True )
+ uci_wrapper.set_error( err, True )
return
uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
@@ -335,6 +364,11 @@
if uci_wrapper.get_state() != uci_states.ERROR:
conn = self.get_connection( uci_wrapper )
self.check_key_pair( uci_wrapper, conn )
+ if uci_wrapper.get_key_pair_name() == None:
+ err = "Key pair not found"
+ log.error( "%s for UCI '%s'." % ( err, uci_wrapper.get_name() ) )
+ uci_wrapper.set_error( err + ". Try resetting the state and starting the instance again.", True )
+ return
i_indexes = uci_wrapper.get_instances_indexes( state=instance_states.SUBMITTED ) # Get indexes of i_indexes associated with this UCI that are in 'submitted' state
log.debug( "Starting instances with IDs: '%s' associated with UCI '%s' " % ( i_indexes, uci_wrapper.get_name(), ) )
@@ -342,12 +376,9 @@
for i_index in i_indexes:
# Get machine image for current instance
mi_id = self.get_mi_id( uci_wrapper, i_index )
+ log.debug( "mi_id: %s, uci_wrapper.get_key_pair_name(): %s" % ( mi_id, uci_wrapper.get_key_pair_name() ) )
uci_wrapper.set_mi( i_index, mi_id )
- if uci_wrapper.get_key_pair_name() == None:
- log.error( "Key pair for UCI '%s' is None." % uci_wrapper.get_name() )
- uci_wrapper.set_error( "Key pair not found. Try resetting the state and starting the instance again.", True )
- return
-
+
# Check if galaxy security group exists (and create it if it does not)
log.debug( "Setting up '%s' security group." % self.security_group )
try:
@@ -359,16 +390,18 @@
gSecurityGroup = conn.create_security_group(self.security_group, 'Security group for Galaxy.')
gSecurityGroup.authorize( 'tcp', 80, 80, '0.0.0.0/0' ) # Open HTTP port
gSecurityGroup.authorize( 'tcp', 22, 22, '0.0.0.0/0' ) # Open SSH port
- except boto.exception.EC2ResponseError, ex:
- log.error( "EC2 response error while creating security group: '%s'" % e )
- uci_wrapper.set_error( "EC2 response error while creating security group: " + str( e ), True )
+ except boto.exception.EC2ResponseError, ee:
+ err = "EC2 response error while creating security group: " + str( ee )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
else:
- log.error( "EC2 response error while retrieving security group: '%s'" % e )
- uci_wrapper.set_error( "EC2 response error while retrieving security group: " + str( e ), True )
+ err = "EC2 response error while retrieving security group: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
if uci_wrapper.get_state() != uci_states.ERROR:
- # Start an instance
+ # Start an instance
log.debug( "Starting instance for UCI '%s'" % uci_wrapper.get_name() )
#TODO: Once multiple volumes can be attached to a single instance, update 'userdata' composition
userdata = uci_wrapper.get_store_volume_id()+"|"+uci_wrapper.get_access_key()+"|"+uci_wrapper.get_secret_key()
@@ -383,11 +416,13 @@
instance_type=uci_wrapper.get_type( i_index ),
placement=uci_wrapper.get_uci_availability_zone() )
except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error when starting UCI '%s': '%s'" % ( uci_wrapper.get_name(), str(e) ) )
- uci_wrapper.set_error( "EC2 response error when starting: " + str(e), True )
+ err = "EC2 response error when starting UCI '"+ uci_wrapper.get_name() +"': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
except Exception, ex:
- log.error( "Error when starting UCI '%s': '%s'" % ( uci_wrapper.get_name(), str( ex ) ) )
- uci_wrapper.set_error( "Cloud provider error when starting: " + str( ex ), True )
+ err = "Error when starting UCI '" + uci_wrapper.get_name() + "': " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
# Record newly available instance data into local Galaxy database
if reservation:
uci_wrapper.set_launch_time( self.format_time( reservation.instances[0].launch_time ), i_index=i_index )
@@ -403,13 +438,16 @@
uci_wrapper.set_security_group_name( self.security_group, i_id=i_id )
log.debug( "Instance of UCI '%s' started, current state: '%s'" % ( uci_wrapper.get_name(), uci_wrapper.get_state() ) )
except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error when retrieving instance information for UCI '%s': '%s'" % ( uci_wrapper.get_name(), str(e) ) )
- uci_wrapper.set_error( "EC2 response error when retrieving instance information: " + str(e), True )
+ err = "EC2 response error when retrieving instance information for UCI '" + uci_wrapper.get_name() + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
else:
log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
else:
- log.error( "No instances were found for UCI '%s'" % uci_wrapper.get_name() )
- uci_wrapper.set_error( "EC2 response error when retrieving instance information: " + str(e), True )
+ err = "No instances in state '"+ instance_states.SUBMITTED +"' found for UCI '" + uci_wrapper.get_name() + \
+ "'. Nothing to start."
+ log.error( err )
+ uci_wrapper.set_error( err, True )
else:
log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
@@ -426,14 +464,20 @@
# Initiate shutdown of all instances under given UCI
cnt = 0
stopped = []
- notStopped = []
+ not_stopped = []
for r in rl:
for inst in r.instances:
log.debug( "Sending stop signal to instance '%s' associated with reservation '%s'." % ( inst, r ) )
- inst.stop()
- uci_wrapper.set_stop_time( datetime.utcnow(), i_id=inst.id )
- uci_wrapper.change_state( instance_id=inst.id, i_state=inst.update() )
- stopped.append( inst )
+ try:
+ inst.stop()
+ uci_wrapper.set_stop_time( datetime.utcnow(), i_id=inst.id )
+ uci_wrapper.change_state( instance_id=inst.id, i_state=inst.update() )
+ stopped.append( inst )
+ except boto.exception.EC2ResponseError, e:
+ not_stopped.append( inst )
+ err = "EC2 response error when stopping instance '" + inst.instance_id + "': " + str(e)
+ log.error( err )
+ uci_wrapper.set_error( err, True )
uci_wrapper.reset_uci_launch_time()
log.debug( "Termination was initiated for all instances of UCI '%s'." % uci_wrapper.get_name() )
@@ -556,19 +600,21 @@
try:
rl= conn.get_all_instances( [inst.instance_id] )
except boto.exception.EC2ResponseError, e:
- log.error( "Retrieving instance(s) from cloud for UCI '%s' failed: " % ( uci.name, str(e) ) )
- uci.error( "Retrieving instance(s) from cloud failed: " + str(e) )
- uci.state( uci_states.ERROR )
+ err = "Retrieving instance(s) from cloud failed for UCI '"+ uci.name +"' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
return None
- # Because EPC deletes references to reservations after a short while after instances have terminated, getting an empty list as a response to a query
- # typically means the instance has successfully shut down but the check was not performed in short enough amount of time. Until alternative solution
+ # Because references to reservations are deleted shortly after instances have been terminated, getting an empty list as a response to a query
+ # typically means the instance has successfully shut down but the check was not performed in short enough amount of time. Until an alternative solution
# is found, below code sets state of given UCI to 'error' to indicate to the user something out of ordinary happened.
if len( rl ) == 0:
- log.info( "Instance ID '%s' was not found by the cloud provider. Instance might have crashed or otherwise been terminated." % inst.instance_id )
- inst.error = "Instance ID was not found by the cloud provider. Instance might have crashed or otherwise been terminated. State set to 'terminated'."
- uci.error = "Instance ID '"+inst.instance_id+"' was not found by the cloud provider. Instance might have crashed or otherwise been terminated."+ \
+                err = "Instance ID '"+inst.instance_id+"' was not found by the cloud provider. Instance might have crashed or otherwise been terminated. "+ \
"Manual check is recommended."
+ log.error( err )
+ inst.error = err
+ uci.error = err
inst.state = instance_states.TERMINATED
uci.state = uci_states.ERROR
uci.launch_time = None
@@ -599,9 +645,10 @@
inst.private_dns = cInst.private_dns_name
inst.flush()
except boto.exception.EC2ResponseError, e:
- log.error( "Updating status of instance(s) from cloud for UCI '%s' failed: " % ( uci.name, str(e) ) )
- uci.error( "Updating instance status from cloud failed: " + str(e) )
- uci.state( uci_states.ERROR )
+ err = "Updating instance status from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
return None
def updateStore( self, store ):
@@ -614,11 +661,11 @@
# Get reservations handle for given store
try:
vl = conn.get_all_volumes( [store.volume_id] )
-# log.debug( "Store '%s' vl: '%s'" % ( store.volume_id, vl ) )
except boto.exception.EC2ResponseError, e:
- log.error( "Retrieving volume(s) from cloud for UCI '%s' failed: " % ( uci.name, str(e) ) )
- uci.error( "Retrieving volume(s) from cloud failed: " + str(e) )
- uci.state( uci_states.ERROR )
+ err = "Retrieving volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
uci.flush()
return None
@@ -631,7 +678,11 @@
if ( store.status == None ) and ( store.volume_id != None ):
uci.state = vl[0].status
uci.flush()
-
+ # If UCI was marked in state 'CREATING', update its status to reflect new status
+ elif ( uci.state == uci_states.CREATING ):
+ uci.state = vl[0].status
+ uci.flush()
+
store.status = vl[0].status
store.flush()
if store.i_id != vl[0].instance_id:
@@ -644,10 +695,21 @@
store.device = vl[0].device
store.flush()
except boto.exception.EC2ResponseError, e:
- log.error( "Updating status of volume(s) from cloud for UCI '%s' failed: " % ( uci.name, str(e) ) )
- uci.error( "Updating volume status from cloud failed: " + str(e) )
- uci.state( uci_states.ERROR )
+ err = "Updating status of volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
return None
+ else:
+ err = "No storage volumes returned by cloud provider on general update"
+ log.error( "%s for UCI '%s'" % ( err, uci.name ) )
+ store.status = store_status.ERROR
+ store.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ store.flush()
def updateSnapshot( self, snapshot ):
# Get credentials associated wit this store
@@ -664,7 +726,7 @@
snapshot.status = snap[0].status
snapshot.flush()
else:
- err = "No snapshots returned by EC2"
+ err = "No snapshots returned by EC2 on general update"
log.error( "%s for UCI '%s'" % ( err, uci.name ) )
snapshot.status = snapshot_status.ERROR
snapshot.error = err
@@ -775,30 +837,25 @@
except: # something failed, so skip
pass
else:
- inst.error = "Starting a machine instance associated with UCI '" + str(inst.uci.name) + "' seems to have failed. " \
- "Because it appears that cloud instance might have gotten started, manual check is recommended."
+ err = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI '" + str(inst.uci.name) + \
+ "' seems to have failed. Because it appears that cloud instance might have gotten started, manual check is recommended."
+ inst.error = err
inst.state = instance_states.ERROR
- inst.uci.error = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI seems to have failed. " \
- "Because it appears that cloud instance might have gotten started, manual check is recommended."
+ inst.uci.error = err
inst.uci.state = uci_states.ERROR
- log.error( "Starting a machine instance (DB id: '%s') associated with UCI '%s' seems to have failed. " \
- "Because it appears that cloud instance might have gotten started, manual check is recommended."
- % ( inst.id, inst.uci.name ) )
+ log.error( err )
inst.flush()
- inst.uci.flush()
+ inst.uci.flush()
else: #Instance most likely never got processed, so set error message suggesting user to try starting instance again.
- inst.error = "Starting a machine instance associated with UCI '" + str(inst.uci.name) + "' seems to have failed. " \
- "Because it appears that cloud instance never got started, it should be safe to reset state and try " \
- "starting the instance again."
+ err = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI '" + str(inst.uci.name) + \
+ "' seems to have failed. Because it appears that cloud instance never got started, it should be safe to reset state and try " \
+ "starting the instance again."
+ inst.error = err
inst.state = instance_states.ERROR
- inst.uci.error = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI seems to have failed. " \
- "Because it appears that cloud instance never got started, it should be safe to reset state and try " \
- "starting the instance again."
+ inst.uci.error = err
inst.uci.state = uci_states.ERROR
- log.error( "Starting a machine instance (DB id: '%s') associated with UCI '%s' seems to have failed. " \
- "Because it appears that cloud instance never got started, it should be safe to reset state and try " \
- "starting the instance again." % ( inst.id, inst.uci.name ) )
+ log.error( err )
inst.flush()
inst.uci.flush()
# uw = UCIwrapper( inst.uci )
@@ -821,9 +878,10 @@
region=region,
path=uci.credentials.provider.path )
except boto.exception.EC2ResponseError, e:
- log.error( "Establishing connection with cloud failed: %s" % str(e) )
- uci.error( "Establishing connection with cloud failed: " + str(e) )
- uci.state( uci_states.ERROR )
+ err = "Establishing connection with cloud failed: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
uci.flush()
return None
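
Both providers open their connection the same way: build a RegionInfo for the configured endpoint and hand it to EC2Connection. A minimal sketch using the same boto 1.8d-era calls as the diff (credential and endpoint values are whatever the caller supplies; error handling is reduced to returning None, as the providers above do):

import boto.exception
from boto.ec2.connection import EC2Connection
from boto.ec2.regioninfo import RegionInfo

def get_connection( access_key, secret_key, region_name, region_endpoint, is_secure=True, path='/' ):
    try:
        region = RegionInfo( None, region_name, region_endpoint )
        return EC2Connection( aws_access_key_id=access_key,
                              aws_secret_access_key=secret_key,
                              is_secure=is_secure,
                              region=region,
                              path=path )
    except boto.exception.EC2ResponseError, e:
        # The providers log this and record it on the UCI via set_error() instead of raising.
        return None
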
diff -r b90db227df6d -r e21f605d2766 lib/galaxy/cloud/providers/eucalyptus.py
--- a/lib/galaxy/cloud/providers/eucalyptus.py Thu Nov 12 10:28:54 2009 -0500
+++ b/lib/galaxy/cloud/providers/eucalyptus.py Thu Nov 12 12:42:17 2009 -0500
@@ -23,6 +23,7 @@
uci_states = Bunch(
NEW_UCI = "newUCI",
NEW = "new",
+ CREATING = "creating",
DELETING_UCI = "deletingUCI",
DELETING = "deleting",
SUBMITTED_UCI = "submittedUCI",
@@ -47,9 +48,19 @@
ERROR = "error"
)
-store_states = Bunch(
+store_status = Bunch(
IN_USE = "in-use",
CREATING = "creating",
+ DELETED = 'deleted',
+ ERROR = "error"
+)
+
+snapshot_status = Bunch(
+ SUBMITTED = 'submitted',
+ PENDING = 'pending',
+ COMPLETED = 'completed',
+ DELETE = 'delete',
+ DELETED= 'deleted',
ERROR = "error"
)
@@ -105,10 +116,10 @@
try:
euca_region = RegionInfo( None, provider.region_name, provider.region_endpoint )
except Exception, e:
- log.error( "Selecting region with cloud provider failed: %2" % str(e) )
- uci_wrapper.set_error( "Selecting region with cloud provider failed: " + str(e), True )
- return None
-
+ err = "Selecting region with cloud provider failed: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ return None
try:
conn = EC2Connection( aws_access_key_id=uci_wrapper.get_access_key(),
aws_secret_access_key=uci_wrapper.get_secret_key(),
@@ -117,8 +128,9 @@
region=euca_region,
path=provider.path )
except boto.exception.EC2ResponseError, e:
- log.error( "Establishing connection with cloud failed: %s" % str(e) )
- uci_wrapper.set_error( "Establishing connection with cloud failed: " + str(e), True )
+ err = "Establishing connection with cloud failed: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
return None
return conn
@@ -142,27 +154,30 @@
kp = self.create_key_pair( conn, kp_name )
uci_wrapper.set_key_pair( kp.name, kp.material )
except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error when deleting key pair: '%s'" % e )
- uci_wrapper.set_error( "Cloud provider response error while deleting key pair: " + str( e ), True )
+ err = "EC2 response error while deleting key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
else:
try:
kp = self.create_key_pair( conn, kp_name )
uci_wrapper.set_key_pair( kp.name, kp.material )
except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error when creating key pair: '%s'" % e )
- uci_wrapper.set_error( "Cloud provider response error while creating key pair: " + str( e ), True )
+ err = "EC2 response error while creating key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
except Exception, ex:
- log.error( "Exception when creating key pair: '%s'" % e )
- uci_wrapper.set_error( "Error while creating key pair: " + str( e ), True )
-
+ err = "Exception while creating key pair: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
except boto.exception.EC2ResponseError, e: # No keypair under this name exists so create it
if e.code == 'InvalidKeyPair.NotFound':
log.info( "No keypair found, creating keypair '%s'" % kp_name )
kp = self.create_key_pair( conn, kp_name )
uci_wrapper.set_key_pair( kp.name, kp.material )
else:
- log.error( "EC2 response error while retrieving key pair: '%s'" % e )
- uci_wrapper.set_error( "Cloud provider response error while retrieving key pair: " + str( e ), True )
+ err = "EC2 response error while retrieving key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
if kp != None:
return kp.name
@@ -189,8 +204,9 @@
if mi:
return mi.image_id
else:
- log.error( "Machine image could not be retrieved for UCI '%s'." % uci_wrapper.get_name() )
- uci_wrapper.set_error( "Machine image could not be retrieved. Contact site administrator to ensure needed machine image is registered.", True )
+ err = "Machine image could not be retrieved"
+ log.error( "%s for UCI '%s'." % (err, uci_wrapper.get_name() ) )
+ uci_wrapper.set_error( err+". Contact site administrator to ensure needed machine image is registered.", True )
return None
def shutdown( self ):
@@ -220,12 +236,37 @@
log.info( "Availability zone for UCI (i.e., storage volume) was not selected, using default zone: %s" % self.zone )
uci_wrapper.set_store_availability_zone( self.zone )
+ log.debug( "Creating volume; using command: conn.create_volume( %s, '%s', snapshot=None )" % ( uci_wrapper.get_store_size( 0 ), uci_wrapper.get_uci_availability_zone() ))
vol = conn.create_volume( uci_wrapper.get_store_size( 0 ), uci_wrapper.get_uci_availability_zone(), snapshot=None )
uci_wrapper.set_store_volume_id( 0, vol.id )
- # EPC does not allow creation of storage volumes (it deletes one as soon as it is created, so manually set uci_state here)
- uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
- uci_wrapper.set_store_status( vol.id, vol.status )
+ # Retrieve created volume again to get updated status
+ try:
+ vl = conn.get_all_volumes( [vol.id] )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while retrieving (i.e., updating status) of just created storage volume '" + vol.id + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_store_status( vol.id, uci_states.ERROR )
+ uci_wrapper.set_error( err, True )
+ return
+ except Exception, ex:
+ err = "Error while retrieving (i.e., updating status) of just created storage volume '" + vol.id + "': " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ return
+
+ if len( vl ) > 0:
+ # EPC does not allow creation of storage volumes (it deletes one as soon as it is created, so manually set uci_state here)
+ if vl[0].status == store_status.DELETED:
+ uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
+ else:
+ uci_wrapper.change_state( uci_state=vl[0].status )
+ uci_wrapper.set_store_status( vol.id, vl[0].status )
+ else:
+ err = "Volume '" + vol.id +"' not found by EC2 after being created."
+ log.error( err )
+ uci_wrapper.set_store_status( vol.id, uci_states.ERROR )
+ uci_wrapper.set_error( err, True )
def deleteUCI( self, uci_wrapper ):
"""
@@ -242,24 +283,29 @@
failedList = []
for v in vl:
log.debug( "Deleting volume with id='%s'" % v.volume_id )
- if conn.delete_volume( v.volume_id ):
- deletedList.append( v.volume_id )
- v.deleted = True
- v.flush()
- count += 1
- else:
- failedList.append( v.volume_id )
-
+ try:
+ if conn.delete_volume( v.volume_id ):
+ deletedList.append( v.volume_id )
+ v.deleted = True
+ v.flush()
+ count += 1
+ else:
+ failedList.append( v.volume_id )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while deleting storage volume '" + v.volume_id + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_store_error( err, store_id = v.volume_id )
+ uci_wrapper.set_error( err, True )
+
# Delete UCI if all of associated
if count == len( vl ):
- uci_wrapper.delete()
+ uci_wrapper.set_deleted()
else:
- log.error( "Deleting following volume(s) failed: %s. However, these volumes were successfully deleted: %s. \
- Manual intervention and processing needed." % ( str( failedList ), str( deletedList ) ) )
- uci_wrapper.change_state( uci_state=uci_states.ERROR )
- uci_wrapper.set_error( "Deleting following volume(s) failed: "+str(failedList)+". However, these volumes were \
- successfully deleted: "+str(deletedList)+". Manual intervention and processing needed." )
-
+            err = "Deleting the following volume(s) failed: "+str( failedList )+". However, these volumes were successfully deleted: "+str( deletedList )+". \
+                MANUAL intervention and processing needed."
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+
def snapshotUCI( self, uci_wrapper ):
"""
Creates snapshot of all storage volumes associated with this UCI.
@@ -276,17 +322,17 @@
uci_wrapper.set_snapshot_id( snapshot.id, snap_id )
sh = conn.get_all_snapshots( snap_id ) # get updated status
uci_wrapper.set_snapshot_status( status=sh[0].status, snap_id=snap_id )
- except boto.exception.EC2ResponseError, ex:
+ except boto.exception.EC2ResponseError, e:
err = "Cloud provider response error while creating snapshot: " + str( e )
log.error( err )
uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
- uci_wrapper.set_error( error=err, True )
+ uci_wrapper.set_error( err, True )
return
except Exception, ex:
err = "Error while creating snapshot: " + str( ex )
log.error( err )
uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
- uci_wrapper.set_error( error=err, True )
+ uci_wrapper.set_error( err, True )
return
uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
@@ -323,18 +369,22 @@
conn = self.get_connection( uci_wrapper )
self.check_key_pair( uci_wrapper, conn )
if uci_wrapper.get_key_pair_name() == None:
- log.error( "Key pair for UCI '%s' is NULL." % uci_wrapper.get_name() )
- uci_wrapper.set_error( "Key pair not found. Try resetting the state and starting the instance again.", True )
+ err = "Key pair not found"
+ log.error( "%s for UCI '%s'." % ( err, uci_wrapper.get_name() ) )
+ uci_wrapper.set_error( err + ". Try resetting the state and starting the instance again.", True )
return
i_indexes = uci_wrapper.get_instances_indexes( state=instance_states.SUBMITTED ) # Get indexes of i_indexes associated with this UCI that are in 'submitted' state
+ log.debug( "Starting instances with IDs: '%s' associated with UCI '%s' " % ( i_indexes, uci_wrapper.get_name(), ) )
if len( i_indexes ) > 0:
for i_index in i_indexes:
+ # Get machine image for current instance
mi_id = self.get_mi_id( uci_wrapper, i_index )
log.debug( "mi_id: %s, uci_wrapper.get_key_pair_name(): %s" % ( mi_id, uci_wrapper.get_key_pair_name() ) )
uci_wrapper.set_mi( i_index, mi_id )
- if uci_wrapper.get_state() != uci_states.ERROR and uci_wrapper.get_key_pair_name() != None:
+ if uci_wrapper.get_state() != uci_states.ERROR:
+ # Start an instance
log.debug( "Starting UCI instance '%s'" % uci_wrapper.get_name() )
log.debug( "Using following command: conn.run_instances( image_id='%s', key_name='%s', instance_type='%s' )"
% ( mi_id, uci_wrapper.get_key_pair_name(), uci_wrapper.get_type( i_index ) ) )
@@ -344,11 +394,13 @@
key_name=uci_wrapper.get_key_pair_name(),
instance_type=uci_wrapper.get_type( i_index ) )
except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error when starting UCI '%s': '%s'" % ( uci_wrapper.get_name(), str( e ) ) )
- uci_wrapper.set_error( "Cloud provider response error when starting: " + str( e ), True )
+ err = "EC2 response error when starting UCI '"+ uci_wrapper.get_name() +"': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
except Exception, ex:
- log.error( "Error when starting UCI '%s': '%s'" % ( uci_wrapper.get_name(), str( ex ) ) )
- uci_wrapper.set_error( "Cloud provider error when starting: " + str( ex ), True )
+ err = "Error when starting UCI '" + uci_wrapper.get_name() + "': " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
# Record newly available instance data into local Galaxy database
if reservation:
uci_wrapper.set_launch_time( self.format_time( reservation.instances[0].launch_time ), i_index=i_index )
@@ -363,13 +415,16 @@
uci_wrapper.change_state( s, i_id, s )
log.debug( "Instance of UCI '%s' started, current state: '%s'" % ( uci_wrapper.get_name(), uci_wrapper.get_state() ) )
except boto.exception.EC2ResponseError, e:
- log.error( "EC2 response error when retrieving instance information for UCI '%s': '%s'" % ( uci_wrapper.get_name(), str(e) ) )
- uci_wrapper.set_error( "Cloud provider response error when retrieving instance information: " + str(e), True )
+ err = "EC2 response error when retrieving instance information for UCI '" + uci_wrapper.get_name() + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
else:
log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
else:
- log.error( "No instances were found for UCI '%s'" % uci_wrapper.get_name() )
- uci_wrapper.set_error( "EC2 response error when retrieving instance information: " + str(e), True )
+ err = "No instances in state '"+ instance_states.SUBMITTED +"' found for UCI '" + uci_wrapper.get_name() + \
+ "'. Nothing to start."
+ log.error( err )
+ uci_wrapper.set_error( err, True )
else:
log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
@@ -387,14 +442,20 @@
# Initiate shutdown of all instances under given UCI
cnt = 0
stopped = []
- notStopped = []
+ not_stopped = []
for r in rl:
for inst in r.instances:
log.debug( "Sending stop signal to instance '%s' associated with reservation '%s' (UCI: %s)." % ( inst, r, uci_wrapper.get_name() ) )
- inst.stop()
- uci_wrapper.set_stop_time( datetime.utcnow(), i_id=inst.id )
- uci_wrapper.change_state( instance_id=inst.id, i_state=inst.update() )
- stopped.append( inst )
+ try:
+ inst.stop()
+ uci_wrapper.set_stop_time( datetime.utcnow(), i_id=inst.id )
+ uci_wrapper.change_state( instance_id=inst.id, i_state=inst.update() )
+ stopped.append( inst )
+ except boto.exception.EC2ResponseError, e:
+ not_stopped.append( inst )
+ err = "EC2 response error when stopping instance '" + inst.instance_id + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
uci_wrapper.reset_uci_launch_time()
log.debug( "Termination was initiated for all instances of UCI '%s'." % uci_wrapper.get_name() )
@@ -460,8 +521,8 @@
self.updateInstance( inst )
# Update storage volume(s)
- stores = model.CloudStore.filter( or_( model.CloudStore.c.status==store_states.IN_USE,
- model.CloudStore.c.status==store_states.CREATING,
+ stores = model.CloudStore.filter( or_( model.CloudStore.c.status==store_status.IN_USE,
+ model.CloudStore.c.status==store_status.CREATING,
model.CloudStore.c.status==None ) ).all()
for store in stores:
if self.type == store.uci.credentials.provider.type: # and store.volume_id != None:
@@ -473,7 +534,7 @@
# store.uci.error = "There exists an entry in local database for a storage volume without an ID. Storage volume might have been created " \
# "with cloud provider though. Manual check is recommended. After understanding what happened, local database entry for given " \
# "storage volume should be updated."
-# store.status = store_states.ERROR
+# store.status = store_status.ERROR
# store.uci.state = uci_states.ERROR
# store.uci.flush()
# store.flush()
@@ -516,19 +577,21 @@
try:
rl= conn.get_all_instances( [inst.instance_id] )
except boto.exception.EC2ResponseError, e:
- log.error( "Retrieving instance(s) from cloud for UCI '%s' failed: " % ( uci.name, str(e) ) )
- uci.error( "Retrieving instance(s) from cloud failed: " + str(e) )
- uci.state( uci_states.ERROR )
+ err = "Retrieving instance(s) from cloud failed for UCI '"+ uci.name +"' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
return None
# Because EPC deletes references to reservations after a short while after instances have terminated, getting an empty list as a response to a query
# typically means the instance has successfully shut down but the check was not performed in short enough amount of time. Until alternative solution
# is found, below code sets state of given UCI to 'error' to indicate to the user something out of ordinary happened.
if len( rl ) == 0:
- log.info( "Instance ID '%s' was not found by the cloud provider. Instance might have crashed or otherwise been terminated." % inst.instance_id )
- inst.error = "Instance ID was not found by the cloud provider. Instance might have crashed or otherwise been terminated. State set to 'terminated'."
- uci.error = "Instance ID '"+inst.instance_id+"' was not found by the cloud provider. Instance might have crashed or otherwise been terminated."+ \
+                err = "Instance ID '"+inst.instance_id+"' was not found by the cloud provider. Instance might have crashed or otherwise been terminated. "+ \
"Manual check is recommended."
+ log.error( err )
+ inst.error = err
+ uci.error = err
inst.state = instance_states.TERMINATED
uci.state = uci_states.ERROR
uci.launch_time = None
@@ -559,9 +622,10 @@
inst.private_dns = cInst.private_dns_name
inst.flush()
except boto.exception.EC2ResponseError, e:
- log.error( "Updating status of instance(s) from cloud for UCI '%s' failed: " % ( uci.name, str(e) ) )
- uci.error( "Updating volume status from cloud failed: " + str(e) )
- uci.state( uci_states.ERROR )
+ err = "Updating instance status from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
return None
def updateStore( self, store ):
@@ -574,9 +638,10 @@
try:
vl = conn.get_all_volumes( [store.volume_id] )
except boto.exception.EC2ResponseError, e:
- log.error( "Retrieving volume(s) from cloud for UCI '%s' failed: " % ( uci.name, str(e) ) )
- uci.error( "Retrieving volume(s) from cloud failed: " + str(e) )
- uci.state( uci_states.ERROR )
+ err = "Retrieving volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
uci.flush()
return None
@@ -589,6 +654,16 @@
if ( store.status == None ) and ( store.volume_id != None ):
uci.state = vl[0].status
uci.flush()
+ # If UCI was marked in state 'CREATING', update its status to reflect new status
+ elif ( uci.state == uci_states.CREATING ):
+                    # Because the Eucalyptus Public Cloud (EPC) deletes volumes immediately after they are created, artificially
+                    # set the status of the given UCI to 'available' based on the storage volume's availability zone (i.e., it is
+                    # residing in EPC as opposed to some other Eucalyptus-based cloud that allows creation of storage volumes).
+ if store.availability_zone == 'epc':
+ uci.state = uci_states.AVAILABLE
+ else:
+ uci.state = vl[0].status
+ uci.flush()
store.status = vl[0].status
store.flush()
@@ -602,11 +677,21 @@
store.device = vl[0].device
store.flush()
except boto.exception.EC2ResponseError, e:
- log.error( "Updating status of volume(s) from cloud for UCI '%s' failed: " % ( uci.name, str(e) ) )
- uci.error( "Updating volume status from cloud failed: " + str(e) )
+ err = "Updating status of volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
uci.state = uci_states.ERROR
uci.flush()
return None
+ else:
+ err = "No storage volumes returned by cloud provider on general update"
+ log.error( "%s for UCI '%s'" % ( err, uci.name ) )
+ store.status = store_status.ERROR
+ store.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ store.flush()
def updateSnapshot( self, snapshot ):
# Get credentials associated wit this store
@@ -623,7 +708,7 @@
snapshot.status = snap[0].status
snapshot.flush()
else:
- err = "No snapshots returned by cloud provider"
+ err = "No snapshots returned by cloud provider on general update"
log.error( "%s for UCI '%s'" % ( err, uci.name ) )
snapshot.status = snapshot_status.ERROR
snapshot.error = err
@@ -734,30 +819,25 @@
except: # something failed, so skip
pass
else:
- inst.error = "Starting a machine instance associated with UCI '" + str(inst.uci.name) + "' seems to have failed. " \
- "Because it appears that cloud instance might have gotten started, manual check is recommended."
+ err = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI '" + str(inst.uci.name) + \
+ "' seems to have failed. Because it appears that cloud instance might have gotten started, manual check is recommended."
+ inst.error = err
inst.state = instance_states.ERROR
- inst.uci.error = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI seems to have failed. " \
- "Because it appears that cloud instance might have gotten started, manual check is recommended."
+ inst.uci.error = err
inst.uci.state = uci_states.ERROR
- log.error( "Starting a machine instance (DB id: '%s') associated with UCI '%s' seems to have failed. " \
- "Because it appears that cloud instance might have gotten started, manual check is recommended."
- % ( inst.id, inst.uci.name ) )
+ log.error( err )
inst.flush()
- inst.uci.flush()
+ inst.uci.flush()
else: #Instance most likely never got processed, so set error message suggesting user to try starting instance again.
- inst.error = "Starting a machine instance associated with UCI '" + str(inst.uci.name) + "' seems to have failed. " \
- "Because it appears that cloud instance never got started, it should be safe to reset state and try " \
- "starting the instance again."
+ err = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI '" + str(inst.uci.name) + \
+ "' seems to have failed. Because it appears that cloud instance never got started, it should be safe to reset state and try " \
+ "starting the instance again."
+ inst.error = err
inst.state = instance_states.ERROR
- inst.uci.error = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI seems to have failed. " \
- "Because it appears that cloud instance never got started, it should be safe to reset state and try " \
- "starting the instance again."
+ inst.uci.error = err
inst.uci.state = uci_states.ERROR
- log.error( "Starting a machine instance (DB id: '%s') associated with UCI '%s' seems to have failed. " \
- "Because it appears that cloud instance never got started, it should be safe to reset state and try " \
- "starting the instance again." % ( inst.id, inst.uci.name ) )
+ log.error( err )
inst.flush()
inst.uci.flush()
# uw = UCIwrapper( inst.uci )
@@ -781,9 +861,10 @@
region=euca_region,
path=uci.credentials.provider.path )
except boto.exception.EC2ResponseError, e:
- log.error( "Establishing connection with cloud failed: %s" % str(e) )
- uci.error( "Establishing connection with cloud failed: " + str(e) )
- uci.state( uci_states.ERROR )
+ err = "Establishing connection with cloud failed: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
uci.flush()
return None
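
createUCI() in both providers now follows the same shape: create the volume, then immediately re-query it so the locally stored status reflects what the provider reports (on the Eucalyptus Public Cloud the volume may already show up as 'deleted'). A compact sketch of that flow, again assuming a boto-style connection 'conn':

import boto.exception

def create_and_refresh_volume( conn, size_gb, zone ):
    vol = conn.create_volume( size_gb, zone, snapshot=None )
    try:
        vl = conn.get_all_volumes( [vol.id] ) # re-query to pick up the provider's view of the new volume
    except boto.exception.EC2ResponseError, e:
        return vol.id, "error: " + str( e )
    if len( vl ) > 0:
        return vol.id, vl[0].status # e.g. 'creating', 'available' or, on EPC, 'deleted'
    return vol.id, "error: volume not found after creation"
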
diff -r b90db227df6d -r e21f605d2766 lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py Thu Nov 12 10:28:54 2009 -0500
+++ b/lib/galaxy/model/mapping.py Thu Nov 12 12:42:17 2009 -0500
@@ -379,13 +379,7 @@
Column( "session_id", Integer, ForeignKey( "galaxy_session.id" ), index=True ),
Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ) )
-
-
-
-
-# ***************************************************************************
-# *************************** Cloud tables***********************************
-# ***************************************************************************
+# *************************** Start cloud tables***********************************
CloudImage.table = Table( "cloud_image", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
@@ -446,6 +440,7 @@
Column( "status", TEXT ),
Column( "device", TEXT ),
Column( "space_consumed", Integer ),
+ Column( "error", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudSnapshot.table = Table( "cloud_snapshot", metadata,
@@ -493,7 +488,7 @@
Column( "https_connection_factory", TEXT ),
Column( "path", TEXT ),
Column( "deleted", Boolean, default=False ) )
-# ***************************************************************************
+# *************************** End cloud tables***********************************
StoredWorkflow.table = Table( "stored_workflow", metadata,
Column( "id", Integer, primary_key=True ),
@@ -999,9 +994,8 @@
primaryjoin=( WorkflowStepConnection.table.c.input_step_id == WorkflowStep.table.c.id ) ),
output_step=relation( WorkflowStep, backref="output_connections", cascade="all",
primaryjoin=( WorkflowStepConnection.table.c.output_step_id == WorkflowStep.table.c.id ) ) ) )
-# ************************************************************
+
# vvvvvvvvvvvvvvvv Start cloud table mappings vvvvvvvvvvvvvvvv
-# ************************************************************
assign_mapper( context, CloudImage, CloudImage.table )
assign_mapper( context, UCI, UCI.table,
diff -r b90db227df6d -r e21f605d2766 lib/galaxy/model/migrate/versions/0014_cloud_tables.py
--- a/lib/galaxy/model/migrate/versions/0014_cloud_tables.py Thu Nov 12 10:28:54 2009 -0500
+++ b/lib/galaxy/model/migrate/versions/0014_cloud_tables.py Thu Nov 12 12:42:17 2009 -0500
@@ -78,6 +78,7 @@
Column( "status", TEXT ),
Column( "device", TEXT ),
Column( "space_consumed", Integer ),
+ Column( "error", TEXT ),
Column( "deleted", Boolean, default=False ) )
CloudSnapshot_table = Table( "cloud_snapshot", metadata,
@@ -131,21 +132,21 @@
# Load existing tables
metadata.reflect()
-# CloudImage_table.create()
-# UCI_table.create()
-# CloudUserCredentials_table.create()
-# CloudStore_table.create()
+ CloudImage_table.create()
+ UCI_table.create()
+ CloudUserCredentials_table.create()
+ CloudStore_table.create()
CloudSnapshot_table.create()
-# CloudInstance_table.create()
-# CloudProvider_table.create()
+ CloudInstance_table.create()
+ CloudProvider_table.create()
def downgrade():
metadata.reflect()
-# CloudImage_table.drop()
-# CloudInstance_table.drop()
-# CloudStore_table.drop()
+ CloudImage_table.drop()
+ CloudInstance_table.drop()
+ CloudStore_table.drop()
CloudSnapshot_table.drop()
-# CloudUserCredentials_table.drop()
-# UCI_table.drop()
-# CloudProvider_table.drop()
\ No newline at end of file
+ CloudUserCredentials_table.drop()
+ UCI_table.drop()
+ CloudProvider_table.drop()
\ No newline at end of file
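
The same 'error' column is added to the cloud_store table definition in both mapping.py and this migration. A stripped-down script in the style used above, shown only to illustrate the create/drop structure (the table name here is made up, and migrate_engine is assumed to be supplied by sqlalchemy-migrate, as in Galaxy's other migration scripts):

from sqlalchemy import Boolean, Column, Integer, MetaData, Table, TEXT

metadata = MetaData( migrate_engine ) # migrate_engine is injected by sqlalchemy-migrate (assumption)

Example_table = Table( "cloud_store_example", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "status", TEXT ),
    Column( "error", TEXT ), # the new column this changeset introduces
    Column( "deleted", Boolean, default=False ) )

def upgrade():
    metadata.reflect()
    Example_table.create()

def downgrade():
    metadata.reflect()
    Example_table.drop()
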
diff -r b90db227df6d -r e21f605d2766 lib/galaxy/web/controllers/cloud.py
--- a/lib/galaxy/web/controllers/cloud.py Thu Nov 12 10:28:54 2009 -0500
+++ b/lib/galaxy/web/controllers/cloud.py Thu Nov 12 12:42:17 2009 -0500
@@ -34,6 +34,7 @@
uci_states = Bunch(
NEW_UCI = "newUCI",
NEW = "new",
+ CREATING = "creating",
DELETING_UCI = "deletingUCI",
DELETING = "deleting",
SUBMITTED_UCI = "submittedUCI",
@@ -61,6 +62,7 @@
store_states = Bunch(
IN_USE = "in-use",
CREATING = "creating",
+ DELETED = 'deleted',
ERROR = "error"
)
@@ -111,11 +113,13 @@
.all()
prevInstances = trans.sa_session.query( model.UCI ) \
- .filter_by( user=user ) \
+ .filter_by( user=user, deleted=False ) \
.filter( or_( model.UCI.c.state==uci_states.AVAILABLE,
model.UCI.c.state==uci_states.NEW,
model.UCI.c.state==uci_states.NEW_UCI,
+ model.UCI.c.state==uci_states.CREATING,
model.UCI.c.state==uci_states.ERROR,
+ model.UCI.c.state==uci_states.DELETED,
model.UCI.c.state==uci_states.DELETING,
model.UCI.c.state==uci_states.DELETING_UCI,
model.UCI.c.state==uci_states.SNAPSHOT,
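
The controller query shown just above filters by ownership, excludes soft-deleted UCIs, and matches any of the "live" states. The same pattern reduced to a helper (SQLAlchemy 0.4-era query style, exactly as in the diff; the session, model, and state Bunch are the caller's):

from sqlalchemy import or_

def list_visible_ucis( sa_session, model, user, uci_states ):
    return sa_session.query( model.UCI ) \
        .filter_by( user=user, deleted=False ) \
        .filter( or_( model.UCI.c.state == uci_states.AVAILABLE,
                      model.UCI.c.state == uci_states.CREATING,
                      model.UCI.c.state == uci_states.ERROR ) ) \
        .all()
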
details: http://www.bx.psu.edu/hg/galaxy/rev/7d013eb98022
changeset: 3094:7d013eb98022
user: Kanwei Li <kanwei(a)gmail.com>
date: Thu Nov 12 16:36:07 2009 -0500
description:
Import cloud management module
diffstat:
eggs.ini | 2 +
lib/galaxy/app.py | 4 +-
lib/galaxy/cloud/__init__.py | 620 +++++++++++
lib/galaxy/cloud/providers/ec2.py | 940 +++++++++++++++++
lib/galaxy/cloud/providers/eucalyptus.py | 923 ++++++++++++++++
lib/galaxy/config.py | 7 +
lib/galaxy/model/__init__.py | 54 +
lib/galaxy/model/mapping.py | 147 ++
lib/galaxy/model/migrate/versions/0026_cloud_tables.py | 152 ++
lib/galaxy/web/controllers/cloud.py | 1193 +++++++++++++++++++++
static/images/silk/resultset_previous.png |
templates/admin/index.mako | 9 +
templates/base_panels.mako | 14 +-
templates/cloud/add_credentials.mako | 110 ++
templates/cloud/add_image.mako | 98 +
templates/cloud/add_provider.mako | 252 ++++
templates/cloud/configure_cloud.mako | 367 ++++++
templates/cloud/configure_uci.mako | 116 ++
templates/cloud/edit_credentials.mako | 91 +
templates/cloud/edit_image.mako | 92 +
templates/cloud/edit_provider.mako | 261 ++++
templates/cloud/index.mako | 16 +
templates/cloud/list_images.mako | 90 +
templates/cloud/view_credentials.mako | 157 ++
templates/cloud/view_instance.mako | 140 ++
templates/cloud/view_provider.mako | 126 ++
templates/cloud/view_snapshots.mako | 90 +
templates/cloud/view_usage.mako | 117 ++
templates/root/index.mako | 13 +-
universe_wsgi.ini.sample | 5 +
30 files changed, 6197 insertions(+), 9 deletions(-)
diffs (truncated from 6417 to 3000 lines):
diff -r 0984c3800775 -r 7d013eb98022 eggs.ini
--- a/eggs.ini Thu Nov 12 15:25:48 2009 -0500
+++ b/eggs.ini Thu Nov 12 16:36:07 2009 -0500
@@ -52,6 +52,7 @@
wsgiref = 0.1.2
Babel = 0.9.4
wchartype = 0.1
+boto = 1.8d
; extra version information
[tags]
@@ -102,3 +103,4 @@
wsgiref = http://pypi.python.org/packages/source/w/wsgiref/wsgiref-0.1.2.zip
Babel = http://ftp.edgewall.com/pub/babel/Babel-0.9.4.zip
wchartype = http://ginstrom.com/code/wchartype-0.1.zip
+boto = http://boto.googlecode.com/files/boto-1.8d.tar.gz
\ No newline at end of file
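
The new eggs.ini entry is what the provider modules resolve at runtime through Galaxy's egg loader before importing boto, as in this minimal usage sketch (mirroring the require call visible in the ec2.py header earlier):

import galaxy.eggs
galaxy.eggs.require( "boto" ) # resolves the egg pinned above (boto = 1.8d)
import boto
from boto.ec2.connection import EC2Connection
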
diff -r 0984c3800775 -r 7d013eb98022 lib/galaxy/app.py
--- a/lib/galaxy/app.py Thu Nov 12 15:25:48 2009 -0500
+++ b/lib/galaxy/app.py Thu Nov 12 16:36:07 2009 -0500
@@ -1,6 +1,6 @@
import sys, os, atexit
-from galaxy import config, jobs, util, tools, web
+from galaxy import config, jobs, util, tools, web, cloud
## from galaxy.tracks import store
from galaxy.web import security
import galaxy.model
@@ -68,6 +68,8 @@
# FIXME: These are exposed directly for backward compatibility
self.job_queue = self.job_manager.job_queue
self.job_stop_queue = self.job_manager.job_stop_queue
+ # Start the cloud manager
+ self.cloud_manager = cloud.CloudManager( self )
# Track Store
## self.track_store = store.TrackStoreManager( self.config.track_store_path )
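
The CloudManager started here runs a CloudMonitor background thread (added below) that polls every two seconds but can be woken immediately through a small Sleeper helper. A self-contained sketch of that interruptible-polling pattern, reduced from the classes that follow:

import threading, time

class Sleeper( object ):
    """Sleep for a number of seconds unless wake() is called from another thread."""
    def __init__( self ):
        self.condition = threading.Condition()
    def sleep( self, seconds ):
        self.condition.acquire()
        self.condition.wait( seconds )
        self.condition.release()
    def wake( self ):
        self.condition.acquire()
        self.condition.notify()
        self.condition.release()

def monitor( sleeper, running ):
    while running[0]:
        # ... poll the database for new cloud requests here ...
        sleeper.sleep( 2 ) # returns early if wake() is called

sleeper = Sleeper()
running = [ True ]
worker = threading.Thread( target=monitor, args=( sleeper, running ) )
worker.start()
time.sleep( 0.1 )
running[0] = False # request shutdown ...
sleeper.wake()     # ... and interrupt the 2-second sleep so it takes effect immediately
worker.join()
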
diff -r 0984c3800775 -r 7d013eb98022 lib/galaxy/cloud/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/cloud/__init__.py Thu Nov 12 16:36:07 2009 -0500
@@ -0,0 +1,620 @@
+import logging, threading, sys, os, time, subprocess, string, tempfile, re, traceback, shutil
+
+from galaxy import util, model, config
+from galaxy.model import mapping
+from galaxy.model.orm import lazyload
+from galaxy.datatypes.tabular import *
+from galaxy.datatypes.interval import *
+from galaxy.datatypes import metadata
+from galaxy.util.bunch import Bunch
+from sqlalchemy import or_
+
+import pkg_resources
+pkg_resources.require( "PasteDeploy" )
+
+from paste.deploy.converters import asbool
+
+from Queue import Queue, Empty
+
+log = logging.getLogger( __name__ )
+
+uci_states = Bunch(
+ NEW_UCI = "newUCI",
+ NEW = "new",
+ CREATING = "creating",
+ DELETING_UCI = "deletingUCI",
+ DELETING = "deleting",
+ DELETED = "deleted",
+ SUBMITTED_UCI = "submittedUCI",
+ SUBMITTED = "submitted",
+ SHUTTING_DOWN_UCI = "shutting-downUCI",
+ SHUTTING_DOWN = "shutting-down",
+ AVAILABLE = "available",
+ RUNNING = "running",
+ PENDING = "pending",
+ ERROR = "error",
+ SNAPSHOT_UCI = "snapshotUCI",
+ SNAPSHOT = "snapshot"
+)
+instance_states = Bunch(
+ TERMINATED = "terminated",
+ SUBMITTED = "submitted",
+ RUNNING = "running",
+ PENDING = "pending",
+ SHUTTING_DOWN = "shutting-down",
+ ERROR = "error"
+)
+
+snapshot_status = Bunch(
+ SUBMITTED = 'submitted',
+ PENDING = 'pending',
+ COMPLETED = 'completed',
+ DELETE = 'delete',
+ DELETED= 'deleted',
+ ERROR = "error"
+)
+
+class CloudManager( object ):
+ """
+ Highest level interface to cloud management.
+ """
+ def __init__( self, app ):
+ self.app = app
+ if self.app.config.get_bool( "enable_cloud_execution", True ):
+ # The dispatcher manager for underlying cloud instances - implements and contacts individual cloud providers
+ self.provider = CloudProvider( app )
+ # Monitor for updating status of cloud instances
+ self.cloud_monitor = CloudMonitor( self.app, self.provider )
+ else:
+ self.job_queue = self.job_stop_queue = NoopCloudMonitor()
+
+ def shutdown( self ):
+ self.cloud_monitor.shutdown()
+
+class Sleeper( object ):
+ """
+ Provides a 'sleep' method that sleeps for a number of seconds *unless*
+ the notify method is called (from a different thread).
+ """
+ def __init__( self ):
+ self.condition = threading.Condition()
+ def sleep( self, seconds ):
+ self.condition.acquire()
+ self.condition.wait( seconds )
+ self.condition.release()
+ def wake( self ):
+ self.condition.acquire()
+ self.condition.notify()
+ self.condition.release()
+
+class CloudMonitor( object ):
+ """
+ Cloud manager, waits for user to instantiate a cloud instance and then invokes a
+ CloudProvider.
+ """
+ STOP_SIGNAL = object()
+ def __init__( self, app, provider ):
+ """Start the cloud manager"""
+ self.app = app
+ # Keep track of the pid that started the cloud manager, only it
+ # has valid threads
+ self.parent_pid = os.getpid()
+
+ # Contains requests that are waiting (only use from monitor thread)
+ self.waiting = []
+
+ # Helper for interruptable sleep
+ self.sleeper = Sleeper()
+ self.running = True
+ self.provider = provider
+ self.monitor_thread = threading.Thread( target=self.__monitor )
+ self.monitor_thread.start()
+ log.info( "Cloud manager started" )
+
+ def __monitor( self ):
+ """
+ Daemon that continuously monitors cloud instance requests as well as state
+ of running instances.
+ """
+ # HACK: Delay until after forking, we need a way to do post fork notification!!!
+ time.sleep( 10 )
+
+ cnt = 0 # Run global update only periodically so keep counter variable
+ while self.running:
+ try:
+# log.debug( "Calling monitor_step" )
+ self.__monitor_step()
+ if cnt%30 == 0: # Run global update every 30 iterations (1 minute)
+ self.provider.update()
+ cnt = 0
+ except:
+ log.exception( "Exception in cloud manager monitor_step" )
+ # Sleep
+ cnt += 1
+ self.sleeper.sleep( 2 )
+
+ def __monitor_step( self ):
+ """
+ Called repeatedly by `monitor` to process cloud instance requests.
+ TODO: Update following description to match the code
+ Gets any new cloud instance requests from the database, then iterates
+ over all new and waiting jobs to check the state of the jobs each
+ depends on. If the job has dependencies that have not finished, it
+ it goes to the waiting queue. If the job has dependencies with errors,
+ it is marked as having errors and removed from the queue. Otherwise,
+ the job is dispatched.
+ """
+ # Get an orm (object relational mapping) session
+ session = mapping.Session()
+ new_requests = []
+
+ for r in session.query( model.UCI ) \
+ .filter( or_( model.UCI.c.state==uci_states.NEW_UCI,
+ model.UCI.c.state==uci_states.SUBMITTED_UCI,
+ model.UCI.c.state==uci_states.SHUTTING_DOWN_UCI,
+ model.UCI.c.state==uci_states.DELETING_UCI,
+ model.UCI.c.state==uci_states.SNAPSHOT_UCI ) ) \
+ .all():
+ uci_wrapper = UCIwrapper( r )
+ new_requests.append( uci_wrapper )
+
+ for uci_wrapper in new_requests:
+ session.clear()
+ self.put( uci_wrapper )
+
+ # Done with the session
+ mapping.Session.remove()
+
+ def put( self, uci_wrapper ):
+ """Add a request to the queue."""
+ self.provider.put( uci_wrapper )
+ self.sleeper.wake()
+
+ def shutdown( self ):
+ """Attempts to gracefully shut down the worker thread"""
+ if self.parent_pid != os.getpid():
+ # We're not the real queue, do nothing
+ return
+ else:
+ log.info( "Sending stop signal to worker thread" )
+ self.running = False
+ self.sleeper.wake()
+ log.info( "cloud manager stopped" )
+ self.dispatcher.shutdown()
+
+class UCIwrapper( object ):
+ """
+ Wraps 'model.UCI' with convenience methods for state management
+ """
+ def __init__( self, uci ):
+ self.uci_id = uci.id
+
+ # --------- Setter methods -----------------
+
+ def change_state( self, uci_state=None, instance_id=None, i_state=None ):
+ """
+ Sets state for UCI and/or UCI's instance with instance_id as provided by cloud provider and stored in local
+ Galaxy database.
+ Need to provide either: (1) state for the UCI, or (2) instance_id and it's state, or (3) all arguments.
+ """
+# log.debug( "Changing state - new uci_state: %s, instance_id: %s, i_state: %s" % ( uci_state, instance_id, i_state ) )
+ if uci_state is not None:
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.state = uci_state
+ uci.flush()
+ if ( instance_id is not None ) and ( i_state is not None ):
+ instance = model.CloudInstance.filter_by( uci_id=self.uci_id, instance_id=instance_id).first()
+ instance.state = i_state
+ instance.flush()
+
+ def set_mi( self, i_index, mi_id ):
+ """
+ Sets Machine Image (MI), e.g., 'ami-66fa190f', for UCI's instance with given index as it
+ is stored in local Galaxy database.
+ """
+ mi = model.CloudImage.filter( model.CloudImage.c.image_id==mi_id ).first()
+ instance = model.CloudInstance.get( i_index )
+ instance.image = mi
+ instance.flush()
+
+ def set_key_pair( self, key_name, key_material=None ):
+ """
+ Sets key pair value for current UCI.
+ """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.key_pair_name = key_name
+ if key_material is not None:
+ uci.key_pair_material = key_material
+ uci.flush()
+
+ def set_launch_time( self, launch_time, i_index=None, i_id=None ):
+ """
+ Stores launch time in local database for instance with specified index - i_index (as it is stored in local
+ Galaxy database) or with specified instance ID - i_id (as obtained from the cloud provider AND stored
+ in local Galaxy Database). Either 'i_index' or 'i_id' needs to be provided.
+ """
+ if i_index != None:
+ instance = model.CloudInstance.get( i_index )
+ instance.launch_time = launch_time
+ instance.flush()
+ elif i_id != None:
+ instance = model.CloudInstance.filter_by( uci_id=self.uci_id, instance_id=i_id).first()
+ instance.launch_time = launch_time
+ instance.flush()
+
+ def set_uci_launch_time( self, launch_time ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.launch_time = launch_time
+ uci.flush()
+
+ def set_stop_time( self, stop_time, i_index=None, i_id=None ):
+ """
+ Stores stop time in local database for instance with specified index - i_index (as it is stored in local
+ Galaxy database) or with specified instance ID - i_id (as obtained from the cloud provider AND stored
+ in local Galaxy Database). Either 'i_index' or 'i_id' needs to be provided.
+ """
+ if i_index != None:
+ instance = model.CloudInstance.get( i_index )
+ instance.stop_time = stop_time
+ instance.flush()
+ elif i_id != None:
+ instance = model.CloudInstance.filter_by( uci_id=self.uci_id, instance_id=i_id).first()
+ instance.stop_time = stop_time
+ instance.flush()
+
+ def reset_uci_launch_time( self ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.launch_time = None
+ uci.flush()
+
+ def set_security_group_name( self, security_group_name, i_index=None, i_id=None ):
+ """
+ Stores security group name in local database for instance with specified index - i_index (as it is stored in local
+ Galaxy database) or with specified instance ID - i_id (as obtained from the cloud provider AND stored
+ in local Galaxy Database). Either 'i_index' or 'i_id' needs to be provided.
+ """
+ if i_index != None:
+ instance = model.CloudInstance.get( i_index )
+ instance.security_group = security_group_name
+ instance.flush()
+ elif i_id != None:
+ instance = model.CloudInstance.filter_by( uci_id=self.uci_id, instance_id=i_id).first()
+ instance.security_group = security_group_name
+ instance.flush()
+
+ def set_reservation_id( self, i_index, reservation_id ):
+ instance = model.CloudInstance.get( i_index )
+ instance.reservation_id = reservation_id
+ instance.flush()
+
+ def set_instance_id( self, i_index, instance_id ):
+ """
+ i_index refers to UCI's instance ID as stored in local database
+ instance_id refers to real-world, cloud resource ID (e.g., 'i-78hd823a')
+ """
+ instance = model.CloudInstance.get( i_index )
+ instance.instance_id = instance_id
+ instance.flush()
+
+ def set_public_dns( self, instance_id, public_dns ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.instance[instance_id].public_dns = public_dns
+ uci.instance[instance_id].flush()
+
+ def set_private_dns( self, instance_id, private_dns ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.instance[instance_id].private_dns = private_dns
+ uci.instance[instance_id].flush()
+
+ def set_store_device( self, store_id, device ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.store[store_id].device = device
+ uci.store[store_id].flush()
+
+ def set_store_error( self, error, store_index=None, store_id=None ):
+ if store_index != None:
+ store = model.CloudStore.get( store_index )
+ elif store_id != None:
+ store = model.CloudStore.filter_by( volume_id = store_id ).first()
+ else:
+ return None
+
+ store.error = error
+ store.flush()
+
+ def set_store_status( self, vol_id, status ):
+ vol = model.CloudStore.filter( model.CloudStore.c.volume_id == vol_id ).first()
+ vol.status = status
+ vol.flush()
+
+ def set_snapshot_id( self, snap_index, id ):
+ snap = model.CloudSnapshot.get( snap_index )
+ snap.snapshot_id = id
+ snap.flush()
+
+ def set_snapshot_status( self, status, snap_index=None, snap_id=None ):
+ if snap_index != None:
+ snap = model.CloudSnapshot.get( snap_index )
+ elif snap_id != None:
+ snap = model.CloudSnapshot.filter_by( snapshot_id = snap_id).first()
+ else:
+ return
+ snap.status = status
+ snap.flush()
+
+ def set_snapshot_error( self, error, snap_index=None, snap_id=None, set_status=False ):
+ if snap_index != None:
+ snap = model.CloudSnapshot.get( snap_index )
+ elif snap_id != None:
+ snap = model.CloudSnapshot.filter_by( snapshot_id = snap_id).first()
+ else:
+ return
+ snap.error = error
+
+ if set_status:
+ snap.status = snapshot_status.ERROR
+
+ snap.flush()
+
+ def set_store_availability_zone( self, availability_zone, vol_id=None ):
+ """
+ Sets availability zone of storage volumes for either ALL volumes associated with current
+ UCI or for the volume whose volume ID (e.g., 'vol-39F80512') is provided as argument.
+ """
+ if vol_id is not None:
+ vol = model.CloudStore.filter( model.CloudStore.c.volume_id == vol_id ).all()
+ else:
+ vol = model.CloudStore.filter( model.CloudStore.c.uci_id == self.uci_id ).all()
+
+ for v in vol:
+ v.availability_zone = availability_zone
+ v.flush()
+
+ def set_store_volume_id( self, store_id, volume_id ):
+ """
+ Given store ID associated with this UCI, set volume ID as it is registered
+ on the cloud provider (e.g., vol-39890501)
+ """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.store[store_id].volume_id = volume_id
+ uci.store[store_id].flush()
+
+ def set_store_instance( self, vol_id, instance_id ):
+ """
+ Stores instance ID that given store volume is attached to. Store volume ID should
+ be given in following format: 'vol-78943248'
+ """
+ vol = model.CloudStore.filter( model.CloudStore.c.volume_id == vol_id ).first()
+ vol.i_id = instance_id
+ vol.flush()
+
+ def set_error( self, error, set_state=False ):
+ """
+ Sets error field of given UCI in local Galaxy database as well as any instances associated with
+ this UCI whose state is 'None' or 'SUBMITTED'. If set_state is set to 'true',
+ method also sets the state of the given UCI and corresponding instances to 'error'
+ """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.error = error
+ if set_state:
+ uci.state = uci_states.ERROR
+ instances = model.CloudInstance \
+ .filter_by( uci=uci ) \
+ .filter( or_( model.CloudInstance.c.state==None, model.CloudInstance.c.state==instance_states.SUBMITTED ) ) \
+ .all()
+ for i in instances:
+ i.error = error
+ i.state = instance_states.ERROR
+ i.flush()
+ uci.flush()
+
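+ # Illustrative usage sketch: a provider can record a failure and mark the UCI (plus any
+ # not-yet-started instances) as 'error' in a single call, e.g.:
+ # uci_wrapper.set_error( "Storage volume creation failed", set_state=True )
+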
+ def set_deleted( self ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ uci.state = uci_states.DELETED # for bookkeeping reasons, mark as deleted but don't actually delete.
+ uci.deleted = True
+ uci.flush()
+
+ # --------- Getter methods -----------------
+
+ def get_provider_type( self ):
+ """ Returns type of cloud provider associated with given UCI. """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+# cred_id = uci.credentials_id
+# cred = model.CloudUserCredentials.get( cred_id )
+ return uci.credentials.provider.type
+
+ def get_type( self, i_index ):
+ instance = model.CloudInstance.get( i_index )
+ return instance.type
+
+ def get_state( self ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.state
+
+ def get_instances_indexes( self, state=None ):
+ """
+ Returns indexes (i.e., local Galaxy database IDs) of instances associated with the given UCI
+ whose state corresponds to the passed argument.
+ """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ instances = model.CloudInstance.filter_by( uci=uci ).filter( model.CloudInstance.c.state==state ).all()
+ il = []
+ for i in instances:
+ il.append( i.id )
+
+ return il
+
+ def get_instance_state( self, instance_id ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.instance[instance_id].state
+
+ def get_instances_ids( self ):
+ """
+ Returns a list of IDs of all instances associated with this UCI that are not in 'terminated' state
+ (e.g., ['i-402906D2', 'i-q0290dsD2'] ).
+ """
+ il = model.CloudInstance.filter_by( uci_id=self.uci_id ).filter( model.CloudInstance.c.state != 'terminated' ).all()
+ instanceList = []
+ for i in il:
+ instanceList.append( i.instance_id )
+ return instanceList
+
+ def get_name( self ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.name
+
+ def get_key_pair_name( self ):
+ """
+ Returns keypair name associated with given UCI.
+ """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.key_pair_name
+
+ def get_key_pair_material( self ):
+ """
+ Returns keypair material (i.e., private key) associated with given UCI.
+ """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.key_pair_material
+
+ def get_security_group_name( self, i_index=None, i_id=None ):
+ """
+ Given EITHER instance index as it is stored in local Galaxy database OR instance ID as it is
+ obtained from cloud provider and stored in local Galaxy database, return security group name associated
+ with given instance.
+ """
+ if i_index != None:
+ instance = model.CloudInstance.get( i_index )
+ return instance.security_group
+ elif i_id != None:
+ instance = model.CloudInstance.filter_by( uci_id=self.uci_id, instance_id=i_id).first()
+ return instance.security_group
+
+ def get_access_key( self ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.credentials.access_key
+
+ def get_secret_key( self ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.credentials.secret_key
+
+ def get_mi_id( self, instance_id=0 ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.instance[instance_id].mi_id
+
+ def get_public_dns( self, instance_id=0 ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.instance[instance_id].public_dns
+
+ def get_private_dns( self, instance_id=0 ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.instance[instance_id].private_dns
+
+ def get_uci_availability_zone( self ):
+ """
+ Returns UCI's availability zone.
+ Because all storage volumes associated with a given UCI must be in the same
+ availability zone, the availability zone of a UCI is determined by the availability
+ zone of any one of its storage volumes.
+ """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.store[0].availability_zone
+
+ def get_store_size( self, store_id=0 ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.store[store_id].size
+
+ def get_store_volume_id( self, store_id=0 ):
+ """
+ Given store ID associated with this UCI, get volume ID as it is registered
+ on the cloud provider (e.g., 'vol-39890501')
+ """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.store[store_id].volume_id
+
+ def get_all_stores( self ):
+ """ Returns all storage volumes' database objects associated with this UCI. """
+ return model.CloudStore.filter( model.CloudStore.c.uci_id == self.uci_id ).all()
+
+ def get_snapshots( self, status=None ):
+ """ Returns database objects for all snapshots associated with this UCI and in given status."""
+ return model.CloudSnapshot.filter_by( uci_id=self.uci_id, status=status ).all()
+
+ def get_uci( self ):
+ """ Returns database object for given UCI. """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci
+
+ def get_provider( self ):
+ """ Returns database object of cloud provider associated with credentials of given UCI. """
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.credentials.provider
+
+ def uci_launch_time_set( self ):
+ uci = model.UCI.get( self.uci_id )
+ uci.refresh()
+ return uci.launch_time
+
+class CloudProvider( object ):
+ def __init__( self, app ):
+ import providers.eucalyptus
+ import providers.ec2
+
+ self.app = app
+ self.cloud_provider = {}
+ self.cloud_provider["eucalyptus"] = providers.eucalyptus.EucalyptusCloudProvider( app )
+ self.cloud_provider["ec2"] = providers.ec2.EC2CloudProvider( app )
+
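+ # The dictionary above acts as a simple registry: put() selects the implementation based on
+ # the UCI's provider type (e.g., 'ec2' or 'eucalyptus'). As a sketch, supporting another
+ # provider such as OpenNebula would roughly amount to registering one more (hypothetical) entry:
+ # self.cloud_provider["opennebula"] = providers.opennebula.OpenNebulaCloudProvider( app )
+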
+ def put( self, uci_wrapper ):
+ """ Put given request for UCI manipulation into provider's request queue."""
+ self.cloud_provider[uci_wrapper.get_provider_type()].put( uci_wrapper )
+
+ def update( self ):
+ """
+ Runs a global status update across all providers for all UCIs in state other than 'terminated' and 'available'.
+ The purpose of this method is to keep the state of the local DB in sync with real-world cloud resources.
+ """
+ for provider in self.cloud_provider.keys():
+# log.debug( "Running global update for provider: '%s'" % provider )
+ self.cloud_provider[provider].update()
+
+ def shutdown( self ):
+ for runner in self.cloud_provider.itervalues():
+ runner.shutdown()
+
+class NoopCloudMonitor( object ):
+ """
+ Implements the CloudMonitor interface but does nothing
+ """
+ def put( self, *args ):
+ return
+ def shutdown( self ):
+ return
+
diff -r 0984c3800775 -r 7d013eb98022 lib/galaxy/cloud/providers/ec2.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/cloud/providers/ec2.py Thu Nov 12 16:36:07 2009 -0500
@@ -0,0 +1,940 @@
+import subprocess, threading, os, errno, time, datetime
+from Queue import Queue, Empty
+from datetime import datetime
+
+from galaxy import model # Database interaction class
+from galaxy.model import mapping
+from galaxy.datatypes.data import nice_size
+from galaxy.util.bunch import Bunch
+from galaxy.cloud import UCIwrapper
+from Queue import Queue
+from sqlalchemy import or_, and_
+
+import galaxy.eggs
+galaxy.eggs.require("boto")
+from boto.ec2.connection import EC2Connection
+from boto.ec2.regioninfo import RegionInfo
+import boto.exception
+import boto
+
+import logging
+log = logging.getLogger( __name__ )
+
+uci_states = Bunch(
+ NEW_UCI = "newUCI",
+ NEW = "new",
+ CREATING = "creating",
+ DELETING_UCI = "deletingUCI",
+ DELETING = "deleting",
+ SUBMITTED_UCI = "submittedUCI",
+ SUBMITTED = "submitted",
+ SHUTTING_DOWN_UCI = "shutting-downUCI",
+ SHUTTING_DOWN = "shutting-down",
+ AVAILABLE = "available",
+ RUNNING = "running",
+ PENDING = "pending",
+ ERROR = "error",
+ DELETED = "deleted",
+ SNAPSHOT_UCI = "snapshotUCI",
+ SNAPSHOT = "snapshot"
+)
+
+instance_states = Bunch(
+ TERMINATED = "terminated",
+ SUBMITTED = "submitted",
+ RUNNING = "running",
+ PENDING = "pending",
+ SHUTTING_DOWN = "shutting-down",
+ ERROR = "error"
+)
+
+store_status = Bunch(
+ IN_USE = "in-use",
+ CREATING = "creating",
+ DELETED = 'deleted',
+ ERROR = "error"
+)
+
+snapshot_status = Bunch(
+ SUBMITTED = 'submitted',
+ PENDING = 'pending',
+ COMPLETED = 'completed',
+ DELETE = 'delete',
+ DELETED= 'deleted',
+ ERROR = "error"
+)
+
+class EC2CloudProvider( object ):
+ """
+ Amazon EC2-based cloud provider implementation for managing instances.
+ """
+ STOP_SIGNAL = object()
+ def __init__( self, app ):
+ self.type = "ec2" # cloud provider type (e.g., ec2, eucalyptus, opennebula)
+ self.zone = "us-east-1a"
+ self.security_group = "galaxyWeb"
+ self.queue = Queue()
+
+ self.threads = []
+ nworkers = 5
+ log.info( "Starting EC2 cloud controller workers..." )
+ for i in range( nworkers ):
+ worker = threading.Thread( target=self.run_next )
+ worker.start()
+ self.threads.append( worker )
+ log.debug( "%d EC2 cloud workers ready", nworkers )
+
+ def run_next( self ):
+ """Run the next job, waiting until one is available if necessary"""
+ cnt = 0
+ while 1:
+
+ uci_wrapper = self.queue.get()
+ if uci_wrapper is self.STOP_SIGNAL:
+ return
+ uci_state = uci_wrapper.get_state()
+ try:
+ if uci_state==uci_states.NEW:
+ self.createUCI( uci_wrapper )
+ elif uci_state==uci_states.DELETING:
+ self.deleteUCI( uci_wrapper )
+ elif uci_state==uci_states.SUBMITTED:
+ self.startUCI( uci_wrapper )
+ elif uci_state==uci_states.SHUTTING_DOWN:
+ self.stopUCI( uci_wrapper )
+ elif uci_state==uci_states.SNAPSHOT:
+ self.snapshotUCI( uci_wrapper )
+ except:
+ log.exception( "Uncaught exception executing cloud request." )
+ cnt += 1
+
+ def get_connection( self, uci_wrapper ):
+ """
+ Establishes EC2 cloud connection using user's credentials associated with given UCI
+ """
+ log.debug( 'Establishing %s cloud connection.' % self.type )
+ provider = uci_wrapper.get_provider()
+ try:
+ region = RegionInfo( None, provider.region_name, provider.region_endpoint )
+ except Exception, ex:
+ err = "Selecting region with cloud provider failed: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ return None
+ try:
+ conn = EC2Connection( aws_access_key_id=uci_wrapper.get_access_key(),
+ aws_secret_access_key=uci_wrapper.get_secret_key(),
+ is_secure=provider.is_secure,
+ region=region,
+ path=provider.path )
+ except boto.exception.EC2ResponseError, e:
+ err = "Establishing connection with cloud failed: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ return None
+
+ return conn
+
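+ # As a rough illustration (actual values come from the provider record in the database),
+ # region_name might be something like 'us-east-1' and region_endpoint something like
+ # 'us-east-1.ec2.amazonaws.com'.
+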
+ def check_key_pair( self, uci_wrapper, conn ):
+ """
+ Check the key pair associated with this UCI and, if needed, (re)generate it using the user's credentials.
+ """
+ kp = None
+ kp_name = uci_wrapper.get_name().replace(' ','_') + "_kp"
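+ # e.g., a UCI named 'galaxy cluster' would get the key pair name 'galaxy_cluster_kp'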
+ log.debug( "Checking user's key pair: '%s'" % kp_name )
+ try:
+ kp = conn.get_key_pair( kp_name )
+ uci_kp_name = uci_wrapper.get_key_pair_name()
+ uci_material = uci_wrapper.get_key_pair_material()
+ if kp != None:
+ if kp.name != uci_kp_name or uci_material == None:
+ # key pair exists on the cloud but not in local database, so re-generate it (i.e., delete and then create)
+ try:
+ conn.delete_key_pair( kp_name )
+ kp = self.create_key_pair( conn, kp_name )
+ uci_wrapper.set_key_pair( kp.name, kp.material )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while deleting key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ else:
+ try:
+ kp = self.create_key_pair( conn, kp_name )
+ uci_wrapper.set_key_pair( kp.name, kp.material )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while creating key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ except Exception, ex:
+ err = "Exception while creating key pair: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ except boto.exception.EC2ResponseError, e: # No keypair under this name exists so create it
+ if e.code == 'InvalidKeyPair.NotFound':
+ log.info( "No keypair found, creating keypair '%s'" % kp_name )
+ kp = self.create_key_pair( conn, kp_name )
+ uci_wrapper.set_key_pair( kp.name, kp.material )
+ else:
+ err = "EC2 response error while retrieving key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+
+ if kp != None:
+ return kp.name
+ else:
+ return None
+
+ def create_key_pair( self, conn, kp_name ):
+ try:
+ return conn.create_key_pair( kp_name )
+ except boto.exception.EC2ResponseError, e:
+ return None
+
+ def get_mi_id( self, uci_wrapper, i_index ):
+ """
+ Get appropriate machine image (mi) based on instance size.
+ """
+ i_type = uci_wrapper.get_type( i_index )
+ if i_type=='m1.small' or i_type=='c1.medium':
+ arch = 'i386'
+ else:
+ arch = 'x86_64'
+
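+ # For example, an 'm1.small' or 'c1.medium' instance is matched with an i386 image, while
+ # larger instance types are matched with an x86_64 image (image IDs of the form 'ami-66fa190f')
+ # registered by the site administrator.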
+ mi = model.CloudImage.filter_by( deleted=False, provider_type=self.type, architecture=arch ).first()
+ if mi:
+ return mi.image_id
+ else:
+ err = "Machine image could not be retrieved"
+ log.error( "%s for UCI '%s'." % (err, uci_wrapper.get_name() ) )
+ uci_wrapper.set_error( err+". Contact site administrator to ensure needed machine image is registered.", True )
+ return None
+
+ def shutdown( self ):
+ """Attempts to gracefully shut down the monitor thread"""
+ log.info( "sending stop signal to worker threads in EC2 cloud manager" )
+ for i in range( len( self.threads ) ):
+ self.queue.put( self.STOP_SIGNAL )
+ log.info( "EC2 cloud manager stopped" )
+
+ def put( self, uci_wrapper ):
+ # Get rid of UCI from state description
+ state = uci_wrapper.get_state()
+ uci_wrapper.change_state( state.split('U')[0] ) # remove 'UCI' from end of state description (i.e., mark as accepted and ready for processing)
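+ # e.g., 'newUCI' -> 'new', 'submittedUCI' -> 'submitted', 'shutting-downUCI' -> 'shutting-down'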
+ self.queue.put( uci_wrapper )
+
+ def createUCI( self, uci_wrapper ):
+ """
+ Creates User Configured Instance (UCI). Essentially, creates storage volume on cloud provider
+ and registers relevant information in Galaxy database.
+ """
+ conn = self.get_connection( uci_wrapper )
+ if uci_wrapper.get_uci_availability_zone()=='':
+ log.info( "Availability zone for UCI (i.e., storage volume) was not selected, using default zone: %s" % self.zone )
+ uci_wrapper.set_store_availability_zone( self.zone )
+
+ log.info( "Creating volume in zone '%s'..." % uci_wrapper.get_uci_availability_zone() )
+ # Because only 1 storage volume may be created at UCI config time, index of this storage volume in local Galaxy DB w.r.t
+ # current UCI is 0, so reference it in following methods
+ vol = conn.create_volume( uci_wrapper.get_store_size( 0 ), uci_wrapper.get_uci_availability_zone(), snapshot=None )
+ uci_wrapper.set_store_volume_id( 0, vol.id )
+
+ # Wait for a while to ensure volume was created
+# vol_status = vol.status
+# for i in range( 30 ):
+# if vol_status is not "available":
+# log.debug( 'Updating volume status; current status: %s' % vol_status )
+# vol_status = vol.status
+# time.sleep(3)
+# if i is 29:
+# log.debug( "Error while creating volume '%s'; stuck in state '%s'; deleting volume." % ( vol.id, vol_status ) )
+# conn.delete_volume( vol.id )
+# uci_wrapper.change_state( uci_state='error' )
+# return
+
+ # Retrieve created volume again to get updated status
+ try:
+ vl = conn.get_all_volumes( [vol.id] )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while retrieving (i.e., updating status) of just created storage volume '" + vol.id + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_store_status( vol.id, uci_states.ERROR )
+ uci_wrapper.set_error( err, True )
+ return
+ except Exception, ex:
+ err = "Error while retrieving (i.e., updating status) of just created storage volume '" + vol.id + "': " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ return
+
+ if len( vl ) > 0:
+ uci_wrapper.change_state( uci_state=vl[0].status )
+ uci_wrapper.set_store_status( vol.id, vl[0].status )
+ else:
+ err = "Volume '" + vol.id +"' not found by EC2 after being created."
+ log.error( err )
+ uci_wrapper.set_store_status( vol.id, uci_states.ERROR )
+ uci_wrapper.set_error( err, True )
+
+ def deleteUCI( self, uci_wrapper ):
+ """
+ Deletes UCI. NOTE that this implies deletion of any and all data associated
+ with this UCI from the cloud. All data will be deleted.
+ """
+ conn = self.get_connection( uci_wrapper )
+ vl = [] # volume list
+ count = 0 # counter for checking if all volumes assoc. w/ UCI were deleted
+
+ # Get all volumes assoc. w/ UCI, delete them from cloud as well as in local DB
+ vl = uci_wrapper.get_all_stores()
+ deletedList = []
+ failedList = []
+ for v in vl:
+ log.debug( "Deleting volume with id='%s'" % v.volume_id )
+ try:
+ if conn.delete_volume( v.volume_id ):
+ deletedList.append( v.volume_id )
+ v.deleted = True
+ v.flush()
+ count += 1
+ else:
+ failedList.append( v.volume_id )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while deleting storage volume '" + v.volume_id + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_store_error( err, store_id = v.volume_id )
+ uci_wrapper.set_error( err, True )
+
+ # Mark UCI as deleted if all associated volumes were successfully deleted
+ if count == len( vl ):
+ uci_wrapper.set_deleted()
+ else:
+ err = "Deleting following volume(s) failed: "+failedList+". However, these volumes were successfully deleted: "+deletedList+". \
+ MANUAL intervention and processing needed."
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+
+ def snapshotUCI( self, uci_wrapper ):
+ """
+ Creates snapshot of all storage volumes associated with this UCI.
+ """
+ if uci_wrapper.get_state() != uci_states.ERROR:
+ conn = self.get_connection( uci_wrapper )
+
+ snapshots = uci_wrapper.get_snapshots( status = snapshot_status.SUBMITTED )
+ for snapshot in snapshots:
+ log.debug( "Snapshot DB id: '%s', volume id: '%s'" % ( snapshot.id, snapshot.store.volume_id ) )
+ try:
+ snap = conn.create_snapshot( volume_id=snapshot.store.volume_id )
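+ # str( snap ) is expected to look like 'Snapshot:snap-1a2b3c4d', so take the part after ':' as the snapshot ID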
+ snap_id = str( snap ).split(':')[1]
+ uci_wrapper.set_snapshot_id( snapshot.id, snap_id )
+ sh = conn.get_all_snapshots( snap_id ) # get updated status
+ uci_wrapper.set_snapshot_status( status=sh[0].status, snap_id=snap_id )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while creating snapshot: " + str( e )
+ log.error( err )
+ uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
+ uci_wrapper.set_error( err, True )
+ return
+ except Exception, ex:
+ err = "Error while creating snapshot: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
+ uci_wrapper.set_error( err, True )
+ return
+
+ uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
+
+ def addStorageToUCI( self, name ):
+ """ Adds more storage to specified UCI
+ TODO"""
+
+ def dummyStartUCI( self, uci_wrapper ):
+
+ uci = uci_wrapper.get_uci()
+ log.debug( "Would be starting instance '%s'" % uci.name )
+ uci_wrapper.change_state( uci_states.PENDING )
+# log.debug( "Sleeping a bit... (%s)" % uci.name )
+# time.sleep(20)
+# log.debug( "Woke up! (%s)" % uci.name )
+
+ def startUCI( self, uci_wrapper ):
+ """
+ Starts instance(s) of given UCI on the cloud.
+ """
+ if uci_wrapper.get_state() != uci_states.ERROR:
+ conn = self.get_connection( uci_wrapper )
+ self.check_key_pair( uci_wrapper, conn )
+ if uci_wrapper.get_key_pair_name() == None:
+ err = "Key pair not found"
+ log.error( "%s for UCI '%s'." % ( err, uci_wrapper.get_name() ) )
+ uci_wrapper.set_error( err + ". Try resetting the state and starting the instance again.", True )
+ return
+
+ i_indexes = uci_wrapper.get_instances_indexes( state=instance_states.SUBMITTED ) # Get indexes of instances associated with this UCI that are in 'submitted' state
+ log.debug( "Starting instances with IDs: '%s' associated with UCI '%s' " % ( i_indexes, uci_wrapper.get_name(), ) )
+ if len( i_indexes ) > 0:
+ for i_index in i_indexes:
+ # Get machine image for current instance
+ mi_id = self.get_mi_id( uci_wrapper, i_index )
+ log.debug( "mi_id: %s, uci_wrapper.get_key_pair_name(): %s" % ( mi_id, uci_wrapper.get_key_pair_name() ) )
+ uci_wrapper.set_mi( i_index, mi_id )
+
+ # Check if galaxy security group exists (and create it if it does not)
+ log.debug( "Setting up '%s' security group." % self.security_group )
+ try:
+ conn.get_all_security_groups( [self.security_group] ) # security groups
+ except boto.exception.EC2ResponseError, e:
+ if e.code == 'InvalidGroup.NotFound':
+ log.info( "No security group found, creating security group '%s'" % self.security_group )
+ try:
+ gSecurityGroup = conn.create_security_group(self.security_group, 'Security group for Galaxy.')
+ gSecurityGroup.authorize( 'tcp', 80, 80, '0.0.0.0/0' ) # Open HTTP port
+ gSecurityGroup.authorize( 'tcp', 22, 22, '0.0.0.0/0' ) # Open SSH port
+ except boto.exception.EC2ResponseError, ee:
+ err = "EC2 response error while creating security group: " + str( ee )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ else:
+ err = "EC2 response error while retrieving security group: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+
+
+ if uci_wrapper.get_state() != uci_states.ERROR:
+ # Start an instance
+ log.debug( "Starting instance for UCI '%s'" % uci_wrapper.get_name() )
+ #TODO: Once multiple volumes can be attached to a single instance, update 'userdata' composition
+ userdata = uci_wrapper.get_store_volume_id()+"|"+uci_wrapper.get_access_key()+"|"+uci_wrapper.get_secret_key()
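+ # e.g., userdata takes the form '<volume id>|<access key>|<secret key>', such as 'vol-39890501|...|...'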
+ log.debug( "Using following command: conn.run_instances( image_id='%s', key_name='%s', security_groups=['%s'], user_data=[OMITTED], instance_type='%s', placement='%s' )"
+ % ( mi_id, uci_wrapper.get_key_pair_name(), self.security_group, uci_wrapper.get_type( i_index ), uci_wrapper.get_uci_availability_zone() ) )
+ reservation = None
+ try:
+ reservation = conn.run_instances( image_id=mi_id,
+ key_name=uci_wrapper.get_key_pair_name(),
+ security_groups=[self.security_group],
+ user_data=userdata,
+ instance_type=uci_wrapper.get_type( i_index ),
+ placement=uci_wrapper.get_uci_availability_zone() )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error when starting UCI '"+ uci_wrapper.get_name() +"': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ except Exception, ex:
+ err = "Error when starting UCI '" + uci_wrapper.get_name() + "': " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ # Record newly available instance data into local Galaxy database
+ if reservation:
+ uci_wrapper.set_launch_time( self.format_time( reservation.instances[0].launch_time ), i_index=i_index )
+ if not uci_wrapper.uci_launch_time_set():
+ uci_wrapper.set_uci_launch_time( self.format_time( reservation.instances[0].launch_time ) )
+ try:
+ uci_wrapper.set_reservation_id( i_index, str( reservation ).split(":")[1] )
+ # TODO: if more than a single instance will be started through single reservation, change this reference to element [0]
+ i_id = str( reservation.instances[0]).split(":")[1]
+ uci_wrapper.set_instance_id( i_index, i_id )
+ s = reservation.instances[0].state
+ uci_wrapper.change_state( s, i_id, s )
+ uci_wrapper.set_security_group_name( self.security_group, i_id=i_id )
+ log.debug( "Instance of UCI '%s' started, current state: '%s'" % ( uci_wrapper.get_name(), uci_wrapper.get_state() ) )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error when retrieving instance information for UCI '" + uci_wrapper.get_name() + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ else:
+ log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
+ else:
+ err = "No instances in state '"+ instance_states.SUBMITTED +"' found for UCI '" + uci_wrapper.get_name() + \
+ "'. Nothing to start."
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ else:
+ log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
+
+ def stopUCI( self, uci_wrapper):
+ """
+ Stops all of cloud instances associated with given UCI.
+ """
+ conn = self.get_connection( uci_wrapper )
+
+ # Get all instances associated with given UCI
+ il = uci_wrapper.get_instances_ids() # instance list
+ rl = conn.get_all_instances( il ) # Reservation list associated with given instances
+
+ # Initiate shutdown of all instances under given UCI
+ cnt = 0
+ stopped = []
+ not_stopped = []
+ for r in rl:
+ for inst in r.instances:
+ log.debug( "Sending stop signal to instance '%s' associated with reservation '%s'." % ( inst, r ) )
+ try:
+ inst.stop()
+ uci_wrapper.set_stop_time( datetime.utcnow(), i_id=inst.id )
+ uci_wrapper.change_state( instance_id=inst.id, i_state=inst.update() )
+ stopped.append( inst )
+ except boto.exception.EC2ResponseError, e:
+ not_stopped.append( inst )
+ err = "EC2 response error when stopping instance '" + inst.instance_id + "': " + str(e)
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+
+ uci_wrapper.reset_uci_launch_time()
+ log.debug( "Termination was initiated for all instances of UCI '%s'." % uci_wrapper.get_name() )
+
+
+# dbInstances = get_instances( trans, uci ) #TODO: handle list!
+#
+# # Get actual cloud instance object
+# cloudInstance = get_cloud_instance( conn, dbInstances.instance_id )
+#
+# # TODO: Detach persistent storage volume(s) from instance and update volume data in local database
+# stores = get_stores( trans, uci )
+# for i, store in enumerate( stores ):
+# log.debug( "Detaching volume '%s' to instance '%s'." % ( store.volume_id, dbInstances.instance_id ) )
+# mntDevice = store.device
+# volStat = None
+## Detaching volume does not work with Eucalyptus Public Cloud, so comment it out
+## try:
+## volStat = conn.detach_volume( store.volume_id, dbInstances.instance_id, mntDevice )
+## except:
+## log.debug ( 'Error detaching volume; still going to try and stop instance %s.' % dbInstances.instance_id )
+# store.attach_time = None
+# store.device = None
+# store.i_id = None
+# store.status = volStat
+# log.debug ( '***** volume status: %s' % volStat )
+#
+#
+# # Stop the instance and update status in local database
+# cloudInstance.stop()
+# dbInstances.stop_time = datetime.utcnow()
+# while cloudInstance.state != 'terminated':
+# log.debug( "Stopping instance %s state; current state: %s" % ( str( cloudInstance ).split(":")[1], cloudInstance.state ) )
+# time.sleep(3)
+# cloudInstance.update()
+# dbInstances.state = cloudInstance.state
+#
+# # Reset relevant UCI fields
+# uci.state = 'available'
+# uci.launch_time = None
+#
+# # Persist
+# session = trans.sa_session
+## session.save_or_update( stores )
+# session.save_or_update( dbInstances ) # TODO: Is this going to work w/ multiple instances stored in dbInstances variable?
+# session.save_or_update( uci )
+# session.flush()
+# trans.log_event( "User stopped cloud instance '%s'" % uci.name )
+# trans.set_message( "Galaxy instance '%s' stopped." % uci.name )
+
+ def update( self ):
+ """
+ Runs a global status update on all instances that are in 'running', 'pending', or 'shutting-down' state.
+ Also, runs update on all storage volumes that are in 'in-use', 'creating', or 'None' state.
+ The purpose of this method is to keep the state of the local DB in sync with real-world cloud resources.
+ """
+ log.debug( "Running general status update for EC2 UCIs..." )
+ # Update instances
+ instances = model.CloudInstance.filter( or_( model.CloudInstance.c.state==instance_states.RUNNING,
+ model.CloudInstance.c.state==instance_states.PENDING,
+ model.CloudInstance.c.state==instance_states.SHUTTING_DOWN ) ).all()
+ for inst in instances:
+ if self.type == inst.uci.credentials.provider.type:
+ log.debug( "[%s] Running general status update on instance '%s'" % ( inst.uci.credentials.provider.type, inst.instance_id ) )
+ self.updateInstance( inst )
+
+ # Update storage volume(s)
+ stores = model.CloudStore.filter( or_( model.CloudStore.c.status==store_status.IN_USE,
+ model.CloudStore.c.status==store_status.CREATING,
+ model.CloudStore.c.status==None ) ).all()
+ for store in stores:
+ if self.type == store.uci.credentials.provider.type: # and store.volume_id != None:
+ log.debug( "[%s] Running general status update on store with local database ID: '%s'" % ( store.uci.credentials.provider.type, store.id ) )
+ self.updateStore( store )
+# else:
+# log.error( "[%s] There exists an entry for UCI (%s) storage volume without an ID. Storage volume might have been created with "
+# "cloud provider though. Manual check is recommended." % ( store.uci.credentials.provider.type, store.uci.name ) )
+# store.uci.error = "There exists an entry in local database for a storage volume without an ID. Storage volume might have been created " \
+# "with cloud provider though. Manual check is recommended. After understanding what happened, local database entry for given " \
+# "storage volume should be updated."
+# store.status = store_status.ERROR
+# store.uci.state = uci_states.ERROR
+# store.uci.flush()
+# store.flush()
+
+ # Update pending snapshots or delete ones marked for deletion
+ snapshots = model.CloudSnapshot.filter( or_( model.CloudSnapshot.c.status==snapshot_status.PENDING, model.CloudSnapshot.c.status==snapshot_status.DELETE ) ).all()
+ for snapshot in snapshots:
+ if self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.PENDING:
+ log.debug( "[%s] Running general status update on snapshot '%s'" % ( snapshot.uci.credentials.provider.type, snapshot.snapshot_id ) )
+ self.updateSnapshot( snapshot )
+ elif self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.DELETE:
+ log.debug( "[%s] Initiating deletion of snapshot '%s'" % ( snapshot.uci.credentials.provider.type, snapshot.snapshot_id ) )
+ self.delete_snapshot( snapshot )
+
+ # Attempt at updating any zombie UCIs (i.e., instances that have been in SUBMITTED state for longer than expected - see below for exact time)
+ zombies = model.UCI.filter_by( state=uci_states.SUBMITTED ).all()
+ for zombie in zombies:
+ z_instances = model.CloudInstance.filter_by( uci_id=zombie.id) \
+ .filter( or_( model.CloudInstance.c.state != instance_states.TERMINATED,
+ model.CloudInstance.c.state == None ) ) \
+ .all()
+ for z_inst in z_instances:
+ if self.type == z_inst.uci.credentials.provider.type:
+# log.debug( "z_inst.id: '%s', state: '%s'" % ( z_inst.id, z_inst.state ) )
+ td = datetime.utcnow() - z_inst.update_time
+ if td.seconds > 180: # if instance has been in SUBMITTED state for more than 3 minutes
+ log.debug( "[%s] Running zombie repair update on instance with DB id '%s'" % ( z_inst.uci.credentials.provider.type, z_inst.id ) )
+ self.processZombie( z_inst )
+
+ def updateInstance( self, inst ):
+
+ # Get credentials associated with this instance
+ uci_id = inst.uci_id
+ uci = model.UCI.get( uci_id )
+ uci.refresh()
+ conn = self.get_connection_from_uci( uci )
+
+ # Get reservations handle for given instance
+ try:
+ rl= conn.get_all_instances( [inst.instance_id] )
+ except boto.exception.EC2ResponseError, e:
+ err = "Retrieving instance(s) from cloud failed for UCI '"+ uci.name +"' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ return None
+
+ # Because references to reservations are deleted shortly after instances have been terminated, getting an empty list as a response to a query
+ # typically means the instance has successfully shut down but the check was not performed within a short enough amount of time. Until an alternative
+ # solution is found, the code below sets the state of the given UCI to 'error' to indicate to the user that something out of the ordinary happened.
+ if len( rl ) == 0:
+ err = "Instance ID '"+inst.instance_id+"' was not found by the cloud provider. Instance might have crashed or otherwise been terminated."+ \
+ "Manual check is recommended."
+ log.error( err )
+ inst.error = err
+ uci.error = err
+ inst.state = instance_states.TERMINATED
+ uci.state = uci_states.ERROR
+ uci.launch_time = None
+ inst.flush()
+ uci.flush()
+ # Update instance status in local DB with info from cloud provider
+ for r in rl:
+ for i, cInst in enumerate( r.instances ):
+ try:
+ s = cInst.update()
+ log.debug( "Checking state of cloud instance '%s' associated with UCI '%s' and reservation '%s'. State='%s'" % ( cInst, uci.name, r, s ) )
+ if s != inst.state:
+ inst.state = s
+ inst.flush()
+ # After instance has shut down, ensure UCI is marked as 'available'
+ if s == instance_states.TERMINATED and uci.state != uci_states.ERROR:
+ uci.state = uci_states.AVAILABLE
+ uci.launch_time = None
+ uci.flush()
+ # Making sure state of UCI is updated. Once multiple instances become associated with single UCI, this will need to be changed.
+ if s != uci.state and s != instance_states.TERMINATED:
+ uci.state = s
+ uci.flush()
+ if cInst.public_dns_name != inst.public_dns:
+ inst.public_dns = cInst.public_dns_name
+ inst.flush()
+ if cInst.private_dns_name != inst.private_dns:
+ inst.private_dns = cInst.private_dns_name
+ inst.flush()
+ except boto.exception.EC2ResponseError, e:
+ err = "Updating instance status from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ return None
+
+ def updateStore( self, store ):
+ # Get credentials associated with this store
+ uci_id = store.uci_id
+ uci = model.UCI.get( uci_id )
+ uci.refresh()
+ conn = self.get_connection_from_uci( uci )
+
+ # Get reservations handle for given store
+ try:
+ vl = conn.get_all_volumes( [store.volume_id] )
+ except boto.exception.EC2ResponseError, e:
+ err = "Retrieving volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ return None
+
+ # Update store status in local DB with info from cloud provider
+ if len(vl) > 0:
+ try:
+ if store.status != vl[0].status:
+ # In case something failed during creation of UCI but actual storage volume was created and yet
+ # UCI state remained as 'new', try to remedy this by updating UCI state here
+ if ( store.status == None ) and ( store.volume_id != None ):
+ uci.state = vl[0].status
+ uci.flush()
+ # If UCI was marked in state 'CREATING', update its status to reflect new status
+ elif ( uci.state == uci_states.CREATING ):
+ uci.state = vl[0].status
+ uci.flush()
+
+ store.status = vl[0].status
+ store.flush()
+ if store.i_id != vl[0].instance_id:
+ store.i_id = vl[0].instance_id
+ store.flush()
+ if store.attach_time != vl[0].attach_time:
+ store.attach_time = vl[0].attach_time
+ store.flush()
+ if store.device != vl[0].device:
+ store.device = vl[0].device
+ store.flush()
+ except boto.exception.EC2ResponseError, e:
+ err = "Updating status of volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ return None
+ else:
+ err = "No storage volumes returned by cloud provider on general update"
+ log.error( "%s for UCI '%s'" % ( err, uci.name ) )
+ store.status = store_status.ERROR
+ store.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ store.flush()
+
+ def updateSnapshot( self, snapshot ):
+ # Get credentials associated with this store
+ uci_id = snapshot.uci_id
+ uci = model.UCI.get( uci_id )
+ uci.refresh()
+ conn = self.get_connection_from_uci( uci )
+
+ try:
+ log.debug( "Updating status of snapshot '%s'" % snapshot.snapshot_id )
+ snap = conn.get_all_snapshots( [snapshot.snapshot_id] )
+ if len( snap ) > 0:
+ log.debug( "Snapshot '%s' status: %s" % ( snapshot.snapshot_id, snap[0].status ) )
+ snapshot.status = snap[0].status
+ snapshot.flush()
+ else:
+ err = "No snapshots returned by EC2 on general update"
+ log.error( "%s for UCI '%s'" % ( err, uci.name ) )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while updating snapshot status: " + str( e )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+ except Exception, ex:
+ err = "Error while updating snapshot status: " + str( ex )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+
+ def delete_snapshot( self, snapshot ):
+ if snapshot.status == snapshot_status.DELETE:
+ # Get credentials associated with this store
+ uci_id = snapshot.uci_id
+ uci = model.UCI.get( uci_id )
+ uci.refresh()
+ conn = self.get_connection_from_uci( uci )
+
+ try:
+ log.debug( "Deleting snapshot '%s'" % snapshot.snapshot_id )
+ snap = conn.delete_snapshot( snapshot.snapshot_id )
+ if snap == True:
+ snapshot.deleted = True
+ snapshot.status = snapshot_status.DELETED
+ snapshot.flush()
+ return snap
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while deleting snapshot: " + str( e )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+ except Exception, ex:
+ err = "Error while deleting snapshot: " + str( ex )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+ else:
+ err = "Cannot delete snapshot '"+snapshot.snapshot_id+"' because its status is '"+snapshot.status+"'. Only snapshots with '" + \
+ snapshot_status.COMPLETED+"' status can be deleted."
+ log.error( err )
+ snapshot.error = err
+ snapshot.flush()
+
+ def processZombie( self, inst ):
+ """
+ Attempt at discovering if starting an instance was successful but local database was not updated
+ accordingly or if something else failed and instance was never started. Currently, no automatic
+ repairs are being attempted; instead, appropriate error messages are set.
+ """
+ # Check if any instance-specific information was written to local DB; if 'yes', set instance and UCI's error message
+ # suggesting manual check.
+ if inst.launch_time != None or inst.reservation_id != None or inst.instance_id != None:
+ # Try to recover state - this is a best-effort attempt, so if something does not work immediately, no further
+ # recovery steps are attempted. Recovery is based on the hope that instance_id is available in the local DB; if not,
+ # report as error.
+ # Fields attempting to be recovered are: reservation_id, instance status, and launch_time
+ if inst.instance_id != None:
+ conn = self.get_connection_from_uci( inst.uci )
+ rl = conn.get_all_instances( [inst.instance_id] ) # reservation list
+ # Update local DB with relevant data from instance
+ if inst.reservation_id == None:
+ try:
+ inst.reservation_id = str(rl[0]).split(":")[1]
+ except: # something failed, so skip
+ pass
+
+ try:
+ state = rl[0].instances[0].update()
+ inst.state = state
+ inst.uci.state = state
+ inst.flush()
+ inst.uci.flush()
+ except: # something failed, so skip
+ pass
+
+ if inst.launch_time == None:
+ try:
+ launch_time = self.format_time( rl[0].instances[0].launch_time )
+ inst.launch_time = launch_time
+ inst.flush()
+ if inst.uci.launch_time == None:
+ inst.uci.launch_time = launch_time
+ inst.uci.flush()
+ except: # something failed, so skip
+ pass
+ else:
+ err = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI '" + str(inst.uci.name) + \
+ "' seems to have failed. Because it appears that cloud instance might have gotten started, manual check is recommended."
+ inst.error = err
+ inst.state = instance_states.ERROR
+ inst.uci.error = err
+ inst.uci.state = uci_states.ERROR
+ log.error( err )
+ inst.flush()
+ inst.uci.flush()
+
+ else: # Instance most likely never got processed, so set an error message suggesting the user try starting the instance again.
+ err = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI '" + str(inst.uci.name) + \
+ "' seems to have failed. Because it appears that cloud instance never got started, it should be safe to reset state and try " \
+ "starting the instance again."
+ inst.error = err
+ inst.state = instance_states.ERROR
+ inst.uci.error = err
+ inst.uci.state = uci_states.ERROR
+ log.error( err )
+ inst.flush()
+ inst.uci.flush()
+# uw = UCIwrapper( inst.uci )
+# log.debug( "Try automatically re-submitting UCI '%s'." % uw.get_name() )
+
+ def get_connection_from_uci( self, uci ):
+ """
+ Establishes and returns connection to cloud provider. Information needed to do so is obtained
+ directly from uci database object.
+ """
+ log.debug( 'Establishing %s cloud connection' % self.type )
+ a_key = uci.credentials.access_key
+ s_key = uci.credentials.secret_key
+ # Get connection
+ try:
+ region = RegionInfo( None, uci.credentials.provider.region_name, uci.credentials.provider.region_endpoint )
+ conn = EC2Connection( aws_access_key_id=a_key,
+ aws_secret_access_key=s_key,
+ is_secure=uci.credentials.provider.is_secure,
+ region=region,
+ path=uci.credentials.provider.path )
+ except boto.exception.EC2ResponseError, e:
+ err = "Establishing connection with cloud failed: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ return None
+
+ return conn
+
+# def updateUCI( self, uci ):
+# """
+# Runs a global status update on all storage volumes and all instances that are
+# associated with specified UCI
+# """
+# conn = self.get_connection( uci )
+#
+# # Update status of storage volumes
+# vl = model.CloudStore.filter( model.CloudInstance.c.uci_id == uci.id ).all()
+# vols = []
+# for v in vl:
+# vols.append( v.volume_id )
+# try:
+# volumes = conn.get_all_volumes( vols )
+# for i, v in enumerate( volumes ):
+# uci.store[i].i_id = v.instance_id
+# uci.store[i].status = v.status
+# uci.store[i].device = v.device
+# uci.store[i].flush()
+# except:
+# log.debug( "Error updating status of volume(s) associated with UCI '%s'. Status was not updated." % uci.name )
+# pass
+#
+# # Update status of instances
+# il = model.CloudInstance.filter_by( uci_id=uci.id ).filter( model.CloudInstance.c.state != 'terminated' ).all()
+# instanceList = []
+# for i in il:
+# instanceList.append( i.instance_id )
+# log.debug( 'instanceList: %s' % instanceList )
+# try:
+# reservations = conn.get_all_instances( instanceList )
+# for i, r in enumerate( reservations ):
+# uci.instance[i].state = r.instances[0].update()
+# log.debug('updating instance %s; status: %s' % ( uci.instance[i].instance_id, uci.instance[i].state ) )
+# uci.state = uci.instance[i].state
+# uci.instance[i].public_dns = r.instances[0].dns_name
+# uci.instance[i].private_dns = r.instances[0].private_dns_name
+# uci.instance[i].flush()
+# uci.flush()
+# except:
+# log.debug( "Error updating status of instances associated with UCI '%s'. Instance status was not updated." % uci.name )
+# pass
+
+ # --------- Helper methods ------------
+
+ def format_time( self, time ):
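+ # e.g., format_time( '2009-11-12T16:36:07.000Z' ) returns '2009-11-12 16:36:07.000'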
+ replacements = {'T':' ', 'Z':''}
+ for i, j in replacements.iteritems():
+ time = time.replace(i, j)
+ return time
+
\ No newline at end of file
diff -r 0984c3800775 -r 7d013eb98022 lib/galaxy/cloud/providers/eucalyptus.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/cloud/providers/eucalyptus.py Thu Nov 12 16:36:07 2009 -0500
@@ -0,0 +1,923 @@
+import subprocess, threading, os, errno, time, datetime
+from Queue import Queue, Empty
+from datetime import datetime
+
+from galaxy import model # Database interaction class
+from galaxy.model import mapping
+from galaxy.datatypes.data import nice_size
+from galaxy.util.bunch import Bunch
+from galaxy.cloud import UCIwrapper
+from Queue import Queue
+from sqlalchemy import or_, and_
+
+import galaxy.eggs
+galaxy.eggs.require("boto")
+from boto.ec2.connection import EC2Connection
+from boto.ec2.regioninfo import RegionInfo
+import boto.exception
+import boto
+
+import logging
+log = logging.getLogger( __name__ )
+
+uci_states = Bunch(
+ NEW_UCI = "newUCI",
+ NEW = "new",
+ CREATING = "creating",
+ DELETING_UCI = "deletingUCI",
+ DELETING = "deleting",
+ SUBMITTED_UCI = "submittedUCI",
+ SUBMITTED = "submitted",
+ SHUTTING_DOWN_UCI = "shutting-downUCI",
+ SHUTTING_DOWN = "shutting-down",
+ AVAILABLE = "available",
+ RUNNING = "running",
+ PENDING = "pending",
+ ERROR = "error",
+ DELETED = "deleted",
+ SNAPSHOT_UCI = "snapshotUCI",
+ SNAPSHOT = "snapshot"
+)
+
+instance_states = Bunch(
+ TERMINATED = "terminated",
+ SUBMITTED = "submitted",
+ RUNNING = "running",
+ PENDING = "pending",
+ SHUTTING_DOWN = "shutting-down",
+ ERROR = "error"
+)
+
+store_status = Bunch(
+ IN_USE = "in-use",
+ CREATING = "creating",
+ DELETED = 'deleted',
+ ERROR = "error"
+)
+
+snapshot_status = Bunch(
+ SUBMITTED = 'submitted',
+ PENDING = 'pending',
+ COMPLETED = 'completed',
+ DELETE = 'delete',
+ DELETED= 'deleted',
+ ERROR = "error"
+)
+
+class EucalyptusCloudProvider( object ):
+ """
+ Eucalyptus-based cloud provider implementation for managing instances.
+ """
+ STOP_SIGNAL = object()
+ def __init__( self, app ):
+ self.type = "eucalyptus" # cloud provider type (e.g., ec2, eucalyptus, opennebula)
+ self.zone = "epc"
+ self.queue = Queue()
+
+ self.threads = []
+ nworkers = 5
+ log.info( "Starting eucalyptus cloud controller workers..." )
+ for i in range( nworkers ):
+ worker = threading.Thread( target=self.run_next )
+ worker.start()
+ self.threads.append( worker )
+ log.debug( "%d eucalyptus cloud workers ready", nworkers )
+
+ def run_next( self ):
+ """Run the next job, waiting until one is available if necessary"""
+ cnt = 0
+ while 1:
+ uci_wrapper = self.queue.get()
+ if uci_wrapper is self.STOP_SIGNAL:
+ return
+ uci_state = uci_wrapper.get_state()
+ try:
+ if uci_state==uci_states.NEW:
+ self.createUCI( uci_wrapper )
+ elif uci_state==uci_states.DELETING:
+ self.deleteUCI( uci_wrapper )
+ elif uci_state==uci_states.SUBMITTED:
+ self.startUCI( uci_wrapper )
+ #self.dummyStartUCI( uci_wrapper )
+ elif uci_state==uci_states.SHUTTING_DOWN:
+ self.stopUCI( uci_wrapper )
+ elif uci_state==uci_states.SNAPSHOT:
+ self.snapshotUCI( uci_wrapper )
+ except:
+ log.exception( "Uncaught exception executing cloud request." )
+ cnt += 1
+
+ def get_connection( self, uci_wrapper ):
+ """
+ Establishes eucalyptus cloud connection using user's credentials associated with given UCI
+ """
+ log.debug( 'Establishing %s cloud connection.' % self.type )
+ provider = uci_wrapper.get_provider()
+ try:
+ region = RegionInfo( None, provider.region_name, provider.region_endpoint )
+ except Exception, ex:
+ err = "Selecting region with cloud provider failed: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ return None
+ try:
+ conn = EC2Connection( aws_access_key_id=uci_wrapper.get_access_key(),
+ aws_secret_access_key=uci_wrapper.get_secret_key(),
+ is_secure=provider.is_secure,
+ port=provider.port,
+ region=region,
+ path=provider.path )
+ except boto.exception.EC2ResponseError, e:
+ err = "Establishing connection with cloud failed: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ return None
+
+ return conn
+
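+ # As a rough illustration (actual values come from the provider record in the database), a
+ # typical Eucalyptus front end is reached on port 8773 with a path like '/services/Eucalyptus'.
+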
+ def check_key_pair( self, uci_wrapper, conn ):
+ """
+ Check the key pair associated with this UCI and, if needed, (re)generate it using the user's credentials.
+ """
+ kp = None
+ kp_name = uci_wrapper.get_name().replace(' ','_') + "_kp"
+ log.debug( "Checking user's key pair: '%s'" % kp_name )
+ try:
+ kp = conn.get_key_pair( kp_name )
+ uci_kp_name = uci_wrapper.get_key_pair_name()
+ uci_material = uci_wrapper.get_key_pair_material()
+ if kp != None:
+ if kp.name != uci_kp_name or uci_material == None:
+ # key pair exists on the cloud but not in local database, so re-generate it (i.e., delete and then create)
+ try:
+ conn.delete_key_pair( kp_name )
+ kp = self.create_key_pair( conn, kp_name )
+ uci_wrapper.set_key_pair( kp.name, kp.material )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while deleting key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ else:
+ try:
+ kp = self.create_key_pair( conn, kp_name )
+ uci_wrapper.set_key_pair( kp.name, kp.material )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while creating key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ except Exception, ex:
+ err = "Exception while creating key pair: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ except boto.exception.EC2ResponseError, e: # No keypair under this name exists so create it
+ if e.code == 'InvalidKeyPair.NotFound':
+ log.info( "No keypair found, creating keypair '%s'" % kp_name )
+ kp = self.create_key_pair( conn, kp_name )
+ uci_wrapper.set_key_pair( kp.name, kp.material )
+ else:
+ err = "EC2 response error while retrieving key pair: " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+
+ if kp != None:
+ return kp.name
+ else:
+ return None
+
+ def create_key_pair( self, conn, kp_name ):
+ try:
+ return conn.create_key_pair( kp_name )
+ except boto.exception.EC2ResponseError, e:
+ return None
+
+ def get_mi_id( self, uci_wrapper, i_index ):
+ """
+ Get appropriate machine image (mi) based on instance size.
+ """
+ i_type = uci_wrapper.get_type( i_index )
+ if i_type=='m1.small' or i_type=='c1.medium':
+ arch = 'i386'
+ else:
+ arch = 'x86_64'
+
+ mi = model.CloudImage.filter_by( deleted=False, provider_type=self.type, architecture=arch ).first()
+ if mi:
+ return mi.image_id
+ else:
+ err = "Machine image could not be retrieved"
+ log.error( "%s for UCI '%s'." % (err, uci_wrapper.get_name() ) )
+ uci_wrapper.set_error( err+". Contact site administrator to ensure needed machine image is registered.", True )
+ return None
+
+ def shutdown( self ):
+ """Attempts to gracefully shut down the monitor thread"""
+ log.info( "sending stop signal to worker threads in eucalyptus cloud manager" )
+ for i in range( len( self.threads ) ):
+ self.queue.put( self.STOP_SIGNAL )
+ log.info( "eucalyptus cloud manager stopped" )
+
+ def put( self, uci_wrapper ):
+ # Get rid of UCI from state description
+ state = uci_wrapper.get_state()
+ uci_wrapper.change_state( state.split('U')[0] ) # remove 'UCI' from end of state description (i.e., mark as accepted and ready for processing)
+ self.queue.put( uci_wrapper )
+
+ def createUCI( self, uci_wrapper ):
+ """
+ Creates User Configured Instance (UCI). Essentially, creates storage volume on cloud provider
+ and registers relevant information in Galaxy database.
+ """
+ conn = self.get_connection( uci_wrapper )
+
+ # Because only 1 storage volume may be created at UCI config time, index of this storage volume in local Galaxy DB w.r.t
+ # current UCI is 0; therefore, it can be referenced in following code
+ log.info( "Creating volume in zone '%s'..." % uci_wrapper.get_uci_availability_zone() )
+ if uci_wrapper.get_uci_availability_zone()=='':
+ log.info( "Availability zone for UCI (i.e., storage volume) was not selected, using default zone: %s" % self.zone )
+ uci_wrapper.set_store_availability_zone( self.zone )
+
+ log.debug( "Creating volume; using command: conn.create_volume( %s, '%s', snapshot=None )" % ( uci_wrapper.get_store_size( 0 ), uci_wrapper.get_uci_availability_zone() ))
+ vol = conn.create_volume( uci_wrapper.get_store_size( 0 ), uci_wrapper.get_uci_availability_zone(), snapshot=None )
+ uci_wrapper.set_store_volume_id( 0, vol.id )
+
+ # Retrieve created volume again to get updated status
+ try:
+ vl = conn.get_all_volumes( [vol.id] )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while retrieving (i.e., updating status) of just created storage volume '" + vol.id + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_store_status( vol.id, uci_states.ERROR )
+ uci_wrapper.set_error( err, True )
+ return
+ except Exception, ex:
+ err = "Error while retrieving (i.e., updating status) of just created storage volume '" + vol.id + "': " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ return
+
+ if len( vl ) > 0:
+ # EPC does not allow creation of storage volumes (it deletes one as soon as it is created), so manually set uci_state here
+ if vl[0].status == store_status.DELETED:
+ uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
+ else:
+ uci_wrapper.change_state( uci_state=vl[0].status )
+ uci_wrapper.set_store_status( vol.id, vl[0].status )
+ else:
+ err = "Volume '" + vol.id +"' not found by EC2 after being created."
+ log.error( err )
+ uci_wrapper.set_store_status( vol.id, uci_states.ERROR )
+ uci_wrapper.set_error( err, True )
+
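The create-then-recheck idiom in createUCI above can be condensed as follows; a hedged sketch assuming a boto EC2Connection named conn, with illustrative size and zone values (the helper is not part of this changeset):

    def create_and_verify_volume( conn, size_gb, zone ):
        vol = conn.create_volume( size_gb, zone, snapshot=None )
        # Re-query so the locally recorded status reflects what the provider reports right now
        vl = conn.get_all_volumes( [vol.id] )
        if len( vl ) > 0:
            return vol.id, vl[0].status
        return vol.id, None

    # e.g. create_and_verify_volume( conn, 5, 'epc' ) might return something like ( 'vol-12345678', 'creating' )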
+ def deleteUCI( self, uci_wrapper ):
+ """
+ Deletes UCI. NOTE that this implies deletion of any and all data associated
+ with this UCI from the cloud. All data will be deleted.
+ """
+ conn = self.get_connection( uci_wrapper )
+ vl = [] # volume list
+ count = 0 # counter for checking if all volumes assoc. w/ UCI were deleted
+
+ # Get all volumes assoc. w/ UCI, delete them from cloud as well as in local DB
+ vl = uci_wrapper.get_all_stores()
+ deletedList = []
+ failedList = []
+ for v in vl:
+ log.debug( "Deleting volume with id='%s'" % v.volume_id )
+ try:
+ if conn.delete_volume( v.volume_id ):
+ deletedList.append( v.volume_id )
+ v.deleted = True
+ v.flush()
+ count += 1
+ else:
+ failedList.append( v.volume_id )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while deleting storage volume '" + v.volume_id + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_store_error( err, store_id = v.volume_id )
+ uci_wrapper.set_error( err, True )
+
+ # Delete UCI only if all associated volumes were deleted
+ if count == len( vl ):
+ uci_wrapper.set_deleted()
+ else:
+ err = "Deleting following volume(s) failed: "+failedList+". However, these volumes were successfully deleted: "+deletedList+". \
+ MANUAL intervention and processing needed."
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+
+ def snapshotUCI( self, uci_wrapper ):
+ """
+ Creates snapshot of all storage volumes associated with this UCI.
+ """
+ if uci_wrapper.get_state() != uci_states.ERROR:
+ conn = self.get_connection( uci_wrapper )
+
+ snapshots = uci_wrapper.get_snapshots( status = snapshot_status.SUBMITTED )
+ for snapshot in snapshots:
+ log.debug( "Snapshot DB id: '%s', volume id: '%s'" % ( snapshot.id, snapshot.store.volume_id ) )
+ try:
+ snap = conn.create_snapshot( volume_id=snapshot.store.volume_id )
+ snap_id = str( snap ).split(':')[1]
+ uci_wrapper.set_snapshot_id( snapshot.id, snap_id )
+ sh = conn.get_all_snapshots( snap_id ) # get updated status
+ uci_wrapper.set_snapshot_status( status=sh[0].status, snap_id=snap_id )
+ except boto.exception.EC2ResponseError, e:
+ err = "Cloud provider response error while creating snapshot: " + str( e )
+ log.error( err )
+ uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
+ uci_wrapper.set_error( err, True )
+ return
+ except Exception, ex:
+ err = "Error while creating snapshot: " + str( ex )
+ log.error( err )
+ uci_wrapper.set_snapshot_error( error=err, snap_index=snapshot.id, set_status=True )
+ uci_wrapper.set_error( err, True )
+ return
+
+ uci_wrapper.change_state( uci_state=uci_states.AVAILABLE )
+
+# if uci_wrapper.get_state() != uci_states.ERROR:
+#
+# snapshots = uci_wrapper.get_snapshots( status = 'submitted' )
+# for snapshot in snapshots:
+# uci_wrapper.set_snapshot_id( snapshot.id, None, 'euca_error' )
+#
+# log.debug( "Eucalyptus snapshot attempted by user for UCI '%s'" % uci_wrapper.get_name() )
+# uci_wrapper.set_error( "Eucalyptus does not support creation of snapshots at this moment. No snapshot or other changes were performed. \
+# Feel free to reset the state of this instance and use it normally.", True )
+
+
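snapshotUCI above extracts the snapshot id by splitting the boto object's string form, then re-queries for an updated status. A condensed, hedged sketch of that step (assumes a boto EC2Connection named conn; the helper name is illustrative):

    def snapshot_volume( conn, volume_id ):
        snap = conn.create_snapshot( volume_id=volume_id )
        snap_id = str( snap ).split( ':' )[1]                     # str(snap) looks like 'Snapshot:snap-xxxxxxxx'
        status = conn.get_all_snapshots( [snap_id] )[0].status    # updated status from the provider
        return snap_id, status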
+ def addStorageToUCI( self, uci_wrapper ):
+ """ Adds more storage to specified UCI """
+
+ def dummyStartUCI( self, uci_wrapper ):
+
+ uci = uci_wrapper.get_uci()
+ log.debug( "Would be starting instance '%s'" % uci.name )
+# uci_wrapper.change_state( uci_states.SUBMITTED_UCI )
+# log.debug( "Set UCI state to SUBMITTED_UCI" )
+ log.debug( "Sleeping a bit... (%s)" % uci.name )
+ time.sleep(10)
+ log.debug( "Woke up! (%s)" % uci.name )
+
+ def startUCI( self, uci_wrapper ):
+ """
+ Starts instance(s) of given UCI on the cloud.
+ """
+ if uci_wrapper.get_state() != uci_states.ERROR:
+ conn = self.get_connection( uci_wrapper )
+ self.check_key_pair( uci_wrapper, conn )
+ if uci_wrapper.get_key_pair_name() == None:
+ err = "Key pair not found"
+ log.error( "%s for UCI '%s'." % ( err, uci_wrapper.get_name() ) )
+ uci_wrapper.set_error( err + ". Try resetting the state and starting the instance again.", True )
+ return
+
+ i_indexes = uci_wrapper.get_instances_indexes( state=instance_states.SUBMITTED ) # Get indexes of instances associated with this UCI that are in 'submitted' state
+ log.debug( "Starting instances with IDs: '%s' associated with UCI '%s' " % ( i_indexes, uci_wrapper.get_name(), ) )
+ if len( i_indexes ) > 0:
+ for i_index in i_indexes:
+ # Get machine image for current instance
+ mi_id = self.get_mi_id( uci_wrapper, i_index )
+ log.debug( "mi_id: %s, uci_wrapper.get_key_pair_name(): %s" % ( mi_id, uci_wrapper.get_key_pair_name() ) )
+ uci_wrapper.set_mi( i_index, mi_id )
+
+ if uci_wrapper.get_state() != uci_states.ERROR:
+ # Start an instance
+ log.debug( "Starting UCI instance '%s'" % uci_wrapper.get_name() )
+ log.debug( "Using following command: conn.run_instances( image_id='%s', key_name='%s', instance_type='%s' )"
+ % ( mi_id, uci_wrapper.get_key_pair_name(), uci_wrapper.get_type( i_index ) ) )
+ reservation = None
+ try:
+ reservation = conn.run_instances( image_id=mi_id,
+ key_name=uci_wrapper.get_key_pair_name(),
+ instance_type=uci_wrapper.get_type( i_index ) )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error when starting UCI '"+ uci_wrapper.get_name() +"': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ except Exception, ex:
+ err = "Error when starting UCI '" + uci_wrapper.get_name() + "': " + str( ex )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ # Record newly available instance data into local Galaxy database
+ if reservation:
+ uci_wrapper.set_launch_time( self.format_time( reservation.instances[0].launch_time ), i_index=i_index )
+ if not uci_wrapper.uci_launch_time_set():
+ uci_wrapper.set_uci_launch_time( self.format_time( reservation.instances[0].launch_time ) )
+ try:
+ uci_wrapper.set_reservation_id( i_index, str( reservation ).split(":")[1] )
+ # TODO: if more than a single instance will be started through single reservation, change this reference from element [0]
+ i_id = str( reservation.instances[0]).split(":")[1]
+ uci_wrapper.set_instance_id( i_index, i_id )
+ s = reservation.instances[0].state
+ uci_wrapper.change_state( s, i_id, s )
+ log.debug( "Instance of UCI '%s' started, current state: '%s'" % ( uci_wrapper.get_name(), uci_wrapper.get_state() ) )
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error when retrieving instance information for UCI '" + uci_wrapper.get_name() + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ else:
+ log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
+ else:
+ err = "No instances in state '"+ instance_states.SUBMITTED +"' found for UCI '" + uci_wrapper.get_name() + \
+ "'. Nothing to start."
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+ else:
+ log.error( "UCI '%s' is in 'error' state, starting instance was aborted." % uci_wrapper.get_name() )
+
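At its core, startUCI wraps a single run_instances call plus bookkeeping of the returned reservation. A minimal hedged sketch (image id, key name and instance type are placeholders; the helper is not part of this changeset):

    def launch_one( conn, image_id, key_name, instance_type ):
        reservation = conn.run_instances( image_id=image_id,
                                          key_name=key_name,
                                          instance_type=instance_type )
        inst = reservation.instances[0]                      # single-instance reservation assumed
        reservation_id = str( reservation ).split( ':' )[1]  # str(reservation) looks like 'Reservation:r-xxxxxxxx'
        return reservation_id, inst.id, inst.state, inst.launch_time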
+ def stopUCI( self, uci_wrapper):
+ """
+ Stops all cloud instances associated with the given UCI.
+ """
+ conn = self.get_connection( uci_wrapper )
+
+ # Get all instances associated with given UCI
+ il = uci_wrapper.get_instances_ids() # instance list
+ log.debug( 'List of instances being terminated: %s' % il )
+ rl = conn.get_all_instances( il ) # Reservation list associated with given instances
+
+ # Initiate shutdown of all instances under given UCI
+ cnt = 0
+ stopped = []
+ not_stopped = []
+ for r in rl:
+ for inst in r.instances:
+ log.debug( "Sending stop signal to instance '%s' associated with reservation '%s' (UCI: %s)." % ( inst, r, uci_wrapper.get_name() ) )
+ try:
+ inst.stop()
+ uci_wrapper.set_stop_time( datetime.utcnow(), i_id=inst.id )
+ uci_wrapper.change_state( instance_id=inst.id, i_state=inst.update() )
+ stopped.append( inst )
+ except boto.exception.EC2ResponseError, e:
+ not_stopped.append( inst )
+ err = "EC2 response error when stopping instance '" + inst.instance_id + "': " + str( e )
+ log.error( err )
+ uci_wrapper.set_error( err, True )
+
+ uci_wrapper.reset_uci_launch_time()
+ log.debug( "Termination was initiated for all instances of UCI '%s'." % uci_wrapper.get_name() )
+
+# dbInstances = get_instances( trans, uci ) #TODO: handle list!
+#
+# # Get actual cloud instance object
+# cloudInstance = get_cloud_instance( conn, dbInstances.instance_id )
+#
+# # TODO: Detach persistent storage volume(s) from instance and update volume data in local database
+# stores = get_stores( trans, uci )
+# for i, store in enumerate( stores ):
+# log.debug( "Detaching volume '%s' to instance '%s'." % ( store.volume_id, dbInstances.instance_id ) )
+# mntDevice = store.device
+# volStat = None
+## Detaching volume does not work with Eucalyptus Public Cloud, so comment it out
+## try:
+## volStat = conn.detach_volume( store.volume_id, dbInstances.instance_id, mntDevice )
+## except:
+## log.debug ( 'Error detaching volume; still going to try and stop instance %s.' % dbInstances.instance_id )
+# store.attach_time = None
+# store.device = None
+# store.i_id = None
+# store.status = volStat
+# log.debug ( '***** volume status: %s' % volStat )
+#
+# # Stop the instance and update status in local database
+# cloudInstance.stop()
+# dbInstances.stop_time = datetime.utcnow()
+# while cloudInstance.state != 'terminated':
+# log.debug( "Stopping instance %s state; current state: %s" % ( str( cloudInstance ).split(":")[1], cloudInstance.state ) )
+# time.sleep(3)
+# cloudInstance.update()
+# dbInstances.state = cloudInstance.state
+#
+# # Reset relevant UCI fields
+# uci.state = 'available'
+# uci.launch_time = None
+#
+# # Persist
+# session = trans.sa_session
+## session.save_or_update( stores )
+# session.save_or_update( dbInstances ) # TODO: Is this going to work w/ multiple instances stored in dbInstances variable?
+# session.save_or_update( uci )
+# session.flush()
+# trans.log_event( "User stopped cloud instance '%s'" % uci.name )
+# trans.set_message( "Galaxy instance '%s' stopped." % uci.name )
+
+ def update( self ):
+ """
+ Runs a global status update on all instances that are in 'running', 'pending', or 'shutting-down' state.
+ Also runs an update on all storage volumes that are in 'in-use', 'creating', or 'None' state.
+ The reason behind this method is to keep the state of the local DB in sync with the actual cloud resources.
+ """
+ log.debug( "Running general status update for EPC UCIs..." )
+ # Update instances
+ instances = model.CloudInstance.filter( or_( model.CloudInstance.c.state==instance_states.RUNNING,
+ model.CloudInstance.c.state==instance_states.PENDING,
+ model.CloudInstance.c.state==instance_states.SHUTTING_DOWN ) ).all()
+ for inst in instances:
+ if self.type == inst.uci.credentials.provider.type:
+ log.debug( "[%s] Running general status update on instance '%s'" % ( inst.uci.credentials.provider.type, inst.instance_id ) )
+ self.updateInstance( inst )
+
+ # Update storage volume(s)
+ stores = model.CloudStore.filter( or_( model.CloudStore.c.status==store_status.IN_USE,
+ model.CloudStore.c.status==store_status.CREATING,
+ model.CloudStore.c.status==None ) ).all()
+ for store in stores:
+ if self.type == store.uci.credentials.provider.type: # and store.volume_id != None:
+ log.debug( "[%s] Running general status update on store with local database ID: '%s'" % ( store.uci.credentials.provider.type, store.id ) )
+ self.updateStore( store )
+# else:
+# log.error( "[%s] There exists an entry for UCI (%s) storage volume without an ID. Storage volume might have been created with "
+# "cloud provider though. Manual check is recommended." % ( store.uci.credentials.provider.type, store.uci.name ) )
+# store.uci.error = "There exists an entry in local database for a storage volume without an ID. Storage volume might have been created " \
+# "with cloud provider though. Manual check is recommended. After understanding what happened, local database entry for given " \
+# "storage volume should be updated."
+# store.status = store_status.ERROR
+# store.uci.state = uci_states.ERROR
+# store.uci.flush()
+# store.flush()
+
+ # Update pending snapshots or delete ones marked for deletion
+ snapshots = model.CloudSnapshot.filter( or_( model.CloudSnapshot.c.status==snapshot_status.PENDING, model.CloudSnapshot.c.status==snapshot_status.DELETE ) ).all()
+ for snapshot in snapshots:
+ if self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.PENDING:
+ log.debug( "[%s] Running general status update on snapshot '%s'" % ( snapshot.uci.credentials.provider.type, snapshot.snapshot_id ) )
+ self.updateSnapshot( snapshot )
+ elif self.type == snapshot.uci.credentials.provider.type and snapshot.status == snapshot_status.DELETE:
+ log.debug( "[%s] Initiating deletion of snapshot '%s'" % ( snapshot.uci.credentials.provider.type, snapshot.snapshot_id ) )
+ self.delete_snapshot( snapshot )
+
+ # Attempt to update any zombie UCIs (i.e., instances that have been in SUBMITTED state for longer than expected - see below for exact time)
+ zombies = model.UCI.filter_by( state=uci_states.SUBMITTED ).all()
+ for zombie in zombies:
+ log.debug( "zombie UCI: %s" % zombie.name )
+ z_instances = model.CloudInstance \
+ .filter_by( uci_id=zombie.id, state=None ) \
+ .all()
+ for z_inst in z_instances:
+ if self.type == z_inst.uci.credentials.provider.type:
+# log.debug( "z_inst.id: '%s', state: '%s'" % ( z_inst.id, z_inst.state ) )
+ td = datetime.utcnow() - z_inst.update_time
+ log.debug( "z_inst.id: %s, time delta is %s sec" % ( z_inst.id, td.seconds ) )
+ if td.seconds > 180: # if instance has been in SUBMITTED state for more than 3 minutes
+ log.debug( "[%s](td=%s) Running zombie repair update on instance with DB id '%s'" % ( z_inst.uci.credentials.provider.type, td.seconds, z_inst.id ) )
+ self.processZombie( z_inst )
+
+ def updateInstance( self, inst ):
+
+ # Get credentials associated with this instance
+ uci_id = inst.uci_id
+ uci = model.UCI.get( uci_id )
+ uci.refresh()
+ conn = self.get_connection_from_uci( uci )
+
+ # Get reservations handle for given instance
+ try:
+ rl= conn.get_all_instances( [inst.instance_id] )
+ except boto.exception.EC2ResponseError, e:
+ err = "Retrieving instance(s) from cloud failed for UCI '"+ uci.name +"' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ return None
+
+ # Because EPC deletes references to reservations shortly after instances have terminated, getting an empty list in response to a query
+ # typically means the instance has successfully shut down but the check was not performed soon enough. Until an alternative solution
+ # is found, the code below sets the state of the given UCI to 'error' to indicate to the user that something out of the ordinary happened.
+ if len( rl ) == 0:
+ err = "Instance ID '"+inst.instance_id+"' was not found by the cloud provider. Instance might have crashed or otherwise been terminated."+ \
+ "Manual check is recommended."
+ log.error( err )
+ inst.error = err
+ uci.error = err
+ inst.state = instance_states.TERMINATED
+ uci.state = uci_states.ERROR
+ uci.launch_time = None
+ inst.flush()
+ uci.flush()
+ # Update instance status in local DB with info from cloud provider
+ for r in rl:
+ for i, cInst in enumerate( r.instances ):
+ try:
+ s = cInst.update()
+ log.debug( "Checking state of cloud instance '%s' associated with reservation '%s'. State='%s'" % ( cInst, r, s ) )
+ if s != inst.state:
+ inst.state = s
+ inst.flush()
+ # After instance has shut down, ensure UCI is marked as 'available'
+ if s == instance_states.TERMINATED and uci.state != uci_states.ERROR:
+ uci.state = uci_states.AVAILABLE
+ uci.launch_time = None
+ uci.flush()
+ # Making sure state of UCI is updated. Once multiple instances become associated with single UCI, this will need to be changed.
+ if s != uci.state and s != instance_states.TERMINATED:
+ uci.state = s
+ uci.flush()
+ if cInst.public_dns_name != inst.public_dns:
+ inst.public_dns = cInst.public_dns_name
+ inst.flush()
+ if cInst.private_dns_name != inst.private_dns:
+ inst.private_dns = cInst.private_dns_name
+ inst.flush()
+ except boto.exception.EC2ResponseError, e:
+ err = "Updating instance status from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ return None
+
+ def updateStore( self, store ):
+ # Get credentials associated with this store
+ uci_id = store.uci_id
+ uci = model.UCI.get( uci_id )
+ uci.refresh()
+ conn = self.get_connection_from_uci( uci )
+
+ try:
+ vl = conn.get_all_volumes( [store.volume_id] )
+ except boto.exception.EC2ResponseError, e:
+ err = "Retrieving volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ return None
+
+ # Update store status in local DB with info from cloud provider
+ if len(vl) > 0:
+ try:
+ if store.status != vl[0].status:
+ # In case something failed during creation of UCI but actual storage volume was created and yet
+ # UCI state remained as 'new', try to remedy this by updating UCI state here
+ if ( store.status == None ) and ( store.volume_id != None ):
+ uci.state = vl[0].status
+ uci.flush()
+ # If UCI was marked in state 'CREATING', update its status to reflect new status
+ elif ( uci.state == uci_states.CREATING ):
+ # Because Eucalyptus Public Cloud (EPC) deletes volumes immediately after they are created, artificially
+ # set status of the given UCI to 'available' based on the storage volume's availability zone (i.e., it is residing
+ # in EPC as opposed to some other Eucalyptus-based cloud that allows creation of storage volumes).
+ if store.availability_zone == 'epc':
+ uci.state = uci_states.AVAILABLE
+ else:
+ uci.state = vl[0].status
+ uci.flush()
+
+ store.status = vl[0].status
+ store.flush()
+ if store.i_id != vl[0].instance_id:
+ store.i_id = vl[0].instance_id
+ store.flush()
+ if store.attach_time != vl[0].attach_time:
+ store.attach_time = vl[0].attach_time
+ store.flush()
+ if store.device != vl[0].device:
+ store.device = vl[0].device
+ store.flush()
+ except boto.exception.EC2ResponseError, e:
+ err = "Updating status of volume(s) from cloud failed for UCI '"+ uci.name + "' during general status update: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ return None
+ else:
+ err = "No storage volumes returned by cloud provider on general update"
+ log.error( "%s for UCI '%s'" % ( err, uci.name ) )
+ store.status = store_status.ERROR
+ store.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ store.flush()
+
+ def updateSnapshot( self, snapshot ):
+ # Get credentials associated with this store
+ uci_id = snapshot.uci_id
+ uci = model.UCI.get( uci_id )
+ uci.refresh()
+ conn = self.get_connection_from_uci( uci )
+
+ try:
+ log.debug( "Updating status of snapshot '%s'" % snapshot.snapshot_id )
+ snap = conn.get_all_snapshots( [snapshot.snapshot_id] )
+ if len( snap ) > 0:
+ log.debug( "Snapshot '%s' status: %s" % ( snapshot.snapshot_id, snap[0].status ) )
+ snapshot.status = snap[0].status
+ snapshot.flush()
+ else:
+ err = "No snapshots returned by cloud provider on general update"
+ log.error( "%s for UCI '%s'" % ( err, uci.name ) )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+ except boto.exception.EC2ResponseError, e:
+ err = "Cloud provider response error while updating snapshot status: " + str( e )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+ except Exception, ex:
+ err = "Error while updating snapshot status: " + str( ex )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+
+ def delete_snapshot( self, snapshot ):
+ if snapshot.status == snapshot_status.DELETE:
+ # Get credentials associated with this store
+ uci_id = snapshot.uci_id
+ uci = model.UCI.get( uci_id )
+ uci.refresh()
+ conn = self.get_connection_from_uci( uci )
+
+ try:
+ log.debug( "Deleting snapshot '%s'" % snapshot.snapshot_id )
+ snap = conn.delete_snapshot( snapshot.snapshot_id )
+ if snap == True:
+ snapshot.deleted = True
+ snapshot.status = snapshot_status.DELETED
+ snapshot.flush()
+ return snap
+ except boto.exception.EC2ResponseError, e:
+ err = "EC2 response error while deleting snapshot: " + str( e )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+ except Exception, ex:
+ err = "Error while deleting snapshot: " + str( ex )
+ log.error( err )
+ snapshot.status = snapshot_status.ERROR
+ snapshot.error = err
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ snapshot.flush()
+ else:
+ err = "Cannot delete snapshot '"+snapshot.snapshot_id+"' because its status is '"+snapshot.status+"'. Only snapshots with '" + \
+ snapshot_status.COMPLETED+"' status can be deleted."
+ log.error( err )
+ snapshot.error = err
+ snapshot.flush()
+
+ def processZombie( self, inst ):
+ """
+ Attempts to discover whether starting an instance succeeded but the local database was not updated
+ accordingly, or whether something else failed and the instance was never started. Currently, no automatic
+ repairs are attempted; instead, appropriate error messages are set.
+ """
+ # Check if any instance-specific information was written to local DB; if 'yes', set instance and UCI's error message
+ # suggesting manual check.
+ if inst.launch_time != None or inst.reservation_id != None or inst.instance_id != None:
+ # Try to recover state - this is a best-effort attempt, so if something does not work immediately, no further
+ # recovery steps are attempted. Recovery relies on instance_id being available in the local DB; if it is not,
+ # report as error.
+ # Fields attempting to be recovered are: reservation_id, instance status, and launch_time
+ if inst.instance_id != None:
+ conn = self.get_connection_from_uci( inst.uci )
+ rl = conn.get_all_instances( [inst.instance_id] ) # reservation list
+ # Update local DB with relevant data from instance
+ if inst.reservation_id == None:
+ try:
+ inst.reservation_id = str(rl[0]).split(":")[1]
+ except: # something failed, so skip
+ pass
+
+ try:
+ state = rl[0].instances[0].update()
+ inst.state = state
+ inst.uci.state = state
+ inst.flush()
+ inst.uci.flush()
+ except: # something failed, so skip
+ pass
+
+ if inst.launch_time == None:
+ try:
+ launch_time = self.format_time( rl[0].instances[0].launch_time )
+ inst.launch_time = launch_time
+ inst.flush()
+ if inst.uci.launch_time == None:
+ inst.uci.launch_time = launch_time
+ inst.uci.flush()
+ except: # something failed, so skip
+ pass
+ else:
+ err = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with this UCI '" + str(inst.uci.name) + \
+ "' seems to have failed. Because it appears that cloud instance might have gotten started, manual check is recommended."
+ inst.error = err
+ inst.state = instance_states.ERROR
+ inst.uci.error = err
+ inst.uci.state = uci_states.ERROR
+ log.error( err )
+ inst.flush()
+ inst.uci.flush()
+
+ else: # Instance most likely never got processed, so set an error message suggesting the user try starting the instance again.
+ err = "Starting a machine instance (DB id: '"+str(inst.id)+"') associated with UCI '" + str(inst.uci.name) + \
+ "' seems to have failed. Because it appears that the cloud instance never started, it should be safe to reset the state and try " \
+ "starting the instance again."
+ inst.error = err
+ inst.state = instance_states.ERROR
+ inst.uci.error = err
+ inst.uci.state = uci_states.ERROR
+ log.error( err )
+ inst.flush()
+ inst.uci.flush()
+# uw = UCIwrapper( inst.uci )
+# log.debug( "Try automatically re-submitting UCI '%s'." % uw.get_name() )
+
+ def get_connection_from_uci( self, uci ):
+ """
+ Establishes and returns a connection to the cloud provider. Information needed to do so is obtained
+ directly from the uci database object.
+ """
+ log.debug( 'Establishing %s cloud connection.' % self.type )
+ a_key = uci.credentials.access_key
+ s_key = uci.credentials.secret_key
+ # Get connection
+ try:
+ region = RegionInfo( None, uci.credentials.provider.region_name, uci.credentials.provider.region_endpoint )
+ conn = EC2Connection( aws_access_key_id=a_key,
+ aws_secret_access_key=s_key,
+ is_secure=uci.credentials.provider.is_secure,
+ port=uci.credentials.provider.port,
+ region=region,
+ path=uci.credentials.provider.path )
+ except boto.exception.EC2ResponseError, e:
+ err = "Establishing connection with cloud failed: " + str( e )
+ log.error( err )
+ uci.error = err
+ uci.state = uci_states.ERROR
+ uci.flush()
+ return None
+
+ return conn
+
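For non-AWS endpoints (e.g., a Eucalyptus installation), the connection is built by pointing boto's EC2Connection at a custom RegionInfo, exactly as get_connection_from_uci does above. A hedged sketch with typical Eucalyptus defaults (port 8773 and path '/services/Eucalyptus' are assumptions, not values taken from this changeset):

    from boto.ec2.connection import EC2Connection
    from boto.ec2.regioninfo import RegionInfo

    def connect_to_cloud( access_key, secret_key, region_name, endpoint,
                          is_secure=False, port=8773, path='/services/Eucalyptus' ):
        # RegionInfo lets boto talk to an arbitrary EC2-compatible endpoint
        region = RegionInfo( None, region_name, endpoint )
        return EC2Connection( aws_access_key_id=access_key,
                              aws_secret_access_key=secret_key,
                              is_secure=is_secure,
                              port=port,
                              region=region,
                              path=path )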
+# def updateUCI( self, uci ):
+# """
+# Runs a global status update on all storage volumes and all instances that are
+# associated with specified UCI
+# """
+# conn = self.get_connection( uci )
+#
+# # Update status of storage volumes
+# vl = model.CloudStore.filter( model.CloudInstance.c.uci_id == uci.id ).all()
+# vols = []
+# for v in vl:
+# vols.append( v.volume_id )
+# try:
+# volumes = conn.get_all_volumes( vols )
+# for i, v in enumerate( volumes ):
+# uci.store[i].i_id = v.instance_id
+# uci.store[i].status = v.status
+# uci.store[i].device = v.device
+# uci.store[i].flush()
+# except:
+# log.debug( "Error updating status of volume(s) associated with UCI '%s'. Status was not updated." % uci.name )
+# pass
+#
+# # Update status of instances
+# il = model.CloudInstance.filter_by( uci_id=uci.id ).filter( model.CloudInstance.c.state != 'terminated' ).all()
+# instanceList = []
+# for i in il:
+# instanceList.append( i.instance_id )
+# log.debug( 'instanceList: %s' % instanceList )
+# try:
+# reservations = conn.get_all_instances( instanceList )
+# for i, r in enumerate( reservations ):
+# uci.instance[i].state = r.instances[0].update()
+# log.debug('updating instance %s; status: %s' % ( uci.instance[i].instance_id, uci.instance[i].state ) )
+# uci.state = uci.instance[i].state
+# uci.instance[i].public_dns = r.instances[0].dns_name
+# uci.instance[i].private_dns = r.instances[0].private_dns_name
+# uci.instance[i].flush()
+# uci.flush()
+# except:
+# log.debug( "Error updating status of instances associated with UCI '%s'. Instance status was not updated." % uci.name )
+# pass
+
+ # --------- Helper methods ------------
+
+ def format_time( self, time ):
+ replacements = {'T':' ', 'Z':''}
+ for i, j in replacements.iteritems():
+ time = time.replace(i, j)
+ return time
+
\ No newline at end of file
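For reference, format_time above only strips the ISO-8601 separators from the timestamps boto returns, e.g.:

    # format_time( '2009-11-17T16:14:54.000Z' )  ->  '2009-11-17 16:14:54.000'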
diff -r 0984c3800775 -r 7d013eb98022 lib/galaxy/config.py
--- a/lib/galaxy/config.py Thu Nov 12 15:25:48 2009 -0500
+++ b/lib/galaxy/config.py Thu Nov 12 16:36:07 2009 -0500
@@ -113,6 +113,13 @@
except ConfigParser.NoSectionError:
self.tool_runners = []
self.datatypes_config = kwargs.get( 'datatypes_config_file', 'datatypes_conf.xml' )
+ # Cloud configuration options
+ self.cloud_controller_instance = string_as_bool( kwargs.get( 'cloud_controller_instance', 'False' ) )
+ self.cloud_provider = kwargs.get( 'cloud_provider', None )
+ if self.cloud_controller_instance:
+ self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'True' ) )
+ else:
+ self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'False' ) )
def get( self, key, default ):
return self.config_dict.get( key, default )
def get_bool( self, key, default ):
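The options read above come from universe_wsgi.ini; a hedged sketch of what enabling them might look like (the provider value shown is only illustrative, not confirmed by this changeset):

    cloud_controller_instance = True
    cloud_provider = eucalyptus
    enable_cloud_execution = True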
diff -r 0984c3800775 -r 7d013eb98022 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py Thu Nov 12 15:25:48 2009 -0500
+++ b/lib/galaxy/model/__init__.py Thu Nov 12 16:36:07 2009 -0500
@@ -38,6 +38,7 @@
self.username = None
# Relationships
self.histories = []
+ self.credentials = []
def set_password_cleartext( self, cleartext ):
"""Set 'self.password' to the digest of 'cleartext'."""
@@ -1049,7 +1050,60 @@
def __init__( self, galaxy_session, history ):
self.galaxy_session = galaxy_session
self.history = history
+
+class CloudImage( object ):
+ def __init__( self ):
+ self.id = None
+ self.instance_id = None
+ self.state = None
+class UCI( object ):
+ def __init__( self ):
+ self.id = None
+ self.user = None
+
+class CloudInstance( object ):
+ def __init__( self ):
+ self.id = None
+ self.user = None
+ self.name = None
+ self.instance_id = None
+ self.mi = None
+ self.state = None
+ self.public_dns = None
+ self.availability_zone = None
+
+class CloudStore( object ):
+ def __init__( self ):
+ self.id = None
+ self.volume_id = None
+ self.i_id = None
+ self.user = None
+ self.size = None
+ self.availability_zone = None
+
+class CloudSnapshot( object ):
+ def __init__( self ):
+ self.id = None
+ self.user = None
+ self.store_id = None
+ self.snapshot_id = None
+
+class CloudProvider( object ):
+ def __init__( self ):
+ self.id = None
+ self.user = None
+ self.type = None
+
+class CloudUserCredentials( object ):
+ def __init__( self ):
+ self.id = None
+ self.user = None
+ self.name = None
+ self.accessKey = None
+ self.secretKey = None
+ self.credentials = []
+
class StoredWorkflow( object ):
def __init__( self ):
self.id = None
diff -r 0984c3800775 -r 7d013eb98022 lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py Thu Nov 12 15:25:48 2009 -0500
+++ b/lib/galaxy/model/mapping.py Thu Nov 12 16:36:07 2009 -0500
@@ -390,6 +390,117 @@
Column( "session_id", Integer, ForeignKey( "galaxy_session.id" ), index=True ),
Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ) )
+# *************************** Start cloud tables***********************************
+CloudImage.table = Table( "cloud_image", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "provider_type", TEXT ),
+ Column( "image_id", TEXT, nullable=False ),
+ Column( "manifest", TEXT ),
+ Column( "state", TEXT ),
+ Column( "architecture", TEXT ),
+ Column( "deleted", Boolean, default=False ) )
+
+""" UserConfiguredInstance (UCI) table """
+UCI.table = Table( "cloud_uci", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "credentials_id", Integer, ForeignKey( "cloud_user_credentials.id" ), index=True ),
+ Column( "key_pair_name", TEXT ),
+ Column( "key_pair_material", TEXT ),
+ Column( "name", TEXT ),
+ Column( "state", TEXT ),
+ Column( "error", TEXT ),
+ Column( "total_size", Integer ),
+ Column( "launch_time", DateTime ),
+ Column( "deleted", Boolean, default=False ) )
+
+CloudInstance.table = Table( "cloud_instance", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "launch_time", DateTime ),
+ Column( "stop_time", DateTime ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
+ Column( "type", TEXT ),
+ Column( "reservation_id", TEXT ),
+ Column( "instance_id", TEXT ),
+ Column( "mi_id", TEXT, ForeignKey( "cloud_image.image_id" ), index=True ),
+ Column( "state", TEXT ),
+ Column( "error", TEXT ),
+ Column( "public_dns", TEXT ),
+ Column( "private_dns", TEXT ),
+ Column( "security_group", TEXT ),
+ Column( "availability_zone", TEXT ) )
+
+CloudStore.table = Table( "cloud_store", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "attach_time", DateTime ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True, nullable=False ),
+ Column( "volume_id", TEXT ),
+ Column( "size", Integer, nullable=False ),
+ Column( "availability_zone", TEXT ),
+ Column( "i_id", TEXT, ForeignKey( "cloud_instance.instance_id" ) ),
+ Column( "status", TEXT ),
+ Column( "device", TEXT ),
+ Column( "space_consumed", Integer ),
+ Column( "error", TEXT ),
+ Column( "deleted", Boolean, default=False ) )
+
+CloudSnapshot.table = Table( "cloud_snapshot", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
+ Column( "store_id", Integer, ForeignKey( "cloud_store.id" ), index=True, nullable=False ),
+ Column( "snapshot_id", TEXT ),
+ Column( "status", TEXT ),
+ Column( "description", TEXT ),
+ Column( "error", TEXT ),
+ Column( "deleted", Boolean, default=False ) )
+
+CloudUserCredentials.table = Table( "cloud_user_credentials", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "provider_id", Integer, ForeignKey( "cloud_provider.id" ), index=True, nullable=False ),
+ Column( "name", TEXT ),
+ Column( "access_key", TEXT ),
+ Column( "secret_key", TEXT ),
+ Column( "deleted", Boolean, default=False ) )
+
+CloudProvider.table = Table( "cloud_provider", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "type", TEXT, nullable=False ),
+ Column( "name", TEXT ),
+ Column( "region_connection", TEXT ),
+ Column( "region_name", TEXT ),
+ Column( "region_endpoint", TEXT ),
+ Column( "is_secure", Boolean ),
+ Column( "host", TEXT ),
+ Column( "port", Integer ),
+ Column( "proxy", TEXT ),
+ Column( "proxy_port", TEXT ),
+ Column( "proxy_user", TEXT ),
+ Column( "proxy_pass", TEXT ),
+ Column( "debug", Integer ),
+ Column( "https_connection_factory", TEXT ),
+ Column( "path", TEXT ),
+ Column( "deleted", Boolean, default=False ) )
+# *************************** End cloud tables***********************************
+
StoredWorkflow.table = Table( "stored_workflow", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
@@ -1004,6 +1115,42 @@
output_step=relation( WorkflowStep, backref="output_connections", cascade="all",
primaryjoin=( WorkflowStepConnection.table.c.output_step_id == WorkflowStep.table.c.id ) ) ) )
+# vvvvvvvvvvvvvvvv Start cloud table mappings vvvvvvvvvvvvvvvv
+assign_mapper( context, CloudImage, CloudImage.table )
+
+assign_mapper( context, UCI, UCI.table,
+ properties=dict( user=relation( User ),
+ credentials=relation( CloudUserCredentials ),
+ instance=relation( CloudInstance, backref='uci' ),
+ store=relation( CloudStore, backref='uci', cascade='all, delete-orphan' ),
+ snapshot=relation( CloudSnapshot, backref='uci' )
+ ) )
+
+assign_mapper( context, CloudInstance, CloudInstance.table,
+ properties=dict( user=relation( User ),
+ image=relation( CloudImage )
+ ) )
+
+assign_mapper( context, CloudStore, CloudStore.table,
+ properties=dict( user=relation( User ),
+ i=relation( CloudInstance ),
+ snapshot=relation( CloudSnapshot, backref="store" )
+ ) )
+
+assign_mapper( context, CloudSnapshot, CloudSnapshot.table,
+ properties=dict( user=relation( User )
+ ) )
+
+assign_mapper( context, CloudProvider, CloudProvider.table,
+ properties=dict( user=relation( User )
+ ) )
+
+assign_mapper( context, CloudUserCredentials, CloudUserCredentials.table,
+ properties=dict( user=relation( User),
+ provider=relation( CloudProvider )
+ ) )
+# ^^^^^^^^^^^^^^^ End cloud table mappings ^^^^^^^^^^^^^^^^^^
+
assign_mapper( context, StoredWorkflow, StoredWorkflow.table,
properties=dict( user=relation( User ),
workflows=relation( Workflow, backref='stored_workflow',
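With the cloud mappings above in place, these objects get the usual assign_mapper conveniences (filter_by, flush, and the declared relations) that the provider code relies on. A hedged sketch of wiring up a provider and credentials for a user (user refers to an existing Galaxy User object; all values are placeholders, not part of this changeset):

    provider = model.CloudProvider()
    provider.user = user
    provider.type = 'eucalyptus'
    provider.flush()

    creds = model.CloudUserCredentials()
    creds.user = user
    creds.provider = provider
    creds.name = 'my eucalyptus credentials'
    creds.access_key = 'PLACEHOLDER-ACCESS-KEY'
    creds.secret_key = 'PLACEHOLDER-SECRET-KEY'
    creds.flush()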
diff -r 0984c3800775 -r 7d013eb98022 lib/galaxy/model/migrate/versions/0026_cloud_tables.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/migrate/versions/0026_cloud_tables.py Thu Nov 12 16:36:07 2009 -0500
@@ -0,0 +1,152 @@
+from sqlalchemy import *
+from migrate import *
+
+import datetime
+now = datetime.datetime.utcnow
+
+# Need our custom types, but don't import anything else from model
+from galaxy.model.custom_types import *
+
+import logging
+log = logging.getLogger( __name__ )
+
+metadata = MetaData( migrate_engine )
+
+def display_migration_details():
+ print
+ print "========================================"
+ print "This script adds tables needed for Galaxy cloud functionality."
+ print "========================================"
+
+CloudImage_table = Table( "cloud_image", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "provider_type", TEXT ),
+ Column( "image_id", TEXT, nullable=False ),
+ Column( "manifest", TEXT ),
+ Column( "state", TEXT ),
+ Column( "architecture", TEXT ),
+ Column( "deleted", Boolean, default=False ) )
+
+""" UserConfiguredInstance (UCI) table """
+UCI_table = Table( "cloud_uci", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "credentials_id", Integer, ForeignKey( "cloud_user_credentials.id" ), index=True ),
+ Column( "key_pair_name", TEXT ),
+ Column( "key_pair_material", TEXT ),
+ Column( "name", TEXT ),
+ Column( "state", TEXT ),
+ Column( "error", TEXT ),
+ Column( "total_size", Integer ),
+ Column( "launch_time", DateTime ),
+ Column( "deleted", Boolean, default=False ) )
+
+CloudInstance_table = Table( "cloud_instance", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "launch_time", DateTime ),
+ Column( "stop_time", DateTime ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
+ Column( "type", TEXT ),
+ Column( "reservation_id", TEXT ),
+ Column( "instance_id", TEXT ),
+ Column( "mi_id", TEXT, ForeignKey( "cloud_image.image_id" ), index=True ),
+ Column( "state", TEXT ),
+ Column( "error", TEXT ),
+ Column( "public_dns", TEXT ),
+ Column( "private_dns", TEXT ),
+ Column( "security_group", TEXT ),
+ Column( "availability_zone", TEXT ) )
+
+CloudStore_table = Table( "cloud_store", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "attach_time", DateTime ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True, nullable=False ),
+ Column( "volume_id", TEXT ),
+ Column( "size", Integer, nullable=False ),
+ Column( "availability_zone", TEXT ),
+ Column( "i_id", TEXT, ForeignKey( "cloud_instance.instance_id" ) ),
+ Column( "status", TEXT ),
+ Column( "device", TEXT ),
+ Column( "space_consumed", Integer ),
+ Column( "error", TEXT ),
+ Column( "deleted", Boolean, default=False ) )
+
+CloudSnapshot_table = Table( "cloud_snapshot", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "uci_id", Integer, ForeignKey( "cloud_uci.id" ), index=True ),
+ Column( "store_id", Integer, ForeignKey( "cloud_store.id" ), index=True, nullable=False ),
+ Column( "snapshot_id", TEXT ),
+ Column( "status", TEXT ),
+ Column( "description", TEXT ),
+ Column( "error", TEXT ),
+ Column( "deleted", Boolean, default=False ) )
+
+CloudUserCredentials_table = Table( "cloud_user_credentials", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "provider_id", Integer, ForeignKey( "cloud_provider.id" ), index=True, nullable=False ),
+ Column( "name", TEXT ),
+ Column( "access_key", TEXT ),
+ Column( "secret_key", TEXT ),
+ Column( "deleted", Boolean, default=False ) )
+
+CloudProvider_table = Table( "cloud_provider", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "update_time", DateTime, default=now, onupdate=now ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
+ Column( "type", TEXT, nullable=False ),
+ Column( "name", TEXT ),
+ Column( "region_connection", TEXT ),
+ Column( "region_name", TEXT ),
+ Column( "region_endpoint", TEXT ),
+ Column( "is_secure", Boolean ),
+ Column( "host", TEXT ),
+ Column( "port", Integer ),
+ Column( "proxy", TEXT ),
+ Column( "proxy_port", TEXT ),
+ Column( "proxy_user", TEXT ),
+ Column( "proxy_pass", TEXT ),
+ Column( "debug", Integer ),
+ Column( "https_connection_factory", TEXT ),
+ Column( "path", TEXT ),
+ Column( "deleted", Boolean, default=False ) )
+
+def upgrade():
+ display_migration_details()
+ # Load existing tables
+ metadata.reflect()
+
+ CloudImage_table.create()
+ CloudProvider_table.create()
+ CloudUserCredentials_table.create()
+ UCI_table.create()
+ CloudInstance_table.create()
+ CloudStore_table.create()
+ CloudSnapshot_table.create()
+
+def downgrade():
+ metadata.reflect()
+
+ CloudSnapshot_table.drop()
+ CloudStore_table.drop()
+ CloudInstance_table.drop()
+ UCI_table.drop()
+ CloudUserCredentials_table.drop()
+ CloudProvider_table.drop()
+ CloudImage_table.drop()
\ No newline at end of file
diff -r 0984c3800775 -r 7d013eb98022 lib/galaxy/web/controllers/cloud.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/web/controllers/cloud.py Thu Nov 12 16:36:07 2009 -0500
@@ -0,0 +1,1193 @@
+from galaxy.web.base.controller import *
+
+import pkg_resources
+pkg_resources.require( "simplejson" )
+import simplejson
+import urllib2
+
+from galaxy.tools.parameters import *
+from galaxy.tools import DefaultToolState
+from galaxy.tools.parameters.grouping import Repeat, Conditional
+from galaxy.datatypes.data import Data
+from galaxy.util.odict import odict
+from galaxy.util.bunch import Bunch
+from galaxy.util.topsort import topsort, topsort_levels, CycleError
+from galaxy.model.mapping import desc
+from galaxy.model.orm import *
+from datetime import datetime, timedelta
+
+pkg_resources.require( "WebHelpers" )
+from webhelpers import *
+
+# Required for Cloud tab
+import galaxy.eggs
+galaxy.eggs.require("boto")
+from boto.ec2.connection import EC2Connection
+from boto.ec2.regioninfo import RegionInfo
+from galaxy.cloud import CloudManager
+import boto.exception
+import boto
+
+import logging
+log = logging.getLogger( __name__ )
+
+uci_states = Bunch(
+ NEW_UCI = "newUCI",
+ NEW = "new",
+ CREATING = "creating",
+ DELETING_UCI = "deletingUCI",
+ DELETING = "deleting",
+ SUBMITTED_UCI = "submittedUCI",
+ SUBMITTED = "submitted",
+ SHUTTING_DOWN_UCI = "shutting-downUCI",
+ SHUTTING_DOWN = "shutting-down",
+ AVAILABLE = "available",
+ RUNNING = "running",
+ PENDING = "pending",
+ ERROR = "error",
+ DELETED = "deleted",
+ SNAPSHOT_UCI = "snapshotUCI",
+ SNAPSHOT = "snapshot"
+)
+