galaxy-commits
August 2013
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/ed4a98b6ca9f/
Changeset: ed4a98b6ca9f
User: jgoecks
Date: 2013-08-20 20:08:27
Summary: Documentation and cleanup for APIItem mixin.
Affected #: 1 file
diff -r ee0a47c22e2fea2ce9d605a9cfef7071557ca6de -r ed4a98b6ca9f0f4dc6bd6e07cff7ee8ef470d0de lib/galaxy/model/item_attrs.py
--- a/lib/galaxy/model/item_attrs.py
+++ b/lib/galaxy/model/item_attrs.py
@@ -160,19 +160,34 @@
class APIItem:
""" Mixin for api representation. """
- #api_collection_visible_keys = ( 'id' )
- #api_element_visible_keys = ( 'id' )
- def get_api_value( self, view='collection', value_mapper = None ):
+
+ def get_api_value( self, view='collection', value_mapper=None ):
+ """
+ Return item dictionary.
+ """
+
+ if not value_mapper:
+ value_mapper = {}
+
def get_value( key, item ):
+ """
+ Recursive helper function to get item values.
+ """
+ # FIXME: why use exception here? Why not look for key in value_mapper
+ # first and then default to get_api_value?
try:
return item.get_api_value( view=view, value_mapper=value_mapper )
except:
if key in value_mapper:
return value_mapper.get( key )( item )
return item
- if value_mapper is None:
- value_mapper = {}
- rval = {}
+
+ # Create dict to represent item.
+ rval = dict(
+ model_class=self.__class__.__name__
+ )
+
+ # Fill item dict with visible keys.
try:
visible_keys = self.__getattribute__( 'api_' + view + '_visible_keys' )
except AttributeError:
@@ -181,13 +196,12 @@
try:
item = self.__getattribute__( key )
if type( item ) == InstrumentedList:
- rval[key] = []
+ rval[ key ] = []
for i in item:
- rval[key].append( get_value( key, i ) )
+ rval[ key ].append( get_value( key, i ) )
else:
- rval[key] = get_value( key, item )
+ rval[ key ] = get_value( key, item )
except AttributeError:
- rval[key] = None
+ rval[ key ] = None
- rval['model_class'] = self.__class__.__name__
return rval
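As context for the refactored mixin above, here is a minimal, self-contained sketch of the idea (APIItemSketch and Widget are hypothetical names, and the recursion and InstrumentedList handling of the real method are omitted): a model lists its visible keys per view, and get_api_value assembles a dictionary from them, applying an optional value_mapper to individual values.

class APIItemSketch(object):
    def get_api_value(self, view='collection', value_mapper=None):
        if value_mapper is None:
            value_mapper = {}
        # Start with the class name, then fill in the view's visible keys.
        rval = dict(model_class=self.__class__.__name__)
        for key in getattr(self, 'api_%s_visible_keys' % view):
            value = getattr(self, key, None)
            rval[key] = value_mapper[key](value) if key in value_mapper else value
        return rval

class Widget(APIItemSketch):
    api_collection_visible_keys = ('id', 'name')
    api_element_visible_keys = ('id', 'name', 'size')
    def __init__(self, id, name, size):
        self.id, self.name, self.size = id, name, size

w = Widget(7, 'example', 42)
print(w.get_api_value())
print(w.get_api_value(view='element', value_mapper={'id': lambda i: 'encoded-%s' % i}))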
https://bitbucket.org/galaxy/galaxy-central/commits/a699decd482c/
Changeset: a699decd482c
User: jgoecks
Date: 2013-08-20 20:40:20
Summary: Use APIMixin in History object.
Affected #: 1 file
diff -r ed4a98b6ca9f0f4dc6bd6e07cff7ee8ef470d0de -r a699decd482ce7732ea61ffb8e13714a0f991381 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -662,7 +662,7 @@
self.user = user
self.group = group
-class History( object, UsesAnnotations ):
+class History( object, APIItem, UsesAnnotations ):
api_collection_visible_keys = ( 'id', 'name', 'published', 'deleted' )
api_element_visible_keys = ( 'id', 'name', 'published', 'deleted', 'genome_build', 'purged' )
@@ -680,6 +680,7 @@
self.user = user
self.datasets = []
self.galaxy_sessions = []
+ self.tags = []
def _next_hid( self ):
# TODO: override this with something in the database that ensures
@@ -779,31 +780,20 @@
history_name = unicode(history_name, 'utf-8')
return history_name
- def get_api_value( self, view='collection', value_mapper = None ):
- if value_mapper is None:
- value_mapper = {}
- rval = {}
+ def get_api_value( self, view='collection', value_mapper = None ):
- try:
- visible_keys = self.__getattribute__( 'api_' + view + '_visible_keys' )
- except AttributeError:
- raise Exception( 'Unknown API view: %s' % view )
- for key in visible_keys:
- try:
- rval[key] = self.__getattribute__( key )
- if key in value_mapper:
- rval[key] = value_mapper.get( key )( rval[key] )
- except AttributeError:
- rval[key] = None
-
+ # Get basic value.
+ rval = super( History, self ).get_api_value( view=view, value_mapper=value_mapper )
+
+ # Add tags.
tags_str_list = []
for tag in self.tags:
tag_str = tag.user_tname
if tag.value is not None:
tag_str += ":" + tag.user_value
tags_str_list.append( tag_str )
- rval['tags'] = tags_str_list
- rval['model_class'] = self.__class__.__name__
+ rval[ 'tags' ] = tags_str_list
+
return rval
def set_from_dict( self, new_data ):
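Continuing the sketch above, the pattern applied to History here is that the subclass gets the basic dictionary from the mixin via super() and then appends fields, such as tags, that are not plain visible-key attributes (HistoryLike is a hypothetical stand-in that reuses the Widget class from the previous sketch):

class HistoryLike(Widget):
    def __init__(self, id, name, size, tags=()):
        super(HistoryLike, self).__init__(id, name, size)
        self.tags = list(tags)

    def get_api_value(self, view='collection', value_mapper=None):
        # Let the mixin fill in the visible keys, then add the extras.
        rval = super(HistoryLike, self).get_api_value(view=view, value_mapper=value_mapper)
        rval['tags'] = list(self.tags)
        return rval

print(HistoryLike(3, 'demo history', 0, tags=['name:rna-seq']).get_api_value())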
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: guerler: Fix Dataviewer compatibility for the updated Backbone version
by commits-noreply@bitbucket.org 20 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/ee0a47c22e2f/
Changeset: ee0a47c22e2f
User: guerler
Date: 2013-08-20 19:02:33
Summary: Fix Dataviewer compatibility for the updated Backbone version
Affected #: 1 file
diff -r c732173231b89ce28bd7b8ee629b1af32c0b37e7 -r ee0a47c22e2fea2ce9d605a9cfef7071557ca6de static/scripts/mvc/data.js
--- a/static/scripts/mvc/data.js
+++ b/static/scripts/mvc/data.js
@@ -20,10 +20,8 @@
},
initialize: function() {
- // Set metadata.
// FIXME: pass back a metadata dict and then Backbone-relational
// can be used unpack metadata automatically.
- this._set_metadata();
// Update metadata on change.
this.on('change', this._set_metadata, this);
},
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: natefoo: Move S3 Object Store to its own module (more to come, probably).
by commits-noreply@bitbucket.org 20 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c732173231b8/
Changeset: c732173231b8
User: natefoo
Date: 2013-08-20 18:05:30
Summary: Move S3 Object Store to its own module (more to come, probably).
Affected #: 2 files
diff -r 267d294711e9f72dcfbff842ce263a1050be058f -r c732173231b89ce28bd7b8ee629b1af32c0b37e7 lib/galaxy/objectstore/__init__.py
--- a/lib/galaxy/objectstore/__init__.py
+++ b/lib/galaxy/objectstore/__init__.py
@@ -5,14 +5,11 @@
"""
import os
-import sys
import time
import random
import shutil
import logging
import threading
-import subprocess
-from datetime import datetime
from galaxy import util
from galaxy.jobs import Sleeper
@@ -21,15 +18,7 @@
from sqlalchemy.orm import object_session
-import multiprocessing
-from galaxy.objectstore.s3_multipart_upload import multipart_upload
-import boto
-from boto.s3.key import Key
-from boto.s3.connection import S3Connection
-from boto.exception import S3ResponseError
-
log = logging.getLogger( __name__ )
-logging.getLogger('boto').setLevel(logging.INFO) # Otherwise boto is quite noisy
class ObjectStore(object):
@@ -373,490 +362,6 @@
super(CachingObjectStore, self).__init__(self, path, backend)
-class S3ObjectStore(ObjectStore):
- """
- Object store that stores objects as items in an AWS S3 bucket. A local
- cache exists that is used as an intermediate location for files between
- Galaxy and S3.
- """
- def __init__(self, config):
- super(S3ObjectStore, self).__init__()
- self.config = config
- self.staging_path = self.config.file_path
- self.s3_conn = get_OS_connection(self.config)
- self.bucket = self._get_bucket(self.config.os_bucket_name)
- self.use_rr = self.config.os_use_reduced_redundancy
- self.cache_size = self.config.object_store_cache_size
- self.transfer_progress = 0
- # Clean cache only if value is set in universe_wsgi.ini
- if self.cache_size != -1:
- # Convert GBs to bytes for comparison
- self.cache_size = self.cache_size * 1073741824
- # Helper for interruptable sleep
- self.sleeper = Sleeper()
- self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
- self.cache_monitor_thread.start()
- log.info("Cache cleaner manager started")
- # Test if 'axel' is available for parallel download and pull the key into cache
- try:
- subprocess.call('axel')
- self.use_axel = True
- except OSError:
- self.use_axel = False
-
- def __cache_monitor(self):
- time.sleep(2) # Wait for things to load before starting the monitor
- while self.running:
- total_size = 0
- # Is this going to be too expensive of an operation to be done frequently?
- file_list = []
- for dirpath, dirnames, filenames in os.walk(self.staging_path):
- for f in filenames:
- fp = os.path.join(dirpath, f)
- file_size = os.path.getsize(fp)
- total_size += file_size
- # Get the time given file was last accessed
- last_access_time = time.localtime(os.stat(fp)[7])
- # Compose a tuple of the access time and the file path
- file_tuple = last_access_time, fp, file_size
- file_list.append(file_tuple)
- # Sort the file list (based on access time)
- file_list.sort()
- # Initiate cleaning once within 10% of the defined cache size?
- cache_limit = self.cache_size * 0.9
- if total_size > cache_limit:
- log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s" \
- % (convert_bytes(total_size), convert_bytes(cache_limit)))
- # How much to delete? If simply deleting up to the cache-10% limit,
- # is likely to be deleting frequently and may run the risk of hitting
- # the limit - maybe delete additional #%?
- # For now, delete enough to leave at least 10% of the total cache free
- delete_this_much = total_size - cache_limit
- self.__clean_cache(file_list, delete_this_much)
- self.sleeper.sleep(30) # Test cache size every 30 seconds?
-
- def __clean_cache(self, file_list, delete_this_much):
- """ Keep deleting files from the file_list until the size of the deleted
- files is greater than the value in delete_this_much parameter.
-
- :type file_list: list
- :param file_list: List of candidate files that can be deleted. This method
- will start deleting files from the beginning of the list so the list
- should be sorted accordingly. The list must contains 3-element tuples,
- positioned as follows: position 0 holds file last accessed timestamp
- (as time.struct_time), position 1 holds file path, and position 2 has
- file size (e.g., (<access time>, /mnt/data/dataset_1.dat), 472394)
-
- :type delete_this_much: int
- :param delete_this_much: Total size of files, in bytes, that should be deleted.
- """
- # Keep deleting datasets from file_list until deleted_amount does not
- # exceed delete_this_much; start deleting from the front of the file list,
- # which assumes the oldest files come first on the list.
- deleted_amount = 0
- for i, f in enumerate(file_list):
- if deleted_amount < delete_this_much:
- deleted_amount += f[2]
- os.remove(f[1])
- # Debugging code for printing deleted files' stats
- # folder, file_name = os.path.split(f[1])
- # file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
- # log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \
- # % (i, file_name, convert_bytes(f[2]), file_date, \
- # convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
- else:
- log.debug("Cache cleaning done. Total space freed: %s" % convert_bytes(deleted_amount))
- return
-
- def _get_bucket(self, bucket_name):
- """ Sometimes a handle to a bucket is not established right away so try
- it a few times. Raise error is connection is not established. """
- for i in range(5):
- try:
- bucket = self.s3_conn.get_bucket(bucket_name)
- log.debug("Using cloud object store with bucket '%s'" % bucket.name)
- return bucket
- except S3ResponseError:
- log.debug("Could not get bucket '%s', attempt %s/5" % (bucket_name, i+1))
- time.sleep(2)
- # All the attempts have been exhausted and connection was not established,
- # raise error
- raise S3ResponseError
-
- def _fix_permissions(self, rel_path):
- """ Set permissions on rel_path"""
- for basedir, dirs, files in os.walk(rel_path):
- util.umask_fix_perms(basedir, self.config.umask, 0777, self.config.gid)
- for f in files:
- path = os.path.join(basedir, f)
- # Ignore symlinks
- if os.path.islink(path):
- continue
- util.umask_fix_perms( path, self.config.umask, 0666, self.config.gid )
-
- def _construct_path(self, obj, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, **kwargs):
- rel_path = os.path.join(*directory_hash_id(obj.id))
- if extra_dir is not None:
- if extra_dir_at_root:
- rel_path = os.path.join(extra_dir, rel_path)
- else:
- rel_path = os.path.join(rel_path, extra_dir)
- # S3 folders are marked by having trailing '/' so add it now
- rel_path = '%s/' % rel_path
- if not dir_only:
- rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
- return rel_path
-
- def _get_cache_path(self, rel_path):
- return os.path.abspath(os.path.join(self.staging_path, rel_path))
-
- def _get_transfer_progress(self):
- return self.transfer_progress
-
- def _get_size_in_s3(self, rel_path):
- try:
- key = self.bucket.get_key(rel_path)
- if key:
- return key.size
- except S3ResponseError, ex:
- log.error("Could not get size of key '%s' from S3: %s" % (rel_path, ex))
- except Exception, ex:
- log.error("Could not get reference to the key object '%s'; returning -1 for key size: %s" % (rel_path, ex))
- return -1
-
- def _key_exists(self, rel_path):
- exists = False
- try:
- # A hackish way of testing if the rel_path is a folder vs a file
- is_dir = rel_path[-1] == '/'
- if is_dir:
- rs = self.bucket.get_all_keys(prefix=rel_path)
- if len(rs) > 0:
- exists = True
- else:
- exists = False
- else:
- key = Key(self.bucket, rel_path)
- exists = key.exists()
- except S3ResponseError, ex:
- log.error("Trouble checking existence of S3 key '%s': %s" % (rel_path, ex))
- return False
- #print "Checking if '%s' exists in S3: %s" % (rel_path, exists)
- if rel_path[0] == '/':
- raise
- return exists
-
- def _in_cache(self, rel_path):
- """ Check if the given dataset is in the local cache and return True if so. """
- # log.debug("------ Checking cache for rel_path %s" % rel_path)
- cache_path = self._get_cache_path(rel_path)
- return os.path.exists(cache_path)
- # TODO: Part of checking if a file is in cache should be to ensure the
- # size of the cached file matches that on S3. Once the upload tool explicitly
- # creates, this check sould be implemented- in the mean time, it's not
- # looking likely to be implementable reliably.
- # if os.path.exists(cache_path):
- # # print "***1 %s exists" % cache_path
- # if self._key_exists(rel_path):
- # # print "***2 %s exists in S3" % rel_path
- # # Make sure the size in cache is available in its entirety
- # # print "File '%s' cache size: %s, S3 size: %s" % (cache_path, os.path.getsize(cache_path), self._get_size_in_s3(rel_path))
- # if os.path.getsize(cache_path) == self._get_size_in_s3(rel_path):
- # # print "***2.1 %s exists in S3 and the size is the same as in cache (in_cache=True)" % rel_path
- # exists = True
- # else:
- # # print "***2.2 %s exists but differs in size from cache (in_cache=False)" % cache_path
- # exists = False
- # else:
- # # Although not perfect decision making, this most likely means
- # # that the file is currently being uploaded
- # # print "***3 %s found in cache but not in S3 (in_cache=True)" % cache_path
- # exists = True
- # else:
- # return False
-
- def _pull_into_cache(self, rel_path):
- # Ensure the cache directory structure exists (e.g., dataset_#_files/)
- rel_path_dir = os.path.dirname(rel_path)
- if not os.path.exists(self._get_cache_path(rel_path_dir)):
- os.makedirs(self._get_cache_path(rel_path_dir))
- # Now pull in the file
- ok = self._download(rel_path)
- self._fix_permissions(self._get_cache_path(rel_path_dir))
- return ok
-
- def _transfer_cb(self, complete, total):
- self.transfer_progress += 10
-
- def _download(self, rel_path):
- try:
- log.debug("Pulling key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
- key = self.bucket.get_key(rel_path)
- # Test if cache is large enough to hold the new file
- if self.cache_size > 0 and key.size > self.cache_size:
- log.critical("File %s is larger (%s) than the cache size (%s). Cannot download." \
- % (rel_path, key.size, self.cache_size))
- return False
- if self.use_axel:
- log.debug("Parallel pulled key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
- ncores = multiprocessing.cpu_count()
- url = key.generate_url(7200)
- ret_code = subprocess.call("axel -a -n %s '%s'" % (ncores, url))
- if ret_code == 0:
- return True
- else:
- log.debug("Pulled key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
- self.transfer_progress = 0 # Reset transfer progress counter
- key.get_contents_to_filename(self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10)
- return True
- except S3ResponseError, ex:
- log.error("Problem downloading key '%s' from S3 bucket '%s': %s" % (rel_path, self.bucket.name, ex))
- return False
-
- def _push_to_os(self, rel_path, source_file=None, from_string=None):
- """
- Push the file pointed to by ``rel_path`` to the object store naming the key
- ``rel_path``. If ``source_file`` is provided, push that file instead while
- still using ``rel_path`` as the key name.
- If ``from_string`` is provided, set contents of the file to the value of
- the string.
- """
- try:
- source_file = source_file if source_file else self._get_cache_path(rel_path)
- if os.path.exists(source_file):
- key = Key(self.bucket, rel_path)
- if os.path.getsize(source_file) == 0 and key.exists():
- log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping." % (source_file, rel_path))
- return True
- if from_string:
- key.set_contents_from_string(from_string, reduced_redundancy=self.use_rr)
- log.debug("Pushed data from string '%s' to key '%s'" % (from_string, rel_path))
- else:
- start_time = datetime.now()
- # print "Pushing cache file '%s' of size %s bytes to key '%s'" % (source_file, os.path.getsize(source_file), rel_path)
- # print "+ Push started at '%s'" % start_time
- mb_size = os.path.getsize(source_file) / 1e6
- if mb_size < 60 or self.config.object_store == 'swift':
- self.transfer_progress = 0 # Reset transfer progress counter
- key.set_contents_from_filename(source_file, reduced_redundancy=self.use_rr,
- cb=self._transfer_cb, num_cb=10)
- else:
- multipart_upload(self.bucket, key.name, source_file, mb_size, use_rr=self.use_rr)
- end_time = datetime.now()
- # print "+ Push ended at '%s'; %s bytes transfered in %ssec" % (end_time, os.path.getsize(source_file), end_time-start_time)
- log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)" % (source_file, rel_path, os.path.getsize(source_file), end_time-start_time))
- return True
- else:
- log.error("Tried updating key '%s' from source file '%s', but source file does not exist."
- % (rel_path, source_file))
- except S3ResponseError, ex:
- log.error("Trouble pushing S3 key '%s' from file '%s': %s" % (rel_path, source_file, ex))
- return False
-
- def file_ready(self, obj, **kwargs):
- """
- A helper method that checks if a file corresponding to a dataset is
- ready and available to be used. Return ``True`` if so, ``False`` otherwise.
- """
- rel_path = self._construct_path(obj, **kwargs)
- # Make sure the size in cache is available in its entirety
- if self._in_cache(rel_path):
- if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_s3(rel_path):
- return True
- log.debug("Waiting for dataset {0} to transfer from OS: {1}/{2}".format(rel_path,
- os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_s3(rel_path)))
- return False
-
- def exists(self, obj, **kwargs):
- in_cache = in_s3 = False
- rel_path = self._construct_path(obj, **kwargs)
- # Check cache
- if self._in_cache(rel_path):
- in_cache = True
- # Check S3
- in_s3 = self._key_exists(rel_path)
- # log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
- # dir_only does not get synced so shortcut the decision
- dir_only = kwargs.get('dir_only', False)
- if dir_only:
- if in_cache or in_s3:
- return True
- else:
- return False
- # TODO: Sync should probably not be done here. Add this to an async upload stack?
- if in_cache and not in_s3:
- self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
- return True
- elif in_s3:
- return True
- else:
- return False
-
- def create(self, obj, **kwargs):
- if not self.exists(obj, **kwargs):
- #print "S3 OS creating a dataset with ID %s" % kwargs
- # Pull out locally used fields
- extra_dir = kwargs.get('extra_dir', None)
- extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
- dir_only = kwargs.get('dir_only', False)
- alt_name = kwargs.get('alt_name', None)
- # print "---- Processing: %s; %s" % (alt_name, locals())
- # Construct hashed path
- rel_path = os.path.join(*directory_hash_id(obj.id))
-
- # Optionally append extra_dir
- if extra_dir is not None:
- if extra_dir_at_root:
- rel_path = os.path.join(extra_dir, rel_path)
- else:
- rel_path = os.path.join(rel_path, extra_dir)
- # Create given directory in cache
- cache_dir = os.path.join(self.staging_path, rel_path)
- if not os.path.exists(cache_dir):
- os.makedirs(cache_dir)
- # Although not really necessary to create S3 folders (because S3 has
- # flat namespace), do so for consistency with the regular file system
- # S3 folders are marked by having trailing '/' so add it now
- # s3_dir = '%s/' % rel_path
- # self._push_to_os(s3_dir, from_string='')
- # If instructed, create the dataset in cache & in S3
- if not dir_only:
- rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
- open(os.path.join(self.staging_path, rel_path), 'w').close()
- self._push_to_os(rel_path, from_string='')
-
- def empty(self, obj, **kwargs):
- if self.exists(obj, **kwargs):
- return bool(self.size(obj, **kwargs) > 0)
- else:
- raise ObjectNotFound( 'objectstore.empty, object does not exist: %s, kwargs: %s'
- %( str( obj ), str( kwargs ) ) )
-
- def size(self, obj, **kwargs):
- rel_path = self._construct_path(obj, **kwargs)
- if self._in_cache(rel_path):
- try:
- return os.path.getsize(self._get_cache_path(rel_path))
- except OSError, ex:
- log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s" % (rel_path, ex))
- elif self.exists(obj, **kwargs):
- return self._get_size_in_s3(rel_path)
- log.warning("Did not find dataset '%s', returning 0 for size" % rel_path)
- return 0
-
- def delete(self, obj, entire_dir=False, **kwargs):
- rel_path = self._construct_path(obj, **kwargs)
- extra_dir = kwargs.get('extra_dir', None)
- try:
- # For the case of extra_files, because we don't have a reference to
- # individual files/keys we need to remove the entire directory structure
- # with all the files in it. This is easy for the local file system,
- # but requires iterating through each individual key in S3 and deleing it.
- if entire_dir and extra_dir:
- shutil.rmtree(self._get_cache_path(rel_path))
- rs = self.bucket.get_all_keys(prefix=rel_path)
- for key in rs:
- log.debug("Deleting key %s" % key.name)
- key.delete()
- return True
- else:
- # Delete from cache first
- os.unlink(self._get_cache_path(rel_path))
- # Delete from S3 as well
- if self._key_exists(rel_path):
- key = Key(self.bucket, rel_path)
- log.debug("Deleting key %s" % key.name)
- key.delete()
- return True
- except S3ResponseError, ex:
- log.error("Could not delete key '%s' from S3: %s" % (rel_path, ex))
- except OSError, ex:
- log.error('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
- return False
-
- def get_data(self, obj, start=0, count=-1, **kwargs):
- rel_path = self._construct_path(obj, **kwargs)
- # Check cache first and get file if not there
- if not self._in_cache(rel_path):
- self._pull_into_cache(rel_path)
- #else:
- # print "(cccc) Getting '%s' from cache" % self._get_cache_path(rel_path)
- # Read the file content from cache
- data_file = open(self._get_cache_path(rel_path), 'r')
- data_file.seek(start)
- content = data_file.read(count)
- data_file.close()
- return content
-
- def get_filename(self, obj, **kwargs):
- #print "S3 get_filename for dataset: %s" % dataset_id
- dir_only = kwargs.get('dir_only', False)
- rel_path = self._construct_path(obj, **kwargs)
- cache_path = self._get_cache_path(rel_path)
- # S3 does not recognize directories as files so cannot check if those exist.
- # So, if checking dir only, ensure given dir exists in cache and return
- # the expected cache path.
- # dir_only = kwargs.get('dir_only', False)
- # if dir_only:
- # if not os.path.exists(cache_path):
- # os.makedirs(cache_path)
- # return cache_path
- # Check if the file exists in the cache first
- if self._in_cache(rel_path):
- return cache_path
- # Check if the file exists in persistent storage and, if it does, pull it into cache
- elif self.exists(obj, **kwargs):
- if dir_only: # Directories do not get pulled into cache
- return cache_path
- else:
- if self._pull_into_cache(rel_path):
- return cache_path
- # For the case of retrieving a directory only, return the expected path
- # even if it does not exist.
- # if dir_only:
- # return cache_path
- raise ObjectNotFound( 'objectstore.get_filename, no cache_path: %s, kwargs: %s'
- %( str( obj ), str( kwargs ) ) )
- # return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
-
- def update_from_file(self, obj, file_name=None, create=False, **kwargs):
- if create:
- self.create(obj, **kwargs)
- if self.exists(obj, **kwargs):
- rel_path = self._construct_path(obj, **kwargs)
- # Chose whether to use the dataset file itself or an alternate file
- if file_name:
- source_file = os.path.abspath(file_name)
- # Copy into cache
- cache_file = self._get_cache_path(rel_path)
- try:
- if source_file != cache_file:
- # FIXME? Should this be a `move`?
- shutil.copy2(source_file, cache_file)
- self._fix_permissions(cache_file)
- except OSError, ex:
- log.error("Trouble copying source file '%s' to cache '%s': %s" % (source_file, cache_file, ex))
- else:
- source_file = self._get_cache_path(rel_path)
- # Update the file on S3
- self._push_to_os(rel_path, source_file)
- else:
- raise ObjectNotFound( 'objectstore.update_from_file, object does not exist: %s, kwargs: %s'
- %( str( obj ), str( kwargs ) ) )
-
- def get_object_url(self, obj, **kwargs):
- if self.exists(obj, **kwargs):
- rel_path = self._construct_path(obj, **kwargs)
- try:
- key = Key(self.bucket, rel_path)
- return key.generate_url(expires_in = 86400) # 24hrs
- except S3ResponseError, ex:
- log.warning("Trouble generating URL for dataset '%s': %s" % (rel_path, ex))
- return None
-
- def get_store_usage_percent(self):
- return 0.0
-
class DistributedObjectStore(ObjectStore):
"""
ObjectStore that defers to a list of backends, for getting objects the
@@ -1011,6 +516,7 @@
return id
return None
+
class HierarchicalObjectStore(ObjectStore):
"""
ObjectStore that defers to a list of backends, for getting objects the
@@ -1021,6 +527,7 @@
def __init__(self, backends=[]):
super(HierarchicalObjectStore, self).__init__()
+
def build_object_store_from_config(config, fsmon=False):
""" Depending on the configuration setting, invoke the appropriate object store
"""
@@ -1028,6 +535,7 @@
if store == 'disk':
return DiskObjectStore(config=config)
elif store == 's3' or store == 'swift':
+ from galaxy.objectstore.s3 import S3ObjectStore
return S3ObjectStore(config=config)
elif store == 'distributed':
return DistributedObjectStore(config=config, fsmon=fsmon)
diff -r 267d294711e9f72dcfbff842ce263a1050be058f -r c732173231b89ce28bd7b8ee629b1af32c0b37e7 lib/galaxy/objectstore/s3.py
--- /dev/null
+++ b/lib/galaxy/objectstore/s3.py
@@ -0,0 +1,535 @@
+"""
+Object Store plugin for the Amazon Simple Storage Service (S3)
+"""
+
+import os
+import time
+import shutil
+import logging
+import threading
+import subprocess
+from datetime import datetime
+
+from galaxy import util
+from galaxy.jobs import Sleeper
+from galaxy.model import directory_hash_id
+from galaxy.objectstore import ObjectStore, convert_bytes
+from galaxy.exceptions import ObjectNotFound, ObjectInvalid
+
+import multiprocessing
+from galaxy.objectstore.s3_multipart_upload import multipart_upload
+import boto
+from boto.s3.key import Key
+from boto.s3.connection import S3Connection
+from boto.exception import S3ResponseError
+
+log = logging.getLogger( __name__ )
+logging.getLogger('boto').setLevel(logging.INFO) # Otherwise boto is quite noisy
+
+
+class S3ObjectStore(ObjectStore):
+ """
+ Object store that stores objects as items in an AWS S3 bucket. A local
+ cache exists that is used as an intermediate location for files between
+ Galaxy and S3.
+ """
+ def __init__(self, config):
+ super(S3ObjectStore, self).__init__()
+ self.config = config
+ self.staging_path = self.config.file_path
+ self.s3_conn = get_OS_connection(self.config)
+ self.bucket = self._get_bucket(self.config.os_bucket_name)
+ self.use_rr = self.config.os_use_reduced_redundancy
+ self.cache_size = self.config.object_store_cache_size
+ self.transfer_progress = 0
+ # Clean cache only if value is set in universe_wsgi.ini
+ if self.cache_size != -1:
+ # Convert GBs to bytes for comparison
+ self.cache_size = self.cache_size * 1073741824
+ # Helper for interruptable sleep
+ self.sleeper = Sleeper()
+ self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
+ self.cache_monitor_thread.start()
+ log.info("Cache cleaner manager started")
+ # Test if 'axel' is available for parallel download and pull the key into cache
+ try:
+ subprocess.call('axel')
+ self.use_axel = True
+ except OSError:
+ self.use_axel = False
+
+ def __cache_monitor(self):
+ time.sleep(2) # Wait for things to load before starting the monitor
+ while self.running:
+ total_size = 0
+ # Is this going to be too expensive of an operation to be done frequently?
+ file_list = []
+ for dirpath, dirnames, filenames in os.walk(self.staging_path):
+ for f in filenames:
+ fp = os.path.join(dirpath, f)
+ file_size = os.path.getsize(fp)
+ total_size += file_size
+ # Get the time given file was last accessed
+ last_access_time = time.localtime(os.stat(fp)[7])
+ # Compose a tuple of the access time and the file path
+ file_tuple = last_access_time, fp, file_size
+ file_list.append(file_tuple)
+ # Sort the file list (based on access time)
+ file_list.sort()
+ # Initiate cleaning once within 10% of the defined cache size?
+ cache_limit = self.cache_size * 0.9
+ if total_size > cache_limit:
+ log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s" \
+ % (convert_bytes(total_size), convert_bytes(cache_limit)))
+ # How much to delete? If simply deleting up to the cache-10% limit,
+ # is likely to be deleting frequently and may run the risk of hitting
+ # the limit - maybe delete additional #%?
+ # For now, delete enough to leave at least 10% of the total cache free
+ delete_this_much = total_size - cache_limit
+ self.__clean_cache(file_list, delete_this_much)
+ self.sleeper.sleep(30) # Test cache size every 30 seconds?
+
+ def __clean_cache(self, file_list, delete_this_much):
+ """ Keep deleting files from the file_list until the size of the deleted
+ files is greater than the value in delete_this_much parameter.
+
+ :type file_list: list
+ :param file_list: List of candidate files that can be deleted. This method
+ will start deleting files from the beginning of the list so the list
+ should be sorted accordingly. The list must contains 3-element tuples,
+ positioned as follows: position 0 holds file last accessed timestamp
+ (as time.struct_time), position 1 holds file path, and position 2 has
+ file size (e.g., (<access time>, /mnt/data/dataset_1.dat), 472394)
+
+ :type delete_this_much: int
+ :param delete_this_much: Total size of files, in bytes, that should be deleted.
+ """
+ # Keep deleting datasets from file_list until deleted_amount does not
+ # exceed delete_this_much; start deleting from the front of the file list,
+ # which assumes the oldest files come first on the list.
+ deleted_amount = 0
+ for i, f in enumerate(file_list):
+ if deleted_amount < delete_this_much:
+ deleted_amount += f[2]
+ os.remove(f[1])
+ # Debugging code for printing deleted files' stats
+ # folder, file_name = os.path.split(f[1])
+ # file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0])
+ # log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \
+ # % (i, file_name, convert_bytes(f[2]), file_date, \
+ # convert_bytes(deleted_amount), convert_bytes(delete_this_much)))
+ else:
+ log.debug("Cache cleaning done. Total space freed: %s" % convert_bytes(deleted_amount))
+ return
+
+ def _get_bucket(self, bucket_name):
+ """ Sometimes a handle to a bucket is not established right away so try
+ it a few times. Raise error is connection is not established. """
+ for i in range(5):
+ try:
+ bucket = self.s3_conn.get_bucket(bucket_name)
+ log.debug("Using cloud object store with bucket '%s'" % bucket.name)
+ return bucket
+ except S3ResponseError:
+ log.debug("Could not get bucket '%s', attempt %s/5" % (bucket_name, i+1))
+ time.sleep(2)
+ # All the attempts have been exhausted and connection was not established,
+ # raise error
+ raise S3ResponseError
+
+ def _fix_permissions(self, rel_path):
+ """ Set permissions on rel_path"""
+ for basedir, dirs, files in os.walk(rel_path):
+ util.umask_fix_perms(basedir, self.config.umask, 0777, self.config.gid)
+ for f in files:
+ path = os.path.join(basedir, f)
+ # Ignore symlinks
+ if os.path.islink(path):
+ continue
+ util.umask_fix_perms( path, self.config.umask, 0666, self.config.gid )
+
+ def _construct_path(self, obj, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, **kwargs):
+ rel_path = os.path.join(*directory_hash_id(obj.id))
+ if extra_dir is not None:
+ if extra_dir_at_root:
+ rel_path = os.path.join(extra_dir, rel_path)
+ else:
+ rel_path = os.path.join(rel_path, extra_dir)
+ # S3 folders are marked by having trailing '/' so add it now
+ rel_path = '%s/' % rel_path
+ if not dir_only:
+ rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
+ return rel_path
+
+ def _get_cache_path(self, rel_path):
+ return os.path.abspath(os.path.join(self.staging_path, rel_path))
+
+ def _get_transfer_progress(self):
+ return self.transfer_progress
+
+ def _get_size_in_s3(self, rel_path):
+ try:
+ key = self.bucket.get_key(rel_path)
+ if key:
+ return key.size
+ except S3ResponseError, ex:
+ log.error("Could not get size of key '%s' from S3: %s" % (rel_path, ex))
+ except Exception, ex:
+ log.error("Could not get reference to the key object '%s'; returning -1 for key size: %s" % (rel_path, ex))
+ return -1
+
+ def _key_exists(self, rel_path):
+ exists = False
+ try:
+ # A hackish way of testing if the rel_path is a folder vs a file
+ is_dir = rel_path[-1] == '/'
+ if is_dir:
+ rs = self.bucket.get_all_keys(prefix=rel_path)
+ if len(rs) > 0:
+ exists = True
+ else:
+ exists = False
+ else:
+ key = Key(self.bucket, rel_path)
+ exists = key.exists()
+ except S3ResponseError, ex:
+ log.error("Trouble checking existence of S3 key '%s': %s" % (rel_path, ex))
+ return False
+ #print "Checking if '%s' exists in S3: %s" % (rel_path, exists)
+ if rel_path[0] == '/':
+ raise
+ return exists
+
+ def _in_cache(self, rel_path):
+ """ Check if the given dataset is in the local cache and return True if so. """
+ # log.debug("------ Checking cache for rel_path %s" % rel_path)
+ cache_path = self._get_cache_path(rel_path)
+ return os.path.exists(cache_path)
+ # TODO: Part of checking if a file is in cache should be to ensure the
+ # size of the cached file matches that on S3. Once the upload tool explicitly
+ # creates, this check sould be implemented- in the mean time, it's not
+ # looking likely to be implementable reliably.
+ # if os.path.exists(cache_path):
+ # # print "***1 %s exists" % cache_path
+ # if self._key_exists(rel_path):
+ # # print "***2 %s exists in S3" % rel_path
+ # # Make sure the size in cache is available in its entirety
+ # # print "File '%s' cache size: %s, S3 size: %s" % (cache_path, os.path.getsize(cache_path), self._get_size_in_s3(rel_path))
+ # if os.path.getsize(cache_path) == self._get_size_in_s3(rel_path):
+ # # print "***2.1 %s exists in S3 and the size is the same as in cache (in_cache=True)" % rel_path
+ # exists = True
+ # else:
+ # # print "***2.2 %s exists but differs in size from cache (in_cache=False)" % cache_path
+ # exists = False
+ # else:
+ # # Although not perfect decision making, this most likely means
+ # # that the file is currently being uploaded
+ # # print "***3 %s found in cache but not in S3 (in_cache=True)" % cache_path
+ # exists = True
+ # else:
+ # return False
+
+ def _pull_into_cache(self, rel_path):
+ # Ensure the cache directory structure exists (e.g., dataset_#_files/)
+ rel_path_dir = os.path.dirname(rel_path)
+ if not os.path.exists(self._get_cache_path(rel_path_dir)):
+ os.makedirs(self._get_cache_path(rel_path_dir))
+ # Now pull in the file
+ ok = self._download(rel_path)
+ self._fix_permissions(self._get_cache_path(rel_path_dir))
+ return ok
+
+ def _transfer_cb(self, complete, total):
+ self.transfer_progress += 10
+
+ def _download(self, rel_path):
+ try:
+ log.debug("Pulling key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
+ key = self.bucket.get_key(rel_path)
+ # Test if cache is large enough to hold the new file
+ if self.cache_size > 0 and key.size > self.cache_size:
+ log.critical("File %s is larger (%s) than the cache size (%s). Cannot download." \
+ % (rel_path, key.size, self.cache_size))
+ return False
+ if self.use_axel:
+ log.debug("Parallel pulled key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
+ ncores = multiprocessing.cpu_count()
+ url = key.generate_url(7200)
+ ret_code = subprocess.call("axel -a -n %s '%s'" % (ncores, url))
+ if ret_code == 0:
+ return True
+ else:
+ log.debug("Pulled key '%s' into cache to %s" % (rel_path, self._get_cache_path(rel_path)))
+ self.transfer_progress = 0 # Reset transfer progress counter
+ key.get_contents_to_filename(self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10)
+ return True
+ except S3ResponseError, ex:
+ log.error("Problem downloading key '%s' from S3 bucket '%s': %s" % (rel_path, self.bucket.name, ex))
+ return False
+
+ def _push_to_os(self, rel_path, source_file=None, from_string=None):
+ """
+ Push the file pointed to by ``rel_path`` to the object store naming the key
+ ``rel_path``. If ``source_file`` is provided, push that file instead while
+ still using ``rel_path`` as the key name.
+ If ``from_string`` is provided, set contents of the file to the value of
+ the string.
+ """
+ try:
+ source_file = source_file if source_file else self._get_cache_path(rel_path)
+ if os.path.exists(source_file):
+ key = Key(self.bucket, rel_path)
+ if os.path.getsize(source_file) == 0 and key.exists():
+ log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping." % (source_file, rel_path))
+ return True
+ if from_string:
+ key.set_contents_from_string(from_string, reduced_redundancy=self.use_rr)
+ log.debug("Pushed data from string '%s' to key '%s'" % (from_string, rel_path))
+ else:
+ start_time = datetime.now()
+ # print "Pushing cache file '%s' of size %s bytes to key '%s'" % (source_file, os.path.getsize(source_file), rel_path)
+ # print "+ Push started at '%s'" % start_time
+ mb_size = os.path.getsize(source_file) / 1e6
+ if mb_size < 60 or self.config.object_store == 'swift':
+ self.transfer_progress = 0 # Reset transfer progress counter
+ key.set_contents_from_filename(source_file, reduced_redundancy=self.use_rr,
+ cb=self._transfer_cb, num_cb=10)
+ else:
+ multipart_upload(self.bucket, key.name, source_file, mb_size, use_rr=self.use_rr)
+ end_time = datetime.now()
+ # print "+ Push ended at '%s'; %s bytes transfered in %ssec" % (end_time, os.path.getsize(source_file), end_time-start_time)
+ log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)" % (source_file, rel_path, os.path.getsize(source_file), end_time-start_time))
+ return True
+ else:
+ log.error("Tried updating key '%s' from source file '%s', but source file does not exist."
+ % (rel_path, source_file))
+ except S3ResponseError, ex:
+ log.error("Trouble pushing S3 key '%s' from file '%s': %s" % (rel_path, source_file, ex))
+ return False
+
+ def file_ready(self, obj, **kwargs):
+ """
+ A helper method that checks if a file corresponding to a dataset is
+ ready and available to be used. Return ``True`` if so, ``False`` otherwise.
+ """
+ rel_path = self._construct_path(obj, **kwargs)
+ # Make sure the size in cache is available in its entirety
+ if self._in_cache(rel_path):
+ if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_s3(rel_path):
+ return True
+ log.debug("Waiting for dataset {0} to transfer from OS: {1}/{2}".format(rel_path,
+ os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_s3(rel_path)))
+ return False
+
+ def exists(self, obj, **kwargs):
+ in_cache = in_s3 = False
+ rel_path = self._construct_path(obj, **kwargs)
+ # Check cache
+ if self._in_cache(rel_path):
+ in_cache = True
+ # Check S3
+ in_s3 = self._key_exists(rel_path)
+ # log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
+ # dir_only does not get synced so shortcut the decision
+ dir_only = kwargs.get('dir_only', False)
+ if dir_only:
+ if in_cache or in_s3:
+ return True
+ else:
+ return False
+ # TODO: Sync should probably not be done here. Add this to an async upload stack?
+ if in_cache and not in_s3:
+ self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
+ return True
+ elif in_s3:
+ return True
+ else:
+ return False
+
+ def create(self, obj, **kwargs):
+ if not self.exists(obj, **kwargs):
+ #print "S3 OS creating a dataset with ID %s" % kwargs
+ # Pull out locally used fields
+ extra_dir = kwargs.get('extra_dir', None)
+ extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
+ dir_only = kwargs.get('dir_only', False)
+ alt_name = kwargs.get('alt_name', None)
+ # print "---- Processing: %s; %s" % (alt_name, locals())
+ # Construct hashed path
+ rel_path = os.path.join(*directory_hash_id(obj.id))
+
+ # Optionally append extra_dir
+ if extra_dir is not None:
+ if extra_dir_at_root:
+ rel_path = os.path.join(extra_dir, rel_path)
+ else:
+ rel_path = os.path.join(rel_path, extra_dir)
+ # Create given directory in cache
+ cache_dir = os.path.join(self.staging_path, rel_path)
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+ # Although not really necessary to create S3 folders (because S3 has
+ # flat namespace), do so for consistency with the regular file system
+ # S3 folders are marked by having trailing '/' so add it now
+ # s3_dir = '%s/' % rel_path
+ # self._push_to_os(s3_dir, from_string='')
+ # If instructed, create the dataset in cache & in S3
+ if not dir_only:
+ rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
+ open(os.path.join(self.staging_path, rel_path), 'w').close()
+ self._push_to_os(rel_path, from_string='')
+
+ def empty(self, obj, **kwargs):
+ if self.exists(obj, **kwargs):
+ return bool(self.size(obj, **kwargs) > 0)
+ else:
+ raise ObjectNotFound( 'objectstore.empty, object does not exist: %s, kwargs: %s'
+ %( str( obj ), str( kwargs ) ) )
+
+ def size(self, obj, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
+ if self._in_cache(rel_path):
+ try:
+ return os.path.getsize(self._get_cache_path(rel_path))
+ except OSError, ex:
+ log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s" % (rel_path, ex))
+ elif self.exists(obj, **kwargs):
+ return self._get_size_in_s3(rel_path)
+ log.warning("Did not find dataset '%s', returning 0 for size" % rel_path)
+ return 0
+
+ def delete(self, obj, entire_dir=False, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
+ extra_dir = kwargs.get('extra_dir', None)
+ try:
+ # For the case of extra_files, because we don't have a reference to
+ # individual files/keys we need to remove the entire directory structure
+ # with all the files in it. This is easy for the local file system,
+ # but requires iterating through each individual key in S3 and deleing it.
+ if entire_dir and extra_dir:
+ shutil.rmtree(self._get_cache_path(rel_path))
+ rs = self.bucket.get_all_keys(prefix=rel_path)
+ for key in rs:
+ log.debug("Deleting key %s" % key.name)
+ key.delete()
+ return True
+ else:
+ # Delete from cache first
+ os.unlink(self._get_cache_path(rel_path))
+ # Delete from S3 as well
+ if self._key_exists(rel_path):
+ key = Key(self.bucket, rel_path)
+ log.debug("Deleting key %s" % key.name)
+ key.delete()
+ return True
+ except S3ResponseError, ex:
+ log.error("Could not delete key '%s' from S3: %s" % (rel_path, ex))
+ except OSError, ex:
+ log.error('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
+ return False
+
+ def get_data(self, obj, start=0, count=-1, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
+ # Check cache first and get file if not there
+ if not self._in_cache(rel_path):
+ self._pull_into_cache(rel_path)
+ #else:
+ # print "(cccc) Getting '%s' from cache" % self._get_cache_path(rel_path)
+ # Read the file content from cache
+ data_file = open(self._get_cache_path(rel_path), 'r')
+ data_file.seek(start)
+ content = data_file.read(count)
+ data_file.close()
+ return content
+
+ def get_filename(self, obj, **kwargs):
+ #print "S3 get_filename for dataset: %s" % dataset_id
+ dir_only = kwargs.get('dir_only', False)
+ rel_path = self._construct_path(obj, **kwargs)
+ cache_path = self._get_cache_path(rel_path)
+ # S3 does not recognize directories as files so cannot check if those exist.
+ # So, if checking dir only, ensure given dir exists in cache and return
+ # the expected cache path.
+ # dir_only = kwargs.get('dir_only', False)
+ # if dir_only:
+ # if not os.path.exists(cache_path):
+ # os.makedirs(cache_path)
+ # return cache_path
+ # Check if the file exists in the cache first
+ if self._in_cache(rel_path):
+ return cache_path
+ # Check if the file exists in persistent storage and, if it does, pull it into cache
+ elif self.exists(obj, **kwargs):
+ if dir_only: # Directories do not get pulled into cache
+ return cache_path
+ else:
+ if self._pull_into_cache(rel_path):
+ return cache_path
+ # For the case of retrieving a directory only, return the expected path
+ # even if it does not exist.
+ # if dir_only:
+ # return cache_path
+ raise ObjectNotFound( 'objectstore.get_filename, no cache_path: %s, kwargs: %s'
+ %( str( obj ), str( kwargs ) ) )
+ # return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
+
+ def update_from_file(self, obj, file_name=None, create=False, **kwargs):
+ if create:
+ self.create(obj, **kwargs)
+ if self.exists(obj, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
+ # Chose whether to use the dataset file itself or an alternate file
+ if file_name:
+ source_file = os.path.abspath(file_name)
+ # Copy into cache
+ cache_file = self._get_cache_path(rel_path)
+ try:
+ if source_file != cache_file:
+ # FIXME? Should this be a `move`?
+ shutil.copy2(source_file, cache_file)
+ self._fix_permissions(cache_file)
+ except OSError, ex:
+ log.error("Trouble copying source file '%s' to cache '%s': %s" % (source_file, cache_file, ex))
+ else:
+ source_file = self._get_cache_path(rel_path)
+ # Update the file on S3
+ self._push_to_os(rel_path, source_file)
+ else:
+ raise ObjectNotFound( 'objectstore.update_from_file, object does not exist: %s, kwargs: %s'
+ %( str( obj ), str( kwargs ) ) )
+
+ def get_object_url(self, obj, **kwargs):
+ if self.exists(obj, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
+ try:
+ key = Key(self.bucket, rel_path)
+ return key.generate_url(expires_in = 86400) # 24hrs
+ except S3ResponseError, ex:
+ log.warning("Trouble generating URL for dataset '%s': %s" % (rel_path, ex))
+ return None
+
+ def get_store_usage_percent(self):
+ return 0.0
+
+
+def get_OS_connection(config):
+ """
+ Get a connection object for a cloud Object Store specified in the config.
+ Currently, this is a ``boto`` connection object.
+ """
+ log.debug("Getting a connection object for '{0}' object store".format(config.object_store))
+ a_key = config.os_access_key
+ s_key = config.os_secret_key
+ if config.object_store == 's3':
+ return S3Connection(a_key, s_key)
+ else:
+ # Establish the connection now
+ calling_format = boto.s3.connection.OrdinaryCallingFormat()
+ s3_conn = boto.connect_s3(aws_access_key_id=a_key,
+ aws_secret_access_key=s_key,
+ is_secure=config.os_is_secure,
+ host=config.os_host,
+ port=int(config.os_port),
+ calling_format=calling_format,
+ path=config.os_conn_path)
+ return s3_conn
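The practical effect of the move is that boto and the rest of the S3 code are only imported when the configured object store is actually s3 or swift. A condensed sketch of the deferred-import dispatch that the build_object_store_from_config hunk earlier in this diff introduces (the remaining backends and error handling are simplified away):

def build_object_store(config):
    store = getattr(config, 'object_store', 'disk')
    if store in ('s3', 'swift'):
        # Deferred import: the new galaxy.objectstore.s3 module is the only
        # place that pulls in boto, so disk-only deployments never need it.
        from galaxy.objectstore.s3 import S3ObjectStore
        return S3ObjectStore(config=config)
    if store == 'disk':
        from galaxy.objectstore import DiskObjectStore
        return DiskObjectStore(config=config)
    raise ValueError('Unrecognized object_store setting: %r' % store)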
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: guerler: Unused overlay in visualization mako
by commits-noreply@bitbucket.org 20 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/267d294711e9/
Changeset: 267d294711e9
User: guerler
Date: 2013-08-20 17:44:35
Summary: Unused overlay in visualization mako
Affected #: 1 file
diff -r 6e148e87d81925bb0b920e9e66c87cda0bd358c5 -r 267d294711e9f72dcfbff842ce263a1050be058f templates/webapps/galaxy/galaxy.panels.mako
--- a/templates/webapps/galaxy/galaxy.panels.mako
+++ b/templates/webapps/galaxy/galaxy.panels.mako
@@ -12,7 +12,6 @@
'left_panel' : False,
'right_panel' : False,
'message_box' : False,
- 'overlay' : False,
## root
'root' : h.url_for("/"),
@@ -158,16 +157,12 @@
</%def>
## overlay
-<%def name="overlay( title='', content='', visible=False )">
+<%def name="overlay( title='', content='')"><%def name="title()"></%def><%def name="content()"></%def><%
- if visible:
- display = "style='display: block;'"
- overlay_class = "in"
- else:
- display = "style='display: none;'"
- overlay_class = ""
+ display = "style='display: none;'"
+ overlay_class = ""
%><div id="overlay" ${display}>
@@ -227,7 +222,7 @@
%endif
## overlay
- ${self.overlay(visible=self.galaxy_config['overlay'])}
+ ${self.overlay()}
## left panel
%if self.galaxy_config['left_panel']:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/29ab5a6d75a7/
Changeset: 29ab5a6d75a7
Branch: stable
User: greg
Date: 2013-08-20 17:13:54
Summary: Fix for retrieving an installed tool shed repository record from the Galaxy database during installation of a repository dependency where the dependency definition defines a changeset_revision that is older than the changeset_revision associated with the installed repository. This occurs only with repositories of type tool_dependency_definition.
Affected #: 4 files
diff -r 6822f41bc9bb2a2bf4673d6dcdeb1939730d970f -r 29ab5a6d75a778cd4989adf881c47916607b9acb lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -1639,9 +1639,14 @@
repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
repository_id = trans.security.encode_id( repository.id )
repository_clone_url = suc.generate_clone_url_for_repository_in_tool_shed( trans, repository )
- repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, repository_id, changeset_revision )
repo_dir = repository.repo_path( trans.app )
repo = hg.repository( suc.get_configured_ui(), repo_dir )
+ repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, repository_id, changeset_revision )
+ if not repository_metadata:
+ # The received changeset_revision is no longer associated with metadata, so get the next changeset_revision in the repository
+ # changelog that is associated with metadata.
+ changeset_revision = suc.get_next_downloadable_changeset_revision( repository, repo, after_changeset_revision=changeset_revision )
+ repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, repository_id, changeset_revision )
ctx = suc.get_changectx_for_changeset( repo, changeset_revision )
repo_info_dict = repository_util.create_repo_info_dict( trans=trans,
repository_clone_url=repository_clone_url,
@@ -2138,6 +2143,25 @@
self.email_alerts_repository_grid.title = "Set email alerts for repository changes"
return self.email_alerts_repository_grid( trans, **kwd )
+ @web.expose
+ def next_installable_changeset_revision( self, trans, **kwd ):
+ """
+ Handle a request from a Galaxy instance where the changeset_revision defined for a repository in a dependency definition file is older
+ than the changeset_revision associated with the installed repository. This will occur with repository's of type tool_dependency_definition,
+ and this scenario will occur while repository dependency hierarchies are bing installed.
+ """
+ name = kwd.get( 'name', None )
+ owner = kwd.get( 'owner', None )
+ changeset_revision = kwd.get( 'changeset_revision', None )
+ repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
+ repo_dir = repository.repo_path( trans.app )
+ repo = hg.repository( suc.get_configured_ui(), repo_dir )
+ # Get the next installable changeset_revision beyond the received changeset_revision.
+ changeset_revision = suc.get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
+ if changeset_revision:
+ return changeset_revision
+ return ''
+
@web.json
def open_folder( self, trans, folder_path ):
# Avoid caching
@@ -2199,8 +2223,6 @@
older changeset_revsion, but later the repository was updated in the tool shed and the Galaxy admin is trying to install the latest
changeset revision of the same repository instead of updating the one that was previously installed.
"""
- message = kwd.get( 'message', '' )
- status = kwd.get( 'status', 'done' )
name = kwd.get( 'name', None )
owner = kwd.get( 'owner', None )
changeset_revision = kwd.get( 'changeset_revision', None )
diff -r 6822f41bc9bb2a2bf4673d6dcdeb1939730d970f -r 29ab5a6d75a778cd4989adf881c47916607b9acb lib/tool_shed/galaxy_install/tool_dependencies/common_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/common_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/common_util.py
@@ -11,6 +11,12 @@
log = logging.getLogger( __name__ )
+def clean_tool_shed_url( base_url ):
+ if base_url:
+ protocol, base = base_url.split( '://' )
+ return base.rstrip( '/' )
+ return base_url
+
def create_env_var_dict( elem, tool_dependency_install_dir=None, tool_shed_repository_install_dir=None ):
env_var_name = elem.get( 'name', 'PATH' )
env_var_action = elem.get( 'action', 'prepend_to' )
@@ -121,6 +127,7 @@
repository_owner = elem.get( 'owner', None )
changeset_revision = elem.get( 'changeset_revision', None )
if toolshed and repository_name and repository_owner and changeset_revision:
+ toolshed = clean_tool_shed_url( toolshed )
repository = suc.get_repository_for_dependency_relationship( app, toolshed, repository_name, repository_owner, changeset_revision )
if repository:
for sub_elem in elem:
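For reference, a minimal sketch of what the relocated helper does once Galaxy's lib/ directory is on sys.path; the example URL is hypothetical and the calls are illustration only, not part of the commit:
from tool_shed.galaxy_install.tool_dependencies import common_util
# Normalizes a tool shed URL to a bare host: the protocol and any trailing slash are stripped.
print( common_util.clean_tool_shed_url( 'http://toolshed.g2.bx.psu.edu/' ) )  # toolshed.g2.bx.psu.edu
# Falsy values (e.g. None) are passed through unchanged by the guard added above.
print( common_util.clean_tool_shed_url( None ) )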
diff -r 6822f41bc9bb2a2bf4673d6dcdeb1939730d970f -r 29ab5a6d75a778cd4989adf881c47916607b9acb lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
@@ -17,10 +17,6 @@
log = logging.getLogger( __name__ )
-def clean_tool_shed_url( base_url ):
- protocol, base = base_url.split( '://' )
- return base.rstrip( '/' )
-
def create_temporary_tool_dependencies_config( app, tool_shed_url, name, owner, changeset_revision ):
"""Make a call to the tool shed to get the required repository's tool_dependencies.xml file."""
url = url_join( tool_shed_url,
@@ -54,7 +50,7 @@
def get_tool_shed_repository_by_tool_shed_name_owner_changeset_revision( app, tool_shed_url, name, owner, changeset_revision ):
sa_session = app.model.context.current
- tool_shed = clean_tool_shed_url( tool_shed_url )
+ tool_shed = common_util.clean_tool_shed_url( tool_shed_url )
tool_shed_repository = sa_session.query( app.model.ToolShedRepository ) \
.filter( and_( app.model.ToolShedRepository.table.c.tool_shed == tool_shed,
app.model.ToolShedRepository.table.c.name == name,
diff -r 6822f41bc9bb2a2bf4673d6dcdeb1939730d970f -r 29ab5a6d75a778cd4989adf881c47916607b9acb lib/tool_shed/util/shed_util_common.py
--- a/lib/tool_shed/util/shed_util_common.py
+++ b/lib/tool_shed/util/shed_util_common.py
@@ -760,6 +760,20 @@
name=name,
owner=owner,
changeset_revision=changeset_revision )
+ if not repository:
+ # The received changeset_revision is no longer installable, so get the next changeset_revision in the repository's changelog in the
+ # tool shed that is associated with repository_metadata.
+ tool_shed_url = get_url_from_tool_shed( app, tool_shed )
+ url = url_join( tool_shed_url,
+ 'repository/next_installable_changeset_revision?galaxy_url=%s&name=%s&owner=%s&changeset_revision=%s' % \
+ ( url_for( '/', qualified=True ), name, owner, changeset_revision ) )
+ text = common_util.tool_shed_get( app, tool_shed_url, url )
+ if text:
+ repository = get_tool_shed_repository_by_shed_name_owner_changeset_revision( app=app,
+ tool_shed=tool_shed,
+ name=name,
+ owner=owner,
+ changeset_revision=text )
return repository
def get_repository_file_contents( file_path ):
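A hedged sketch of the fallback request issued by the new block above, written as a standalone function for illustration; urllib2 stands in for common_util.tool_shed_get(), and every argument value would be supplied by the caller:
import urllib, urllib2

def next_installable_changeset_revision( tool_shed_url, galaxy_url, name, owner, changeset_revision ):
    # Mirrors the URL built in get_repository_for_dependency_relationship() above; the
    # tool shed controller replies with the bare changeset hash, or an empty string.
    params = urllib.urlencode( dict( galaxy_url=galaxy_url, name=name, owner=owner, changeset_revision=changeset_revision ) )
    response = urllib2.urlopen( '%s/repository/next_installable_changeset_revision?%s' % ( tool_shed_url.rstrip( '/' ), params ) )
    try:
        return response.read().strip()
    finally:
        response.close()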
https://bitbucket.org/galaxy/galaxy-central/commits/6e148e87d819/
Changeset: 6e148e87d819
User: greg
Date: 2013-08-20 17:14:53
Summary: Merge with 29ab5a6d75a778cd4989adf881c47916607b9acb
Affected #: 4 files
diff -r c42567f43aa762a45f7eb7fc0917260edcaa5636 -r 6e148e87d81925bb0b920e9e66c87cda0bd358c5 lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -1716,9 +1716,14 @@
repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
repository_id = trans.security.encode_id( repository.id )
repository_clone_url = suc.generate_clone_url_for_repository_in_tool_shed( trans, repository )
- repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, repository_id, changeset_revision )
repo_dir = repository.repo_path( trans.app )
repo = hg.repository( suc.get_configured_ui(), repo_dir )
+ repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, repository_id, changeset_revision )
+ if not repository_metadata:
+ # The received changeset_revision is no longer associated with metadata, so get the next changeset_revision in the repository
+ # changelog that is associated with metadata.
+ changeset_revision = suc.get_next_downloadable_changeset_revision( repository, repo, after_changeset_revision=changeset_revision )
+ repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, repository_id, changeset_revision )
ctx = suc.get_changectx_for_changeset( repo, changeset_revision )
repo_info_dict = repository_util.create_repo_info_dict( trans=trans,
repository_clone_url=repository_clone_url,
@@ -2215,6 +2220,25 @@
self.email_alerts_repository_grid.title = "Set email alerts for repository changes"
return self.email_alerts_repository_grid( trans, **kwd )
+ @web.expose
+ def next_installable_changeset_revision( self, trans, **kwd ):
+ """
+ Handle a request from a Galaxy instance where the changeset_revision defined for a repository in a dependency definition file is older
+ than the changeset_revision associated with the installed repository. This will occur with repositories of type tool_dependency_definition,
+ and this scenario will occur while repository dependency hierarchies are being installed.
+ """
+ name = kwd.get( 'name', None )
+ owner = kwd.get( 'owner', None )
+ changeset_revision = kwd.get( 'changeset_revision', None )
+ repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
+ repo_dir = repository.repo_path( trans.app )
+ repo = hg.repository( suc.get_configured_ui(), repo_dir )
+ # Get the next installable changeset_revision beyond the received changeset_revision.
+ changeset_revision = suc.get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
+ if changeset_revision:
+ return changeset_revision
+ return ''
+
@web.json
def open_folder( self, trans, folder_path ):
# Avoid caching
@@ -2276,8 +2300,6 @@
older changeset_revision, but later the repository was updated in the tool shed and the Galaxy admin is trying to install the latest
changeset revision of the same repository instead of updating the one that was previously installed.
"""
- message = kwd.get( 'message', '' )
- status = kwd.get( 'status', 'done' )
name = kwd.get( 'name', None )
owner = kwd.get( 'owner', None )
changeset_revision = kwd.get( 'changeset_revision', None )
diff -r c42567f43aa762a45f7eb7fc0917260edcaa5636 -r 6e148e87d81925bb0b920e9e66c87cda0bd358c5 lib/tool_shed/galaxy_install/tool_dependencies/common_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/common_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/common_util.py
@@ -12,6 +12,12 @@
log = logging.getLogger( __name__ )
+def clean_tool_shed_url( base_url ):
+ if base_url:
+ protocol, base = base_url.split( '://' )
+ return base.rstrip( '/' )
+ return base_url
+
def create_env_var_dict( elem, tool_dependency_install_dir=None, tool_shed_repository_install_dir=None ):
env_var_name = elem.get( 'name', 'PATH' )
env_var_action = elem.get( 'action', 'prepend_to' )
@@ -129,6 +135,7 @@
repository_owner = elem.get( 'owner', None )
changeset_revision = elem.get( 'changeset_revision', None )
if toolshed and repository_name and repository_owner and changeset_revision:
+ toolshed = clean_tool_shed_url( toolshed )
repository = suc.get_repository_for_dependency_relationship( app, toolshed, repository_name, repository_owner, changeset_revision )
if repository:
for sub_elem in elem:
diff -r c42567f43aa762a45f7eb7fc0917260edcaa5636 -r 6e148e87d81925bb0b920e9e66c87cda0bd358c5 lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
@@ -18,10 +18,6 @@
log = logging.getLogger( __name__ )
-def clean_tool_shed_url( base_url ):
- protocol, base = base_url.split( '://' )
- return base.rstrip( '/' )
-
def create_temporary_tool_dependencies_config( app, tool_shed_url, name, owner, changeset_revision ):
"""Make a call to the tool shed to get the required repository's tool_dependencies.xml file."""
url = url_join( tool_shed_url,
@@ -55,7 +51,7 @@
def get_tool_shed_repository_by_tool_shed_name_owner_changeset_revision( app, tool_shed_url, name, owner, changeset_revision ):
sa_session = app.model.context.current
- tool_shed = clean_tool_shed_url( tool_shed_url )
+ tool_shed = common_util.clean_tool_shed_url( tool_shed_url )
tool_shed_repository = sa_session.query( app.model.ToolShedRepository ) \
.filter( and_( app.model.ToolShedRepository.table.c.tool_shed == tool_shed,
app.model.ToolShedRepository.table.c.name == name,
diff -r c42567f43aa762a45f7eb7fc0917260edcaa5636 -r 6e148e87d81925bb0b920e9e66c87cda0bd358c5 lib/tool_shed/util/shed_util_common.py
--- a/lib/tool_shed/util/shed_util_common.py
+++ b/lib/tool_shed/util/shed_util_common.py
@@ -867,6 +867,20 @@
name=name,
owner=owner,
changeset_revision=changeset_revision )
+ if not repository:
+ # The received changeset_revision is no longer installable, so get the next changeset_revision in the repository's changelog in the
+ # tool shed that is associated with repository_metadata.
+ tool_shed_url = get_url_from_tool_shed( app, tool_shed )
+ url = url_join( tool_shed_url,
+ 'repository/next_installable_changeset_revision?galaxy_url=%s&name=%s&owner=%s&changeset_revision=%s' % \
+ ( url_for( '/', qualified=True ), name, owner, changeset_revision ) )
+ text = common_util.tool_shed_get( app, tool_shed_url, url )
+ if text:
+ repository = get_tool_shed_repository_by_shed_name_owner_changeset_revision( app=app,
+ tool_shed=tool_shed,
+ name=name,
+ owner=owner,
+ changeset_revision=text )
return repository
def get_repository_file_contents( file_path ):
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Filter invalid objects when generating the list of repository_dependencies objects that are associated with a tool shed repository installed into Galaxy.
by commits-noreply@bitbucket.org 19 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c42567f43aa7/
Changeset: c42567f43aa7
User: greg
Date: 2013-08-19 19:19:56
Summary: Filter invalid objects when generating the list of repository_dependencies objects that are associated with a tool shed repository installed into Galaxy.
Affected #: 1 file
diff -r e9ee9c5d30aef4390a94823990221d3876785726 -r c42567f43aa762a45f7eb7fc0917260edcaa5636 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -3548,7 +3548,8 @@
for rrda in self.required_repositories:
repository_dependency = rrda.repository_dependency
required_repository = repository_dependency.repository
- required_repositories.append( required_repository )
+ if required_repository:
+ required_repositories.append( required_repository )
return required_repositories
@property
def installed_repository_dependencies( self ):
@@ -3568,6 +3569,7 @@
return missing_required_repositories
@property
def repository_dependencies_being_installed( self ):
+ """Return the repository's repository dependencies that are currently being installed."""
required_repositories_being_installed = []
for required_repository in self.repository_dependencies:
if required_repository.status == self.installation_status.INSTALLING:
@@ -3575,6 +3577,7 @@
return required_repositories_being_installed
@property
def repository_dependencies_missing_or_being_installed( self ):
+ """Return the repository's repository dependencies that are either missing or currently being installed."""
required_repositories_missing_or_being_installed = []
for required_repository in self.repository_dependencies:
if required_repository.status in [ self.installation_status.ERROR,
@@ -3585,6 +3588,7 @@
return required_repositories_missing_or_being_installed
@property
def repository_dependencies_with_installation_errors( self ):
+ """Return the repository's repository dependencies that have installation errors."""
required_repositories_with_installation_errors = []
for required_repository in self.repository_dependencies:
if required_repository.status == self.installation_status.ERROR:
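A self-contained illustration of the effect of the new guard, with plain objects standing in for the SQLAlchemy models (not the committed code):
class _Dependency( object ):
    def __init__( self, repository ):
        self.repository = repository

dependencies = [ _Dependency( 'repo_a' ), _Dependency( None ), _Dependency( 'repo_b' ) ]
# Filter out invalid (None) repository objects, as required_repositories now does.
required_repositories = [ dependency.repository for dependency in dependencies if dependency.repository ]
print( required_repositories )  # [ 'repo_a', 'repo_b' ]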
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/6822f41bc9bb/
Changeset: 6822f41bc9bb
Branch: stable
User: Dave Bouvier
Date: 2013-08-19 19:06:17
Summary: Fix for case where running functional tests might overwrite certain files in database/files.
Affected #: 1 file
diff -r d05bf67aefa670f4d004db3c1494e9052c0c0d9c -r 6822f41bc9bb2a2bf4673d6dcdeb1939730d970f scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
import os, sys, shutil, tempfile, re
+from ConfigParser import SafeConfigParser
# Assume we are run from the galaxy root directory, add lib to the python path
cwd = os.getcwd()
@@ -86,6 +87,39 @@
global_conf.update( get_static_settings() )
return global_conf
+def generate_config_file( input_filename, output_filename, config_items ):
+ '''
+ Generate a config file with the configuration that has been defined for the embedded web application.
+ This is mostly relevant when setting metadata externally, since the script for doing that does not
+ have access to app.config.
+ '''
+ cp = SafeConfigParser()
+ cp.read( input_filename )
+ config_items_by_section = []
+ for label, value in config_items:
+ found = False
+ # Attempt to determine the correct section for this configuration option.
+ for section in cp.sections():
+ if cp.has_option( section, label ):
+ config_tuple = section, label, value
+ config_items_by_section.append( config_tuple )
+ found = True
+ continue
+ # Default to app:main if no section was found.
+ if not found:
+ config_tuple = 'app:main', label, value
+ config_items_by_section.append( config_tuple )
+ print( config_items_by_section )
+ # Replace the default values with the provided configuration.
+ for section, label, value in config_items_by_section:
+
+ if cp.has_option( section, label ):
+ cp.remove_option( section, label )
+ cp.set( section, label, str( value ) )
+ fh = open( output_filename, 'w' )
+ cp.write( fh )
+ fh.close()
+
def run_tests( test_config ):
loader = nose.loader.TestLoader( config=test_config )
plug_loader = test_config.plugins.prepareTestLoader( loader )
@@ -145,6 +179,9 @@
shed_tool_data_table_config = 'shed_tool_data_table_conf.xml'
tool_dependency_dir = os.environ.get( 'GALAXY_TOOL_DEPENDENCY_DIR', None )
use_distributed_object_store = os.environ.get( 'GALAXY_USE_DISTRIBUTED_OBJECT_STORE', False )
+ galaxy_test_tmp_dir = os.environ.get( 'GALAXY_TEST_TMP_DIR', None )
+ if galaxy_test_tmp_dir is None:
+ galaxy_test_tmp_dir = tempfile.mkdtemp()
if start_server:
psu_production = False
@@ -193,13 +230,14 @@
default_cluster_job_runner = default_cluster_job_runner )
psu_production = True
else:
+ # Configure the database path.
if 'GALAXY_TEST_DBPATH' in os.environ:
- db_path = os.environ['GALAXY_TEST_DBPATH']
+ galaxy_db_path = os.environ[ 'GALAXY_TEST_DBPATH' ]
else:
- tempdir = tempfile.mkdtemp()
- db_path = tempfile.mkdtemp( prefix='database_', dir=tempdir )
- # FIXME: This is a workaround for cases where metadata is being set externally.
- file_path = os.path.join( 'database', 'files' )
+ tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
+ galaxy_db_path = os.path.join( tempdir, 'database' )
+ # Configure the paths Galaxy needs to test tools.
+ file_path = os.path.join( galaxy_db_path, 'files' )
new_file_path = tempfile.mkdtemp( prefix='new_files_path_', dir=tempdir )
job_working_directory = tempfile.mkdtemp( prefix='job_working_directory_', dir=tempdir )
if 'GALAXY_TEST_DBURI' in os.environ:
@@ -217,9 +255,32 @@
# ---- Build Application --------------------------------------------------
app = None
if start_server:
- global_conf = { '__file__' : 'universe_wsgi.ini.sample' }
+ kwargs = dict( admin_users = 'test(a)bx.psu.edu',
+ allow_library_path_paste = True,
+ allow_user_creation = True,
+ allow_user_deletion = True,
+ database_connection = database_connection,
+ datatype_converters_config_file = "datatype_converters_conf.xml.sample",
+ file_path = file_path,
+ id_secret = 'changethisinproductiontoo',
+ job_queue_workers = 5,
+ job_working_directory = job_working_directory,
+ library_import_dir = library_import_dir,
+ log_destination = "stdout",
+ new_file_path = new_file_path,
+ running_functional_tests = True,
+ shed_tool_data_table_config = shed_tool_data_table_config,
+ template_path = "templates",
+ test_conf = "test.conf",
+ tool_config_file = tool_config_file,
+ tool_data_table_config_path = tool_data_table_config_path,
+ tool_path = tool_path,
+ tool_parse_help = False,
+ update_integrated_tool_panel = False,
+ use_heartbeat = False,
+ user_library_import_dir = user_library_import_dir )
if psu_production:
- global_conf = None
+ kwargs[ 'global_conf' ] = None
if not database_connection.startswith( 'sqlite://' ):
kwargs[ 'database_engine_option_max_overflow' ] = '20'
kwargs[ 'database_engine_option_pool_size' ] = '10'
@@ -228,40 +289,29 @@
if use_distributed_object_store:
kwargs[ 'object_store' ] = 'distributed'
kwargs[ 'distributed_object_store_config_file' ] = 'distributed_object_store_conf.xml.sample'
+ # If the user has passed in a path for the .ini file, do not overwrite it.
+ galaxy_config_file = os.environ.get( 'GALAXY_TEST_INI_FILE', None )
+ if not galaxy_config_file:
+ galaxy_config_file = os.path.join( galaxy_test_tmp_dir, 'functional_tests_wsgi.ini' )
+ config_items = []
+ for label in kwargs:
+ config_tuple = label, kwargs[ label ]
+ config_items.append( config_tuple )
+ # Write a temporary file, based on universe_wsgi.ini.sample, using the configuration options defined above.
+ generate_config_file( 'universe_wsgi.ini.sample', galaxy_config_file, config_items )
+ # Set the global_conf[ '__file__' ] option to the location of the temporary .ini file, which gets passed to set_metadata.sh.
+ kwargs[ 'global_conf' ] = get_webapp_global_conf()
+ kwargs[ 'global_conf' ][ '__file__' ] = galaxy_config_file
+ kwargs[ 'config_file' ] = galaxy_config_file
# Build the Universe Application
- app = UniverseApplication( admin_users = 'test(a)bx.psu.edu',
- allow_library_path_paste = True,
- allow_user_creation = True,
- allow_user_deletion = True,
- database_connection = database_connection,
- datatype_converters_config_file = "datatype_converters_conf.xml.sample",
- file_path = file_path,
- global_conf = global_conf,
- id_secret = 'changethisinproductiontoo',
- job_queue_workers = 5,
- job_working_directory = job_working_directory,
- library_import_dir = library_import_dir,
- log_destination = "stdout",
- new_file_path = new_file_path,
- running_functional_tests = True,
- shed_tool_data_table_config = shed_tool_data_table_config,
- template_path = "templates",
- test_conf = "test.conf",
- tool_config_file = tool_config_file,
- tool_data_table_config_path = tool_data_table_config_path,
- tool_path = tool_path,
- tool_parse_help = False,
- update_integrated_tool_panel = False,
- use_heartbeat = False,
- user_library_import_dir = user_library_import_dir,
- **kwargs )
+ app = UniverseApplication( **kwargs )
log.info( "Embedded Universe application started" )
# ---- Run webserver ------------------------------------------------------
server = None
if start_server:
- webapp = buildapp.app_factory( get_webapp_global_conf(), app=app,
+ webapp = buildapp.app_factory( kwargs[ 'global_conf' ], app=app,
use_translogger=False, static_enabled=STATIC_ENABLED )
if galaxy_test_port is not None:
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
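For context, a hedged sketch of exercising the new generate_config_file() helper on its own; it assumes the interpreter is started from the Galaxy root with scripts/ on sys.path (as a later commit in this digest arranges), and the override values are hypothetical:
import os, tempfile
from functional_tests import generate_config_file

# Arbitrary example overrides; each value is str()-coerced by the helper.
config_items = [ ( 'file_path', '/tmp/galaxy_test/files' ),
                 ( 'allow_user_creation', True ) ]
output_ini = os.path.join( tempfile.mkdtemp(), 'functional_tests_wsgi.ini' )
# Options are written into the section of universe_wsgi.ini.sample that already defines
# them, falling back to [app:main] when no matching section is found.
generate_config_file( 'universe_wsgi.ini.sample', output_ini, config_items )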
https://bitbucket.org/galaxy/galaxy-central/commits/2ca0b65d5005/
Changeset: 2ca0b65d5005
User: Dave Bouvier
Date: 2013-08-19 19:06:42
Summary: Merge in bugfix from stable.
Affected #: 2 files
diff -r 2d64260eb6e31a992b78d82ee50f351bc3b95d07 -r 2ca0b65d5005f35df6b9c05ebcec64f9164ab972 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -3,3 +3,4 @@
75f09617abaadbc8cc732bb8ee519decaeb56ea7 release_2013.04.01
2cc8d10988e03257dc7b97f8bb332c7df745d1dd security_2013.04.08
524f246ca85395082719ae7a6ff72260d7ad5612 release_2013.06.03
+1ae95b3aa98d1ccf15b243ac3ce6a895eb7efc53 release_2013.08.12
diff -r 2d64260eb6e31a992b78d82ee50f351bc3b95d07 -r 2ca0b65d5005f35df6b9c05ebcec64f9164ab972 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
import os, sys, shutil, tempfile, re
+from ConfigParser import SafeConfigParser
# Assume we are run from the galaxy root directory, add lib to the python path
cwd = os.getcwd()
@@ -86,6 +87,39 @@
global_conf.update( get_static_settings() )
return global_conf
+def generate_config_file( input_filename, output_filename, config_items ):
+ '''
+ Generate a config file with the configuration that has been defined for the embedded web application.
+ This is mostly relevant when setting metadata externally, since the script for doing that does not
+ have access to app.config.
+ '''
+ cp = SafeConfigParser()
+ cp.read( input_filename )
+ config_items_by_section = []
+ for label, value in config_items:
+ found = False
+ # Attempt to determine the correct section for this configuration option.
+ for section in cp.sections():
+ if cp.has_option( section, label ):
+ config_tuple = section, label, value
+ config_items_by_section.append( config_tuple )
+ found = True
+ continue
+ # Default to app:main if no section was found.
+ if not found:
+ config_tuple = 'app:main', label, value
+ config_items_by_section.append( config_tuple )
+ print( config_items_by_section )
+ # Replace the default values with the provided configuration.
+ for section, label, value in config_items_by_section:
+
+ if cp.has_option( section, label ):
+ cp.remove_option( section, label )
+ cp.set( section, label, str( value ) )
+ fh = open( output_filename, 'w' )
+ cp.write( fh )
+ fh.close()
+
def run_tests( test_config ):
loader = nose.loader.TestLoader( config=test_config )
plug_loader = test_config.plugins.prepareTestLoader( loader )
@@ -145,6 +179,9 @@
shed_tool_data_table_config = 'shed_tool_data_table_conf.xml'
tool_dependency_dir = os.environ.get( 'GALAXY_TOOL_DEPENDENCY_DIR', None )
use_distributed_object_store = os.environ.get( 'GALAXY_USE_DISTRIBUTED_OBJECT_STORE', False )
+ galaxy_test_tmp_dir = os.environ.get( 'GALAXY_TEST_TMP_DIR', None )
+ if galaxy_test_tmp_dir is None:
+ galaxy_test_tmp_dir = tempfile.mkdtemp()
if start_server:
psu_production = False
@@ -193,13 +230,14 @@
default_cluster_job_runner = default_cluster_job_runner )
psu_production = True
else:
+ # Configure the database path.
if 'GALAXY_TEST_DBPATH' in os.environ:
- db_path = os.environ['GALAXY_TEST_DBPATH']
+ galaxy_db_path = os.environ[ 'GALAXY_TEST_DBPATH' ]
else:
- tempdir = tempfile.mkdtemp()
- db_path = tempfile.mkdtemp( prefix='database_', dir=tempdir )
- # FIXME: This is a workaround for cases where metadata is being set externally.
- file_path = os.path.join( 'database', 'files' )
+ tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
+ galaxy_db_path = os.path.join( tempdir, 'database' )
+ # Configure the paths Galaxy needs to test tools.
+ file_path = os.path.join( galaxy_db_path, 'files' )
new_file_path = tempfile.mkdtemp( prefix='new_files_path_', dir=tempdir )
job_working_directory = tempfile.mkdtemp( prefix='job_working_directory_', dir=tempdir )
if 'GALAXY_TEST_DBURI' in os.environ:
@@ -217,9 +255,32 @@
# ---- Build Application --------------------------------------------------
app = None
if start_server:
- global_conf = { '__file__' : 'universe_wsgi.ini.sample' }
+ kwargs = dict( admin_users = 'test(a)bx.psu.edu',
+ allow_library_path_paste = True,
+ allow_user_creation = True,
+ allow_user_deletion = True,
+ database_connection = database_connection,
+ datatype_converters_config_file = "datatype_converters_conf.xml.sample",
+ file_path = file_path,
+ id_secret = 'changethisinproductiontoo',
+ job_queue_workers = 5,
+ job_working_directory = job_working_directory,
+ library_import_dir = library_import_dir,
+ log_destination = "stdout",
+ new_file_path = new_file_path,
+ running_functional_tests = True,
+ shed_tool_data_table_config = shed_tool_data_table_config,
+ template_path = "templates",
+ test_conf = "test.conf",
+ tool_config_file = tool_config_file,
+ tool_data_table_config_path = tool_data_table_config_path,
+ tool_path = tool_path,
+ tool_parse_help = False,
+ update_integrated_tool_panel = False,
+ use_heartbeat = False,
+ user_library_import_dir = user_library_import_dir )
if psu_production:
- global_conf = None
+ kwargs[ 'global_conf' ] = None
if not database_connection.startswith( 'sqlite://' ):
kwargs[ 'database_engine_option_max_overflow' ] = '20'
kwargs[ 'database_engine_option_pool_size' ] = '10'
@@ -228,40 +289,29 @@
if use_distributed_object_store:
kwargs[ 'object_store' ] = 'distributed'
kwargs[ 'distributed_object_store_config_file' ] = 'distributed_object_store_conf.xml.sample'
+ # If the user has passed in a path for the .ini file, do not overwrite it.
+ galaxy_config_file = os.environ.get( 'GALAXY_TEST_INI_FILE', None )
+ if not galaxy_config_file:
+ galaxy_config_file = os.path.join( galaxy_test_tmp_dir, 'functional_tests_wsgi.ini' )
+ config_items = []
+ for label in kwargs:
+ config_tuple = label, kwargs[ label ]
+ config_items.append( config_tuple )
+ # Write a temporary file, based on universe_wsgi.ini.sample, using the configuration options defined above.
+ generate_config_file( 'universe_wsgi.ini.sample', galaxy_config_file, config_items )
+ # Set the global_conf[ '__file__' ] option to the location of the temporary .ini file, which gets passed to set_metadata.sh.
+ kwargs[ 'global_conf' ] = get_webapp_global_conf()
+ kwargs[ 'global_conf' ][ '__file__' ] = galaxy_config_file
+ kwargs[ 'config_file' ] = galaxy_config_file
# Build the Universe Application
- app = UniverseApplication( admin_users = 'test(a)bx.psu.edu',
- allow_library_path_paste = True,
- allow_user_creation = True,
- allow_user_deletion = True,
- database_connection = database_connection,
- datatype_converters_config_file = "datatype_converters_conf.xml.sample",
- file_path = file_path,
- global_conf = global_conf,
- id_secret = 'changethisinproductiontoo',
- job_queue_workers = 5,
- job_working_directory = job_working_directory,
- library_import_dir = library_import_dir,
- log_destination = "stdout",
- new_file_path = new_file_path,
- running_functional_tests = True,
- shed_tool_data_table_config = shed_tool_data_table_config,
- template_path = "templates",
- test_conf = "test.conf",
- tool_config_file = tool_config_file,
- tool_data_table_config_path = tool_data_table_config_path,
- tool_path = tool_path,
- tool_parse_help = False,
- update_integrated_tool_panel = False,
- use_heartbeat = False,
- user_library_import_dir = user_library_import_dir,
- **kwargs )
+ app = UniverseApplication( **kwargs )
log.info( "Embedded Universe application started" )
# ---- Run webserver ------------------------------------------------------
server = None
if start_server:
- webapp = buildapp.app_factory( get_webapp_global_conf(), app=app,
+ webapp = buildapp.app_factory( kwargs[ 'global_conf' ], app=app,
use_translogger=False, static_enabled=STATIC_ENABLED )
if galaxy_test_port is not None:
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
https://bitbucket.org/galaxy/galaxy-central/commits/e9ee9c5d30ae/
Changeset: e9ee9c5d30ae
User: Dave Bouvier
Date: 2013-08-19 19:08:45
Summary: Remove duplicate method from the install and test framework's functional_tests.py.
Affected #: 1 file
diff -r 2ca0b65d5005f35df6b9c05ebcec64f9164ab972 -r e9ee9c5d30aef4390a94823990221d3876785726 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -6,7 +6,6 @@
import os, sys, shutil, tempfile, re, string, urllib, platform
from time import strftime
-from ConfigParser import SafeConfigParser
# Assume we are run from the galaxy root directory, add lib to the python path
cwd = os.getcwd()
@@ -21,10 +20,11 @@
default_galaxy_locales = 'en'
default_galaxy_test_file_dir = "test-data"
os.environ[ 'GALAXY_INSTALL_TEST_TMP_DIR' ] = galaxy_test_tmp_dir
-new_path = [ os.path.join( cwd, "lib" ), os.path.join( cwd, 'test' ), os.path.join( cwd, 'scripts', 'api' ) ]
+new_path = [ os.path.join( cwd, "scripts" ), os.path.join( cwd, "lib" ), os.path.join( cwd, 'test' ), os.path.join( cwd, 'scripts', 'api' ) ]
new_path.extend( sys.path )
sys.path = new_path
+from functional_tests import generate_config_file
from galaxy import eggs
eggs.require( "nose" )
@@ -254,36 +254,6 @@
success = result.wasSuccessful()
return success
-def generate_config_file( input_filename, output_filename, config_items ):
- '''
- Generate a config file with the configuration that has been defined for the embedded web application.
- This is mostly relevant when setting metadata externally, since the script for doing that does not
- have access to app.config.
- '''
- cp = SafeConfigParser()
- cp.read( input_filename )
- config_items_by_section = []
- for label, value in config_items:
- found = False
- # Attempt to determine the correct section for this configuration option.
- for section in cp.sections():
- if cp.has_option( section, label ):
- config_tuple = section, label, value
- config_items_by_section.append( config_tuple )
- found = True
- continue
- # Default to app:main if no section was found.
- if not found:
- config_tuple = 'app:main', label, value
- config_items_by_section.append( config_tuple )
- # Replace the default values with the provided configuration.
- for section, label, value in config_items_by_section:
- cp.remove_option( section, label )
- cp.set( section, label, str( value ) )
- fh = open( output_filename, 'w' )
- cp.write( fh )
- fh.close()
-
def get_api_url( base, parts=[], params=None, key=None ):
if 'api' in parts and parts.index( 'api' ) != 0:
parts.pop( parts.index( 'api' ) )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: Dave Bouvier: Fix for exporting a repository that depends on a repository that has unicode characters in the description or long description.
by commits-noreply@bitbucket.org 19 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/2d64260eb6e3/
Changeset: 2d64260eb6e3
User: Dave Bouvier
Date: 2013-08-19 17:49:49
Summary: Fix for exporting a repository that depends on a repository that has unicode characters in the description or long description.
Affected #: 2 files
diff -r 778b6f7e338af5458abf1e54eaf20eefa17ef50b -r 2d64260eb6e31a992b78d82ee50f351bc3b95d07 lib/tool_shed/galaxy_install/repository_util.py
--- a/lib/tool_shed/galaxy_install/repository_util.py
+++ b/lib/tool_shed/galaxy_install/repository_util.py
@@ -83,8 +83,8 @@
requirements_dict[ 'changeset_revision' ] = changeset_revision
new_tool_dependencies[ dependency_key ] = requirements_dict
tool_dependencies = new_tool_dependencies
- # Cast unicode to string.
- repo_info_dict[ str( repository.name ) ] = ( str( repository.description ),
+ # Cast unicode to string, with the exception of description, since it is free text and can contain special characters.
+ repo_info_dict[ str( repository.name ) ] = ( repository.description,
str( repository_clone_url ),
str( changeset_revision ),
str( ctx_rev ),
diff -r 778b6f7e338af5458abf1e54eaf20eefa17ef50b -r 2d64260eb6e31a992b78d82ee50f351bc3b95d07 lib/tool_shed/util/export_util.py
--- a/lib/tool_shed/util/export_util.py
+++ b/lib/tool_shed/util/export_util.py
@@ -243,8 +243,9 @@
attributes[ 'type' ] = str( repository.type )
# We have to associate the public username since the user_id will be different between tool sheds.
attributes[ 'username' ] = str( repository.user.username )
- sub_elements[ 'description' ] = str( repository.description )
- sub_elements[ 'long_description' ] = str( repository.long_description )
+ # Don't coerce description or long description from unicode to string because the fields are free text.
+ sub_elements[ 'description' ] = repository.description
+ sub_elements[ 'long_description' ] = repository.long_description
sub_elements[ 'archive' ] = archive_name
# Keep track of Category associations.
categories = []
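The underlying issue, for reference: under Python 2, str() on a unicode value implicitly encodes with the ASCII codec, so any non-ASCII character in these free-text fields raises UnicodeEncodeError; a self-contained illustration with a hypothetical description:
# -*- coding: utf-8 -*-
description = u'Variant annotation tools \u2013 beta'   # free text containing a non-ASCII dash
try:
    str( description )    # implicit ASCII encode under Python 2
except UnicodeEncodeError:
    print( 'str() fails on non-ASCII unicode; keep description/long_description as unicode' )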
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
16 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/778b6f7e338a/
Changeset: 778b6f7e338a
User: jgoecks
Date: 2013-08-16 18:06:55
Summary: Import fixes.
Affected #: 2 files
diff -r a62c04e39017dcc15d272ff30c73dd02ea73d330 -r 778b6f7e338af5458abf1e54eaf20eefa17ef50b lib/galaxy/tools/actions/history_imp_exp.py
--- a/lib/galaxy/tools/actions/history_imp_exp.py
+++ b/lib/galaxy/tools/actions/history_imp_exp.py
@@ -1,7 +1,7 @@
-import tempfile
+import tempfile, os
from __init__ import ToolAction
from galaxy.util.odict import odict
-from galaxy.tools.imp_exp import *
+from galaxy.tools.imp_exp import JobImportHistoryArchiveWrapper, JobExportHistoryArchiveWrapper
import logging
log = logging.getLogger( __name__ )
diff -r a62c04e39017dcc15d272ff30c73dd02ea73d330 -r 778b6f7e338af5458abf1e54eaf20eefa17ef50b lib/galaxy/tools/imp_exp/__init__.py
--- a/lib/galaxy/tools/imp_exp/__init__.py
+++ b/lib/galaxy/tools/imp_exp/__init__.py
@@ -26,7 +26,7 @@
<data format="gzip" name="output_file"/></outputs></tool>
- """
+ """
# Load export tool.
tmp_name = tempfile.NamedTemporaryFile()
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
16 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/a62c04e39017/
Changeset: a62c04e39017
User: dannon
Date: 2013-08-16 15:09:54
Summary: Fix cloudlaunch text.
Affected #: 1 file
diff -r 0977d06f1b8229c711ca5e04fdfa30656318eca9 -r a62c04e39017dcc15d272ff30c73dd02ea73d330 templates/webapps/galaxy/cloud/index.mako
--- a/templates/webapps/galaxy/cloud/index.mako
+++ b/templates/webapps/galaxy/cloud/index.mako
@@ -178,11 +178,14 @@
<div id="launchFormContainer" class="toolForm"><form id="cloudlaunch_form" action="${h.url_for( controller='/cloudlaunch', action='launch_instance')}" method="post">
- <p>To launch a Galaxy Cloud Cluster, enter your AWS Secret Key ID, and Secret Key. Galaxy will use
- these to present appropriate options for launching your cluster. Note that using this form to
- launch computational resources in the Amazon Cloud will result in costs to the account indicated
- above. See <a href="http://aws.amazon.com/ec2/pricing/">Amazon's pricing</a> for more information.
- options for launching your cluster.</p></p>
+ <p>To launch a Galaxy Cloud Cluster, enter your AWS Secret
+ Key ID, and Secret Key. Galaxy will use these to present
+ appropriate options for launching your cluster. Note that
+ using this form to launch computational resources in the
+ Amazon Cloud will result in costs to the account indicated
+ above.
+ See <a href="http://aws.amazon.com/ec2/pricing/">Amazon's
+ pricing</a> for more information.</p><div class="form-row"><label for="id_key_id">Key ID</label>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.