galaxy-commits
January 2012
- 1 participant
- 95 discussions
commit/galaxy-central: jgoecks: User check when fetching visualization chrom information.
by Bitbucket 10 Jan '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/8288ad7d6676/
changeset: 8288ad7d6676
user: jgoecks
date: 2012-01-10 18:39:59
summary: User check when fetching visualization chrom information.
affected #: 1 file
diff -r 55c537958418c66afad2ad79690a98589816f644 -r 8288ad7d66762867c6a9f3d0e1e3891e68142ad2 lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py
+++ b/lib/galaxy/web/controllers/tracks.py
@@ -336,7 +336,7 @@
len_file = None
len_ds = None
user_keys = {}
- if 'dbkeys' in dbkey_user.preferences:
+ if dbkey_user and 'dbkeys' in dbkey_user.preferences:
user_keys = from_json_string( dbkey_user.preferences['dbkeys'] )
if dbkey in user_keys:
dbkey_attributes = user_keys[ dbkey ]
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
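Editor's note: the one-line fix above guards against dbkey_user being None before touching its preferences. A minimal, self-contained sketch of that defensive pattern; the User class and JSON-encoded preferences here are illustrative stand-ins, not the Galaxy models.

import json

class User(object):
    """Illustrative stand-in for a user record with JSON-encoded preferences."""
    def __init__(self, preferences=None):
        self.preferences = preferences or {}

def user_dbkeys(dbkey_user):
    """Return the user's custom dbkeys, or {} if there is no user or the user
    has no 'dbkeys' preference (mirrors the guarded lookup in the diff above)."""
    if dbkey_user and 'dbkeys' in dbkey_user.preferences:
        return json.loads(dbkey_user.preferences['dbkeys'])
    return {}

# Anonymous request: no user object, so the lookup safely returns {}.
assert user_dbkeys(None) == {}
# Logged-in user with a custom build.
u = User(preferences={'dbkeys': json.dumps({'myBuild1': {'len': 'myBuild1.len'}})})
assert 'myBuild1' in user_dbkeys(u)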
09 Jan '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/46169ca11f43/
changeset: 46169ca11f43
user: natefoo
date: 2012-01-09 19:19:56
summary: Make paths to metadata temp files absolute.
affected #: 4 files
diff -r 2acedad2ef87c907975dd5afab5eef9ccde31987 -r 46169ca11f432651d3ff08281dfaf896ff4f5b8c lib/galaxy/datatypes/metadata.py
--- a/lib/galaxy/datatypes/metadata.py
+++ b/lib/galaxy/datatypes/metadata.py
@@ -1,6 +1,7 @@
import sys, logging, copy, shutil, weakref, cPickle, tempfile, os
+from os.path import abspath
-from galaxy.util import string_as_bool, relpath, stringify_dictionary_keys, listify
+from galaxy.util import string_as_bool, stringify_dictionary_keys, listify
from galaxy.util.odict import odict
from galaxy.web import form_builder
import galaxy.model
@@ -476,7 +477,7 @@
def file_name( self ):
if self._filename is None:
#we need to create a tmp file, accessable across all nodes/heads, save the name, and return it
- self._filename = relpath( tempfile.NamedTemporaryFile( dir = self.tmp_dir, prefix = "metadata_temp_file_" ).name )
+ self._filename = abspath( tempfile.NamedTemporaryFile( dir = self.tmp_dir, prefix = "metadata_temp_file_" ).name )
open( self._filename, 'wb+' ) #create an empty file, so it can't be reused using tempfile
return self._filename
def to_JSON( self ):
@@ -563,7 +564,7 @@
#is located differently, i.e. on a cluster node with a different filesystem structure
#file to store existing dataset
- metadata_files.filename_in = relpath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_in_%s_" % key ).name )
+ metadata_files.filename_in = abspath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_in_%s_" % key ).name )
#FIXME: HACK
#sqlalchemy introduced 'expire_on_commit' flag for sessionmaker at version 0.5x
@@ -574,17 +575,17 @@
cPickle.dump( dataset, open( metadata_files.filename_in, 'wb+' ) )
#file to store metadata results of set_meta()
- metadata_files.filename_out = relpath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_out_%s_" % key ).name )
+ metadata_files.filename_out = abspath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_out_%s_" % key ).name )
open( metadata_files.filename_out, 'wb+' ) # create the file on disk, so it cannot be reused by tempfile (unlikely, but possible)
#file to store a 'return code' indicating the results of the set_meta() call
#results code is like (True/False - if setting metadata was successful/failed , exception or string of reason of success/failure )
- metadata_files.filename_results_code = relpath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_results_%s_" % key ).name )
+ metadata_files.filename_results_code = abspath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_results_%s_" % key ).name )
simplejson.dump( ( False, 'External set_meta() not called' ), open( metadata_files.filename_results_code, 'wb+' ) ) # create the file on disk, so it cannot be reused by tempfile (unlikely, but possible)
#file to store kwds passed to set_meta()
- metadata_files.filename_kwds = relpath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_kwds_%s_" % key ).name )
+ metadata_files.filename_kwds = abspath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_kwds_%s_" % key ).name )
simplejson.dump( kwds, open( metadata_files.filename_kwds, 'wb+' ), ensure_ascii=True )
#existing metadata file parameters need to be overridden with cluster-writable file locations
- metadata_files.filename_override_metadata = relpath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_override_%s_" % key ).name )
+ metadata_files.filename_override_metadata = abspath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_override_%s_" % key ).name )
open( metadata_files.filename_override_metadata, 'wb+' ) # create the file on disk, so it cannot be reused by tempfile (unlikely, but possible)
override_metadata = []
for meta_key, spec_value in dataset.metadata.spec.iteritems():
diff -r 2acedad2ef87c907975dd5afab5eef9ccde31987 -r 46169ca11f432651d3ff08281dfaf896ff4f5b8c lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -709,11 +709,11 @@
try:
for fname in self.extra_filenames:
os.remove( fname )
- self.app.object_store.delete(self.get_job(), base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
if self.app.config.set_metadata_externally:
self.external_output_metadata.cleanup_external_metadata( self.sa_session )
galaxy.tools.imp_exp.JobExportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session )
galaxy.tools.imp_exp.JobImportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session )
+ self.app.object_store.delete(self.get_job(), base_dir='job_work', entire_dir=True, dir_only=True, extra_dir=str(self.job_id))
except:
log.exception( "Unable to cleanup job %d" % self.job_id )
diff -r 2acedad2ef87c907975dd5afab5eef9ccde31987 -r 46169ca11f432651d3ff08281dfaf896ff4f5b8c lib/galaxy/jobs/runners/__init__.py
--- a/lib/galaxy/jobs/runners/__init__.py
+++ b/lib/galaxy/jobs/runners/__init__.py
@@ -31,7 +31,7 @@
commands += "; cd %s; " % os.path.abspath( os.getcwd() )
commands += job_wrapper.setup_external_metadata(
exec_dir = os.path.abspath( os.getcwd() ),
- tmp_dir = self.app.config.new_file_path,
+ tmp_dir = job_wrapper.working_directory,
dataset_files_path = self.app.model.Dataset.file_path,
output_fnames = job_wrapper.get_output_fnames(),
set_extension = False,
diff -r 2acedad2ef87c907975dd5afab5eef9ccde31987 -r 46169ca11f432651d3ff08281dfaf896ff4f5b8c lib/galaxy/jobs/runners/local.py
--- a/lib/galaxy/jobs/runners/local.py
+++ b/lib/galaxy/jobs/runners/local.py
@@ -110,6 +110,7 @@
if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and self.app.config.set_metadata_externally and job_wrapper.output_paths:
external_metadata_script = job_wrapper.setup_external_metadata( output_fnames = job_wrapper.get_output_fnames(),
set_extension = True,
+ tmp_dir = job_wrapper.working_directory,
kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
external_metadata_proc = subprocess.Popen( args = external_metadata_script,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
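Editor's note: the changeset above switches the metadata temp-file names from relative to absolute paths so they stay valid when a job is run from a different working directory (for example on a cluster node). A small sketch of why that matters, using only the standard library; the relpath here is os.path.relpath rather than Galaxy's util.relpath, and the file names are illustrative.

import os
import tempfile

# Create a temp file inside a job working directory, as the metadata code does.
tmp_dir = tempfile.mkdtemp(prefix="job_work_")
tmp_file = tempfile.NamedTemporaryFile(dir=tmp_dir, prefix="metadata_temp_file_", delete=False)

rel_name = os.path.relpath(tmp_file.name)   # valid only from the current directory
abs_name = os.path.abspath(tmp_file.name)   # valid from anywhere

# After a chdir (a remote runner starts the job somewhere else), only the
# absolute path is guaranteed to resolve to the file.
os.chdir(tempfile.gettempdir())
assert os.path.exists(abs_name)
# The relative name may no longer resolve from the new directory; storing
# absolute paths, as the commit does, avoids exactly that failure mode.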
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/05f017e906b3/
changeset: 05f017e906b3
user: jgoecks
date: 2012-01-09 18:12:32
summary: Trackster: add icon that shows more features by increasing the number of rows shown. Update tipsy text in 'get more data' icons' to include message.
affected #: 4 files
diff -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e -r 05f017e906b33847b0d8ce50fcc3157ffa6ca0aa static/images/fugue/exclamation.png
Binary file static/images/fugue/exclamation.png has changed
diff -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e -r 05f017e906b33847b0d8ce50fcc3157ffa6ca0aa static/june_2007_style/blue/trackster.css
--- a/static/june_2007_style/blue/trackster.css
+++ b/static/june_2007_style/blue/trackster.css
@@ -57,6 +57,7 @@
.layer-transparent.active{background:transparent url(../images/fugue/layer-transparent.png) no-repeat;}
.arrow-resize-090{background:transparent url(../images/fugue/arrow-resize-090-bw.png) no-repeat;}
.arrow-resize-090.active{background:transparent url(../images/fugue/arrow-resize-090.png) no-repeat;}
+.exclamation{background:transparent url(../images/fugue/exclamation.png) no-repeat;}
.layers-stack{background:transparent url(../images/fugue/layers-stack-bw.png) no-repeat;}
.layers-stack:hover{background:transparent url(../images/fugue/layers-stack.png) no-repeat;}
.chevron-expand{background:transparent url(../images/fugue/chevron-expand-bw.png) no-repeat;}
diff -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e -r 05f017e906b33847b0d8ce50fcc3157ffa6ca0aa static/june_2007_style/trackster.css.tmpl
--- a/static/june_2007_style/trackster.css.tmpl
+++ b/static/june_2007_style/trackster.css.tmpl
@@ -297,6 +297,9 @@
.menu-button {
margin: 0px 4px 0px 4px;
}
+.exclamation{
+ background: transparent url(../images/fugue/exclamation.png) no-repeat;
+}
.layer-transparent {
background: transparent url(../images/fugue/layer-transparent-bw.png) no-repeat;
}
diff -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e -r 05f017e906b33847b0d8ce50fcc3157ffa6ca0aa static/scripts/trackster.js
--- a/static/scripts/trackster.js
+++ b/static/scripts/trackster.js
@@ -2507,7 +2507,7 @@
};
extend(SummaryTreeTile.prototype, Tile.prototype);
-var FeatureTrackTile = function(track, index, resolution, canvas, data, mode, message, feature_mapper) {
+var FeatureTrackTile = function(track, index, resolution, canvas, data, w_scale, mode, message, all_slotted, feature_mapper) {
// Attribute init.
Tile.call(this, track, index, resolution, canvas, data);
this.mode = mode;
@@ -2515,40 +2515,60 @@
this.feature_mapper = feature_mapper;
// Add message + action icons to tile's html.
- if (this.message) {
+ if (this.message || !all_slotted) {
var
+ tile = this;
canvas = this.html_elt.children()[0],
- message_div = $("<div/>").addClass("tile-message").text(this.message)
+ message_div = $("<div/>").addClass("tile-message")
// -1 to account for border.
- .css({'height': ERROR_PADDING-1, 'width': canvas.width}).prependTo(this.html_elt),
- more_down_icon = $("<a href='javascript:void(0);'/>").addClass("icon more-down")
- .attr("title", "Get more data including depth").tipsy( {gravity: 's'} ).appendTo(message_div),
- more_across_icon = $("<a href='javascript:void(0);'/>").addClass("icon more-across")
- .attr("title", "Get more data excluding depth").tipsy( {gravity: 's'} ).appendTo(message_div);
+ .css({'height': ERROR_PADDING-1, 'width': canvas.width}).prependTo(this.html_elt);
+
+ // Handle when not all elements are slotted.
+ if (!all_slotted) {
+ var icon = $("<a href='javascript:void(0);'/>").addClass("icon exclamation")
+ .attr("title", "To minimize track height, not all features in this region are displayed. Click to display more.")
+ .tipsy( {gravity: 's'} ).appendTo(message_div)
+ .click(function () {
+ $(".tipsy").hide();
+ tile.track.slotters[w_scale].max_rows *= 2;
+ tile.track.request_draw(true);
+ });
+ }
+
+ // Handle message; only message currently is that only the first N elements are displayed.
+ if (this.message) {
+ var
+ num_features = data.length,
+ more_down_icon = $("<a href='javascript:void(0);'/>").addClass("icon more-down")
+ .attr("title", "For speed, only the first " + num_features + " features in this region were obtained from server. Click to get more data including depth")
+ .tipsy( {gravity: 's'} ).appendTo(message_div),
+ more_across_icon = $("<a href='javascript:void(0);'/>").addClass("icon more-across")
+ .attr("title", "For speed, only the first " + num_features + " features in this region were obtained from server. Click to get more data excluding depth")
+ .tipsy( {gravity: 's'} ).appendTo(message_div);
- // Set up actions for icons.
- var tile = this;
- more_down_icon.click(function() {
- // Mark tile as stale, request more data, and redraw track.
- tile.stale = true;
- track.data_manager.get_more_data(tile.low, tile.high, track.mode, tile.resolution, {}, track.data_manager.DEEP_DATA_REQ);
- $(".tipsy").hide();
- track.request_draw();
- }).dblclick(function(e) {
- // Do not propogate as this would normally zoom in.
- e.stopPropagation();
- });
+ // Set up actions for icons.
+ more_down_icon.click(function() {
+ // Mark tile as stale, request more data, and redraw track.
+ tile.stale = true;
+ track.data_manager.get_more_data(tile.low, tile.high, track.mode, tile.resolution, {}, track.data_manager.DEEP_DATA_REQ);
+ $(".tipsy").hide();
+ track.request_draw();
+ }).dblclick(function(e) {
+ // Do not propogate as this would normally zoom in.
+ e.stopPropagation();
+ });
- more_across_icon.click(function() {
- // Mark tile as stale, request more data, and redraw track.
- tile.stale = true;
- track.data_manager.get_more_data(tile.low, tile.high, track.mode, tile.resolution, {}, track.data_manager.BROAD_DATA_REQ);
- $(".tipsy").hide();
- track.request_draw();
- }).dblclick(function(e) {
- // Do not propogate as this would normally zoom in.
- e.stopPropagation();
- });
+ more_across_icon.click(function() {
+ // Mark tile as stale, request more data, and redraw track.
+ tile.stale = true;
+ track.data_manager.get_more_data(tile.low, tile.high, track.mode, tile.resolution, {}, track.data_manager.BROAD_DATA_REQ);
+ $(".tipsy").hide();
+ track.request_draw();
+ }).dblclick(function(e) {
+ // Do not propogate as this would normally zoom in.
+ e.stopPropagation();
+ });
+ }
}
};
extend(FeatureTrackTile.prototype, Tile.prototype);
@@ -4003,7 +4023,7 @@
var dummy_context = this.view.canvas_manager.dummy_context,
slotter = this.slotters[level];
if (!slotter || (slotter.mode !== mode)) {
- slotter = new (slotting.FeatureSlotter)( level, mode === "Pack", MAX_FEATURE_DEPTH, function ( x ) { return dummy_context.measureText( x ) } );
+ slotter = new (slotting.FeatureSlotter)( level, mode, MAX_FEATURE_DEPTH, function ( x ) { return dummy_context.measureText( x ) } );
this.slotters[level] = slotter;
}
@@ -4188,8 +4208,11 @@
// Handle row-by-row tracks
- // Filter features.
- var filtered = [];
+ // Preprocessing: filter features and determine whether all unfiltered features have been slotted.
+ var
+ filtered = [],
+ slots = this.slotters[w_scale].slots;
+ all_slotted = true;
if ( result.data ) {
var filters = this.filters_manager.filters;
for (var i = 0, len = result.data.length; i < len; i++) {
@@ -4205,7 +4228,12 @@
}
}
if (!hide_feature) {
+ // Feature visible.
filtered.push(feature);
+ // Set flag if not slotted.
+ if ( !(feature[0] in slots) ) {
+ all_slotted = false;
+ }
}
}
}
@@ -4226,12 +4254,11 @@
if (result.data) {
// Draw features.
- slots = this.slotters[w_scale].slots;
feature_mapper = painter.draw(ctx, canvas.width, canvas.height, w_scale, slots);
feature_mapper.translation = -left_offset;
}
- return new FeatureTrackTile(track, tile_index, resolution, canvas, result.data, mode, result.message, feature_mapper);
+ return new FeatureTrackTile(track, tile_index, resolution, canvas, result.data, w_scale, mode, result.message, all_slotted, feature_mapper);
},
/**
* Returns true if data is compatible with a given mode.
@@ -4342,11 +4369,12 @@
* This implementation is incremental, any feature assigned a slot will be
* retained for slotting future features.
*/
-exports.FeatureSlotter = function (w_scale, include_label, max_rows, measureText) {
+exports.FeatureSlotter = function (w_scale, mode, max_rows, measureText) {
this.slots = {};
this.start_end_dct = {};
this.w_scale = w_scale;
- this.include_label = include_label;
+ this.mode = mode;
+ this.include_label = (mode === "Pack");
this.max_rows = max_rows;
this.measureText = measureText;
};
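Editor's note: the all_slotted flag and the clickable exclamation icon above hinge on the FeatureSlotter, which packs features into horizontal rows ("slots") up to max_rows and leaves the rest undrawn. A rough Python sketch of that greedy row-packing idea, independent of the Trackster code; the names are illustrative.

def slot_features(features, max_rows):
    """Assign each (uid, start, end) feature to the lowest row whose occupied
    intervals it does not overlap; features needing more than max_rows rows
    stay unslotted (the analogue of all_slotted being False)."""
    row_intervals = [[] for _ in range(max_rows)]  # occupied (start, end) pairs per row
    slots = {}                                     # feature uid -> row index
    for uid, start, end in sorted(features, key=lambda f: f[1]):
        for row, intervals in enumerate(row_intervals):
            if all(end <= s or start >= e for s, e in intervals):
                intervals.append((start, end))
                slots[uid] = row
                break
    all_slotted = len(slots) == len(features)
    return slots, all_slotted

features = [("a", 0, 10), ("b", 5, 15), ("c", 12, 20)]
slots, all_slotted = slot_features(features, max_rows=2)
# "a" and "c" share row 0, "b" goes to row 1; with max_rows=1, "b" would be dropped.

Doubling max_rows and redrawing, as the exclamation icon's click handler does with slotters[w_scale].max_rows *= 2, simply gives the packer more rows to work with on the next draw.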
https://bitbucket.org/galaxy/galaxy-central/changeset/2acedad2ef87/
changeset: 2acedad2ef87
user: jgoecks
date: 2012-01-09 18:12:49
summary: Merge.
affected #: 4 files
diff -r 05f017e906b33847b0d8ce50fcc3157ffa6ca0aa -r 2acedad2ef87c907975dd5afab5eef9ccde31987 lib/galaxy/app.py
--- a/lib/galaxy/app.py
+++ b/lib/galaxy/app.py
@@ -70,6 +70,8 @@
self.update_manager = update_manager.UpdateManager( self )
# Manage installed tool shed repositories
self.installed_repository_manager = galaxy.tool_shed.InstalledRepositoryManager( self )
+ # Load datatype display applications defined in local datatypes_conf.xml
+ self.datatypes_registry.load_display_applications()
# Load datatype converters defined in local datatypes_conf.xml
self.datatypes_registry.load_datatype_converters( self.toolbox )
# Load history import/export tools
diff -r 05f017e906b33847b0d8ce50fcc3157ffa6ca0aa -r 2acedad2ef87c907975dd5afab5eef9ccde31987 lib/galaxy/datatypes/registry.py
--- a/lib/galaxy/datatypes/registry.py
+++ b/lib/galaxy/datatypes/registry.py
@@ -28,19 +28,28 @@
self.set_external_metadata_tool = None
self.sniff_order = []
self.upload_file_formats = []
+ # Datatype elements defined in local datatypes_conf.xml
+ # that contain display applications
+ self.display_app_containers = []
+ # Datatype elements in datatypes_conf.xml included in installed
+ # tool shed repositories that contain display applications
+ self.proprietary_display_app_containers = []
# Map a display application id to a display application
self.display_applications = odict()
+ # The following 2 attributes are used in the to_xml_file()
+ # method to persist the current state into an xml file.
+ self.display_path_attr = None
self.converters_path_attr = None
# The 'default' converters_path defined in local datatypes_conf.xml
- self.datatype_converters_path = None
+ self.converters_path = None
# The 'default' display_path defined in local datatypes_conf.xml
self.display_applications_path = None
+ self.inherit_display_application_by_class = []
self.datatype_elems = []
self.sniffer_elems = []
self.xml_filename = None
def load_datatypes( self, root_dir=None, config=None, imported_modules=None ):
if root_dir and config:
- inherit_display_application_by_class = []
# Parse datatypes_conf.xml
tree = galaxy.util.parse_xml( config )
root = tree.getroot()
@@ -48,11 +57,11 @@
self.log.debug( 'Loading datatypes from %s' % config )
registration = root.find( 'registration' )
# Set default paths defined in local datatypes_conf.xml.
- if not self.datatype_converters_path:
+ if not self.converters_path:
self.converters_path_attr = registration.get( 'converters_path', 'lib/galaxy/datatypes/converters' )
- self.datatype_converters_path = os.path.join( root_dir, self.converters_path_attr )
- if not os.path.isdir( self.datatype_converters_path ):
- raise ConfigurationError( "Directory does not exist: %s" % self.datatype_converters_path )
+ self.converters_path = os.path.join( root_dir, self.converters_path_attr )
+ if not os.path.isdir( self.converters_path ):
+ raise ConfigurationError( "Directory does not exist: %s" % self.converters_path )
if not self.display_applications_path:
self.display_path_attr = registration.get( 'display_path', 'display_applications' )
self.display_applications_path = os.path.join( root_dir, self.display_path_attr )
@@ -60,7 +69,7 @@
# Keep an in-memory list of datatype elems to enable persistence.
self.datatype_elems.append( elem )
try:
- extension = elem.get( 'extension', None )
+ extension = elem.get( 'extension', None )
dtype = elem.get( 'type', None )
type_extension = elem.get( 'type_extension', None )
mimetype = elem.get( 'mimetype', None )
@@ -128,30 +137,14 @@
mimetype = composite_file.get( 'mimetype', None )
self.datatypes_by_extension[extension].add_composite_file( name, optional=optional, mimetype=mimetype )
for display_app in elem.findall( 'display' ):
- display_file = os.path.join( self.display_applications_path, display_app.get( 'file', None ) )
- try:
- inherit = galaxy.util.string_as_bool( display_app.get( 'inherit', 'False' ) )
- display_app = DisplayApplication.from_file( display_file, self )
- if display_app:
- if display_app.id in self.display_applications:
- #if we already loaded this display application, we'll use the first one again
- display_app = self.display_applications[ display_app.id ]
- self.log.debug( "Loaded display application '%s' for datatype '%s', inherit=%s" % ( display_app.id, extension, inherit ) )
- self.display_applications[ display_app.id ] = display_app #Display app by id
- self.datatypes_by_extension[ extension ].add_display_application( display_app )
- if inherit and ( self.datatypes_by_extension[extension], display_app ) not in inherit_display_application_by_class:
- #subclass inheritance will need to wait until all datatypes have been loaded
- inherit_display_application_by_class.append( ( self.datatypes_by_extension[extension], display_app ) )
- except:
- self.log.exception( "error reading display application from path: %s" % display_file )
+ if imported_modules:
+ if elem not in self.proprietary_display_app_containers:
+ self.proprietary_display_app_containers.append( elem )
+ else:
+ if elem not in self.display_app_containers:
+ self.display_app_containers.append( elem )
except Exception, e:
self.log.warning( 'Error loading datatype "%s", problem: %s' % ( extension, str( e ) ) )
- # Handle display_application subclass inheritance here:
- for ext, d_type1 in self.datatypes_by_extension.iteritems():
- for d_type2, display_app in inherit_display_application_by_class:
- current_app = d_type1.get_display_application( display_app.id, None )
- if current_app is None and isinstance( d_type1, type( d_type2 ) ):
- d_type1.add_display_application( display_app )
# Load datatype sniffers from the config
sniffers = root.find( 'sniffers' )
if sniffers:
@@ -339,7 +332,7 @@
if converter_path:
config_path = os.path.join( converter_path, tool_config )
else:
- config_path = os.path.join( self.datatype_converters_path, tool_config )
+ config_path = os.path.join( self.converters_path, tool_config )
try:
converter = toolbox.load_tool( config_path )
toolbox.tools_by_id[ converter.id ] = converter
@@ -348,7 +341,44 @@
self.datatype_converters[ source_datatype ][ target_datatype ] = converter
self.log.debug( "Loaded converter: %s", converter.id )
except Exception, e:
- self.log.exception( "Error loading converter (%s): %s" % ( converter_path, str( e ) ) )
+ self.log.exception( "Error loading converter (%s): %s" % ( config_path, str( e ) ) )
+ def load_display_applications( self, display_path=None ):
+ if display_path:
+ # Load display applications defined by datatypes_conf.xml
+ # included in installed tool shed repository.
+ datatype_elems = self.proprietary_display_app_containers
+ else:
+ # Load display applications defined by local datatypes_conf.xml.
+ datatype_elems = self.display_app_containers
+ for elem in datatype_elems:
+ extension = elem.get( 'extension', None )
+ for display_app in elem.findall( 'display' ):
+ display_file = display_app.get( 'file', None )
+ if display_path:
+ config_path = os.path.join( display_path, display_file )
+ else:
+ config_path = os.path.join( self.display_applications_path, display_file )
+ try:
+ inherit = galaxy.util.string_as_bool( display_app.get( 'inherit', 'False' ) )
+ display_app = DisplayApplication.from_file( config_path, self )
+ if display_app:
+ if display_app.id in self.display_applications:
+ # If we already loaded this display application, we'll use the first one loaded.
+ display_app = self.display_applications[ display_app.id ]
+ self.log.debug( "Loaded display application '%s' for datatype '%s', inherit=%s" % ( display_app.id, extension, inherit ) )
+ self.display_applications[ display_app.id ] = display_app
+ self.datatypes_by_extension[ extension ].add_display_application( display_app )
+ if inherit and ( self.datatypes_by_extension[ extension ], display_app ) not in self.inherit_display_application_by_class:
+ self.inherit_display_application_by_class.append( ( self.datatypes_by_extension[extension], display_app ) )
+ except Exception, e:
+ self.log.exception( "Error loading display application (%s): %s" % ( config_path, str( e ) ) )
+ # Handle display_application subclass inheritance.
+ for extension, d_type1 in self.datatypes_by_extension.iteritems():
+ for d_type2, display_app in self.inherit_display_application_by_class:
+ current_app = d_type1.get_display_application( display_app.id, None )
+ if current_app is None and isinstance( d_type1, type( d_type2 ) ):
+ self.log.debug( "Adding inherited display application '%s' to datatype '%s'" % ( display_app.id, extension ) )
+ d_type1.add_display_application( display_app )
def load_external_metadata_tool( self, toolbox ):
"""Adds a tool which is used to set external metadata"""
#we need to be able to add a job to the queue to set metadata. The queue will currently only accept jobs with an associated tool.
diff -r 05f017e906b33847b0d8ce50fcc3157ffa6ca0aa -r 2acedad2ef87c907975dd5afab5eef9ccde31987 lib/galaxy/tool_shed/__init__.py
--- a/lib/galaxy/tool_shed/__init__.py
+++ b/lib/galaxy/tool_shed/__init__.py
@@ -23,9 +23,11 @@
path_items = datatypes_config.split( 'repos' )
relative_install_dir = '%srepos/%s/%s/%s' % \
( path_items[0], tool_shed_repository.owner, tool_shed_repository.name, tool_shed_repository.installed_changeset_revision )
- converter_path = load_datatypes( self.app, datatypes_config, relative_install_dir )
+ converter_path, display_path = load_datatypes( self.app, datatypes_config, relative_install_dir )
if converter_path:
# Load proprietary datatype converters
self.app.datatypes_registry.load_datatype_converters( self.app.toolbox, converter_path=converter_path )
- # TODO: handle display_applications
-
\ No newline at end of file
+ if display_path:
+ # Load proprietary datatype display applications
+ app.datatypes_registry.load_display_applications( display_path=display_path )
+
\ No newline at end of file
diff -r 05f017e906b33847b0d8ce50fcc3157ffa6ca0aa -r 2acedad2ef87c907975dd5afab5eef9ccde31987 lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -449,7 +449,10 @@
# Parse datatypes_config.
tree = util.parse_xml( datatypes_config )
datatypes_config_root = tree.getroot()
+ # Path to datatype converters
converter_path = None
+ # Path to datatype display applications
+ display_path = None
relative_path_to_datatype_file_name = None
datatype_files = datatypes_config_root.find( 'datatype_files' )
datatype_class_modules = []
@@ -500,14 +503,13 @@
log.debug( "Exception importing datatypes code file %s: %s" % ( str( relative_path_to_datatype_file_name ), str( e ) ) )
finally:
lock.release()
- # Handle data type converters.
+ # Handle data type converters and display applications.
for elem in registration.findall( 'datatype' ):
if not converter_path:
# If any of the <datatype> tag sets contain <converter> tags, set the converter_path
- # if it is not already set. This requires repsitories to place all converters in the
+ # if it is not already set. This requires developers to place all converters in the
# same subdirectory within the repository hierarchy.
for converter in elem.findall( 'converter' ):
- converter_path = None
converter_config = converter.get( 'file', None )
if converter_config:
for root, dirs, files in os.walk( relative_install_dir ):
@@ -518,9 +520,23 @@
break
if converter_path:
break
- else:
+ if not display_path:
+ # If any of the <datatype> tag sets contain <display> tags, set the display_path
+ # if it is not already set. This requires developers to place all display acpplications
+ # in the same subdirectory within the repository hierarchy.
+ for display_app in elem.findall( 'display' ):
+ display_config = display_app.get( 'file', None )
+ if display_config:
+ for root, dirs, files in os.walk( relative_install_dir ):
+ if root.find( '.hg' ) < 0:
+ for name in files:
+ if name == display_config:
+ display_path = root
+ break
+ if display_path:
+ break
+ if converter_path and display_path:
break
- # TODO: handle display_applications
else:
# The repository includes a dataypes_conf.xml file, but no code file that
# contains data type classes. This implies that the data types in datayptes_conf.xml
@@ -528,7 +544,7 @@
imported_modules = []
# Load proprietary datatypes
app.datatypes_registry.load_datatypes( root_dir=app.config.root, config=datatypes_config, imported_modules=imported_modules )
- return converter_path
+ return converter_path, display_path
def load_repository_contents( app, name, description, owner, changeset_revision, tool_path, repository_clone_url, relative_install_dir,
current_working_dir, tmp_name, tool_section=None, shed_tool_conf=None, new_install=True ):
# This method is used by the InstallManager, which does not have access to trans.
@@ -544,11 +560,13 @@
if 'datatypes_config' in metadata_dict:
datatypes_config = os.path.abspath( metadata_dict[ 'datatypes_config' ] )
# Load data types required by tools.
- converter_path = load_datatypes( app, datatypes_config, relative_install_dir )
+ converter_path, display_path = load_datatypes( app, datatypes_config, relative_install_dir )
if converter_path:
# Load proprietary datatype converters
app.datatypes_registry.load_datatype_converters( app.toolbox, converter_path=converter_path )
- # TODO: handle display_applications
+ if display_path:
+ # Load proprietary datatype display applications
+ app.datatypes_registry.load_display_applications( display_path=display_path )
if 'tools' in metadata_dict:
repository_tools_tups = []
for tool_dict in metadata_dict[ 'tools' ]:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
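Editor's note: via the merge, this message pulls in the shed_util change that locates a repository's converter and display-application config files by walking the installed repository and skipping Mercurial metadata. A hedged sketch of that lookup, assuming a plain directory tree; the function name and arguments are illustrative, not the shed_util API.

import os

def find_config_dir(install_dir, config_name):
    """Walk an installed repository and return the directory that contains
    config_name, ignoring anything under .hg (as the shed_util code does)."""
    for root, _dirs, files in os.walk(install_dir):
        if '.hg' in root:
            continue
        if config_name in files:
            return root
    return None

# Used once per repository install to derive converter_path / display_path,
# which load_datatype_converters() and load_display_applications() then receive.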
commit/galaxy-central: greg: Add support for handling datatype display applications in datatypes_conf.xml files included in installed tool shed repositories.
by Bitbucket 09 Jan '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/4b7209d64451/
changeset: 4b7209d64451
user: greg
date: 2012-01-09 18:12:19
summary: Add support for handling datatype display applications in datatypes_conf.xml files included in installed tool shed repositories.
affected #: 4 files
diff -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e -r 4b7209d64451695eb86ea32d3dff0c999f81d977 lib/galaxy/app.py
--- a/lib/galaxy/app.py
+++ b/lib/galaxy/app.py
@@ -70,6 +70,8 @@
self.update_manager = update_manager.UpdateManager( self )
# Manage installed tool shed repositories
self.installed_repository_manager = galaxy.tool_shed.InstalledRepositoryManager( self )
+ # Load datatype display applications defined in local datatypes_conf.xml
+ self.datatypes_registry.load_display_applications()
# Load datatype converters defined in local datatypes_conf.xml
self.datatypes_registry.load_datatype_converters( self.toolbox )
# Load history import/export tools
diff -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e -r 4b7209d64451695eb86ea32d3dff0c999f81d977 lib/galaxy/datatypes/registry.py
--- a/lib/galaxy/datatypes/registry.py
+++ b/lib/galaxy/datatypes/registry.py
@@ -28,19 +28,28 @@
self.set_external_metadata_tool = None
self.sniff_order = []
self.upload_file_formats = []
+ # Datatype elements defined in local datatypes_conf.xml
+ # that contain display applications
+ self.display_app_containers = []
+ # Datatype elements in datatypes_conf.xml included in installed
+ # tool shed repositories that contain display applications
+ self.proprietary_display_app_containers = []
# Map a display application id to a display application
self.display_applications = odict()
+ # The following 2 attributes are used in the to_xml_file()
+ # method to persist the current state into an xml file.
+ self.display_path_attr = None
self.converters_path_attr = None
# The 'default' converters_path defined in local datatypes_conf.xml
- self.datatype_converters_path = None
+ self.converters_path = None
# The 'default' display_path defined in local datatypes_conf.xml
self.display_applications_path = None
+ self.inherit_display_application_by_class = []
self.datatype_elems = []
self.sniffer_elems = []
self.xml_filename = None
def load_datatypes( self, root_dir=None, config=None, imported_modules=None ):
if root_dir and config:
- inherit_display_application_by_class = []
# Parse datatypes_conf.xml
tree = galaxy.util.parse_xml( config )
root = tree.getroot()
@@ -48,11 +57,11 @@
self.log.debug( 'Loading datatypes from %s' % config )
registration = root.find( 'registration' )
# Set default paths defined in local datatypes_conf.xml.
- if not self.datatype_converters_path:
+ if not self.converters_path:
self.converters_path_attr = registration.get( 'converters_path', 'lib/galaxy/datatypes/converters' )
- self.datatype_converters_path = os.path.join( root_dir, self.converters_path_attr )
- if not os.path.isdir( self.datatype_converters_path ):
- raise ConfigurationError( "Directory does not exist: %s" % self.datatype_converters_path )
+ self.converters_path = os.path.join( root_dir, self.converters_path_attr )
+ if not os.path.isdir( self.converters_path ):
+ raise ConfigurationError( "Directory does not exist: %s" % self.converters_path )
if not self.display_applications_path:
self.display_path_attr = registration.get( 'display_path', 'display_applications' )
self.display_applications_path = os.path.join( root_dir, self.display_path_attr )
@@ -60,7 +69,7 @@
# Keep an in-memory list of datatype elems to enable persistence.
self.datatype_elems.append( elem )
try:
- extension = elem.get( 'extension', None )
+ extension = elem.get( 'extension', None )
dtype = elem.get( 'type', None )
type_extension = elem.get( 'type_extension', None )
mimetype = elem.get( 'mimetype', None )
@@ -128,30 +137,14 @@
mimetype = composite_file.get( 'mimetype', None )
self.datatypes_by_extension[extension].add_composite_file( name, optional=optional, mimetype=mimetype )
for display_app in elem.findall( 'display' ):
- display_file = os.path.join( self.display_applications_path, display_app.get( 'file', None ) )
- try:
- inherit = galaxy.util.string_as_bool( display_app.get( 'inherit', 'False' ) )
- display_app = DisplayApplication.from_file( display_file, self )
- if display_app:
- if display_app.id in self.display_applications:
- #if we already loaded this display application, we'll use the first one again
- display_app = self.display_applications[ display_app.id ]
- self.log.debug( "Loaded display application '%s' for datatype '%s', inherit=%s" % ( display_app.id, extension, inherit ) )
- self.display_applications[ display_app.id ] = display_app #Display app by id
- self.datatypes_by_extension[ extension ].add_display_application( display_app )
- if inherit and ( self.datatypes_by_extension[extension], display_app ) not in inherit_display_application_by_class:
- #subclass inheritance will need to wait until all datatypes have been loaded
- inherit_display_application_by_class.append( ( self.datatypes_by_extension[extension], display_app ) )
- except:
- self.log.exception( "error reading display application from path: %s" % display_file )
+ if imported_modules:
+ if elem not in self.proprietary_display_app_containers:
+ self.proprietary_display_app_containers.append( elem )
+ else:
+ if elem not in self.display_app_containers:
+ self.display_app_containers.append( elem )
except Exception, e:
self.log.warning( 'Error loading datatype "%s", problem: %s' % ( extension, str( e ) ) )
- # Handle display_application subclass inheritance here:
- for ext, d_type1 in self.datatypes_by_extension.iteritems():
- for d_type2, display_app in inherit_display_application_by_class:
- current_app = d_type1.get_display_application( display_app.id, None )
- if current_app is None and isinstance( d_type1, type( d_type2 ) ):
- d_type1.add_display_application( display_app )
# Load datatype sniffers from the config
sniffers = root.find( 'sniffers' )
if sniffers:
@@ -339,7 +332,7 @@
if converter_path:
config_path = os.path.join( converter_path, tool_config )
else:
- config_path = os.path.join( self.datatype_converters_path, tool_config )
+ config_path = os.path.join( self.converters_path, tool_config )
try:
converter = toolbox.load_tool( config_path )
toolbox.tools_by_id[ converter.id ] = converter
@@ -348,7 +341,44 @@
self.datatype_converters[ source_datatype ][ target_datatype ] = converter
self.log.debug( "Loaded converter: %s", converter.id )
except Exception, e:
- self.log.exception( "Error loading converter (%s): %s" % ( converter_path, str( e ) ) )
+ self.log.exception( "Error loading converter (%s): %s" % ( config_path, str( e ) ) )
+ def load_display_applications( self, display_path=None ):
+ if display_path:
+ # Load display applications defined by datatypes_conf.xml
+ # included in installed tool shed repository.
+ datatype_elems = self.proprietary_display_app_containers
+ else:
+ # Load display applications defined by local datatypes_conf.xml.
+ datatype_elems = self.display_app_containers
+ for elem in datatype_elems:
+ extension = elem.get( 'extension', None )
+ for display_app in elem.findall( 'display' ):
+ display_file = display_app.get( 'file', None )
+ if display_path:
+ config_path = os.path.join( display_path, display_file )
+ else:
+ config_path = os.path.join( self.display_applications_path, display_file )
+ try:
+ inherit = galaxy.util.string_as_bool( display_app.get( 'inherit', 'False' ) )
+ display_app = DisplayApplication.from_file( config_path, self )
+ if display_app:
+ if display_app.id in self.display_applications:
+ # If we already loaded this display application, we'll use the first one loaded.
+ display_app = self.display_applications[ display_app.id ]
+ self.log.debug( "Loaded display application '%s' for datatype '%s', inherit=%s" % ( display_app.id, extension, inherit ) )
+ self.display_applications[ display_app.id ] = display_app
+ self.datatypes_by_extension[ extension ].add_display_application( display_app )
+ if inherit and ( self.datatypes_by_extension[ extension ], display_app ) not in self.inherit_display_application_by_class:
+ self.inherit_display_application_by_class.append( ( self.datatypes_by_extension[extension], display_app ) )
+ except Exception, e:
+ self.log.exception( "Error loading display application (%s): %s" % ( config_path, str( e ) ) )
+ # Handle display_application subclass inheritance.
+ for extension, d_type1 in self.datatypes_by_extension.iteritems():
+ for d_type2, display_app in self.inherit_display_application_by_class:
+ current_app = d_type1.get_display_application( display_app.id, None )
+ if current_app is None and isinstance( d_type1, type( d_type2 ) ):
+ self.log.debug( "Adding inherited display application '%s' to datatype '%s'" % ( display_app.id, extension ) )
+ d_type1.add_display_application( display_app )
def load_external_metadata_tool( self, toolbox ):
"""Adds a tool which is used to set external metadata"""
#we need to be able to add a job to the queue to set metadata. The queue will currently only accept jobs with an associated tool.
diff -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e -r 4b7209d64451695eb86ea32d3dff0c999f81d977 lib/galaxy/tool_shed/__init__.py
--- a/lib/galaxy/tool_shed/__init__.py
+++ b/lib/galaxy/tool_shed/__init__.py
@@ -23,9 +23,11 @@
path_items = datatypes_config.split( 'repos' )
relative_install_dir = '%srepos/%s/%s/%s' % \
( path_items[0], tool_shed_repository.owner, tool_shed_repository.name, tool_shed_repository.installed_changeset_revision )
- converter_path = load_datatypes( self.app, datatypes_config, relative_install_dir )
+ converter_path, display_path = load_datatypes( self.app, datatypes_config, relative_install_dir )
if converter_path:
# Load proprietary datatype converters
self.app.datatypes_registry.load_datatype_converters( self.app.toolbox, converter_path=converter_path )
- # TODO: handle display_applications
-
\ No newline at end of file
+ if display_path:
+ # Load proprietary datatype display applications
+ app.datatypes_registry.load_display_applications( display_path=display_path )
+
\ No newline at end of file
diff -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e -r 4b7209d64451695eb86ea32d3dff0c999f81d977 lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -449,7 +449,10 @@
# Parse datatypes_config.
tree = util.parse_xml( datatypes_config )
datatypes_config_root = tree.getroot()
+ # Path to datatype converters
converter_path = None
+ # Path to datatype display applications
+ display_path = None
relative_path_to_datatype_file_name = None
datatype_files = datatypes_config_root.find( 'datatype_files' )
datatype_class_modules = []
@@ -500,14 +503,13 @@
log.debug( "Exception importing datatypes code file %s: %s" % ( str( relative_path_to_datatype_file_name ), str( e ) ) )
finally:
lock.release()
- # Handle data type converters.
+ # Handle data type converters and display applications.
for elem in registration.findall( 'datatype' ):
if not converter_path:
# If any of the <datatype> tag sets contain <converter> tags, set the converter_path
- # if it is not already set. This requires repsitories to place all converters in the
+ # if it is not already set. This requires developers to place all converters in the
# same subdirectory within the repository hierarchy.
for converter in elem.findall( 'converter' ):
- converter_path = None
converter_config = converter.get( 'file', None )
if converter_config:
for root, dirs, files in os.walk( relative_install_dir ):
@@ -518,9 +520,23 @@
break
if converter_path:
break
- else:
+ if not display_path:
+ # If any of the <datatype> tag sets contain <display> tags, set the display_path
+ # if it is not already set. This requires developers to place all display acpplications
+ # in the same subdirectory within the repository hierarchy.
+ for display_app in elem.findall( 'display' ):
+ display_config = display_app.get( 'file', None )
+ if display_config:
+ for root, dirs, files in os.walk( relative_install_dir ):
+ if root.find( '.hg' ) < 0:
+ for name in files:
+ if name == display_config:
+ display_path = root
+ break
+ if display_path:
+ break
+ if converter_path and display_path:
break
- # TODO: handle display_applications
else:
# The repository includes a dataypes_conf.xml file, but no code file that
# contains data type classes. This implies that the data types in datayptes_conf.xml
@@ -528,7 +544,7 @@
imported_modules = []
# Load proprietary datatypes
app.datatypes_registry.load_datatypes( root_dir=app.config.root, config=datatypes_config, imported_modules=imported_modules )
- return converter_path
+ return converter_path, display_path
def load_repository_contents( app, name, description, owner, changeset_revision, tool_path, repository_clone_url, relative_install_dir,
current_working_dir, tmp_name, tool_section=None, shed_tool_conf=None, new_install=True ):
# This method is used by the InstallManager, which does not have access to trans.
@@ -544,11 +560,13 @@
if 'datatypes_config' in metadata_dict:
datatypes_config = os.path.abspath( metadata_dict[ 'datatypes_config' ] )
# Load data types required by tools.
- converter_path = load_datatypes( app, datatypes_config, relative_install_dir )
+ converter_path, display_path = load_datatypes( app, datatypes_config, relative_install_dir )
if converter_path:
# Load proprietary datatype converters
app.datatypes_registry.load_datatype_converters( app.toolbox, converter_path=converter_path )
- # TODO: handle display_applications
+ if display_path:
+ # Load proprietary datatype display applications
+ app.datatypes_registry.load_display_applications( display_path=display_path )
if 'tools' in metadata_dict:
repository_tools_tups = []
for tool_dict in metadata_dict[ 'tools' ]:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
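Editor's note: greg's commit defers display-application loading: load_datatypes() only collects the datatype elements that contain display tags, and load_display_applications() later resolves each display file against either the local display path or a tool shed repository's display_path. A minimal ElementTree sketch of that two-phase pattern; the XML and names are illustrative, not the Galaxy registry API.

import os
import xml.etree.ElementTree as ET

SAMPLE = """
<datatypes>
  <registration display_path="display_applications">
    <datatype extension="bam"><display file="ucsc/bam.xml"/></datatype>
    <datatype extension="interval"/>
  </registration>
</datatypes>
"""

# Phase 1: while registering datatypes, only remember which elements carry displays.
root = ET.fromstring(SAMPLE)
registration = root.find('registration')
display_app_containers = [elem for elem in registration.findall('datatype')
                          if elem.findall('display')]

# Phase 2: resolve display configs later, optionally against a repository path.
def display_configs(containers, default_path, display_path=None):
    base = display_path or default_path
    for elem in containers:
        for display in elem.findall('display'):
            yield elem.get('extension'), os.path.join(base, display.get('file'))

for extension, config_path in display_configs(display_app_containers,
                                              registration.get('display_path')):
    print(extension, config_path)   # bam display_applications/ucsc/bam.xml

Deferring the second phase is what lets an installed tool shed repository supply its own display_path later without re-parsing the local datatypes_conf.xml.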
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/aa2d3c5f577b/
changeset: aa2d3c5f577b
user: jgoecks
date: 2012-01-09 15:48:42
summary: Trackster: organization and variable renaming in slotter code.
affected #: 1 file
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r aa2d3c5f577b9b26ef6765a9f7928c1ee948ce92 static/scripts/trackster.js
--- a/static/scripts/trackster.js
+++ b/static/scripts/trackster.js
@@ -3871,7 +3871,7 @@
this.show_labels_scale = 0.001;
this.showing_details = false;
this.summary_draw_height = 30;
- this.inc_slots = {};
+ this.slotters = {};
this.start_end_dct = {};
this.left_offset = 200;
@@ -3988,10 +3988,8 @@
},
/**
* Place features in slots for drawing (i.e. pack features).
- * this.inc_slots[level] is created in this method. this.inc_slots[level]
- * is a dictionary of slotted features; key is feature uid, value is a dictionary
- * with keys 'slot' and 'text'.
- * Returns the number of slots used to pack features.
+ * this.slotters[level] is created in this method. this.slotters[level]
+ * is a Slotter object. Returns the number of slots used to pack features.
*/
incremental_slots: function(level, features, mode) {
@@ -3999,14 +3997,13 @@
// need to create new slots.
var dummy_context = this.view.canvas_manager.dummy_context,
- inc_slots = this.inc_slots[level];
- if (!inc_slots || (inc_slots.mode !== mode)) {
- inc_slots = new (slotting.FeatureSlotter)( level, mode === "Pack", MAX_FEATURE_DEPTH, function ( x ) { return dummy_context.measureText( x ) } );
- inc_slots.mode = mode;
- this.inc_slots[level] = inc_slots;
+ slotter = this.slotters[level];
+ if (!slotter || (slotter.mode !== mode)) {
+ slotter = new (slotting.FeatureSlotter)( level, mode === "Pack", MAX_FEATURE_DEPTH, function ( x ) { return dummy_context.measureText( x ) } );
+ this.slotters[level] = slotter;
}
- return inc_slots.slot_features( features );
+ return slotter.slot_features( features );
},
/**
* Given feature data, returns summary tree data. Feature data must be sorted by start
@@ -4225,7 +4222,7 @@
if (result.data) {
// Draw features.
- slots = this.inc_slots[w_scale].slots;
+ slots = this.slotters[w_scale].slots;
feature_mapper = painter.draw(ctx, canvas.width, canvas.height, w_scale, slots);
feature_mapper.translation = -left_offset;
}
@@ -4341,7 +4338,7 @@
* This implementation is incremental, any feature assigned a slot will be
* retained for slotting future features.
*/
-exports.FeatureSlotter = function ( w_scale, include_label, max_rows, measureText ) {
+exports.FeatureSlotter = function (w_scale, include_label, max_rows, measureText) {
this.slots = {};
this.start_end_dct = {};
this.w_scale = w_scale;
@@ -4356,8 +4353,12 @@
*/
extend( exports.FeatureSlotter.prototype, {
slot_features: function( features ) {
- var w_scale = this.w_scale, inc_slots = this.slots, start_end_dct = this.start_end_dct,
- undone = [], slotted = [], highest_slot = 0, max_rows = this.max_rows;
+ var w_scale = this.w_scale,
+ start_end_dct = this.start_end_dct,
+ undone = [],
+ slotted = [],
+ highest_slot = 0,
+ max_rows = this.max_rows;
// If feature already exists in slots (from previously seen tiles), use the same slot,
// otherwise if not seen, add to "undone" list for slot calculation.
@@ -4368,9 +4369,9 @@
for (var i = 0, len = features.length; i < len; i++) {
var feature = features[i],
feature_uid = feature[0];
- if (inc_slots[feature_uid] !== undefined) {
- highest_slot = Math.max(highest_slot, inc_slots[feature_uid]);
- slotted.push(inc_slots[feature_uid]);
+ if (this.slots[feature_uid] !== undefined) {
+ highest_slot = Math.max(highest_slot, this.slots[feature_uid]);
+ slotted.push(this.slots[feature_uid]);
} else {
undone.push(i);
}
@@ -4462,7 +4463,7 @@
start_end_dct[slot_num] = [];
}
start_end_dct[slot_num].push([f_start, f_end]);
- inc_slots[feature_uid] = slot_num;
+ this.slots[feature_uid] = slot_num;
highest_slot = Math.max(highest_slot, slot_num);
}
else {
https://bitbucket.org/galaxy/galaxy-central/changeset/38c1df19946e/
changeset: 38c1df19946e
user: jgoecks
date: 2012-01-09 16:10:17
summary: Trackster: add tipsy for icons used to get more data.
affected #: 1 file
diff -r aa2d3c5f577b9b26ef6765a9f7928c1ee948ce92 -r 38c1df19946ece9670d6cfd8df6baef73655dded static/scripts/trackster.js
--- a/static/scripts/trackster.js
+++ b/static/scripts/trackster.js
@@ -2518,11 +2518,13 @@
if (this.message) {
var
canvas = this.html_elt.children()[0],
- message_div = $("<div/>").addClass("tile-message").text(this.message).
+ message_div = $("<div/>").addClass("tile-message").text(this.message)
// -1 to account for border.
- css({'height': ERROR_PADDING-1, 'width': canvas.width}).prependTo(this.html_elt),
- more_down_icon = $("<a href='javascript:void(0);'/>").addClass("icon more-down").appendTo(message_div),
- more_across_icon = $("<a href='javascript:void(0);'/>").addClass("icon more-across").appendTo(message_div);
+ .css({'height': ERROR_PADDING-1, 'width': canvas.width}).prependTo(this.html_elt),
+ more_down_icon = $("<a href='javascript:void(0);'/>").addClass("icon more-down")
+ .attr("title", "Get more data including depth").tipsy( {gravity: 's'} ).appendTo(message_div),
+ more_across_icon = $("<a href='javascript:void(0);'/>").addClass("icon more-across")
+ .attr("title", "Get more data excluding depth").tipsy( {gravity: 's'} ).appendTo(message_div);
// Set up actions for icons.
var tile = this;
@@ -2530,6 +2532,7 @@
// Mark tile as stale, request more data, and redraw track.
tile.stale = true;
track.data_manager.get_more_data(tile.low, tile.high, track.mode, tile.resolution, {}, track.data_manager.DEEP_DATA_REQ);
+ $(".tipsy").hide();
track.request_draw();
}).dblclick(function(e) {
// Do not propogate as this would normally zoom in.
@@ -2540,6 +2543,7 @@
// Mark tile as stale, request more data, and redraw track.
tile.stale = true;
track.data_manager.get_more_data(tile.low, tile.high, track.mode, tile.resolution, {}, track.data_manager.BROAD_DATA_REQ);
+ $(".tipsy").hide();
track.request_draw();
}).dblclick(function(e) {
// Do not propogate as this would normally zoom in.
https://bitbucket.org/galaxy/galaxy-central/changeset/4a9f7ff4e2c0/
changeset: 4a9f7ff4e2c0
user: jgoecks
date: 2012-01-09 16:13:42
summary: Merge.
affected #: 14 files
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e distributed_object_store_conf.xml.sample
--- a/distributed_object_store_conf.xml.sample
+++ b/distributed_object_store_conf.xml.sample
@@ -1,11 +1,11 @@
<?xml version="1.0"?><backends>
- <backend name="files1" type="disk" weight="1">
+ <backend id="files1" type="disk" weight="1"><files_dir path="database/files1"/><extra_dir type="temp" path="database/tmp1"/><extra_dir type="job_work" path="database/job_working_directory1"/></backend>
- <backend name="files2" type="disk" weight="1">
+ <backend id="files2" type="disk" weight="1"><files_dir path="database/files2"/><extra_dir type="temp" path="database/tmp2"/><extra_dir type="job_work" path="database/job_working_directory2"/>
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/datatypes/metadata.py
--- a/lib/galaxy/datatypes/metadata.py
+++ b/lib/galaxy/datatypes/metadata.py
@@ -439,7 +439,7 @@
if mf is None:
mf = self.new_file( dataset = parent, **value.kwds )
# Ensure the metadata file gets updated with content
- parent.dataset.object_store.update_from_file( parent.dataset.id, file_name=value.file_name, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name=os.path.basename(mf.file_name) )
+ parent.dataset.object_store.update_from_file( parent.dataset, file_name=value.file_name, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name=os.path.basename(mf.file_name) )
os.unlink( value.file_name )
value = mf.id
return value
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/exceptions/__init__.py
--- a/lib/galaxy/exceptions/__init__.py
+++ b/lib/galaxy/exceptions/__init__.py
@@ -22,3 +22,7 @@
class ObjectNotFound( Exception ):
""" Accessed object was not found """
pass
+
+class ObjectInvalid( Exception ):
+ """ Accessed object store ID is invalid """
+ pass
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -320,19 +320,9 @@
# With job outputs in the working directory, we need the working
# directory to be set before prepare is run, or else premature deletion
# and job recovery fail.
- # Attempt to put the working directory in the same store as the output dataset(s)
- store_name = None
- da = None
- if job.output_datasets:
- da = job.output_datasets[0]
- elif job.output_library_datasets:
- da = job.output_library_datasets[0]
- if da is not None:
- store_name = self.app.object_store.store_name(da.dataset.id)
# Create the working dir if necessary
- if not self.app.object_store.exists(self.job_id, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id)):
- self.app.object_store.create(self.job_id, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id), store_name=store_name)
- self.working_directory = self.app.object_store.get_filename(self.job_id, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
+ self.app.object_store.create(job, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
+ self.working_directory = self.app.object_store.get_filename(job, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
log.debug('(%s) Working directory for job is: %s' % (self.job_id, self.working_directory))
self.output_paths = None
self.output_dataset_paths = None
@@ -482,7 +472,7 @@
dataset.extension = 'data'
# Update (non-library) job output datasets through the object store
if dataset not in job.output_library_datasets:
- self.app.object_store.update_from_file(dataset.id, create=True)
+ self.app.object_store.update_from_file(dataset.dataset, create=True)
self.sa_session.add( dataset )
self.sa_session.flush()
job.state = job.states.ERROR
@@ -606,7 +596,7 @@
dataset.set_size()
# Update (non-library) job output datasets through the object store
if dataset not in job.output_library_datasets:
- self.app.object_store.update_from_file(dataset.id, create=True)
+ self.app.object_store.update_from_file(dataset.dataset, create=True)
if context['stderr']:
dataset.blurb = "error"
elif dataset.has_data():
@@ -719,8 +709,7 @@
try:
for fname in self.extra_filenames:
os.remove( fname )
- if self.working_directory is not None and os.path.isdir( self.working_directory ):
- shutil.rmtree( self.working_directory )
+ self.app.object_store.delete(self.get_job(), base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
if self.app.config.set_metadata_externally:
self.external_output_metadata.cleanup_external_metadata( self.sa_session )
galaxy.tools.imp_exp.JobExportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session )
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -16,7 +16,6 @@
from galaxy.util.hash_util import *
from galaxy.web.form_builder import *
from galaxy.model.item_attrs import UsesAnnotations, APIItem
-from galaxy.exceptions import ObjectNotFound
from sqlalchemy.orm import object_session
from sqlalchemy.sql.expression import func
import os.path, os, errno, codecs, operator, socket, pexpect, logging, time, shutil
@@ -650,12 +649,7 @@
if not self.external_filename:
assert self.id is not None, "ID must be set before filename used (commit the object)"
assert self.object_store is not None, "Object Store has not been initialized for dataset %s" % self.id
- try:
- filename = self.object_store.get_filename( self.id )
- except ObjectNotFound, e:
- # Create file if it does not exist
- self.object_store.create( self.id )
- filename = self.object_store.get_filename( self.id )
+ filename = self.object_store.get_filename( self )
return filename
else:
filename = self.external_filename
@@ -669,7 +663,7 @@
file_name = property( get_file_name, set_file_name )
@property
def extra_files_path( self ):
- return self.object_store.get_filename( self.id, dir_only=True, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id)
+ return self.object_store.get_filename( self, dir_only=True, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id )
def get_size( self, nice_size=False ):
"""Returns the size of the data on disk"""
if self.file_size:
@@ -679,16 +673,16 @@
return self.file_size
else:
if nice_size:
- return galaxy.datatypes.data.nice_size( self.object_store.size(self.id) )
+ return galaxy.datatypes.data.nice_size( self.object_store.size(self) )
else:
- return self.object_store.size(self.id)
+ return self.object_store.size(self)
def set_size( self ):
"""Returns the size of the data on disk"""
if not self.file_size:
if self.external_filename:
self.file_size = os.path.getsize(self.external_filename)
else:
- self.file_size = self.object_store.size(self.id)
+ self.file_size = self.object_store.size(self)
def get_total_size( self ):
if self.total_size is not None:
return self.total_size
@@ -703,7 +697,7 @@
if self.file_size is None:
self.set_size()
self.total_size = self.file_size or 0
- if self.object_store.exists(self.id, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True):
+ if self.object_store.exists(self, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True):
for root, dirs, files in os.walk( self.extra_files_path ):
self.total_size += sum( [ os.path.getsize( os.path.join( root, file ) ) for file in files ] )
def has_data( self ):
@@ -721,7 +715,7 @@
# FIXME: sqlalchemy will replace this
def _delete(self):
"""Remove the file that corresponds to this data"""
- self.object_store.delete(self.id)
+ self.object_store.delete(self)
@property
def user_can_purge( self ):
return self.purged == False \
@@ -730,9 +724,9 @@
def full_delete( self ):
"""Remove the file and extra files, marks deleted and purged"""
# os.unlink( self.file_name )
- self.object_store.delete(self.id)
- if self.object_store.exists(self.id, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True):
- self.object_store.delete(self.id, entire_dir=True, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True)
+ self.object_store.delete(self)
+ if self.object_store.exists(self, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True):
+ self.object_store.delete(self, entire_dir=True, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True)
# if os.path.exists( self.extra_files_path ):
# shutil.rmtree( self.extra_files_path )
# TODO: purge metadata files
@@ -1798,8 +1792,11 @@
assert self.id is not None, "ID must be set before filename used (commit the object)"
# Ensure the directory structure and the metadata file object exist
try:
- self.history_dataset.dataset.object_store.create( self.id, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name="metadata_%d.dat" % self.id )
- path = self.history_dataset.dataset.object_store.get_filename( self.id, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name="metadata_%d.dat" % self.id )
+ da = self.history_dataset or self.library_dataset
+ if self.object_store_id is None and da is not None:
+ self.object_store_id = da.dataset.object_store_id
+ da.dataset.object_store.create( self, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name="metadata_%d.dat" % self.id )
+ path = da.dataset.object_store.get_filename( self, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name="metadata_%d.dat" % self.id )
return path
except AttributeError:
# In case we're not working with the history_dataset
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py
+++ b/lib/galaxy/model/mapping.py
@@ -130,6 +130,7 @@
Column( "deleted", Boolean, index=True, default=False ),
Column( "purged", Boolean, index=True, default=False ),
Column( "purgable", Boolean, default=True ),
+ Column( "object_store_id", TrimmedString( 255 ), index=True ),
Column( "external_filename" , TEXT ),
Column( "_extra_files_path", TEXT ),
Column( 'file_size', Numeric( 15, 0 ) ),
@@ -410,6 +411,7 @@
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=True ),
Column( "job_runner_name", String( 255 ) ),
Column( "job_runner_external_id", String( 255 ) ),
+ Column( "object_store_id", TrimmedString( 255 ), index=True ),
Column( "imported", Boolean, default=False, index=True ) )
JobParameter.table = Table( "job_parameter", metadata,
@@ -641,6 +643,7 @@
Column( "lda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True, nullable=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
+ Column( "object_store_id", TrimmedString( 255 ), index=True ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "purged", Boolean, index=True, default=False ) )
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/model/migrate/versions/0089_add_object_store_id_columns.py
--- /dev/null
+++ b/lib/galaxy/model/migrate/versions/0089_add_object_store_id_columns.py
@@ -0,0 +1,38 @@
+"""
+Migration script to add 'object_store_id' column to various tables
+"""
+
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from migrate import *
+from migrate.changeset import *
+
+import logging
+log = logging.getLogger( __name__ )
+from galaxy.model.custom_types import TrimmedString
+
+metadata = MetaData( migrate_engine )
+db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
+
+def upgrade():
+ print __doc__
+ metadata.reflect()
+ for t_name in ( 'dataset', 'job', 'metadata_file' ):
+ t = Table( t_name, metadata, autoload=True )
+ c = Column( "object_store_id", TrimmedString( 255 ), index=True )
+ try:
+ c.create( t )
+ assert c is t.c.object_store_id
+ except Exception, e:
+ print "Adding object_store_id column to %s table failed: %s" % ( t_name, str( e ) )
+ log.debug( "Adding object_store_id column to %s table failed: %s" % ( t_name, str( e ) ) )
+
+def downgrade():
+ metadata.reflect()
+ for t_name in ( 'dataset', 'job', 'metadata_file' ):
+ t = Table( t_name, metadata, autoload=True )
+ try:
+ t.c.object_store_id.drop()
+ except Exception, e:
+ print "Dropping object_store_id column from %s table failed: %s" % ( t_name, str( e ) )
+ log.debug( "Dropping object_store_id column from %s table failed: %s" % ( t_name, str( e ) ) )
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/objectstore/__init__.py
--- a/lib/galaxy/objectstore/__init__.py
+++ b/lib/galaxy/objectstore/__init__.py
@@ -18,7 +18,9 @@
from galaxy.jobs import Sleeper
from galaxy.model import directory_hash_id
from galaxy.objectstore.s3_multipart_upload import multipart_upload
-from galaxy.exceptions import ObjectNotFound
+from galaxy.exceptions import ObjectNotFound, ObjectInvalid
+
+from sqlalchemy.orm import object_session
from boto.s3.key import Key
from boto.s3.connection import S3Connection
@@ -40,14 +42,15 @@
self.running = False
self.extra_dirs = {}
- def exists(self, dataset_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def exists(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
- Returns True if the object identified by `dataset_id` exists in this
- file store, False otherwise.
+ Returns True if the object identified by `obj` exists in this file
+ store, False otherwise.
FIELD DESCRIPTIONS (these apply to all the methods in this class):
- :type dataset_id: int
- :param dataset_id: Galaxy-assigned database ID of the dataset to be checked.
+ :type obj: object
+ :param obj: A Galaxy object with an assigned database ID accessible via
+ the .id attribute.
:type base_dir: string
:param base_dir: A key in self.extra_dirs corresponding to the base
@@ -56,21 +59,21 @@
:type dir_only: bool
:param dir_only: If True, check only the path where the file
- identified by `dataset_id` should be located, not the
- dataset itself. This option applies to `extra_dir`
- argument as well.
+ identified by `obj` should be located, not the dataset
+ itself. This option applies to `extra_dir` argument as
+ well.
:type extra_dir: string
:param extra_dir: Append `extra_dir` to the directory structure where
- the dataset identified by `dataset_id` should be located.
- (e.g., 000/extra_dir/dataset_id)
+ the dataset identified by `obj` should be located.
+ (e.g., 000/extra_dir/obj.id)
:type extra_dir_at_root: bool
:param extra_dir_at_root: Applicable only if `extra_dir` is set.
If True, the `extra_dir` argument is placed at
root of the created directory structure rather
- than at the end (e.g., extra_dir/000/dataset_id
- vs. 000/extra_dir/dataset_id)
+ than at the end (e.g., extra_dir/000/obj.id
+ vs. 000/extra_dir/obj.id)
:type alt_name: string
:param alt_name: Use this name as the alternative name for the created
@@ -78,53 +81,39 @@
"""
raise NotImplementedError()
- def store_name(self, dataset_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
- """
- Returns the name of the store in which the object identified by
- `dataset_id` exists, or None if it does not exist or the store is the
- default store.
- """
- return None
-
- def file_ready(self, dataset_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def file_ready(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
""" A helper method that checks if a file corresponding to a dataset
is ready and available to be used. Return True if so, False otherwise."""
return True
- def create(self, dataset_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, store_name=None):
+ def create(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
- Mark the object identified by `dataset_id` as existing in the store, but
- with no content. This method will create a proper directory structure for
+ Mark the object identified by `obj` as existing in the store, but with
+ no content. This method will create a proper directory structure for
the file if the directory does not already exist.
See `exists` method for the description of other fields.
-
- :type store_name: string
- :param store_name: Backend store in which to create the dataset, if
- this store contains more than one backend. If the
- named backend does not exist, a backend will be
- chosen by the store.
"""
raise NotImplementedError()
-
- def empty(self, dataset_id, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+
+ def empty(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
- Test if the object identified by `dataset_id` has content.
+ Test if the object identified by `obj` has content.
If the object does not exist raises `ObjectNotFound`.
See `exists` method for the description of the fields.
"""
raise NotImplementedError()
- def size(self, dataset_id, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def size(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
- Return size of the object identified by `dataset_id`.
+ Return size of the object identified by `obj`.
If the object does not exist, return 0.
See `exists` method for the description of the fields.
"""
raise NotImplementedError()
- def delete(self, dataset_id, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def delete(self, obj, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
- Deletes the object identified by `dataset_id`.
+ Deletes the object identified by `obj`.
See `exists` method for the description of other fields.
:type entire_dir: bool
:param entire_dir: If True, delete the entire directory pointed to by
@@ -133,10 +122,10 @@
"""
raise NotImplementedError()
- def get_data(self, dataset_id, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def get_data(self, obj, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Fetch `count` bytes of data starting at offset `start` from the
- object identified uniquely by `dataset_id`.
+ object identified uniquely by `obj`.
If the object does not exist raises `ObjectNotFound`.
See `exists` method for the description of other fields.
@@ -148,15 +137,15 @@
"""
raise NotImplementedError()
- def get_filename(self, dataset_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Get the expected filename (including the absolute path) which can be used
- to access the contents of the object uniquely identified by `dataset_id`.
+ to access the contents of the object uniquely identified by `obj`.
See `exists` method for the description of the fields.
"""
raise NotImplementedError()
- def update_from_file(self, dataset_id, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, file_name=None, create=False):
+ def update_from_file(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, file_name=None, create=False):
"""
Inform the store that the file associated with the object has been
updated. If `file_name` is provided, update from that file instead
@@ -166,14 +155,14 @@
:type file_name: string
:param file_name: Use file pointed to by `file_name` as the source for
- updating the dataset identified by `dataset_id`
+ updating the dataset identified by `obj`
:type create: bool
:param create: If True and the default dataset does not exist, create it first.
"""
raise NotImplementedError()
- def get_object_url(self, dataset_id, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
If the store supports direct URL access, return a URL. Otherwise return
None.
@@ -210,22 +199,24 @@
super(DiskObjectStore, self).__init__()
self.file_path = file_path or config.file_path
self.config = config
+ self.extra_dirs['job_work'] = config.job_working_directory
+ self.extra_dirs['temp'] = config.new_file_path
if extra_dirs is not None:
- self.extra_dirs = extra_dirs
+ self.extra_dirs.update( extra_dirs )
- def _get_filename(self, dataset_id, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def _get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""Class method that returns the absolute path for the file corresponding
- to the `dataset_id` regardless of whether the file exists.
+ to the `obj`.id regardless of whether the file exists.
"""
- path = self._construct_path(dataset_id, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name, old_style=True)
+ path = self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name, old_style=True)
# For backward compatibility, check the old style root path first; otherwise,
# construct hashed path
if not os.path.exists(path):
- return self._construct_path(dataset_id, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
+ return self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
- def _construct_path(self, dataset_id, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def _construct_path(self, obj, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
""" Construct the expected absolute path for accessing the object
- identified by `dataset_id`.
+ identified by `obj`.id.
:type base_dir: string
:param base_dir: A key in self.extra_dirs corresponding to the base
@@ -234,13 +225,13 @@
:type dir_only: bool
:param dir_only: If True, check only the path where the file
- identified by `dataset_id` should be located, not the
+ identified by `obj` should be located, not the
dataset itself. This option applies to `extra_dir`
argument as well.
:type extra_dir: string
:param extra_dir: Append the value of this parameter to the expected path
- used to access the object identified by `dataset_id`
+ used to access the object identified by `obj`
(e.g., /files/000/<extra_dir>/dataset_10.dat).
:type alt_name: string
@@ -252,16 +243,16 @@
the composed directory structure does not include a hash id
(e.g., /files/dataset_10.dat (old) vs. /files/000/dataset_10.dat (new))
"""
- base = self.file_path
- if base_dir in self.extra_dirs:
- base = self.extra_dirs.get(base_dir)
+ base = self.extra_dirs.get(base_dir, self.file_path)
if old_style:
if extra_dir is not None:
path = os.path.join(base, extra_dir)
else:
path = base
else:
- rel_path = os.path.join(*directory_hash_id(dataset_id))
+ # Construct hashed path
+ rel_path = os.path.join(*directory_hash_id(obj.id))
+ # Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
@@ -269,103 +260,86 @@
rel_path = os.path.join(rel_path, extra_dir)
path = os.path.join(base, rel_path)
if not dir_only:
- path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % dataset_id)
+ path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return os.path.abspath(path)
- def exists(self, dataset_id, **kwargs):
- path = self._construct_path(dataset_id, old_style=True, **kwargs)
+ def exists(self, obj, **kwargs):
+ path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise, construct
# and check hashed path
- if not os.path.exists(path):
- path = self._construct_path(dataset_id, **kwargs)
- return os.path.exists(path)
+ if os.path.exists(path):
+ return True
+ else:
+ path = self._construct_path(obj, **kwargs)
+ return os.path.exists(path)
- def create(self, dataset_id, **kwargs):
- kwargs.pop('store_name', None)
- if not self.exists(dataset_id, **kwargs):
- # Pull out locally used fields
- extra_dir = kwargs.get('extra_dir', None)
- extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
+ def create(self, obj, **kwargs):
+ if not self.exists(obj, **kwargs):
+ path = self._construct_path(obj, **kwargs)
dir_only = kwargs.get('dir_only', False)
- alt_name = kwargs.get('alt_name', None)
- base_dir_key = kwargs.get('base_dir', None)
- # Construct hashed path
- path = os.path.join(*directory_hash_id(dataset_id))
- # Optionally append extra_dir
- if extra_dir is not None:
- if extra_dir_at_root:
- path = os.path.join(extra_dir, path)
- else:
- path = os.path.join(path, extra_dir)
- # Combine the constructted path with the root dir for all files
- base_dir = self.extra_dirs.get(base_dir_key, self.file_path)
- path = os.path.join(base_dir, path)
# Create directory if it does not exist
- if not os.path.exists(path):
- os.makedirs(path)
+ dir = path if dir_only else os.path.dirname(path)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+ # Create the file if it does not exist
if not dir_only:
- path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % dataset_id)
open(path, 'w').close()
- util.umask_fix_perms( path, self.config.umask, 0666 )
+ util.umask_fix_perms(path, self.config.umask, 0666)
- def empty(self, dataset_id, **kwargs):
- return os.path.getsize(self.get_filename(dataset_id, **kwargs)) > 0
+ def empty(self, obj, **kwargs):
+ return os.path.getsize(self.get_filename(obj, **kwargs)) > 0
- def size(self, dataset_id, **kwargs):
- if self.exists(dataset_id, **kwargs):
+ def size(self, obj, **kwargs):
+ if self.exists(obj, **kwargs):
try:
- return os.path.getsize(self.get_filename(dataset_id, **kwargs))
+ return os.path.getsize(self.get_filename(obj, **kwargs))
except OSError:
return 0
else:
return 0
- def delete(self, dataset_id, entire_dir=False, **kwargs):
- path = self.get_filename(dataset_id, **kwargs)
+ def delete(self, obj, entire_dir=False, **kwargs):
+ path = self.get_filename(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
try:
if entire_dir and extra_dir:
shutil.rmtree(path)
return True
- if self.exists(dataset_id, **kwargs):
+ if self.exists(obj, **kwargs):
os.remove(path)
return True
except OSError, ex:
- log.critical('%s delete error %s' % (self._get_filename(dataset_id, **kwargs), ex))
+ log.critical('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
return False
- def get_data(self, dataset_id, start=0, count=-1, **kwargs):
- data_file = open(self.get_filename(dataset_id, **kwargs), 'r')
+ def get_data(self, obj, start=0, count=-1, **kwargs):
+ data_file = open(self.get_filename(obj, **kwargs), 'r')
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
- def get_filename(self, dataset_id, **kwargs):
- path = self._construct_path(dataset_id, old_style=True, **kwargs)
+ def get_filename(self, obj, **kwargs):
+ path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise, construct
- # and check hashed path
+ # and return hashed path
if os.path.exists(path):
return path
else:
- path = self._construct_path(dataset_id, **kwargs)
- if os.path.exists(path):
- return path
- else:
- raise ObjectNotFound()
+ return self._construct_path(obj, **kwargs)
- def update_from_file(self, dataset_id, file_name=None, create=False, **kwargs):
+ def update_from_file(self, obj, file_name=None, create=False, **kwargs):
""" `create` parameter is not used in this implementation """
if create:
- self.create(dataset_id, **kwargs)
- if file_name and self.exists(dataset_id, **kwargs):
+ self.create(obj, **kwargs)
+ if file_name and self.exists(obj, **kwargs):
try:
- shutil.copy(file_name, self.get_filename(dataset_id, **kwargs))
+ shutil.copy(file_name, self.get_filename(obj, **kwargs))
except IOError, ex:
log.critical('Error copying %s to %s: %s' % (file_name,
- self._get_filename(dataset_id, **kwargs), ex))
+ self._get_filename(obj, **kwargs), ex))
- def get_object_url(self, dataset_id, **kwargs):
+ def get_object_url(self, obj, **kwargs):
return None
@@ -494,8 +468,8 @@
continue
util.umask_fix_perms( path, self.config.umask, 0666, self.config.gid )
- def _construct_path(self, dataset_id, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
- rel_path = os.path.join(*directory_hash_id(dataset_id))
+ def _construct_path(self, obj, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ rel_path = os.path.join(*directory_hash_id(obj.id))
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
@@ -504,7 +478,7 @@
# S3 folders are marked by having trailing '/' so add it now
rel_path = '%s/' % rel_path
if not dir_only:
- rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % dataset_id)
+ rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return rel_path
def _get_cache_path(self, rel_path):
@@ -665,18 +639,18 @@
log.error("Trouble pushing S3 key '%s' from file '%s': %s" % (rel_path, source_file, ex))
return False
- def file_ready(self, dataset_id, **kwargs):
+ def file_ready(self, obj, **kwargs):
""" A helper method that checks if a file corresponding to a dataset
is ready and available to be used. Return True if so, False otherwise."""
- rel_path = self._construct_path(dataset_id, **kwargs)
+ rel_path = self._construct_path(obj, **kwargs)
# Make sure the size in cache is available in its entirety
if self._in_cache(rel_path) and os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_s3(rel_path):
return True
return False
- def exists(self, dataset_id, **kwargs):
+ def exists(self, obj, **kwargs):
in_cache = in_s3 = False
- rel_path = self._construct_path(dataset_id, **kwargs)
+ rel_path = self._construct_path(obj, **kwargs)
# Check cache
if self._in_cache(rel_path):
in_cache = True
@@ -699,9 +673,8 @@
else:
return False
- def create(self, dataset_id, **kwargs):
- kwargs.pop('store_name', None)
- if not self.exists(dataset_id, **kwargs):
+ def create(self, obj, **kwargs):
+ if not self.exists(obj, **kwargs):
#print "S3 OS creating a dataset with ID %s" % dataset_id
# Pull out locally used fields
extra_dir = kwargs.get('extra_dir', None)
@@ -710,7 +683,7 @@
alt_name = kwargs.get('alt_name', None)
# print "---- Processing: %s; %s" % (alt_name, locals())
# Construct hashed path
- rel_path = os.path.join(*directory_hash_id(dataset_id))
+ rel_path = os.path.join(*directory_hash_id(obj))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
@@ -728,30 +701,30 @@
# self._push_to_s3(s3_dir, from_string='')
# If instructed, create the dataset in cache & in S3
if not dir_only:
- rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % dataset_id)
+ rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
open(os.path.join(self.staging_path, rel_path), 'w').close()
self._push_to_s3(rel_path, from_string='')
- def empty(self, dataset_id, **kwargs):
- if self.exists(dataset_id, **kwargs):
- return bool(self.size(dataset_id, **kwargs) > 0)
+ def empty(self, obj, **kwargs):
+ if self.exists(obj, **kwargs):
+ return bool(self.size(obj, **kwargs) > 0)
else:
raise ObjectNotFound()
- def size(self, dataset_id, **kwargs):
- rel_path = self._construct_path(dataset_id, **kwargs)
+ def size(self, obj, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
if self._in_cache(rel_path):
try:
return os.path.getsize(self._get_cache_path(rel_path))
except OSError, ex:
log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s" % (rel_path, ex))
- elif self.exists(dataset_id, **kwargs):
+ elif self.exists(obj, **kwargs):
return self._get_size_in_s3(rel_path)
log.warning("Did not find dataset '%s', returning 0 for size" % rel_path)
return 0
- def delete(self, dataset_id, entire_dir=False, **kwargs):
- rel_path = self._construct_path(dataset_id, **kwargs)
+ def delete(self, obj, entire_dir=False, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
try:
# For the case of extra_files, because we don't have a reference to
@@ -777,11 +750,11 @@
except S3ResponseError, ex:
log.error("Could not delete key '%s' from S3: %s" % (rel_path, ex))
except OSError, ex:
- log.error('%s delete error %s' % (self._get_filename(dataset_id, **kwargs), ex))
+ log.error('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
return False
- def get_data(self, dataset_id, start=0, count=-1, **kwargs):
- rel_path = self._construct_path(dataset_id, **kwargs)
+ def get_data(self, obj, start=0, count=-1, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
# Check cache first and get file if not there
if not self._in_cache(rel_path):
self._pull_into_cache(rel_path)
@@ -794,10 +767,10 @@
data_file.close()
return content
- def get_filename(self, dataset_id, **kwargs):
+ def get_filename(self, obj, **kwargs):
#print "S3 get_filename for dataset: %s" % dataset_id
dir_only = kwargs.get('dir_only', False)
- rel_path = self._construct_path(dataset_id, **kwargs)
+ rel_path = self._construct_path(obj, **kwargs)
cache_path = self._get_cache_path(rel_path)
# S3 does not recognize directories as files so cannot check if those exist.
# So, if checking dir only, ensure given dir exists in cache and return
@@ -811,7 +784,7 @@
if self._in_cache(rel_path):
return cache_path
# Check if the file exists in persistent storage and, if it does, pull it into cache
- elif self.exists(dataset_id, **kwargs):
+ elif self.exists(obj, **kwargs):
if dir_only: # Directories do not get pulled into cache
return cache_path
else:
@@ -824,11 +797,11 @@
raise ObjectNotFound()
# return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
- def update_from_file(self, dataset_id, file_name=None, create=False, **kwargs):
+ def update_from_file(self, obj, file_name=None, create=False, **kwargs):
if create:
- self.create(dataset_id, **kwargs)
- if self.exists(dataset_id, **kwargs):
- rel_path = self._construct_path(dataset_id, **kwargs)
+ self.create(obj, **kwargs)
+ if self.exists(obj, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
# Chose whether to use the dataset file itself or an alternate file
if file_name:
source_file = os.path.abspath(file_name)
@@ -848,9 +821,9 @@
else:
raise ObjectNotFound()
- def get_object_url(self, dataset_id, **kwargs):
- if self.exists(dataset_id, **kwargs):
- rel_path = self._construct_path(dataset_id, **kwargs)
+ def get_object_url(self, obj, **kwargs):
+ if self.exists(obj, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
try:
key = Key(self.bucket, rel_path)
return key.generate_url(expires_in = 86400) # 24hrs
@@ -873,7 +846,7 @@
"requires a config file, please set one in " \
"'distributed_object_store_config_file')"
self.backends = {}
- self.weighted_backend_names = []
+ self.weighted_backend_ids = []
random.seed()
@@ -884,7 +857,7 @@
root = tree.getroot()
log.debug('Loading backends for distributed object store from %s' % self.distributed_config)
for elem in [ e for e in root if e.tag == 'backend' ]:
- name = elem.get('name')
+ id = elem.get('id')
weight = int(elem.get('weight', 1))
if elem.get('type', 'disk'):
path = None
@@ -895,94 +868,90 @@
elif sub.tag == 'extra_dir':
type = sub.get('type')
extra_dirs[type] = sub.get('path')
- self.backends[name] = DiskObjectStore(config, file_path=path, extra_dirs=extra_dirs)
- log.debug("Loaded disk backend '%s' with weight %s and file_path: %s" % (name, weight, path))
+ self.backends[id] = DiskObjectStore(config, file_path=path, extra_dirs=extra_dirs)
+ log.debug("Loaded disk backend '%s' with weight %s and file_path: %s" % (id, weight, path))
if extra_dirs:
log.debug(" Extra directories:")
for type, dir in extra_dirs.items():
log.debug(" %s: %s" % (type, dir))
for i in range(0, weight):
- # The simplest way to do weighting: add backend names to a
+ # The simplest way to do weighting: add backend ids to a
# sequence the number of times equalling weight, then randomly
# choose a backend from that sequence at creation
- self.weighted_backend_names.append(name)
+ self.weighted_backend_ids.append(id)
- def exists(self, dataset_id, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- return store is not None
+ def exists(self, obj, **kwargs):
+ return self.__call_method('exists', obj, False, False, **kwargs)
- def store_name(self, dataset_id, **kwargs):
- for name, store in self.backends.items():
- if store.exists(dataset_id, **kwargs):
- return name
- return None
+ #def store_id(self, obj, **kwargs):
+ # return self.__get_store_id_for(obj, **kwargs)[0]
- def file_ready(self, dataset_id, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.file_ready(dataset_id, **kwargs)
- return False
+ def file_ready(self, obj, **kwargs):
+ return self.__call_method('file_ready', obj, False, False, **kwargs)
- def create(self, dataset_id, **kwargs):
- store_name = kwargs.pop('store_name', None)
- if not self.exists(dataset_id, **kwargs):
- if store_name is None or store_name not in self.backends:
- store_name = random.choice(self.weighted_backend_names)
- log.debug("Selected backend '%s' for creation of dataset %s" % (store_name, dataset_id))
+ def create(self, obj, **kwargs):
+ """
+ create() is the only method in which obj.object_store_id may be None
+ """
+ if obj.object_store_id is None or not self.exists(obj, **kwargs):
+ if obj.object_store_id is None or obj.object_store_id not in self.backends:
+ obj.object_store_id = random.choice(self.weighted_backend_ids)
+ object_session( obj ).add( obj )
+ object_session( obj ).flush()
+ log.debug("Selected backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id))
else:
- log.debug("Using preferred backend '%s' for creation of dataset %s" % (store_name, dataset_id))
- return self.backends[store_name].create(dataset_id, **kwargs)
+ log.debug("Using preferred backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id))
+ self.backends[obj.object_store_id].create(obj, **kwargs)
- def empty(self, dataset_id, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.empty(dataset_id, **kwargs)
- return True
+ def empty(self, obj, **kwargs):
+ return self.__call_method('empty', obj, True, False, **kwargs)
- def size(self, dataset_id, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.size(dataset_id, **kwargs)
- return 0
+ def size(self, obj, **kwargs):
+ return self.__call_method('size', obj, 0, False, **kwargs)
- def delete(self, dataset_id, entire_dir=False, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.delete(dataset_id, entire_dir=entire_dir, **kwargs)
- return False
+ def delete(self, obj, entire_dir=False, **kwargs):
+ return self.__call_method('delete', obj, False, False, **kwargs)
- def get_data(self, dataset_id, start=0, count=-1, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.get_data(dataset_id, start=0, count=-1, **kwargs)
- raise ObjectNotFound()
+ def get_data(self, obj, start=0, count=-1, **kwargs):
+ return self.__call_method('get_data', obj, ObjectNotFound, True, **kwargs)
- def get_filename(self, dataset_id, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.get_filename(dataset_id, **kwargs)
- raise ObjectNotFound()
+ def get_filename(self, obj, **kwargs):
+ return self.__call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
- def update_from_file(self, dataset_id, file_name=None, create=False, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.update_from_file(dataset_id, file_name=file_name, create=create, **kwargs)
- if create:
- store_name = random.choice(self.weighted_backend_names)
- return self.backends[store_name].update_from_file(dataset_id, file_name=file_name, create=create, **kwargs)
- raise ObjectNotFound()
+ def update_from_file(self, obj, file_name=None, create=False, **kwargs):
+ # can raise ObjectLocationMismatch
+ # TODO: handling create=True here? probably not since create() is called from w/in, so a store will be selected there
+ #if create and not self.exists(obj, **kwargs):
+ # store_id = random.choice(self.weighted_backend_names)
+ return self.__call_method('update_from_file', obj, ObjectNotFound, True, **kwargs)
- def get_object_url(self, dataset_id, **kwargs):
- # FIXME: dir_only
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.get_object_url(dataset_id, **kwargs)
- return None
+ def get_object_url(self, obj, **kwargs):
+ return self.__call_method('get_object_url', obj, None, False, **kwargs)
- def __get_store_for(self, dataset_id, **kwargs):
- for store in self.backends.values():
- if store.exists(dataset_id, **kwargs):
- return store
+ def __call_method(self, method, obj, default, default_is_exception, **kwargs):
+ object_store_id = self.__get_store_id_for(obj, **kwargs)
+ if object_store_id is not None:
+ return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs)
+ if default_is_exception:
+ raise default()
+ else:
+ return default
+
+ def __get_store_id_for(self, obj, **kwargs):
+ if obj.object_store_id is not None and obj.object_store_id in self.backends:
+ return obj.object_store_id
+ else:
+ # if this instance has been switched from a non-distributed to a
+ # distributed object store, or if the object's store id is invalid,
+ # try to locate the object
+ log.warning('The backend object store ID (%s) for %s object with ID %s is invalid' % (obj.object_store_id, obj.__class__.__name__, obj.id))
+ for id, store in self.backends.items():
+ if store.exists(obj, **kwargs):
+ log.warning('%s object with ID %s found in backend object store with ID %s' % (obj.__class__.__name__, obj.id, id))
+ obj.object_store_id = id
+ object_session( obj ).add( obj )
+ object_session( obj ).flush()
+ return id
return None
class HierarchicalObjectStore(ObjectStore):
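The weighting scheme used by DistributedObjectStore above can be illustrated with a short standalone sketch; the backend ids and weights below are made up for the example:

import random

# Each backend id is appended once per unit of weight, so random.choice()
# over the list yields a weighted selection -- the same approach as
# weighted_backend_ids above.
backends = {'files1': 1, 'files2': 3}
weighted_backend_ids = []
for backend_id, weight in backends.items():
    weighted_backend_ids.extend([backend_id] * weight)

random.seed()
# 'files2' is chosen roughly three times as often as 'files1'.
selected = random.choice(weighted_backend_ids)
print(selected)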
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1859,7 +1859,7 @@
a_files = os.listdir( temp_file_path )
if len( a_files ) > 0:
for f in a_files:
- self.app.object_store.update_from_file(hda.dataset.id,
+ self.app.object_store.update_from_file(hda.dataset,
extra_dir="dataset_%d_files" % hda.dataset.id,
alt_name = f,
file_name = os.path.join(temp_file_path, f),
@@ -1899,7 +1899,7 @@
sa_session=self.sa_session )
self.app.security_agent.copy_dataset_permissions( outdata.dataset, child_dataset.dataset )
# Move data from temp location to dataset location
- self.app.object_store.update_from_file(child_dataset.dataset.id, filename, create=True)
+ self.app.object_store.update_from_file(child_dataset.dataset, filename, create=True)
self.sa_session.add( child_dataset )
self.sa_session.flush()
child_dataset.set_size()
@@ -1967,7 +1967,7 @@
self.sa_session.add( primary_data )
self.sa_session.flush()
# Move data from temp location to dataset location
- self.app.object_store.update_from_file(primary_data.dataset.id, filename, create=True)
+ self.app.object_store.update_from_file(primary_data.dataset, filename, create=True)
primary_data.set_size()
primary_data.name = "%s (%s)" % ( outdata.name, designation )
primary_data.info = outdata.info
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -228,8 +228,7 @@
# datasets first, then create the associations
parent_to_child_pairs = []
child_dataset_names = set()
- store_name = None
- store_name_set = False # this is needed since None is a valid value for store_name
+ object_store_id = None
for name, output in tool.outputs.items():
for filter in output.filters:
try:
@@ -292,12 +291,12 @@
trans.sa_session.add( data )
trans.sa_session.flush()
trans.app.security_agent.set_all_dataset_permissions( data.dataset, output_permissions )
- # Create an empty file immediately
- trans.app.object_store.create( data.id, store_name=store_name )
- if not store_name_set:
- # Ensure all other datasets in this job are created in the same store
- store_name = trans.app.object_store.store_name( data.id )
- store_name_set = True
+ # Create an empty file immediately. The first dataset will be
+ # created in the "default" store, all others will be created in
+ # the same store as the first.
+ data.dataset.object_store_id = object_store_id
+ trans.app.object_store.create( data.dataset )
+ object_store_id = data.dataset.object_store_id # these will be the same thing after the first output
# This may not be neccesary with the new parent/child associations
data.designation = name
# Copy metadata from one of the inputs if requested.
@@ -382,6 +381,7 @@
job.add_input_dataset( name, None )
for name, dataset in out_data.iteritems():
job.add_output_dataset( name, dataset )
+ job.object_store_id = object_store_id
trans.sa_session.add( job )
trans.sa_session.flush()
# Some tools are not really executable, but jobs are still created for them ( for record keeping ).
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/tools/actions/upload_common.py
--- a/lib/galaxy/tools/actions/upload_common.py
+++ b/lib/galaxy/tools/actions/upload_common.py
@@ -319,8 +319,7 @@
for name, value in tool.params_to_strings( params, trans.app ).iteritems():
job.add_parameter( name, value )
job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
- store_name = None
- store_name_set = False # this is needed since None is a valid value for store_name
+ object_store_id = None
for i, dataset in enumerate( data_list ):
if folder:
job.add_output_library_dataset( 'output%i' % i, dataset )
@@ -328,11 +327,12 @@
job.add_output_dataset( 'output%i' % i, dataset )
# Create an empty file immediately
if not dataset.dataset.external_filename:
- trans.app.object_store.create( dataset.dataset.id, store_name=store_name )
+ dataset.dataset.object_store_id = object_store_id
+ trans.app.object_store.create( dataset.dataset )
+ object_store_id = dataset.dataset.object_store_id
+ trans.sa_session.add( dataset )
# open( dataset.file_name, "w" ).close()
- if not store_name_set:
- store_name = trans.app.object_store.store_name( dataset.dataset.id )
- store_name_set = True
+ job.object_store_id = object_store_id
job.state = job.states.NEW
trans.sa_session.add( job )
trans.sa_session.flush()
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/web/controllers/dataset.py
--- a/lib/galaxy/web/controllers/dataset.py
+++ b/lib/galaxy/web/controllers/dataset.py
@@ -360,16 +360,16 @@
data = self._check_dataset(trans, dataset_id)
if isinstance( data, basestring ):
return data
- log.debug( "dataset.py -> transfer_status: Checking transfer status for dataset %s..." % data.id )
+ log.debug( "Checking transfer status for dataset %s..." % data.dataset.id )
# Pulling files in extra_files_path into cache is not handled via this
# method but that's primarily because those files are typically linked to
# through tool's output page anyhow so tying a JavaScript event that will
# call this method does not seem doable?
- if trans.app.object_store.file_ready(data.id):
+ if data.dataset.external_filename:
return True
else:
- return False
+ return trans.app.object_store.file_ready(data.dataset)
@web.expose
def display(self, trans, dataset_id=None, preview=False, filename=None, to_ext=None, **kwd):
@@ -382,7 +382,7 @@
if filename and filename != "index":
# For files in extra_files_path
- file_path = trans.app.object_store.get_filename(data.dataset.id, extra_dir='dataset_%s_files' % data.dataset.id, alt_name=filename)
+ file_path = trans.app.object_store.get_filename(data.dataset, extra_dir='dataset_%s_files' % data.dataset.id, alt_name=filename)
if os.path.exists( file_path ):
if os.path.isdir( file_path ):
return trans.show_error_message( "Directory listing is not allowed." ) #TODO: Reconsider allowing listing of directories?
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/web/controllers/history.py
--- a/lib/galaxy/web/controllers/history.py
+++ b/lib/galaxy/web/controllers/history.py
@@ -663,7 +663,7 @@
trans.response.set_content_type( 'application/x-gzip' )
else:
trans.response.set_content_type( 'application/x-tar' )
- return trans.app.object_store.get_data(jeha.dataset.id)
+ return trans.app.object_store.get_data(jeha.dataset)
elif jeha.job.state in [ model.Job.states.RUNNING, model.Job.states.QUEUED, model.Job.states.WAITING ]:
return trans.show_message( "Still exporting history %(n)s; please check back soon. Link: <a href='%(s)s'>%(s)s</a>" \
% ( { 'n' : history.name, 's' : url_for( action="export_archive", id=id, qualified=True ) } ) )
diff -r 38c1df19946ece9670d6cfd8df6baef73655dded -r 4a9f7ff4e2c02bf87d33590061e09ef4e70fb84e lib/galaxy/web/controllers/library_common.py
--- a/lib/galaxy/web/controllers/library_common.py
+++ b/lib/galaxy/web/controllers/library_common.py
@@ -1647,6 +1647,7 @@
for ldda_id in ldda_ids:
try:
ldda = trans.sa_session.query( trans.app.model.LibraryDatasetDatasetAssociation ).get( trans.security.decode_id( ldda_id ) )
+ assert not ldda.dataset.purged
lddas.append( ldda )
except:
ldda = None
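Taken together, the interface change means callers now pass the model object itself rather than its integer id, which lets the store read and persist object_store_id. A rough usage sketch follows; the stage_output helper and its arguments are invented for illustration and are not part of the changeset:

def stage_output(app, hda):
    # hda is assumed to be a HistoryDatasetAssociation-like object whose
    # .dataset has been committed (so .id is set).
    dataset = hda.dataset
    # With the distributed store, create() selects a backend on first use and
    # records its id in dataset.object_store_id.
    app.object_store.create(dataset)
    # Later calls dispatch on the recorded object_store_id.
    path = app.object_store.get_filename(dataset)
    size = app.object_store.size(dataset)
    return path, size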
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: natefoo: Object Store: Store the backend object store's ID in the database when using the distributed object store.
by Bitbucket 09 Jan '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/ff576e7f989c/
changeset: ff576e7f989c
user: natefoo
date: 2012-01-09 16:06:23
summary: Object Store: Store the backend object store's ID in the database when using the distributed object store.
affected #: 14 files
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf distributed_object_store_conf.xml.sample
--- a/distributed_object_store_conf.xml.sample
+++ b/distributed_object_store_conf.xml.sample
@@ -1,11 +1,11 @@
<?xml version="1.0"?><backends>
- <backend name="files1" type="disk" weight="1">
+ <backend id="files1" type="disk" weight="1"><files_dir path="database/files1"/><extra_dir type="temp" path="database/tmp1"/><extra_dir type="job_work" path="database/job_working_directory1"/></backend>
- <backend name="files2" type="disk" weight="1">
+ <backend id="files2" type="disk" weight="1"><files_dir path="database/files2"/><extra_dir type="temp" path="database/tmp2"/><extra_dir type="job_work" path="database/job_working_directory2"/>
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/datatypes/metadata.py
--- a/lib/galaxy/datatypes/metadata.py
+++ b/lib/galaxy/datatypes/metadata.py
@@ -439,7 +439,7 @@
if mf is None:
mf = self.new_file( dataset = parent, **value.kwds )
# Ensure the metadata file gets updated with content
- parent.dataset.object_store.update_from_file( parent.dataset.id, file_name=value.file_name, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name=os.path.basename(mf.file_name) )
+ parent.dataset.object_store.update_from_file( parent.dataset, file_name=value.file_name, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name=os.path.basename(mf.file_name) )
os.unlink( value.file_name )
value = mf.id
return value
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/exceptions/__init__.py
--- a/lib/galaxy/exceptions/__init__.py
+++ b/lib/galaxy/exceptions/__init__.py
@@ -22,3 +22,7 @@
class ObjectNotFound( Exception ):
""" Accessed object was not found """
pass
+
+class ObjectInvalid( Exception ):
+ """ Accessed object store ID is invalid """
+ pass
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -320,19 +320,9 @@
# With job outputs in the working directory, we need the working
# directory to be set before prepare is run, or else premature deletion
# and job recovery fail.
- # Attempt to put the working directory in the same store as the output dataset(s)
- store_name = None
- da = None
- if job.output_datasets:
- da = job.output_datasets[0]
- elif job.output_library_datasets:
- da = job.output_library_datasets[0]
- if da is not None:
- store_name = self.app.object_store.store_name(da.dataset.id)
# Create the working dir if necessary
- if not self.app.object_store.exists(self.job_id, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id)):
- self.app.object_store.create(self.job_id, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id), store_name=store_name)
- self.working_directory = self.app.object_store.get_filename(self.job_id, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
+ self.app.object_store.create(job, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
+ self.working_directory = self.app.object_store.get_filename(job, base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
log.debug('(%s) Working directory for job is: %s' % (self.job_id, self.working_directory))
self.output_paths = None
self.output_dataset_paths = None
@@ -482,7 +472,7 @@
dataset.extension = 'data'
# Update (non-library) job output datasets through the object store
if dataset not in job.output_library_datasets:
- self.app.object_store.update_from_file(dataset.id, create=True)
+ self.app.object_store.update_from_file(dataset.dataset, create=True)
self.sa_session.add( dataset )
self.sa_session.flush()
job.state = job.states.ERROR
@@ -606,7 +596,7 @@
dataset.set_size()
# Update (non-library) job output datasets through the object store
if dataset not in job.output_library_datasets:
- self.app.object_store.update_from_file(dataset.id, create=True)
+ self.app.object_store.update_from_file(dataset.dataset, create=True)
if context['stderr']:
dataset.blurb = "error"
elif dataset.has_data():
@@ -719,8 +709,7 @@
try:
for fname in self.extra_filenames:
os.remove( fname )
- if self.working_directory is not None and os.path.isdir( self.working_directory ):
- shutil.rmtree( self.working_directory )
+ self.app.object_store.delete(self.get_job(), base_dir='job_work', dir_only=True, extra_dir=str(self.job_id))
if self.app.config.set_metadata_externally:
self.external_output_metadata.cleanup_external_metadata( self.sa_session )
galaxy.tools.imp_exp.JobExportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session )
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -16,7 +16,6 @@
from galaxy.util.hash_util import *
from galaxy.web.form_builder import *
from galaxy.model.item_attrs import UsesAnnotations, APIItem
-from galaxy.exceptions import ObjectNotFound
from sqlalchemy.orm import object_session
from sqlalchemy.sql.expression import func
import os.path, os, errno, codecs, operator, socket, pexpect, logging, time, shutil
@@ -650,12 +649,7 @@
if not self.external_filename:
assert self.id is not None, "ID must be set before filename used (commit the object)"
assert self.object_store is not None, "Object Store has not been initialized for dataset %s" % self.id
- try:
- filename = self.object_store.get_filename( self.id )
- except ObjectNotFound, e:
- # Create file if it does not exist
- self.object_store.create( self.id )
- filename = self.object_store.get_filename( self.id )
+ filename = self.object_store.get_filename( self )
return filename
else:
filename = self.external_filename
@@ -669,7 +663,7 @@
file_name = property( get_file_name, set_file_name )
@property
def extra_files_path( self ):
- return self.object_store.get_filename( self.id, dir_only=True, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id)
+ return self.object_store.get_filename( self, dir_only=True, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id )
def get_size( self, nice_size=False ):
"""Returns the size of the data on disk"""
if self.file_size:
@@ -679,16 +673,16 @@
return self.file_size
else:
if nice_size:
- return galaxy.datatypes.data.nice_size( self.object_store.size(self.id) )
+ return galaxy.datatypes.data.nice_size( self.object_store.size(self) )
else:
- return self.object_store.size(self.id)
+ return self.object_store.size(self)
def set_size( self ):
"""Returns the size of the data on disk"""
if not self.file_size:
if self.external_filename:
self.file_size = os.path.getsize(self.external_filename)
else:
- self.file_size = self.object_store.size(self.id)
+ self.file_size = self.object_store.size(self)
def get_total_size( self ):
if self.total_size is not None:
return self.total_size
@@ -703,7 +697,7 @@
if self.file_size is None:
self.set_size()
self.total_size = self.file_size or 0
- if self.object_store.exists(self.id, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True):
+ if self.object_store.exists(self, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True):
for root, dirs, files in os.walk( self.extra_files_path ):
self.total_size += sum( [ os.path.getsize( os.path.join( root, file ) ) for file in files ] )
def has_data( self ):
@@ -721,7 +715,7 @@
# FIXME: sqlalchemy will replace this
def _delete(self):
"""Remove the file that corresponds to this data"""
- self.object_store.delete(self.id)
+ self.object_store.delete(self)
@property
def user_can_purge( self ):
return self.purged == False \
@@ -730,9 +724,9 @@
def full_delete( self ):
"""Remove the file and extra files, marks deleted and purged"""
# os.unlink( self.file_name )
- self.object_store.delete(self.id)
- if self.object_store.exists(self.id, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True):
- self.object_store.delete(self.id, entire_dir=True, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True)
+ self.object_store.delete(self)
+ if self.object_store.exists(self, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True):
+ self.object_store.delete(self, entire_dir=True, extra_dir=self._extra_files_path or "dataset_%d_files" % self.id, dir_only=True)
# if os.path.exists( self.extra_files_path ):
# shutil.rmtree( self.extra_files_path )
# TODO: purge metadata files
@@ -1798,8 +1792,11 @@
assert self.id is not None, "ID must be set before filename used (commit the object)"
# Ensure the directory structure and the metadata file object exist
try:
- self.history_dataset.dataset.object_store.create( self.id, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name="metadata_%d.dat" % self.id )
- path = self.history_dataset.dataset.object_store.get_filename( self.id, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name="metadata_%d.dat" % self.id )
+ da = self.history_dataset or self.library_dataset
+ if self.object_store_id is None and da is not None:
+ self.object_store_id = da.dataset.object_store_id
+ da.dataset.object_store.create( self, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name="metadata_%d.dat" % self.id )
+ path = da.dataset.object_store.get_filename( self, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name="metadata_%d.dat" % self.id )
return path
except AttributeError:
# In case we're not working with the history_dataset
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py
+++ b/lib/galaxy/model/mapping.py
@@ -130,6 +130,7 @@
Column( "deleted", Boolean, index=True, default=False ),
Column( "purged", Boolean, index=True, default=False ),
Column( "purgable", Boolean, default=True ),
+ Column( "object_store_id", TrimmedString( 255 ), index=True ),
Column( "external_filename" , TEXT ),
Column( "_extra_files_path", TEXT ),
Column( 'file_size', Numeric( 15, 0 ) ),
@@ -410,6 +411,7 @@
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=True ),
Column( "job_runner_name", String( 255 ) ),
Column( "job_runner_external_id", String( 255 ) ),
+ Column( "object_store_id", TrimmedString( 255 ), index=True ),
Column( "imported", Boolean, default=False, index=True ) )
JobParameter.table = Table( "job_parameter", metadata,
@@ -641,6 +643,7 @@
Column( "lda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True, nullable=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
+ Column( "object_store_id", TrimmedString( 255 ), index=True ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "purged", Boolean, index=True, default=False ) )
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/model/migrate/versions/0089_add_object_store_id_columns.py
--- /dev/null
+++ b/lib/galaxy/model/migrate/versions/0089_add_object_store_id_columns.py
@@ -0,0 +1,38 @@
+"""
+Migration script to add 'object_store_id' column to various tables
+"""
+
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from migrate import *
+from migrate.changeset import *
+
+import logging
+log = logging.getLogger( __name__ )
+from galaxy.model.custom_types import TrimmedString
+
+metadata = MetaData( migrate_engine )
+db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
+
+def upgrade():
+ print __doc__
+ metadata.reflect()
+ for t_name in ( 'dataset', 'job', 'metadata_file' ):
+ t = Table( t_name, metadata, autoload=True )
+ c = Column( "object_store_id", TrimmedString( 255 ), index=True )
+ try:
+ c.create( t )
+ assert c is t.c.object_store_id
+ except Exception, e:
+ print "Adding object_store_id column to %s table failed: %s" % ( t_name, str( e ) )
+ log.debug( "Adding object_store_id column to %s table failed: %s" % ( t_name, str( e ) ) )
+
+def downgrade():
+ metadata.reflect()
+ for t_name in ( 'dataset', 'job', 'metadata_file' ):
+ t = Table( t_name, metadata, autoload=True )
+ try:
+ t.c.object_store_id.drop()
+ except Exception, e:
+ print "Dropping object_store_id column from %s table failed: %s" % ( t_name, str( e ) )
+ log.debug( "Dropping object_store_id column from %s table failed: %s" % ( t_name, str( e ) ) )
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/objectstore/__init__.py
--- a/lib/galaxy/objectstore/__init__.py
+++ b/lib/galaxy/objectstore/__init__.py
@@ -18,7 +18,9 @@
from galaxy.jobs import Sleeper
from galaxy.model import directory_hash_id
from galaxy.objectstore.s3_multipart_upload import multipart_upload
-from galaxy.exceptions import ObjectNotFound
+from galaxy.exceptions import ObjectNotFound, ObjectInvalid
+
+from sqlalchemy.orm import object_session
from boto.s3.key import Key
from boto.s3.connection import S3Connection
@@ -40,14 +42,15 @@
self.running = False
self.extra_dirs = {}
- def exists(self, dataset_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def exists(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
- Returns True if the object identified by `dataset_id` exists in this
- file store, False otherwise.
+ Returns True if the object identified by `obj` exists in this file
+ store, False otherwise.
FIELD DESCRIPTIONS (these apply to all the methods in this class):
- :type dataset_id: int
- :param dataset_id: Galaxy-assigned database ID of the dataset to be checked.
+ :type obj: object
+ :param obj: A Galaxy object with an assigned database ID accessible via
+ the .id attribute.
:type base_dir: string
:param base_dir: A key in self.extra_dirs corresponding to the base
@@ -56,21 +59,21 @@
:type dir_only: bool
:param dir_only: If True, check only the path where the file
- identified by `dataset_id` should be located, not the
- dataset itself. This option applies to `extra_dir`
- argument as well.
+ identified by `obj` should be located, not the dataset
+ itself. This option applies to `extra_dir` argument as
+ well.
:type extra_dir: string
:param extra_dir: Append `extra_dir` to the directory structure where
- the dataset identified by `dataset_id` should be located.
- (e.g., 000/extra_dir/dataset_id)
+ the dataset identified by `obj` should be located.
+ (e.g., 000/extra_dir/obj.id)
:type extra_dir_at_root: bool
:param extra_dir_at_root: Applicable only if `extra_dir` is set.
If True, the `extra_dir` argument is placed at
root of the created directory structure rather
- than at the end (e.g., extra_dir/000/dataset_id
- vs. 000/extra_dir/dataset_id)
+ than at the end (e.g., extra_dir/000/obj.id
+ vs. 000/extra_dir/obj.id)
:type alt_name: string
:param alt_name: Use this name as the alternative name for the created
@@ -78,53 +81,39 @@
"""
raise NotImplementedError()
- def store_name(self, dataset_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
- """
- Returns the name of the store in which the object identified by
- `dataset_id` exists, or None if it does not exist or the store is the
- default store.
- """
- return None
-
- def file_ready(self, dataset_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def file_ready(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
""" A helper method that checks if a file corresponding to a dataset
is ready and available to be used. Return True if so, False otherwise."""
return True
- def create(self, dataset_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None, store_name=None):
+ def create(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
- Mark the object identified by `dataset_id` as existing in the store, but
- with no content. This method will create a proper directory structure for
+ Mark the object identified by `obj` as existing in the store, but with
+ no content. This method will create a proper directory structure for
the file if the directory does not already exist.
See `exists` method for the description of other fields.
-
- :type store_name: string
- :param store_name: Backend store in which to create the dataset, if
- this store contains more than one backend. If the
- named backend does not exist, a backend will be
- chosen by the store.
"""
raise NotImplementedError()
-
- def empty(self, dataset_id, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+
+ def empty(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
- Test if the object identified by `dataset_id` has content.
+ Test if the object identified by `obj` has content.
If the object does not exist raises `ObjectNotFound`.
See `exists` method for the description of the fields.
"""
raise NotImplementedError()
- def size(self, dataset_id, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def size(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
- Return size of the object identified by `dataset_id`.
+ Return size of the object identified by `obj`.
If the object does not exist, return 0.
See `exists` method for the description of the fields.
"""
raise NotImplementedError()
- def delete(self, dataset_id, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def delete(self, obj, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
- Deletes the object identified by `dataset_id`.
+ Deletes the object identified by `obj`.
See `exists` method for the description of other fields.
:type entire_dir: bool
:param entire_dir: If True, delete the entire directory pointed to by
@@ -133,10 +122,10 @@
"""
raise NotImplementedError()
- def get_data(self, dataset_id, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def get_data(self, obj, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Fetch `count` bytes of data starting at offset `start` from the
- object identified uniquely by `dataset_id`.
+ object identified uniquely by `obj`.
If the object does not exist raises `ObjectNotFound`.
See `exists` method for the description of other fields.
@@ -148,15 +137,15 @@
"""
raise NotImplementedError()
- def get_filename(self, dataset_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
Get the expected filename (including the absolute path) which can be used
- to access the contents of the object uniquely identified by `dataset_id`.
+ to access the contents of the object uniquely identified by `obj`.
See `exists` method for the description of the fields.
"""
raise NotImplementedError()
- def update_from_file(self, dataset_id, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, file_name=None, create=False):
+ def update_from_file(self, obj, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, file_name=None, create=False):
"""
Inform the store that the file associated with the object has been
updated. If `file_name` is provided, update from that file instead
@@ -166,14 +155,14 @@
:type file_name: string
:param file_name: Use file pointed to by `file_name` as the source for
- updating the dataset identified by `dataset_id`
+ updating the dataset identified by `obj`
:type create: bool
:param create: If True and the default dataset does not exist, create it first.
"""
raise NotImplementedError()
- def get_object_url(self, dataset_id, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""
If the store supports direct URL access, return a URL. Otherwise return
None.
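The net effect of the signature changes above is that every ObjectStore method now takes a model object (anything with an .id attribute) rather than a bare database ID. A minimal caller-side sketch, assuming 'object_store' and 'dataset' are the application's store and a Dataset instance; it illustrates the interface and is not code from the changeset.
# Illustrative only: typical calls against the refactored object store interface.
if not object_store.exists( dataset ):
    object_store.create( dataset )                     # placeholder file plus directories
path = object_store.get_filename( dataset )            # absolute path for local access
size = object_store.size( dataset )                    # 0 if the object is missing
extra = dict( extra_dir='dataset_%d_files' % dataset.id, dir_only=True )
if object_store.exists( dataset, **extra ):            # composite/extra files directory
    object_store.delete( dataset, entire_dir=True, **extra )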
@@ -210,22 +199,24 @@
super(DiskObjectStore, self).__init__()
self.file_path = file_path or config.file_path
self.config = config
+ self.extra_dirs['job_work'] = config.job_working_directory
+ self.extra_dirs['temp'] = config.new_file_path
if extra_dirs is not None:
- self.extra_dirs = extra_dirs
+ self.extra_dirs.update( extra_dirs )
- def _get_filename(self, dataset_id, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def _get_filename(self, obj, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
"""Class method that returns the absolute path for the file corresponding
- to the `dataset_id` regardless of whether the file exists.
+ to the `obj`.id regardless of whether the file exists.
"""
- path = self._construct_path(dataset_id, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name, old_style=True)
+ path = self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name, old_style=True)
# For backward compatibility, check the old style root path first; otherwise,
# construct hashed path
if not os.path.exists(path):
- return self._construct_path(dataset_id, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
+ return self._construct_path(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
- def _construct_path(self, dataset_id, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ def _construct_path(self, obj, old_style=False, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
""" Construct the expected absolute path for accessing the object
- identified by `dataset_id`.
+ identified by `obj`.id.
:type base_dir: string
:param base_dir: A key in self.extra_dirs corresponding to the base
@@ -234,13 +225,13 @@
:type dir_only: bool
:param dir_only: If True, check only the path where the file
- identified by `dataset_id` should be located, not the
+ identified by `obj` should be located, not the
dataset itself. This option applies to `extra_dir`
argument as well.
:type extra_dir: string
:param extra_dir: Append the value of this parameter to the expected path
- used to access the object identified by `dataset_id`
+ used to access the object identified by `obj`
(e.g., /files/000/<extra_dir>/dataset_10.dat).
:type alt_name: string
@@ -252,16 +243,16 @@
the composed directory structure does not include a hash id
(e.g., /files/dataset_10.dat (old) vs. /files/000/dataset_10.dat (new))
"""
- base = self.file_path
- if base_dir in self.extra_dirs:
- base = self.extra_dirs.get(base_dir)
+ base = self.extra_dirs.get(base_dir, self.file_path)
if old_style:
if extra_dir is not None:
path = os.path.join(base, extra_dir)
else:
path = base
else:
- rel_path = os.path.join(*directory_hash_id(dataset_id))
+ # Construct hashed path
+ rel_path = os.path.join(*directory_hash_id(obj.id))
+ # Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
@@ -269,103 +260,86 @@
rel_path = os.path.join(rel_path, extra_dir)
path = os.path.join(base, rel_path)
if not dir_only:
- path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % dataset_id)
+ path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return os.path.abspath(path)
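An aside on the layout built by _construct_path above: object IDs are hashed into nested directories under the store's root. The helper below is a simplified stand-in for galaxy.model.directory_hash_id (which the real code imports), shown only to make the layout concrete.
# Simplified stand-in for directory_hash_id; the real function lives in galaxy.model.
import os
def hash_dirs( obj_id ):
    return [ ( '%06d' % int( obj_id ) )[ :3 ] ]        # e.g. 10 -> ['000'] (simplified)
def construct_path( root, obj_id, extra_dir=None, extra_dir_at_root=False, alt_name=None, dir_only=False ):
    rel_path = os.path.join( *hash_dirs( obj_id ) )
    if extra_dir is not None:
        rel_path = os.path.join( extra_dir, rel_path ) if extra_dir_at_root else os.path.join( rel_path, extra_dir )
    path = os.path.join( root, rel_path )
    if not dir_only:
        path = os.path.join( path, alt_name if alt_name else 'dataset_%s.dat' % obj_id )
    return os.path.abspath( path )
# construct_path( '/galaxy/files', 10 ) -> '/galaxy/files/000/dataset_10.dat'
# construct_path( '/galaxy/files', 10, extra_dir='_metadata_files', extra_dir_at_root=True, alt_name='metadata_10.dat' )
#   -> '/galaxy/files/_metadata_files/000/metadata_10.dat'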
- def exists(self, dataset_id, **kwargs):
- path = self._construct_path(dataset_id, old_style=True, **kwargs)
+ def exists(self, obj, **kwargs):
+ path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise, construct
# and check hashed path
- if not os.path.exists(path):
- path = self._construct_path(dataset_id, **kwargs)
- return os.path.exists(path)
+ if os.path.exists(path):
+ return True
+ else:
+ path = self._construct_path(obj, **kwargs)
+ return os.path.exists(path)
- def create(self, dataset_id, **kwargs):
- kwargs.pop('store_name', None)
- if not self.exists(dataset_id, **kwargs):
- # Pull out locally used fields
- extra_dir = kwargs.get('extra_dir', None)
- extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
+ def create(self, obj, **kwargs):
+ if not self.exists(obj, **kwargs):
+ path = self._construct_path(obj, **kwargs)
dir_only = kwargs.get('dir_only', False)
- alt_name = kwargs.get('alt_name', None)
- base_dir_key = kwargs.get('base_dir', None)
- # Construct hashed path
- path = os.path.join(*directory_hash_id(dataset_id))
- # Optionally append extra_dir
- if extra_dir is not None:
- if extra_dir_at_root:
- path = os.path.join(extra_dir, path)
- else:
- path = os.path.join(path, extra_dir)
- # Combine the constructted path with the root dir for all files
- base_dir = self.extra_dirs.get(base_dir_key, self.file_path)
- path = os.path.join(base_dir, path)
# Create directory if it does not exist
- if not os.path.exists(path):
- os.makedirs(path)
+ dir = path if dir_only else os.path.dirname(path)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+ # Create the file if it does not exist
if not dir_only:
- path = os.path.join(path, alt_name if alt_name else "dataset_%s.dat" % dataset_id)
open(path, 'w').close()
- util.umask_fix_perms( path, self.config.umask, 0666 )
+ util.umask_fix_perms(path, self.config.umask, 0666)
- def empty(self, dataset_id, **kwargs):
- return os.path.getsize(self.get_filename(dataset_id, **kwargs)) > 0
+ def empty(self, obj, **kwargs):
+ return os.path.getsize(self.get_filename(obj, **kwargs)) > 0
- def size(self, dataset_id, **kwargs):
- if self.exists(dataset_id, **kwargs):
+ def size(self, obj, **kwargs):
+ if self.exists(obj, **kwargs):
try:
- return os.path.getsize(self.get_filename(dataset_id, **kwargs))
+ return os.path.getsize(self.get_filename(obj, **kwargs))
except OSError:
return 0
else:
return 0
- def delete(self, dataset_id, entire_dir=False, **kwargs):
- path = self.get_filename(dataset_id, **kwargs)
+ def delete(self, obj, entire_dir=False, **kwargs):
+ path = self.get_filename(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
try:
if entire_dir and extra_dir:
shutil.rmtree(path)
return True
- if self.exists(dataset_id, **kwargs):
+ if self.exists(obj, **kwargs):
os.remove(path)
return True
except OSError, ex:
- log.critical('%s delete error %s' % (self._get_filename(dataset_id, **kwargs), ex))
+ log.critical('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
return False
- def get_data(self, dataset_id, start=0, count=-1, **kwargs):
- data_file = open(self.get_filename(dataset_id, **kwargs), 'r')
+ def get_data(self, obj, start=0, count=-1, **kwargs):
+ data_file = open(self.get_filename(obj, **kwargs), 'r')
data_file.seek(start)
content = data_file.read(count)
data_file.close()
return content
- def get_filename(self, dataset_id, **kwargs):
- path = self._construct_path(dataset_id, old_style=True, **kwargs)
+ def get_filename(self, obj, **kwargs):
+ path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise, construct
- # and check hashed path
+ # and return hashed path
if os.path.exists(path):
return path
else:
- path = self._construct_path(dataset_id, **kwargs)
- if os.path.exists(path):
- return path
- else:
- raise ObjectNotFound()
+ return self._construct_path(obj, **kwargs)
- def update_from_file(self, dataset_id, file_name=None, create=False, **kwargs):
+ def update_from_file(self, obj, file_name=None, create=False, **kwargs):
""" `create` parameter is not used in this implementation """
if create:
- self.create(dataset_id, **kwargs)
- if file_name and self.exists(dataset_id, **kwargs):
+ self.create(obj, **kwargs)
+ if file_name and self.exists(obj, **kwargs):
try:
- shutil.copy(file_name, self.get_filename(dataset_id, **kwargs))
+ shutil.copy(file_name, self.get_filename(obj, **kwargs))
except IOError, ex:
log.critical('Error copying %s to %s: %s' % (file_name,
- self._get_filename(dataset_id, **kwargs), ex))
+ self._get_filename(obj, **kwargs), ex))
- def get_object_url(self, dataset_id, **kwargs):
+ def get_object_url(self, obj, **kwargs):
return None
@@ -494,8 +468,8 @@
continue
util.umask_fix_perms( path, self.config.umask, 0666, self.config.gid )
- def _construct_path(self, dataset_id, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
- rel_path = os.path.join(*directory_hash_id(dataset_id))
+ def _construct_path(self, obj, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
+ rel_path = os.path.join(*directory_hash_id(obj.id))
if extra_dir is not None:
if extra_dir_at_root:
rel_path = os.path.join(extra_dir, rel_path)
@@ -504,7 +478,7 @@
# S3 folders are marked by having trailing '/' so add it now
rel_path = '%s/' % rel_path
if not dir_only:
- rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % dataset_id)
+ rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
return rel_path
def _get_cache_path(self, rel_path):
@@ -665,18 +639,18 @@
log.error("Trouble pushing S3 key '%s' from file '%s': %s" % (rel_path, source_file, ex))
return False
- def file_ready(self, dataset_id, **kwargs):
+ def file_ready(self, obj, **kwargs):
""" A helper method that checks if a file corresponding to a dataset
is ready and available to be used. Return True if so, False otherwise."""
- rel_path = self._construct_path(dataset_id, **kwargs)
+ rel_path = self._construct_path(obj, **kwargs)
# Make sure the size in cache is available in its entirety
if self._in_cache(rel_path) and os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_s3(rel_path):
return True
return False
- def exists(self, dataset_id, **kwargs):
+ def exists(self, obj, **kwargs):
in_cache = in_s3 = False
- rel_path = self._construct_path(dataset_id, **kwargs)
+ rel_path = self._construct_path(obj, **kwargs)
# Check cache
if self._in_cache(rel_path):
in_cache = True
@@ -699,9 +673,8 @@
else:
return False
- def create(self, dataset_id, **kwargs):
- kwargs.pop('store_name', None)
- if not self.exists(dataset_id, **kwargs):
+ def create(self, obj, **kwargs):
+ if not self.exists(obj, **kwargs):
#print "S3 OS creating a dataset with ID %s" % dataset_id
# Pull out locally used fields
extra_dir = kwargs.get('extra_dir', None)
@@ -710,7 +683,7 @@
alt_name = kwargs.get('alt_name', None)
# print "---- Processing: %s; %s" % (alt_name, locals())
# Construct hashed path
- rel_path = os.path.join(*directory_hash_id(dataset_id))
+ rel_path = os.path.join(*directory_hash_id(obj.id))
# Optionally append extra_dir
if extra_dir is not None:
if extra_dir_at_root:
@@ -728,30 +701,30 @@
# self._push_to_s3(s3_dir, from_string='')
# If instructed, create the dataset in cache & in S3
if not dir_only:
- rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % dataset_id)
+ rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
open(os.path.join(self.staging_path, rel_path), 'w').close()
self._push_to_s3(rel_path, from_string='')
- def empty(self, dataset_id, **kwargs):
- if self.exists(dataset_id, **kwargs):
- return bool(self.size(dataset_id, **kwargs) > 0)
+ def empty(self, obj, **kwargs):
+ if self.exists(obj, **kwargs):
+ return bool(self.size(obj, **kwargs) > 0)
else:
raise ObjectNotFound()
- def size(self, dataset_id, **kwargs):
- rel_path = self._construct_path(dataset_id, **kwargs)
+ def size(self, obj, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
if self._in_cache(rel_path):
try:
return os.path.getsize(self._get_cache_path(rel_path))
except OSError, ex:
log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s" % (rel_path, ex))
- elif self.exists(dataset_id, **kwargs):
+ elif self.exists(obj, **kwargs):
return self._get_size_in_s3(rel_path)
log.warning("Did not find dataset '%s', returning 0 for size" % rel_path)
return 0
- def delete(self, dataset_id, entire_dir=False, **kwargs):
- rel_path = self._construct_path(dataset_id, **kwargs)
+ def delete(self, obj, entire_dir=False, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
extra_dir = kwargs.get('extra_dir', None)
try:
# For the case of extra_files, because we don't have a reference to
@@ -777,11 +750,11 @@
except S3ResponseError, ex:
log.error("Could not delete key '%s' from S3: %s" % (rel_path, ex))
except OSError, ex:
- log.error('%s delete error %s' % (self._get_filename(dataset_id, **kwargs), ex))
+ log.error('%s delete error %s' % (self._get_filename(obj, **kwargs), ex))
return False
- def get_data(self, dataset_id, start=0, count=-1, **kwargs):
- rel_path = self._construct_path(dataset_id, **kwargs)
+ def get_data(self, obj, start=0, count=-1, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
# Check cache first and get file if not there
if not self._in_cache(rel_path):
self._pull_into_cache(rel_path)
@@ -794,10 +767,10 @@
data_file.close()
return content
- def get_filename(self, dataset_id, **kwargs):
+ def get_filename(self, obj, **kwargs):
#print "S3 get_filename for dataset: %s" % dataset_id
dir_only = kwargs.get('dir_only', False)
- rel_path = self._construct_path(dataset_id, **kwargs)
+ rel_path = self._construct_path(obj, **kwargs)
cache_path = self._get_cache_path(rel_path)
# S3 does not recognize directories as files so cannot check if those exist.
# So, if checking dir only, ensure given dir exists in cache and return
@@ -811,7 +784,7 @@
if self._in_cache(rel_path):
return cache_path
# Check if the file exists in persistent storage and, if it does, pull it into cache
- elif self.exists(dataset_id, **kwargs):
+ elif self.exists(obj, **kwargs):
if dir_only: # Directories do not get pulled into cache
return cache_path
else:
@@ -824,11 +797,11 @@
raise ObjectNotFound()
# return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
- def update_from_file(self, dataset_id, file_name=None, create=False, **kwargs):
+ def update_from_file(self, obj, file_name=None, create=False, **kwargs):
if create:
- self.create(dataset_id, **kwargs)
- if self.exists(dataset_id, **kwargs):
- rel_path = self._construct_path(dataset_id, **kwargs)
+ self.create(obj, **kwargs)
+ if self.exists(obj, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
# Chose whether to use the dataset file itself or an alternate file
if file_name:
source_file = os.path.abspath(file_name)
@@ -848,9 +821,9 @@
else:
raise ObjectNotFound()
- def get_object_url(self, dataset_id, **kwargs):
- if self.exists(dataset_id, **kwargs):
- rel_path = self._construct_path(dataset_id, **kwargs)
+ def get_object_url(self, obj, **kwargs):
+ if self.exists(obj, **kwargs):
+ rel_path = self._construct_path(obj, **kwargs)
try:
key = Key(self.bucket, rel_path)
return key.generate_url(expires_in = 86400) # 24hrs
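The S3 store methods above all follow the same cache-through pattern: serve from the local staging cache when possible, otherwise pull the key down from S3 (or report it missing). A condensed sketch of that lookup follows; in_cache, cache_path_for, pull_into_cache and exists_in_s3 are assumed helper names, not the class's real private methods.
# Condensed illustration of the cache-through lookup; helper names are assumptions.
from galaxy.exceptions import ObjectNotFound
def cached_filename( rel_path, dir_only=False ):
    cache_path = cache_path_for( rel_path )
    if in_cache( rel_path ):
        return cache_path                              # already staged locally
    if exists_in_s3( rel_path ):
        if not dir_only:                               # directories are not pulled into cache
            pull_into_cache( rel_path )
        return cache_path
    raise ObjectNotFound()                             # matches the behavior in the diff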
@@ -873,7 +846,7 @@
"requires a config file, please set one in " \
"'distributed_object_store_config_file')"
self.backends = {}
- self.weighted_backend_names = []
+ self.weighted_backend_ids = []
random.seed()
@@ -884,7 +857,7 @@
root = tree.getroot()
log.debug('Loading backends for distributed object store from %s' % self.distributed_config)
for elem in [ e for e in root if e.tag == 'backend' ]:
- name = elem.get('name')
+ id = elem.get('id')
weight = int(elem.get('weight', 1))
if elem.get('type', 'disk'):
path = None
@@ -895,94 +868,90 @@
elif sub.tag == 'extra_dir':
type = sub.get('type')
extra_dirs[type] = sub.get('path')
- self.backends[name] = DiskObjectStore(config, file_path=path, extra_dirs=extra_dirs)
- log.debug("Loaded disk backend '%s' with weight %s and file_path: %s" % (name, weight, path))
+ self.backends[id] = DiskObjectStore(config, file_path=path, extra_dirs=extra_dirs)
+ log.debug("Loaded disk backend '%s' with weight %s and file_path: %s" % (id, weight, path))
if extra_dirs:
log.debug(" Extra directories:")
for type, dir in extra_dirs.items():
log.debug(" %s: %s" % (type, dir))
for i in range(0, weight):
- # The simplest way to do weighting: add backend names to a
+ # The simplest way to do weighting: add backend ids to a
# sequence the number of times equalling weight, then randomly
# choose a backend from that sequence at creation
- self.weighted_backend_names.append(name)
+ self.weighted_backend_ids.append(id)
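The comment above describes the whole weighting scheme: each backend id is appended to the list once per unit of weight, and random.choice over that list gives weight-proportional selection. A tiny self-contained demonstration (not Galaxy code):
# Not Galaxy code: weight-by-repetition backend selection.
import random
from collections import Counter
weighted_backend_ids = []
for backend_id, weight in ( ( 'files1', 3 ), ( 'files2', 1 ) ):
    weighted_backend_ids.extend( [ backend_id ] * weight )
picks = Counter( random.choice( weighted_backend_ids ) for _ in range( 10000 ) )
print( picks )    # 'files1' should be chosen roughly three times as often as 'files2'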
- def exists(self, dataset_id, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- return store is not None
+ def exists(self, obj, **kwargs):
+ return self.__call_method('exists', obj, False, False, **kwargs)
- def store_name(self, dataset_id, **kwargs):
- for name, store in self.backends.items():
- if store.exists(dataset_id, **kwargs):
- return name
- return None
+ #def store_id(self, obj, **kwargs):
+ # return self.__get_store_id_for(obj, **kwargs)[0]
- def file_ready(self, dataset_id, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.file_ready(dataset_id, **kwargs)
- return False
+ def file_ready(self, obj, **kwargs):
+ return self.__call_method('file_ready', obj, False, False, **kwargs)
- def create(self, dataset_id, **kwargs):
- store_name = kwargs.pop('store_name', None)
- if not self.exists(dataset_id, **kwargs):
- if store_name is None or store_name not in self.backends:
- store_name = random.choice(self.weighted_backend_names)
- log.debug("Selected backend '%s' for creation of dataset %s" % (store_name, dataset_id))
+ def create(self, obj, **kwargs):
+ """
+ create() is the only method in which obj.object_store_id may be None
+ """
+ if obj.object_store_id is None or not self.exists(obj, **kwargs):
+ if obj.object_store_id is None or obj.object_store_id not in self.backends:
+ obj.object_store_id = random.choice(self.weighted_backend_ids)
+ object_session( obj ).add( obj )
+ object_session( obj ).flush()
+ log.debug("Selected backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id))
else:
- log.debug("Using preferred backend '%s' for creation of dataset %s" % (store_name, dataset_id))
- return self.backends[store_name].create(dataset_id, **kwargs)
+ log.debug("Using preferred backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id))
+ self.backends[obj.object_store_id].create(obj, **kwargs)
- def empty(self, dataset_id, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.empty(dataset_id, **kwargs)
- return True
+ def empty(self, obj, **kwargs):
+ return self.__call_method('empty', obj, True, False, **kwargs)
- def size(self, dataset_id, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.size(dataset_id, **kwargs)
- return 0
+ def size(self, obj, **kwargs):
+ return self.__call_method('size', obj, 0, False, **kwargs)
- def delete(self, dataset_id, entire_dir=False, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.delete(dataset_id, entire_dir=entire_dir, **kwargs)
- return False
+ def delete(self, obj, entire_dir=False, **kwargs):
+ return self.__call_method('delete', obj, False, False, **kwargs)
- def get_data(self, dataset_id, start=0, count=-1, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.get_data(dataset_id, start=0, count=-1, **kwargs)
- raise ObjectNotFound()
+ def get_data(self, obj, start=0, count=-1, **kwargs):
+ return self.__call_method('get_data', obj, ObjectNotFound, True, **kwargs)
- def get_filename(self, dataset_id, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.get_filename(dataset_id, **kwargs)
- raise ObjectNotFound()
+ def get_filename(self, obj, **kwargs):
+ return self.__call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
- def update_from_file(self, dataset_id, file_name=None, create=False, **kwargs):
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.update_from_file(dataset_id, file_name=file_name, create=create, **kwargs)
- if create:
- store_name = random.choice(self.weighted_backend_names)
- return self.backends[store_name].update_from_file(dataset_id, file_name=file_name, create=create, **kwargs)
- raise ObjectNotFound()
+ def update_from_file(self, obj, file_name=None, create=False, **kwargs):
+ # can raise ObjectLocationMismatch
+ # TODO: handling create=True here? probably not since create() is called from w/in, so a store will be selected there
+ #if create and not self.exists(obj, **kwargs):
+ # store_id = random.choice(self.weighted_backend_names)
+ return self.__call_method('update_from_file', obj, ObjectNotFound, True, **kwargs)
- def get_object_url(self, dataset_id, **kwargs):
- # FIXME: dir_only
- store = self.__get_store_for(dataset_id, **kwargs)
- if store is not None:
- return store.get_object_url(dataset_id, **kwargs)
- return None
+ def get_object_url(self, obj, **kwargs):
+ return self.__call_method('get_object_url', obj, None, False, **kwargs)
- def __get_store_for(self, dataset_id, **kwargs):
- for store in self.backends.values():
- if store.exists(dataset_id, **kwargs):
- return store
+ def __call_method(self, method, obj, default, default_is_exception, **kwargs):
+ object_store_id = self.__get_store_id_for(obj, **kwargs)
+ if object_store_id is not None:
+ return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs)
+ if default_is_exception:
+ raise default()
+ else:
+ return default
+
+ def __get_store_id_for(self, obj, **kwargs):
+ if obj.object_store_id is not None and obj.object_store_id in self.backends:
+ return obj.object_store_id
+ else:
+ # if this instance has been switched from a non-distributed to a
+ # distributed object store, or if the object's store id is invalid,
+ # try to locate the object
+ log.warning('The backend object store ID (%s) for %s object with ID %s is invalid' % (obj.object_store_id, obj.__class__.__name__, obj.id))
+ for id, store in self.backends.items():
+ if store.exists(obj, **kwargs):
+ log.warning('%s object with ID %s found in backend object store with ID %s' % (obj.__class__.__name__, obj.id, id))
+ obj.object_store_id = id
+ object_session( obj ).add( obj )
+ object_session( obj ).flush()
+ return id
return None
class HierarchicalObjectStore(ObjectStore):
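Before moving on to the caller-side changes below, note how DistributedObjectStore routes everything through __call_method: look up the backend recorded in obj.object_store_id, fall back to probing all backends if that id is stale, and otherwise return a default value or raise. A condensed sketch of that dispatch pattern with made-up names; it is not the real class.
# Condensed illustration of the DistributedObjectStore dispatch; names are made up.
def call_backend( backends, method, obj, default, default_is_exception, **kwargs ):
    store_id = obj.object_store_id if obj.object_store_id in backends else None
    if store_id is None:
        # Recorded id is missing or stale -- probe each backend for the object.
        for candidate_id, store in backends.items():
            if store.exists( obj, **kwargs ):
                store_id = obj.object_store_id = candidate_id
                break
    if store_id is not None:
        return getattr( backends[ store_id ], method )( obj, **kwargs )
    if default_is_exception:
        raise default()
    return default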
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1859,7 +1859,7 @@
a_files = os.listdir( temp_file_path )
if len( a_files ) > 0:
for f in a_files:
- self.app.object_store.update_from_file(hda.dataset.id,
+ self.app.object_store.update_from_file(hda.dataset,
extra_dir="dataset_%d_files" % hda.dataset.id,
alt_name = f,
file_name = os.path.join(temp_file_path, f),
@@ -1899,7 +1899,7 @@
sa_session=self.sa_session )
self.app.security_agent.copy_dataset_permissions( outdata.dataset, child_dataset.dataset )
# Move data from temp location to dataset location
- self.app.object_store.update_from_file(child_dataset.dataset.id, filename, create=True)
+ self.app.object_store.update_from_file(child_dataset.dataset, filename, create=True)
self.sa_session.add( child_dataset )
self.sa_session.flush()
child_dataset.set_size()
@@ -1967,7 +1967,7 @@
self.sa_session.add( primary_data )
self.sa_session.flush()
# Move data from temp location to dataset location
- self.app.object_store.update_from_file(primary_data.dataset.id, filename, create=True)
+ self.app.object_store.update_from_file(primary_data.dataset, filename, create=True)
primary_data.set_size()
primary_data.name = "%s (%s)" % ( outdata.name, designation )
primary_data.info = outdata.info
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -228,8 +228,7 @@
# datasets first, then create the associations
parent_to_child_pairs = []
child_dataset_names = set()
- store_name = None
- store_name_set = False # this is needed since None is a valid value for store_name
+ object_store_id = None
for name, output in tool.outputs.items():
for filter in output.filters:
try:
@@ -292,12 +291,12 @@
trans.sa_session.add( data )
trans.sa_session.flush()
trans.app.security_agent.set_all_dataset_permissions( data.dataset, output_permissions )
- # Create an empty file immediately
- trans.app.object_store.create( data.id, store_name=store_name )
- if not store_name_set:
- # Ensure all other datasets in this job are created in the same store
- store_name = trans.app.object_store.store_name( data.id )
- store_name_set = True
+ # Create an empty file immediately. The first dataset will be
+ # created in the "default" store, all others will be created in
+ # the same store as the first.
+ data.dataset.object_store_id = object_store_id
+ trans.app.object_store.create( data.dataset )
+ object_store_id = data.dataset.object_store_id # these will be the same thing after the first output
# This may not be neccesary with the new parent/child associations
data.designation = name
# Copy metadata from one of the inputs if requested.
@@ -382,6 +381,7 @@
job.add_input_dataset( name, None )
for name, dataset in out_data.iteritems():
job.add_output_dataset( name, dataset )
+ job.object_store_id = object_store_id
trans.sa_session.add( job )
trans.sa_session.flush()
# Some tools are not really executable, but jobs are still created for them ( for record keeping ).
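The comment in the hunk above (and the matching change in upload_common.py below) describes the whole policy: the object store picks a backend for the first output, and every later output plus the job itself is pinned to that same backend. A minimal sketch with assumed names ('object_store', 'job', 'output_datasets'):
# Illustrative only: pin all outputs of a job to the backend chosen for the first one.
object_store_id = None
for data in output_datasets:
    data.dataset.object_store_id = object_store_id     # None for the first output
    object_store.create( data.dataset )                 # store selects or keeps a backend
    object_store_id = data.dataset.object_store_id      # same id after the first output
job.object_store_id = object_store_id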
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/tools/actions/upload_common.py
--- a/lib/galaxy/tools/actions/upload_common.py
+++ b/lib/galaxy/tools/actions/upload_common.py
@@ -319,8 +319,7 @@
for name, value in tool.params_to_strings( params, trans.app ).iteritems():
job.add_parameter( name, value )
job.add_parameter( 'paramfile', to_json_string( json_file_path ) )
- store_name = None
- store_name_set = False # this is needed since None is a valid value for store_name
+ object_store_id = None
for i, dataset in enumerate( data_list ):
if folder:
job.add_output_library_dataset( 'output%i' % i, dataset )
@@ -328,11 +327,12 @@
job.add_output_dataset( 'output%i' % i, dataset )
# Create an empty file immediately
if not dataset.dataset.external_filename:
- trans.app.object_store.create( dataset.dataset.id, store_name=store_name )
+ dataset.dataset.object_store_id = object_store_id
+ trans.app.object_store.create( dataset.dataset )
+ object_store_id = dataset.dataset.object_store_id
+ trans.sa_session.add( dataset )
# open( dataset.file_name, "w" ).close()
- if not store_name_set:
- store_name = trans.app.object_store.store_name( dataset.dataset.id )
- store_name_set = True
+ job.object_store_id = object_store_id
job.state = job.states.NEW
trans.sa_session.add( job )
trans.sa_session.flush()
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/web/controllers/dataset.py
--- a/lib/galaxy/web/controllers/dataset.py
+++ b/lib/galaxy/web/controllers/dataset.py
@@ -360,16 +360,16 @@
data = self._check_dataset(trans, dataset_id)
if isinstance( data, basestring ):
return data
- log.debug( "dataset.py -> transfer_status: Checking transfer status for dataset %s..." % data.id )
+ log.debug( "Checking transfer status for dataset %s..." % data.dataset.id )
# Pulling files in extra_files_path into cache is not handled via this
# method but that's primarily because those files are typically linked to
# through tool's output page anyhow so tying a JavaScript event that will
# call this method does not seem doable?
- if trans.app.object_store.file_ready(data.id):
+ if data.dataset.external_filename:
return True
else:
- return False
+ return trans.app.object_store.file_ready(data.dataset)
@web.expose
def display(self, trans, dataset_id=None, preview=False, filename=None, to_ext=None, **kwd):
@@ -382,7 +382,7 @@
if filename and filename != "index":
# For files in extra_files_path
- file_path = trans.app.object_store.get_filename(data.dataset.id, extra_dir='dataset_%s_files' % data.dataset.id, alt_name=filename)
+ file_path = trans.app.object_store.get_filename(data.dataset, extra_dir='dataset_%s_files' % data.dataset.id, alt_name=filename)
if os.path.exists( file_path ):
if os.path.isdir( file_path ):
return trans.show_error_message( "Directory listing is not allowed." ) #TODO: Reconsider allowing listing of directories?
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/web/controllers/history.py
--- a/lib/galaxy/web/controllers/history.py
+++ b/lib/galaxy/web/controllers/history.py
@@ -663,7 +663,7 @@
trans.response.set_content_type( 'application/x-gzip' )
else:
trans.response.set_content_type( 'application/x-tar' )
- return trans.app.object_store.get_data(jeha.dataset.id)
+ return trans.app.object_store.get_data(jeha.dataset)
elif jeha.job.state in [ model.Job.states.RUNNING, model.Job.states.QUEUED, model.Job.states.WAITING ]:
return trans.show_message( "Still exporting history %(n)s; please check back soon. Link: <a href='%(s)s'>%(s)s</a>" \
% ( { 'n' : history.name, 's' : url_for( action="export_archive", id=id, qualified=True ) } ) )
diff -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f -r ff576e7f989c186dcec3c6af76ffa37aad4994bf lib/galaxy/web/controllers/library_common.py
--- a/lib/galaxy/web/controllers/library_common.py
+++ b/lib/galaxy/web/controllers/library_common.py
@@ -1647,6 +1647,7 @@
for ldda_id in ldda_ids:
try:
ldda = trans.sa_session.query( trans.app.model.LibraryDatasetDatasetAssociation ).get( trans.security.decode_id( ldda_id ) )
+ assert not ldda.dataset.purged
lddas.append( ldda )
except:
ldda = None
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: jgoecks: Trackster: fix bug with multiple color pickers in track config.
by Bitbucket 09 Jan '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/94d54ff122b0/
changeset: 94d54ff122b0
user: jgoecks
date: 2012-01-07 18:02:30
summary: Trackster: fix bug with multiple color pickers in track config.
affected #: 1 file
diff -r d48aa90428593ec9a1ede108aaf97a03b69a086f -r 94d54ff122b0dead007901d1a0fcaebf5c329d0f static/scripts/trackster.js
--- a/static/scripts/trackster.js
+++ b/static/scripts/trackster.js
@@ -2411,25 +2411,33 @@
var input = $('<input />').attr("id", id ).attr("name", id ).val( value );
// Color picker in tool tip style float
var tip = $( "<div class='tipsy tipsy-west' style='position: absolute;' />" ).hide();
- // Inner div for padding purposes
- var tip_inner = $("<div style='background-color: black; padding: 10px;'></div>").appendTo(tip);
- var farb_container = $("<div/>")
- .appendTo(tip_inner)
- .farbtastic( { width: 100, height: 100, callback: input, color: value });
- // Outer div container input and tip for hover to work
- $("<div />").append( input ).append( tip ).appendTo( row ).bind( "click", function ( e ) {
- tip.css( {
- // left: $(this).position().left + ( $(input).width() / 2 ) - 60,
- // top: $(this).position().top + $(this.height)
- left: $(this).position().left + $(input).width() + 5,
- top: $(this).position().top - ( $(tip).height() / 2 ) + ( $(input).height() / 2 )
- } ).show();
- $(document).bind( "click.color-picker", function() {
- tip.hide();
- $(document).unbind( "click.color-picker" );
- });
- e.stopPropagation();
- });
+
+ // Use function to fix values of input and tip.
+ (function(fixed_input, fixed_tip) {
+ // Inner div for padding purposes
+ var tip_inner = $("<div style='background-color: black; padding: 10px;'></div>").appendTo(fixed_tip);
+ var farb_container = $("<div/>")
+ .appendTo(tip_inner)
+ .farbtastic( { width: 100, height: 100, callback: fixed_input, color: value });
+ // Outer div container input and tip for hover to work
+ $("<div />").append( fixed_input ).append( fixed_tip ).appendTo( row ).bind( "click", function ( e ) {
+ // Hide other pickers.
+ $(".tipsy").hide();
+
+ // Show current picker.
+ fixed_tip.css( {
+ // left: $(this).position().left + ( $(input).width() / 2 ) - 60,
+ // top: $(this).position().top + $(this.height)
+ left: $(this).position().left + $(fixed_input).width() + 5,
+ top: $(this).position().top - ( $(fixed_tip).height() / 2 ) + ( $(fixed_input).height() / 2 )
+ } ).show();
+ $(document).bind( "click.color-picker", function() {
+ fixed_tip.hide();
+ $(document).unbind( "click.color-picker" );
+ });
+ e.stopPropagation();
+ });
+ })(input, tip);
}
else {
row.append( $('<input />').attr("id", id ).attr("name", id ).val( value ) );
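The fix above wraps the per-parameter setup in an immediately invoked function so each click handler closes over its own input and tip instead of whatever the enclosing loop assigned last. The same late-binding pitfall exists in Python closures; a small, unrelated illustration in Python (not Galaxy code):
# Not Galaxy code: late-binding closures and two common fixes.
handlers_buggy = [ lambda: i for i in range( 3 ) ]
print( [ h() for h in handlers_buggy ] )     # [2, 2, 2] -- every lambda sees the last i
handlers_default = [ lambda i=i: i for i in range( 3 ) ]
print( [ h() for h in handlers_default ] )   # [0, 1, 2] -- value bound via default argument
def make_handler( value ):                   # factory: the Python analogue of the IIFE in the commit
    return lambda: value
handlers_factory = [ make_handler( i ) for i in range( 3 ) ]
print( [ h() for h in handlers_factory ] )   # [0, 1, 2]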
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: greg: Eliminate all references and support for datatype indexers since they have never been used - datatype converters do the same thing.
by Bitbucket 06 Jan '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/d48aa9042859/
changeset: d48aa9042859
user: greg
date: 2012-01-06 21:43:35
summary: Eliminate all references and support for datatype indexers since they have never been used - datatype converters do the same thing.
affected #: 13 files
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f datatypes_conf.xml.sample
--- a/datatypes_conf.xml.sample
+++ b/datatypes_conf.xml.sample
@@ -35,12 +35,6 @@
<datatype extension="bigwig" type="galaxy.datatypes.binary:BigWig" mimetype="application/octet-stream" display_in_upload="true"><display file="ucsc/bigwig.xml" /></datatype>
- <datatype extension="coverage" type="galaxy.datatypes.coverage:LastzCoverage" display_in_upload="true">
- <indexer file="coverage.xml" />
- </datatype>
- <datatype extension="coverage" type="galaxy.datatypes.coverage:LastzCoverage" display_in_upload="true">
- <indexer file="coverage.xml" />
- </datatype><!-- MSI added Datatypes --><datatype extension="csv" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="true" /><!-- FIXME: csv is 'tabular'ized data, but not 'tab-delimited'; the class used here is intended for 'tab-delimited' --><!-- End MSI added Datatypes -->
@@ -95,7 +89,6 @@
<converter file="interval_to_bedstrict_converter.xml" target_datatype="bedstrict"/><converter file="interval_to_bed6_converter.xml" target_datatype="bed6"/><converter file="interval_to_bed12_converter.xml" target_datatype="bed12"/>
- <indexer file="interval_awk.xml" /><!-- <display file="ucsc/interval_as_bed.xml" inherit="True" /> --><display file="genetrack.xml" inherit="True"/><display file="ensembl/ensembl_interval_as_bed.xml" inherit="True"/>
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/app.py
--- a/lib/galaxy/app.py
+++ b/lib/galaxy/app.py
@@ -74,15 +74,12 @@
self.datatypes_registry.load_datatype_converters( self.toolbox )
# Load history import/export tools
load_history_imp_exp_tools( self.toolbox )
- #load external metadata tool
+ # Load external metadata tool
self.datatypes_registry.load_external_metadata_tool( self.toolbox )
- # Load datatype indexers defined in local datatypes_conf.xml
- self.datatypes_registry.load_datatype_indexers( self.toolbox )
- # Load proprietary datatypes defined in datatypes_conf.xml files in all installed tool
- # shed repositories. This will also load all proprietary datatype converters, indexers
- # and display_applications.
+ # Load proprietary datatypes defined in datatypes_conf.xml files in all installed tool shed
+ # repositories. This will also load all proprietary datatype converters and display_applications.
self.installed_repository_manager.load_proprietary_datatypes()
- #Load security policy
+ # Load security policy
self.security_agent = self.model.security_agent
self.host_security_agent = galaxy.security.HostAgent( model=self.security_agent.model, permitted_actions=self.security_agent.permitted_actions )
# Load quota management
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/coverage.py
--- a/lib/galaxy/datatypes/indexers/coverage.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Read a chromosome of coverage data, and write it as a npy array, as
-well as averages over regions of progressively larger size in powers of 10
-"""
-
-from __future__ import division
-
-import sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
-import bx.wiggle
-from bx.cookbook import doc_optparse
-from bx import misc
-max2 = max
-pkg_resources.require("numpy>=1.2.1")
-from numpy import *
-import tempfile
-import os
-
-def write_chrom(max, out_base, instream):
-
- scores = zeros( max, float32 ) * nan
- # Fill array from wiggle
- max_value = 0
- min_value = 0
- for line in instream:
- line = line.rstrip("\n\r")
- (chrom, pos, val) = line.split("\t")
- pos, val = int(pos), float(val)
- scores[pos] = val
-
- # Write ra
- fname = "%s_%d" % ( out_base, 1 )
- save( fname, scores )
- os.rename( fname+".npy", fname )
-
- # Write average
- for window in 10, 100, 1000, 10000, 100000:
- input = scores.copy()
- size = len( input )
- input.resize( ( ( size / window ), window ) )
- masked = ma.masked_array( input, isnan( input ) )
- averaged = mean( masked, 1 )
- averaged.set_fill_value( nan )
- fname = "%s_%d" % ( out_base, window )
- save( fname, averaged.filled() )
- del masked, averaged
- os.rename( fname+".npy", fname )
-
-def main():
- max = int( 512*1024*1024 )
- # get chroms and lengths
- chroms = {}
- LEN = {}
- for line in open(sys.argv[1],"r"):
- line = line.rstrip("\r\n")
- fields = line.split("\t")
- (chrom, pos, forward) = fields[0:3]
- reverse = 0
- if len(fields) == 4: reverse = int(fields[3])
- forward = int(forward)+reverse
- pos = int(pos)
- chrom_file = chroms.get(chrom, None)
- if not chrom_file:
- chrom_file = chroms[chrom] = tempfile.NamedTemporaryFile()
- chrom_file.write("%s\t%s\t%s\n" % (chrom,pos,forward))
- LEN[chrom] = max2( LEN.get(chrom,0), pos+1 )
- for chrom, stream in chroms.items():
- stream.seek(0)
- prefix = os.path.join(sys.argv[2], chrom)
- write_chrom( LEN[chrom], prefix, stream )
-
- manifest_file = open( os.path.join( sys.argv[2], "manifest.tab" ),"w" )
- for key, value in LEN.items():
- print >> manifest_file, "%s\t%s" % (key, value)
- manifest_file.close()
-
-
-if __name__ == "__main__": main()
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/coverage.xml
--- a/lib/galaxy/datatypes/indexers/coverage.xml
+++ /dev/null
@@ -1,12 +0,0 @@
-<tool id="INDEXER_Coverage_0" name="Index Coverage for Track Viewer">
- <!-- Used internally to generate track indexes -->
- <command interpreter="python">coverage.py $input_dataset $store_path 2>&1
- </command>
- <inputs>
- <page>
- <param format="coverage" name="input_dataset" type="data" label="Choose coverage"/>
- </page>
- </inputs>
- <help>
- </help>
-</tool>
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/interval.awk
--- a/lib/galaxy/datatypes/indexers/interval.awk
+++ /dev/null
@@ -1,43 +0,0 @@
-BEGIN {
- # from galaxy.utils
- mapped_chars[">"] = "__gt__"
- mapped_chars["<"] = "__lt__"
- mapped_chars["'"] = "__sq__"
- mapped_chars["\""] = "__dq__"
- mapped_chars["\\["] = "__ob__"
- mapped_chars["\\]"] = "__cb__"
- mapped_chars["\\{"] = "__oc__"
- mapped_chars["\\}"] = "__cc__"
- mapped_chars["@"] = "__at__"
- # additional, not in galaxy.utils
- mapped_chars["/"] = "__fs__"
- mapped_chars["^manifest\.tab$"] = "__manifest.tab__"
-}
-function escape_filename( name )
-{
- for( char in mapped_chars ) {
- gsub( char, mapped_chars[char], name )
- }
- return name
-}
-!_[$chrom]++ {
- # close files only when we switch to a new one.
- fn && close(fn)
- fn = storepath "/" escape_filename($1) }
-{
- print $0 >> fn;
- # the || part is needed to catch 0 length chromosomes, which
- # should never happen but...
- if ($end > chroms[$chrom] || !chroms[$chrom])
- chroms[$chrom] = $end }
-END {
- fn = storepath "/manifest.tab"
- for( x in chroms ) {
- # add line to manifest
- print x "\t" chroms[x] >> fn
- chromfile = storepath "/" escape_filename(x)
- # sort in-place
- system( "sort -f -n -k " chrom " -k " start " -k " end " -o " chromfile " " chromfile )
- close(chromfile)
- }
-}
\ No newline at end of file
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/interval.py
--- a/lib/galaxy/datatypes/indexers/interval.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-"""
-Generate indices for track browsing of an interval file.
-
-usage: %prog bed_file out_directory
- -1, --cols1=N,N,N,N: Columns for chrom, start, end, strand in interval file
-"""
-import sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
-from bx.intervals import io
-from bx.cookbook import doc_optparse
-import psyco_full
-import commands
-import os
-from os import environ
-import tempfile
-from bisect import bisect
-
-def divide( intervals, out_path ):
- manifest = {}
- current_file = None
- lastchrom = ""
- for line in intervals:
- try:
- chrom = line.chrom
- except AttributeError, e:
- continue
- manifest[chrom] = max(manifest.get(chrom,0),line.end)
- if not lastchrom == chrom:
- if current_file:
- current_file.close()
- current_file = open( os.path.join( out_path, "%s" % chrom), "a" )
- print >> current_file, "\t".join(line)
- lastchrom = chrom
- if current_file:
- current_file.close()
- return manifest
-
-if __name__ == "__main__":
- options, args = doc_optparse.parse( __doc__ )
- try:
- chr_col_1, start_col_1, end_col_1, strand_col_1 = [int(x)-1 for x in options.cols1.split(',')]
- in_fname, out_path = args
- except:
- doc_optparse.exception()
-
- # Sort through a tempfile first
- temp_file = tempfile.NamedTemporaryFile(mode="r")
- environ['LC_ALL'] = 'POSIX'
- commandline = "sort -f -n -k %d -k %d -k %d -o %s %s" % (chr_col_1+1,start_col_1+1,end_col_1+1, temp_file.name, in_fname)
- errorcode, stdout = commands.getstatusoutput(commandline)
-
- temp_file.seek(0)
- interval = io.NiceReaderWrapper( temp_file,
- chrom_col=chr_col_1,
- start_col=start_col_1,
- end_col=end_col_1,
- strand_col=strand_col_1,
- fix_strand=True )
- manifest = divide( interval, out_path )
- manifest_file = open( os.path.join( out_path, "manifest.tab" ),"w" )
- for key, value in manifest.items():
- print >> manifest_file, "%s\t%s" % (key, value)
- manifest_file.close()
- temp_file.close()
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/interval.xml
--- a/lib/galaxy/datatypes/indexers/interval.xml
+++ /dev/null
@@ -1,14 +0,0 @@
-<tool id="INDEXER_Interval_0" name="Index Interval for Track Viewer">
- <!-- Used internally to generate track indexes -->
- <command interpreter="python">interval.py $input_dataset
- -1 ${input_dataset.metadata.chromCol},${input_dataset.metadata.startCol},${input_dataset.metadata.endCol},${input_dataset.metadata.strandCol}
- $store_path 2>&1
- </command>
- <inputs>
- <page>
- <param format="interval" name="input_dataset" type="data" label="Choose intervals"/>
- </page>
- </inputs>
- <help>
- </help>
-</tool>
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/interval_awk.xml
--- a/lib/galaxy/datatypes/indexers/interval_awk.xml
+++ /dev/null
@@ -1,16 +0,0 @@
-<tool id="INDEXER_Interval_0" name="Index Interval for Track Viewer">
- <!-- Used internally to generate track indexes -->
- <command interpreter="awk -f">interval.awk
- chrom=${input_dataset.metadata.chromCol} start=${input_dataset.metadata.startCol}
- end=${input_dataset.metadata.endCol} strand=${input_dataset.metadata.strandCol}
- storepath=${store_path}
- $input_dataset 2>&1
- </command>
- <inputs>
- <page>
- <param format="interval" name="input_dataset" type="data" label="Choose intervals"/>
- </page>
- </inputs>
- <help>
- </help>
-</tool>
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/wiggle.py
--- a/lib/galaxy/datatypes/indexers/wiggle.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Read a chromosome of wiggle data, and write it as a npy array, as
-well as averages over regions of progressively larger size in powers of 10
-"""
-
-from __future__ import division
-
-import sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
-import bx.wiggle
-from bx.cookbook import doc_optparse
-from bx import misc
-max2 = max
-pkg_resources.require("numpy>=1.2.1")
-from numpy import *
-import tempfile
-import os
-from galaxy.tracks.store import sanitize_name
-
-
-def write_chrom(max, out_base, instream):
-
- scores = zeros( max, float32 ) * nan
- # Fill array from wiggle
- for line in instream:
- line = line.rstrip("\n\r")
- (chrom, pos, val) = line.split("\t")
- pos, val = int(pos), float(val)
- scores[pos] = val
-
- # Write ra
- fname = "%s_%d" % ( out_base, 1 )
- save( fname, scores )
- os.rename( fname+".npy", fname )
-
- # Write average
- for window in 10, 100, 1000, 10000, 100000:
- input = scores.copy()
- size = len( input )
- input.resize( ( ( size / window ), window ) )
- masked = ma.masked_array( input, isnan( input ) )
- averaged = mean( masked, 1 )
- averaged.set_fill_value( nan )
- fname = "%s_%d" % ( out_base, window )
- save( fname, averaged.filled() )
- del masked, averaged
- os.rename( fname+".npy", fname )
-
-def main():
- max = int( 512*1024*1024 )
- # get chroms and lengths
- chroms = {}
- LEN = {}
- for (chrom, pos, val) in bx.wiggle.Reader( open(sys.argv[1],"r") ):
- chrom_file = chroms.get(chrom, None)
- if not chrom_file:
- chrom_file = chroms[chrom] = tempfile.NamedTemporaryFile()
- chrom_file.write("%s\t%s\t%s\n" % (chrom,pos,val))
- LEN[chrom] = max2( LEN.get(chrom,0), pos+1 )
- for chrom, stream in chroms.items():
- stream.seek(0)
- prefix = os.path.join(sys.argv[2], sanitize_name(chrom))
- write_chrom( LEN[chrom], prefix, stream )
-
- manifest_file = open( os.path.join( sys.argv[2], "manifest.tab" ),"w" )
- for key, value in LEN.items():
- print >> manifest_file, "%s\t%s" % (key, value)
- manifest_file.close()
-
-
-if __name__ == "__main__": main()
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/indexers/wiggle.xml
--- a/lib/galaxy/datatypes/indexers/wiggle.xml
+++ /dev/null
@@ -1,12 +0,0 @@
-<tool id="INDEXER_Wiggle_0" name="Index Wiggle for Track Viewer">
- <!-- Used internally to generate track indexes -->
- <command interpreter="python">wiggle.py $input_dataset $store_path 2>&1
- </command>
- <inputs>
- <page>
- <param format="wiggle" name="input_dataset" type="data" label="Choose wiggle"/>
- </page>
- </inputs>
- <help>
- </help>
-</tool>
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/datatypes/registry.py
--- a/lib/galaxy/datatypes/registry.py
+++ b/lib/galaxy/datatypes/registry.py
@@ -18,7 +18,6 @@
self.datatypes_by_extension = {}
self.mimetypes_by_extension = {}
self.datatype_converters = odict()
- self.datatype_indexers = odict()
# Converters defined in local datatypes_conf.xml
self.converters = []
# Converters defined in datatypes_conf.xml included
@@ -27,11 +26,6 @@
self.converter_deps = {}
self.available_tracks = []
self.set_external_metadata_tool = None
- # Indexers defined in local datatypes_conf.xml
- self.indexers = []
- # Indexers defined in datatypes_conf.xml included
- # in installed tool shed repositories.
- self.proprietary_indexers = []
self.sniff_order = []
self.upload_file_formats = []
# Map a display application id to a display application
@@ -39,10 +33,6 @@
self.converters_path_attr = None
# The 'default' converters_path defined in local datatypes_conf.xml
self.datatype_converters_path = None
- self.indexers_path_attr = None
- # The 'default' indexers_path defined in local datatypes_conf.xml
- self.datatype_indexers_path = None
- self.display_path_attr = None
# The 'default' display_path defined in local datatypes_conf.xml
self.display_applications_path = None
self.datatype_elems = []
@@ -63,11 +53,6 @@
self.datatype_converters_path = os.path.join( root_dir, self.converters_path_attr )
if not os.path.isdir( self.datatype_converters_path ):
raise ConfigurationError( "Directory does not exist: %s" % self.datatype_converters_path )
- if not self.datatype_indexers_path:
- self.indexers_path_attr = registration.get( 'indexers_path', 'lib/galaxy/datatypes/indexers' )
- self.datatype_indexers_path = os.path.join( root_dir, self.indexers_path_attr )
- if not os.path.isdir( self.datatype_indexers_path ):
- raise ConfigurationError( "Directory does not exist: %s" % self.datatype_indexers_path )
if not self.display_applications_path:
self.display_path_attr = registration.get( 'display_path', 'display_applications' )
self.display_applications_path = os.path.join( root_dir, self.display_path_attr )
@@ -134,11 +119,6 @@
self.proprietary_converters.append( ( converter_config, extension, target_datatype ) )
else:
self.converters.append( ( converter_config, extension, target_datatype ) )
- for indexer in elem.findall( 'indexer' ):
- # Build the list of datatype indexers for track building
- indexer_config = indexer.get( 'file', None )
- if indexer_config:
- self.indexers.append( (indexer_config, extension) )
for composite_file in elem.findall( 'composite_file' ):
# add composite files
name = composite_file.get( 'name', None )
@@ -393,26 +373,6 @@
toolbox.tools_by_id[ set_meta_tool.id ] = set_meta_tool
self.set_external_metadata_tool = set_meta_tool
self.log.debug( "Loaded external metadata tool: %s", self.set_external_metadata_tool.id )
- def load_datatype_indexers( self, toolbox, indexer_path=None ):
- """Adds indexers from self.indexers to the toolbox from app"""
- if indexer_path:
- # Load indexers defined by datatypes_conf.xml
- # included in installed tool shed repository.
- indexers = self.proprietary_indexers
- else:
- # Load indexers defined by local datatypes_conf.xml.
- indexers = self.indexers
- for elem in indexers:
- tool_config = elem[0]
- datatype = elem[1]
- if indexer_path:
- config_path = os.path.join( indexer_path, tool_config )
- else:
- config_path = os.path.join( self.datatype_indexers_path, tool_config )
- indexer = toolbox.load_tool( config_path )
- toolbox.tools_by_id[ indexer.id ] = indexer
- self.datatype_indexers[ datatype ] = indexer
- self.log.debug( "Loaded indexer: %s", indexer.id )
def get_converters_by_datatype(self, ext):
"""Returns available converters by source type"""
converters = odict()
@@ -425,18 +385,6 @@
if ext in self.datatype_converters.keys():
converters.update(self.datatype_converters[ext])
return converters
- def get_indexers_by_datatype( self, ext ):
- """Returns indexers based on datatype"""
- class_chain = list()
- source_datatype = type(self.get_datatype_by_extension(ext))
- for ext_spec in self.datatype_indexers.keys():
- datatype = type(self.get_datatype_by_extension(ext_spec))
- if issubclass( source_datatype, datatype ):
- class_chain.append( ext_spec )
- # Prioritize based on class chain
- ext2type = lambda x: self.get_datatype_by_extension(x)
- class_chain = sorted(class_chain, lambda x,y: issubclass(ext2type(x),ext2type(y)) and -1 or 1)
- return [self.datatype_indexers[x] for x in class_chain]
def get_converter_by_target_type(self, source_ext, target_ext):
"""Returns a converter based on source and target datatypes"""
converters = self.get_converters_by_datatype(source_ext)
@@ -494,17 +442,13 @@
converters_path_str = ' converters_path="%s"' % self.converters_path_attr
else:
converters_path_str = ''
- if self.indexers_path_attr:
- indexers_path_str = ' indexers_path="%s"' % self.indexers_path_attr
- else:
- indexers_path_str = ''
if self.display_path_attr:
display_path_str = ' display_path="%s"' % self.display_path_attr
else:
display_path_str = ''
os.write( fd, '<?xml version="1.0"?>\n' )
os.write( fd, '<datatypes>\n' )
- os.write( fd, '<registration%s%s%s>\n' % ( converters_path_str, indexers_path_str, display_path_str ) )
+ os.write( fd, '<registration%s%s>\n' % ( converters_path_str, display_path_str ) )
for elem in self.datatype_elems:
os.write( fd, '%s' % galaxy.util.xml_to_string( elem ) )
os.write( fd, '</registration>\n' )
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/tool_shed/__init__.py
--- a/lib/galaxy/tool_shed/__init__.py
+++ b/lib/galaxy/tool_shed/__init__.py
@@ -23,12 +23,9 @@
path_items = datatypes_config.split( 'repos' )
relative_install_dir = '%srepos/%s/%s/%s' % \
( path_items[0], tool_shed_repository.owner, tool_shed_repository.name, tool_shed_repository.installed_changeset_revision )
- converter_path, indexer_path = load_datatypes( self.app, datatypes_config, relative_install_dir )
+ converter_path = load_datatypes( self.app, datatypes_config, relative_install_dir )
if converter_path:
# Load proprietary datatype converters
self.app.datatypes_registry.load_datatype_converters( self.app.toolbox, converter_path=converter_path )
- if indexer_path:
- # Load proprietary datatype indexers
- self.app.datatypes_registry.load_datatype_indexers( self.app.toolbox, indexer_path=indexer_path )
# TODO: handle display_applications
\ No newline at end of file
diff -r 4fdceec512f5c38e1a8bdb03cba6afe77a6e8fef -r d48aa90428593ec9a1ede108aaf97a03b69a086f lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -109,7 +109,6 @@
Update the received metadata_dict with changes that have been applied
to the received datatypes_config. This method is used by the InstallManager,
which does not have access to trans.
- TODO: Handle converters, indexers, sniffers, etc...
"""
# Parse datatypes_config.
tree = ElementTree.parse( datatypes_config )
@@ -451,7 +450,6 @@
tree = util.parse_xml( datatypes_config )
datatypes_config_root = tree.getroot()
converter_path = None
- indexer_path = None
relative_path_to_datatype_file_name = None
datatype_files = datatypes_config_root.find( 'datatype_files' )
datatype_class_modules = []
@@ -502,7 +500,7 @@
log.debug( "Exception importing datatypes code file %s: %s" % ( str( relative_path_to_datatype_file_name ), str( e ) ) )
finally:
lock.release()
- # Handle data type converters and indexers.
+ # Handle data type converters.
for elem in registration.findall( 'datatype' ):
if not converter_path:
# If any of the <datatype> tag sets contain <converter> tags, set the converter_path
@@ -520,23 +518,7 @@
break
if converter_path:
break
- if not indexer_path:
- # If any of the <datatype> tag sets contain <indexer> tags, set the indexer_path
- # if it is not already set. This requires repsitories to place all indexers in the
- # same subdirectory within the repository hierarchy.
- for indexer in elem.findall( 'indexer' ):
- indexer_path = None
- indexer_config = indexer.get( 'file', None )
- if indexer_config:
- for root, dirs, files in os.walk( relative_install_dir ):
- if root.find( '.hg' ) < 0:
- for name in files:
- if name == indexer_config:
- indexer_path = root
- break
- if indexer_path:
- break
- if converter_path and indexer_path:
+ else:
break
# TODO: handle display_applications
else:
@@ -546,7 +528,7 @@
imported_modules = []
# Load proprietary datatypes
app.datatypes_registry.load_datatypes( root_dir=app.config.root, config=datatypes_config, imported_modules=imported_modules )
- return converter_path, indexer_path
+ return converter_path
def load_repository_contents( app, name, description, owner, changeset_revision, tool_path, repository_clone_url, relative_install_dir,
current_working_dir, tmp_name, tool_section=None, shed_tool_conf=None, new_install=True ):
# This method is used by the InstallManager, which does not have access to trans.
@@ -562,13 +544,10 @@
if 'datatypes_config' in metadata_dict:
datatypes_config = os.path.abspath( metadata_dict[ 'datatypes_config' ] )
# Load data types required by tools.
- converter_path, indexer_path = load_datatypes( app, datatypes_config, relative_install_dir )
+ converter_path = load_datatypes( app, datatypes_config, relative_install_dir )
if converter_path:
# Load proprietary datatype converters
app.datatypes_registry.load_datatype_converters( app.toolbox, converter_path=converter_path )
- if indexer_path:
- # Load proprietary datatype indexers
- app.datatypes_registry.load_datatype_indexers( app.toolbox, indexer_path=indexer_path )
# TODO: handle display_applications
if 'tools' in metadata_dict:
repository_tools_tups = []
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/ea42944229df/
changeset: ea42944229df
user: jgoecks
date: 2012-01-06 17:30:04
summary: Trackster: ensure all visible composite track tiles have the same height so that line tracks are rendered correctly.
affected #: 1 file
diff -r 85075ee87e1630104ee66f45e77ab2af2d28941c -r ea42944229df83e5db5227af945232ca564db6d5 static/scripts/trackster.js
--- a/static/scripts/trackster.js
+++ b/static/scripts/trackster.js
@@ -3144,11 +3144,14 @@
* Retrieves from cache, draws, or sets up drawing for a single tile. Returns either a Tile object or a
* jQuery.Deferred object that is fulfilled when tile can be drawn again.
*/
- draw_helper: function(force, width, tile_index, resolution, parent_element, w_scale, more_tile_data) {
+ draw_helper: function(force, width, tile_index, resolution, parent_element, w_scale, kwargs) {
var track = this,
key = this._gen_tile_cache_key(width, w_scale, tile_index),
tile_low = tile_index * DENSITY * resolution,
tile_high = tile_low + DENSITY * resolution;
+
+ // Init kwargs if necessary to avoid having to check if kwargs defined.
+ if (!kwargs) { kwargs = {}; }
// Check tile cache, if found show existing tile in correct position
var tile = (force ? undefined : track.tile_cache.get(key));
@@ -3178,7 +3181,7 @@
// If we can draw now, do so.
if ( can_draw_now ) {
// Set up and draw tile.
- extend(tile_data, more_tile_data);
+ extend(tile_data, kwargs[ 'more_tile_data' ] );
// HACK: this is FeatureTrack-specific.
// If track mode is Auto, determine mode and update.
@@ -3459,12 +3462,15 @@
this.action_icons.tools_icon.hide();
},
can_draw: Drawable.prototype.can_draw,
- draw_helper: function(force, width, tile_index, resolution, parent_element, w_scale, more_tile_data) {
+ draw_helper: function(force, width, tile_index, resolution, parent_element, w_scale, kwargs) {
// FIXME: this function is similar to TiledTrack.draw_helper -- can the two be merged/refactored?
var track = this,
key = this._gen_tile_cache_key(width, w_scale, tile_index),
tile_low = tile_index * DENSITY * resolution,
tile_high = tile_low + DENSITY * resolution;
+
+ // Init kwargs if necessary to avoid having to check if kwargs defined.
+ if (!kwargs) { kwargs = {}; }
// Check tile cache, if found show existing tile in correct position
var tile = (force ? undefined : track.tile_cache.get(key));
@@ -3503,7 +3509,7 @@
// If we can draw now, do so.
if ( can_draw_now ) {
// Set up and draw tile.
- extend(tile_data, more_tile_data);
+ extend(tile_data, kwargs[ 'more_tile_data' ] );
this.tile_predraw_init();
@@ -3517,7 +3523,7 @@
height = 0,
track_modes = [];
- // Set height to be the max height for all tracks. Also, record track modes.
+ // Get max height for all tracks and record track modes.
var track_canvas_height = 0;
for (var i = 0; i < this.drawables.length; i++, all_data_index += 2) {
track = this.drawables[i];
@@ -3536,9 +3542,12 @@
if (track_canvas_height > height) { height = track_canvas_height; }
}
+ //
// Draw all tracks on a single tile.
+ //
canvas.width = width;
- canvas.height = height;
+ // Height is specified in kwargs or is the height found above.
+ canvas.height = (kwargs['height'] ? kwargs['height'] : height);
all_data_index = 0;
var ctx = canvas.getContext('2d');
ctx.translate(this.left_offset, 0);
@@ -3605,7 +3614,22 @@
postdraw_actions: function(tiles, width, w_scale, clear_after) {
TiledTrack.prototype.postdraw_actions.call(this, tiles, width, w_scale, clear_after);
- // TODO: all tiles must be the same size in order to draw LineTracks.
+ // All tiles must be the same height in order to draw LineTracks, so redraw tiles as needed.
+ var max_height = -1;
+ for (var i = 0; i < tiles.length; i++) {
+ var height = tiles[i].html_elt.find("canvas").height();
+ if (height > max_height) {
+ max_height = height;
+ }
+ }
+
+ for (var i = 0; i < tiles.length; i++) {
+ var tile = tiles[i];
+ if (tile.html_elt.find("canvas").height() !== max_height) {
+ this.draw_helper(true, width, tile.index, tile.resolution, tile.html_elt.parent(), w_scale, { height: max_height } );
+ tile.html_elt.remove();
+ }
+ }
}
});
@@ -3905,7 +3929,7 @@
var tile = tiles[i];
if (tile.max_val !== global_max) {
tile.html_elt.remove();
- track.draw_helper(true, width, tile.index, tile.resolution, tile.html_elt.parent(), w_scale, { max: global_max });
+ track.draw_helper(true, width, tile.index, tile.resolution, tile.html_elt.parent(), w_scale, { more_tile_data: { max: global_max } } );
}
}
}
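In practical terms, the changeset above replaces draw_helper's trailing more_tile_data argument with a kwargs object, and postdraw_actions now uses that object to force every visible composite tile to the height of the tallest one. A minimal sketch of the new calling convention, using the same names that appear in the hunks above (illustrative only, not a drop-in patch):

// Extra tile data now travels inside kwargs rather than as the final positional argument:
track.draw_helper(true, width, tile.index, tile.resolution,
                  tile.html_elt.parent(), w_scale,
                  { more_tile_data: { max: global_max } });

// postdraw_actions finds the tallest rendered canvas and redraws any shorter tile
// at that height by passing kwargs.height through draw_helper:
var max_height = -1;
for (var i = 0; i < tiles.length; i++) {
    max_height = Math.max(max_height, tiles[i].html_elt.find("canvas").height());
}
for (var i = 0; i < tiles.length; i++) {
    var tile = tiles[i];
    if (tile.html_elt.find("canvas").height() !== max_height) {
        this.draw_helper(true, width, tile.index, tile.resolution,
                         tile.html_elt.parent(), w_scale, { height: max_height });
        tile.html_elt.remove();
    }
}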
https://bitbucket.org/galaxy/galaxy-central/changeset/24b1918b7a8a/
changeset: 24b1918b7a8a
user: jgoecks
date: 2012-01-06 17:41:15
summary: Trackster: update criteria for allowing composite tracks; any number of line tracks and a feature track can now be composited.
affected #: 1 file
diff -r ea42944229df83e5db5227af945232ca564db6d5 -r 24b1918b7a8a50482d210d84b76ef7e2071efba1 static/scripts/trackster.js
--- a/static/scripts/trackster.js
+++ b/static/scripts/trackster.js
@@ -1061,33 +1061,26 @@
// Determine if a composite track can be created. Current criteria:
// (a) all tracks are the same;
// OR
- // (b) there is a LineTrack and a FeatureTrack.
+ // (b) there is a single FeatureTrack.
//
/// All tracks the same?
- var can_composite = true,
- a_type = this.drawables[0].get_type();
+ var drawable,
+ same_type = true,
+ a_type = this.drawables[0].get_type(),
+ num_feature_tracks = 0;
for (var i = 0; i < this.drawables.length; i++) {
- if ( this.drawables[i].get_type() !== a_type ) {
+ drawable = this.drawables[i]
+ if (drawable.get_type() !== a_type) {
can_composite = false;
break;
}
+ if (drawable instanceof FeatureTrack) {
+ num_feature_tracks++;
+ }
}
- if (!can_composite) {
- if ( this.drawables.length == 2 &&
- (
- (this.drawables[0] instanceof FeatureTrack &&
- this.drawables[1] instanceof LineTrack)
- ||
- (this.drawables[0] instanceof LineTrack &&
- this.drawables[1] instanceof FeatureTrack)
- )
- )
- can_composite = true;
- }
-
- if (can_composite) {
+ if (same_type || num_feature_tracks === 1) {
this.action_icons.composite_icon.show();
}
else {
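The new criteria boil down to: show the composite icon if all tracks share a type, or if exactly one FeatureTrack is mixed in with any number of line tracks. Note that the loop in the hunk above still assigns can_composite = false even though the surrounding code now declares same_type; assuming same_type is what was intended there, the check reads roughly as follows (a sketch of that reading, not the committed code):

// Assumption: the loop is meant to clear same_type (the hunk still writes to the
// older can_composite name). The break is dropped so feature tracks keep being
// counted after a type mismatch.
var drawable,
    same_type = true,
    a_type = this.drawables[0].get_type(),
    num_feature_tracks = 0;
for (var i = 0; i < this.drawables.length; i++) {
    drawable = this.drawables[i];
    if (drawable.get_type() !== a_type) {
        same_type = false;
    }
    if (drawable instanceof FeatureTrack) {
        num_feature_tracks++;
    }
}
if (same_type || num_feature_tracks === 1) {
    this.action_icons.composite_icon.show();
} else {
    this.action_icons.composite_icon.hide();
    $(".tipsy").remove();
}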
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Trackster: enable composite tracks composed of a FeatureTrack and LineTrack. Refactor to always handle left_offset only in draw_helper.
by Bitbucket 06 Jan '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/85075ee87e16/
changeset: 85075ee87e16
user: jgoecks
date: 2012-01-06 01:46:49
summary: Trackster: enable composite tracks composed of a FeatureTrack and LineTrack. Refactor to always handle left_offset only in draw_helper.
affected #: 1 file
diff -r 999dc276912c8beda98b0a978d2ad945bd13c726 -r 85075ee87e1630104ee66f45e77ab2af2d28941c static/scripts/trackster.js
--- a/static/scripts/trackster.js
+++ b/static/scripts/trackster.js
@@ -1057,7 +1057,14 @@
this.request_draw();
},
update_icons: function() {
- // Only show composite icon if all tracks are the same type.
+ //
+ // Determine if a composite track can be created. Current criteria:
+ // (a) all tracks are the same;
+ // OR
+ // (b) there is a LineTrack and a FeatureTrack.
+ //
+
+ /// All tracks the same?
var can_composite = true,
a_type = this.drawables[0].get_type();
for (var i = 0; i < this.drawables.length; i++) {
@@ -1067,13 +1074,26 @@
}
}
+ if (!can_composite) {
+ if ( this.drawables.length == 2 &&
+ (
+ (this.drawables[0] instanceof FeatureTrack &&
+ this.drawables[1] instanceof LineTrack)
+ ||
+ (this.drawables[0] instanceof LineTrack &&
+ this.drawables[1] instanceof FeatureTrack)
+ )
+ )
+ can_composite = true;
+ }
+
if (can_composite) {
this.action_icons.composite_icon.show();
}
else {
this.action_icons.composite_icon.hide();
$(".tipsy").remove();
- }
+ }
},
/**
* Add composite track to group that includes all of group's tracks.
@@ -2930,7 +2950,8 @@
// Create copy.
var new_track = new this.constructor(this.name, this.view, container, this.hda_ldda, this.dataset_id, this.prefs,
this.filters, this.tool, this.data_manager);
- // Misc. init and return.
+ // Misc. init and return.
+ new_track.change_mode(this.mode);
new_track.enabled = this.enabled;
return new_track;
},
@@ -3178,7 +3199,9 @@
canvas.width = width;
canvas.height = height;
- var tile = track.draw_tile(tile_data, canvas, mode, resolution, tile_index, w_scale, seq_data);
+ var ctx = canvas.getContext('2d');
+ ctx.translate(this.left_offset, 0);
+ var tile = track.draw_tile(tile_data, ctx, mode, resolution, tile_index, w_scale, seq_data);
// Don't cache, show if no tile.
if (tile !== undefined) {
@@ -3208,14 +3231,14 @@
/**
* Draw a track tile.
* @param result result from server
- * @param canvas canvas to draw on
+ * @param ctx canvas context to draw on
* @param mode mode to draw in
* @param resolution view resolution
* @param tile_index index of tile to be drawn
* @param w_scale pixels per base
* @param ref_seq reference sequence data
*/
- draw_tile: function(result, canvas, mode, resolution, tile_index, w_scale, ref_seq) {
+ draw_tile: function(result, ctx, mode, resolution, tile_index, w_scale, ref_seq) {
console.log("Warning: TiledTrack.draw_tile() not implemented.");
},
/**
@@ -3356,12 +3379,13 @@
var CompositeTrack = function(name, view, container, drawables) {
// HACK: modes should be static class vars for most tracks and should update as
// needed for CompositeTracks
- this.display_modes = ["Histogram", "Line", "Filled", "Intensity"];
+ this.display_modes = drawables[0].display_modes;
TiledTrack.call(this, name, view, container);
// Init drawables; each drawable is a copy so that config/preferences
- // are independent of each other.
+ // are independent of each other. Also init left offset.
this.drawables = [];
+ this.left_offset = 0;
if (drawables) {
var
ids = [],
@@ -3370,6 +3394,11 @@
drawable = drawables[i];
ids.push(drawable.dataset_id);
this.drawables[i] = drawable.copy();
+
+ // Track's left offset is the max of all tracks.
+ if (drawable.left_offset > this.left_offset) {
+ this.left_offset = drawable.left_offset;
+ }
}
this.enabled = true;
}
@@ -3483,24 +3512,46 @@
tile_bounds = track._get_tile_bounds(tile_index, resolution),
tile_low = tile_bounds[0],
tile_high = tile_bounds[1],
- width = Math.ceil( (tile_high - tile_low) * w_scale ),
- // FIXME: need to set height to be max for all tracks.
- height = track.get_canvas_height(tile_data, track.mode, w_scale, width);
+ all_data_index = 0,
+ width = Math.ceil( (tile_high - tile_low) * w_scale ) + this.left_offset,
+ height = 0,
+ track_modes = [];
+
+ // Set height to be the max height for all tracks. Also, record track modes.
+ var track_canvas_height = 0;
+ for (var i = 0; i < this.drawables.length; i++, all_data_index += 2) {
+ track = this.drawables[i];
+ tile_data = all_data[ all_data_index ];
+
+ // HACK: this is FeatureTrack-specific.
+ // If track mode is Auto, determine mode and update.
+ var mode = track.mode;
+ if (mode === "Auto") {
+ mode = track.get_mode(tile_data);
+ track.update_auto_mode(mode);
+ }
+ track_modes.push(mode)
+
+ track_canvas_height = track.get_canvas_height(tile_data, mode, w_scale, width);
+ if (track_canvas_height > height) { height = track_canvas_height; }
+ }
// Draw all tracks on a single tile.
canvas.width = width;
canvas.height = height;
- var all_data_index = 0
+ all_data_index = 0;
+ var ctx = canvas.getContext('2d');
+ ctx.translate(this.left_offset, 0);
for (var i = 0; i < this.drawables.length; i++, all_data_index += 2) {
track = this.drawables[i];
tile_data = all_data[ all_data_index ];
seq_data = all_data[ all_data_index + 1 ];
- tile = track.draw_tile(tile_data, canvas, track.mode, resolution, tile_index, w_scale, seq_data);
+ tile = track.draw_tile(tile_data, ctx, track_modes[i], resolution, tile_index, w_scale, seq_data);
}
// Don't cache, show if no tile.
- track.tile_cache.set(key, tile);
- track.show_tile(tile, parent_element, w_scale);
+ this.tile_cache.set(key, tile);
+ this.show_tile(tile, parent_element, w_scale);
return tile;
}
@@ -3546,6 +3597,15 @@
track.prefs.min_value = min;
track.prefs.max_value = max;
}
+ },
+ /**
+ * Actions to be taken after draw has been completed. Draw is completed when all tiles have been
+ * drawn/fetched and shown.
+ */
+ postdraw_actions: function(tiles, width, w_scale, clear_after) {
+ TiledTrack.prototype.postdraw_actions.call(this, tiles, width, w_scale, clear_after);
+
+ // TODO: all tiles must be the same size in order to draw LineTracks.
}
});
@@ -3573,7 +3633,7 @@
/**
* Draw ReferenceTrack tile.
*/
- draw_tile: function(seq, canvas, mode, resolution, tile_index, w_scale) {
+ draw_tile: function(seq, ctx, mode, resolution, tile_index, w_scale) {
var track = this;
if (w_scale > this.view.canvas_manager.char_width_px) {
@@ -3581,7 +3641,7 @@
track.content_div.css("height", "0px");
return;
}
- var ctx = canvas.getContext("2d");
+ var canvas = ctx.canvas;
ctx.font = ctx.canvas.manager.default_font;
ctx.textAlign = "center";
seq = seq.data;
@@ -3720,7 +3780,7 @@
/**
* Draw LineTrack tile.
*/
- draw_tile: function(result, canvas, mode, resolution, tile_index, w_scale) {
+ draw_tile: function(result, ctx, mode, resolution, tile_index, w_scale) {
// FIXME: is this needed?
if (this.vertical_range === undefined) {
return;
@@ -3728,7 +3788,7 @@
// Paint onto canvas.
var
- ctx = canvas.getContext("2d"),
+ canvas = ctx.canvas,
tile_bounds = this._get_tile_bounds(tile_index, resolution),
tile_low = tile_bounds[0],
tile_high = tile_bounds[1],
@@ -4057,15 +4117,16 @@
/**
* Draw FeatureTrack tile.
* @param result result from server
- * @param canvas canvas to draw on
+ * @param cxt canvas context to draw on
* @param mode mode to draw in
* @param resolution view resolution
* @param tile_index index of tile to be drawn
* @param w_scale pixels per base
* @param ref_seq reference sequence data
*/
- draw_tile: function(result, canvas, mode, resolution, tile_index, w_scale, ref_seq) {
+ draw_tile: function(result, ctx, mode, resolution, tile_index, w_scale, ref_seq) {
var track = this,
+ canvas = ctx.canvas,
tile_bounds = this._get_tile_bounds(tile_index, resolution),
tile_low = tile_bounds[0],
tile_high = tile_bounds[1],
@@ -4092,9 +4153,9 @@
}
// Paint summary tree into canvas
var painter = new painters.SummaryTreePainter(result, tile_low, tile_high, this.prefs);
- var ctx = canvas.getContext("2d");
- // Deal with left_offset by translating.
- ctx.translate(left_offset, SUMMARY_TREE_TOP_PADDING);
+ // FIXME: it shouldn't be necessary to build in padding.
+ ctx.translate(0, SUMMARY_TREE_TOP_PADDING);
+ ctx.globalCompositeOperation = "darker";
painter.draw(ctx, canvas.width, canvas.height, w_scale);
return new SummaryTreeTile(track, tile_index, resolution, canvas, result.data, result.max);
}
@@ -4131,7 +4192,7 @@
var feature_mapper = null;
// console.log(( tile_low - this.view.low ) * w_scale, tile_index, w_scale);
- var ctx = canvas.getContext("2d");
+ ctx.globalCompositeOperation = "darker";
ctx.fillStyle = this.prefs.block_color;
ctx.font = ctx.canvas.manager.default_font;
ctx.textAlign = "right";
@@ -4140,7 +4201,6 @@
if (result.data) {
// Draw features.
slots = this.inc_slots[w_scale].slots;
- ctx.translate(left_offset, 0);
feature_mapper = painter.draw(ctx, canvas.width, canvas.height, w_scale, slots);
feature_mapper.translation = -left_offset;
}
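The left_offset refactor in this changeset can be summarized in a few lines: draw_helper translates the canvas context once, and each draw_tile implementation now receives that context (recovering the canvas from ctx.canvas when it needs dimensions) instead of translating on its own. A sketch of the pattern, using the names from the hunks above (illustrative only):

// In draw_helper: width already includes this.left_offset, and the translation
// is applied once before any track draws onto the shared canvas.
canvas.width = width;
canvas.height = height;
var ctx = canvas.getContext('2d');
ctx.translate(this.left_offset, 0);
var tile = track.draw_tile(tile_data, ctx, mode, resolution, tile_index, w_scale, seq_data);

// In a draw_tile implementation: take the context, recover the canvas as needed,
// and do not translate for left_offset again.
draw_tile: function(result, ctx, mode, resolution, tile_index, w_scale, ref_seq) {
    var canvas = ctx.canvas;
    // ... painting code uses ctx directly ...
}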
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.