galaxy-commits
Threads by month
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
November 2013
- 1 participant
- 208 discussions
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/842d13d7925c/
Changeset: 842d13d7925c
User: jgoecks
Date: 2013-11-29 17:41:26
Summary: Trackster: move static CSS settings out of JavaScript and into CSS file.
Affected #: 3 files
diff -r 8b613225f1def637af4774b860578a77a7fb0fe4 -r 842d13d7925c67764a0099a445b133392a0b0e13 static/scripts/viz/trackster/tracks.js
--- a/static/scripts/viz/trackster/tracks.js
+++ b/static/scripts/viz/trackster/tracks.js
@@ -32,8 +32,7 @@
var moveable = function(element, handle_class, container_selector, element_js_obj) {
// HACK: set default value for container selector.
container_selector = ".group";
- var css_border_props = {};
-
+
// Register element with its object.
html_elt_js_obj_dict[element.attr("id")] = element_js_obj;
@@ -130,14 +129,9 @@
html_elt_js_obj_dict[parent.attr("id")].move_drawable(this_obj, (d.deltaY > 0 ? i-1 : i) );
}
}).bind("dragstart", function() {
- css_border_props["border-top"] = element.css("border-top");
- css_border_props["border-bottom"] = element.css("border-bottom");
- $(this).css({
- "border-top": "1px solid blue",
- "border-bottom": "1px solid blue"
- });
+ $(this).addClass('dragging');
}).bind("dragend", function() {
- $(this).css(css_border_props);
+ $(this).removeClass('dragging');
});
};
@@ -236,7 +230,7 @@
this.container_div.append(this.header_div);
// Icons container.
- this.icons_div = $("<div/>").css("float", "left").hide().appendTo(this.header_div);
+ this.icons_div = $("<div/>").addClass('track-icons').hide().appendTo(this.header_div);
this.build_action_icons(this.action_icons_def);
this.header_div.append( $("<div style='clear: both'/>") );
@@ -930,7 +924,7 @@
// Top container for things that are fixed at the top
this.top_container = $("<div/>").addClass("top-container").appendTo(parent_element);
// Browser content, primary tracks are contained in here
- this.browser_content_div = $("<div/>").addClass("content").css("position", "relative").appendTo(parent_element);
+ this.browser_content_div = $("<div/>").addClass("content").appendTo(parent_element);
// Bottom container for things that are fixed at the bottom
this.bottom_container = $("<div/>").addClass("bottom-container").appendTo(parent_element);
// Label track fixed at top
@@ -967,7 +961,7 @@
this.default_overview_height = this.overview_box.height();
this.nav_controls = $("<div/>").addClass("nav-controls").appendTo(this.nav);
- this.chrom_select = $("<select/>").attr({ "name": "chrom"}).css("width", "15em").append("<option value=''>Loading</option>").appendTo(this.nav_controls);
+ this.chrom_select = $("<select/>").attr({ "name": "chrom"}).addClass('chrom-nav').append("<option value=''>Loading</option>").appendTo(this.nav_controls);
var submit_nav = function(e) {
if (e.type === "focusout" || (e.keyCode || e.which) === 13 || (e.keyCode || e.which) === 27 ) {
if ((e.keyCode || e.which) !== 27) { // Not escape key
@@ -1708,7 +1702,7 @@
// Add buttons for running on dataset, region.
var run_tool_row = $("<div>").addClass("param-row").appendTo(parent_div);
var run_on_dataset_button = $("<input type='submit'>").attr("value", "Run on complete dataset").appendTo(run_tool_row);
- var run_on_region_button = $("<input type='submit'>").attr("value", "Run on visible region").css("margin-left", "3em").appendTo(run_tool_row);
+ var run_on_region_button = $("<input type='submit'>").attr("value", "Run on visible region").appendTo(run_tool_row);
run_on_region_button.click( function() {
// Run tool to create new track.
self.run_on_region();
@@ -2027,7 +2021,10 @@
else if ( param.type === 'color' ) {
var
container_div = $("<div/>").appendTo(row),
- input = $('<input />').attr("id", id ).attr("name", id ).val( value ).css("float", "left")
+ input = $('<input/>').attr({
+ id: id,
+ name: id
+ }).val( value ).addClass('color-input')
.appendTo(container_div).click(function(e) {
// Hide other pickers.
$(".tooltip").removeClass( "in" );
@@ -2517,7 +2514,7 @@
},
build_container_div: function () {
- return $("<div/>").addClass('track').attr("id", "track_" + this.id).css("position", "relative");
+ return $("<div/>").addClass('track').attr("id", "track_" + this.id);
},
build_header_div: function() {
@@ -3532,7 +3529,6 @@
while ( position < view.high ) {
var screenPosition = ( position - view.low ) / range * width;
new_div.append( $("<div/>").addClass('label').text(commatize( position )).css( {
- // Reduce by one to account for border
left: screenPosition
}));
position += tickDistance;
diff -r 8b613225f1def637af4774b860578a77a7fb0fe4 -r 842d13d7925c67764a0099a445b133392a0b0e13 static/style/blue/trackster.css
--- a/static/style/blue/trackster.css
+++ b/static/style/blue/trackster.css
@@ -1,14 +1,14 @@
.viewport-container{overflow-x:hidden;overflow-y:auto;background:white}
.trackster-nav-container{width:100%;height:0;text-align:center}
.trackster-nav{padding:0 0;color:#333;font-weight:bold;background:#cccccc;position:relative;display:inline-block;top:-2em;background:transparent;border:none}
-.content{font:10px verdana}
+.chrom-nav{width:15em}
+.content{font:10px verdana;position:relative}
.nav-controls{text-align:center;padding:1px 0}.nav-controls input{margin:0 5px}
#zoom-in,#zoom-out{display:inline-block;height:16px;width:16px;margin-bottom:-3px;cursor:pointer}
#zoom-out{background:transparent url(../images/fugue/magnifier-zoom-out.png) center center no-repeat}
#zoom-in{margin-left:10px;background:transparent url(../images/fugue/magnifier-zoom.png) center center no-repeat}
.nav-input{font-size:12px;width:30em;z-index:2}
.location{display:inline-block;width:15em;margin:0 10px}
-.draghandle{margin-top:2px;cursor:move;float:left;background:transparent url(../images/visualization/draggable_horizontal.png) center center no-repeat;width:10px;height:12px}
.intro{z-index:2;margin-left:auto;margin-right:auto;color:#555;text-align:center;font-size:16px}.intro .action-button{background-color:#CCC;margin-top:10px;padding:1em;text-decoration:underline}
.overview{width:100%;margin:0px;color:white}
.overview-viewport{position:relative;height:14px;background:white;margin:0}
@@ -28,7 +28,7 @@
.overlay{position:absolute;left:0;top:0}
.track-tile{position:absolute;background:white}.track-tile canvas{position:relative;z-index:1}
.tile-message{border-bottom:solid 1px red;text-align:center;color:red;background-color:white}
-.track{border-bottom:1px solid #bbb}.track.error{background-color:#ECB4AF;background-image:none}
+.track{position:relative;border-bottom:1px solid #bbb}.track.error{background-color:#ECB4AF;background-image:none}
.track.nodata .track-content{background-color:white;background-image:none}
.track.pending .track-content{background-color:white;background-image:none}
.track-content{text-align:center;position:relative;min-height:20px;padding:0px 0px 1px 0px}
@@ -40,7 +40,8 @@
.right-float{float:right;margin-left:5px}
.top-labeltrack{position:relative;border-bottom:solid #999 1px}
.nav-labeltrack{border-top:solid #999 1px;border-bottom:solid #333 1px}
-input{font:10px verdana}input[type="submit"]{padding:0px;font-size:inherit}
+input{font:10px verdana}input[type="submit"]{padding:0px;margin-right:20px;font-size:inherit}
+input.color-input{float:left}
.dynamic-tool,.filters{padding-top:18px;padding-bottom:0.5em;margin-left:0.25em}
.dynamic-tool{width:410px}
.filters{float:left;margin:1em;width:60%;position:relative}
@@ -53,6 +54,7 @@
.param-label{float:left;font-weight:bold;padding-top:0.2em;width:50%}
.menu-button{margin:0px 4px 0px 4px}
.exclamation{background:transparent url(../images/fugue/exclamation.png) no-repeat;margin-left:5em}
+.track-icons{float:left}
.icon-button.bookmarks{background:transparent url('../images/fugue/bookmarks-bw.png') no-repeat}.icon-button.bookmarks:hover{background:transparent url('../images/fugue/bookmarks.png') no-repeat}
.icon-button.layers-stack{background:transparent url('../images/fugue/layers-stack-bw.png') no-repeat}.icon-button.layers-stack:hover{background:transparent url('../images/fugue/layers-stack.png') no-repeat}
.icon-button.hammer{background:transparent url('../images/fugue/hammer-bw.png') no-repeat}.icon-button.hammer:hover{background:transparent url('../images/fugue/hammer.png') no-repeat}
@@ -77,3 +79,5 @@
.feature-popup{position:absolute;z-index:2;padding:5px;font-size:10px;filter:alpha(opacity=80);background-repeat:no-repeat;background-image:url(../images/tipsy.gif);background-position:top center}
.feature-popup-inner{padding:5px 8px 4px 8px;background-color:black;color:white}
.zoom-area{position:absolute;top:0px;background-color:#ccf;opacity:0.5;z-index:2}
+.draghandle{margin-top:2px;cursor:move;float:left;background:transparent url(../images/visualization/draggable_horizontal.png) center center no-repeat;width:10px;height:12px}
+.dragging{border:1px solid blue}
diff -r 8b613225f1def637af4774b860578a77a7fb0fe4 -r 842d13d7925c67764a0099a445b133392a0b0e13 static/style/src/less/trackster.less
--- a/static/style/src/less/trackster.less
+++ b/static/style/src/less/trackster.less
@@ -31,8 +31,13 @@
border: none;
}
+.chrom-nav {
+ width: 15em;
+}
+
.content {
font: 10px verdana;
+ position: relative;
}
.nav-controls {
@@ -72,15 +77,6 @@
margin: 0 10px;
}
-.draghandle {
- margin-top: 2px;
- cursor: move;
- float: left;
- background: transparent url(../images/visualization/draggable_horizontal.png) center center no-repeat;
- width: 10px;
- height: 12px;
-}
-
.intro {
z-index: @overlay-index;
/* margin-top: 200px;*/
@@ -244,6 +240,8 @@
}
.track {
+ position: relative;
+
// Separates tracks.
border-bottom: 1px solid @separator-color;
@@ -340,8 +338,13 @@
// Override Galaxy defaults to make a smaller button.
&[type="submit"] {
padding: 0px;
+ margin-right: 20px;
font-size: inherit;
}
+
+ &.color-input {
+ float: left;
+ }
}
.dynamic-tool, .filters {
@@ -415,6 +418,10 @@
margin-left: 5em;
}
+.track-icons {
+ float: left
+}
+
// Defines icons that change from black/white to normal on hover. Note that the images
// are required to exist in the location specified.
// TODO: make images into spritemap.
@@ -523,4 +530,19 @@
background-color: #ccf;
opacity: 0.5;
z-index: @overlay-index;
+}
+
+.draghandle {
+ margin-top: 2px;
+ cursor: move;
+ float: left;
+ background: transparent url(../images/visualization/draggable_horizontal.png) center center no-repeat;
+ width: 10px;
+ height: 12px;
+}
+
+// Highlight for element(s) being dragged. This definition must be after definition of draggable
+// elements (e.g. track, group) in order to take precedence.
+.dragging {
+ border: 1px solid blue;
}
\ No newline at end of file
https://bitbucket.org/galaxy/galaxy-central/commits/c2d705f7b50a/
Changeset: c2d705f7b50a
User: jgoecks
Date: 2013-11-30 23:00:01
Summary: Trackster: (a) use colors to denote track state and (b) add and use function to show messages.
Affected #: 3 files
diff -r 842d13d7925c67764a0099a445b133392a0b0e13 -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 static/scripts/viz/trackster/tracks.js
--- a/static/scripts/viz/trackster/tracks.js
+++ b/static/scripts/viz/trackster/tracks.js
@@ -2667,23 +2667,35 @@
},
/**
+ * Remove visualization content and display message.
+ */
+ show_message: function(msg_html) {
+ this.tiles_div.remove();
+ return $('<span/>').addClass('message').html(msg_html).appendTo(this.content_div);
+ },
+
+ /**
* Initialize and draw the track.
*/
init: function(retry) {
+ // FIXME: track should have a 'state' attribute that is checked on load; this state attribute should be
+ // used in this function to determine what action(s) to take.
+
var track = this;
track.enabled = false;
track.tile_cache.clear();
track.data_manager.clear();
- track.tiles_div.css("height", "auto");
/*
if (!track.content_div.text()) {
track.content_div.text(DATA_LOADING);
}
*/
// Remove old track content (e.g. tiles, messages).
- track.tiles_div.text('').children().remove();
+ track.content_div.children().remove();
track.container_div.removeClass("nodata error pending");
-
+
+ track.tiles_div = $("<div/>").addClass("tiles").appendTo(track.content_div);
+
//
// Tracks with no dataset id are handled differently.
// FIXME: is this really necessary?
@@ -2705,16 +2717,16 @@
if (!result || result === "error" || result.kind === "error") {
// Dataset is in error state.
track.container_div.addClass("error");
- track.content_div.text(DATA_ERROR);
+ var msg_elt = track.show_message(DATA_ERROR);
if (result.message) {
// Add links to (a) show error and (b) try again.
- track.content_div.append(
+ msg_elt.append(
$("<a href='javascript:void(0);'></a>").text("View error").click(function() {
Galaxy.modal.show({title: "Trackster Error", body: "<pre>" + result.message + "</pre>", buttons : {'Close' : function() { Galaxy.modal.hide(); } } });
})
);
- track.content_div.append( $('<span/>').text(' ') );
- track.content_div.append(
+ msg_elt.append( $('<span/>').text(' ') );
+ msg_elt.append(
$("<a href='javascript:void(0);'></a>").text("Try again").click(function() {
track.init(true);
})
@@ -2723,15 +2735,15 @@
}
else if (result === "no converter") {
track.container_div.addClass("error");
- track.tiles_div.text(DATA_NOCONVERTER);
+ track.show_message(DATA_NOCONVERTER);
}
else if (result === "no data" || (result.data !== undefined && (result.data === null || result.data.length === 0))) {
track.container_div.addClass("nodata");
- track.tiles_div.text(DATA_NONE);
+ track.show_message(DATA_NONE);
}
else if (result === "pending") {
track.container_div.addClass("pending");
- track.tiles_div.html(DATA_PENDING);
+ track.show_message(DATA_PENDING);
//$("<img/>").attr("src", image_path + "/yui/rel_interstitial_loading.gif").appendTo(track.tiles_div);
setTimeout(function() { track.init(); }, track.data_query_wait);
}
diff -r 842d13d7925c67764a0099a445b133392a0b0e13 -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 static/style/blue/trackster.css
--- a/static/style/blue/trackster.css
+++ b/static/style/blue/trackster.css
@@ -20,7 +20,7 @@
.yaxislabel{z-index:2;position:absolute;right:20px}
.yaxislabel.bottom{bottom:2px}
.group-handle{cursor:move;float:left;background:#eee url('../images/tracks/block.png');width:12px;height:12px}
-.group{min-height:20px;border-top:1px solid #bbb;border-bottom:1px solid #bbb}.group>.track-header{position:relative;float:left}
+.group{min-height:20px;border-top:1px solid #888;border-bottom:1px solid #888}.group>.track-header{position:relative;float:left}
.track-header{height:16px;position:absolute;z-index:2;background-color:rgba(1,1,1,0.1);border-radius:5px;padding:0px 2px;text-align:left;margin:2px;}.track-header:hover{background-color:#DDDDDD}
.track-header .menubutton{margin-left:0px}
.track-name{float:left;margin-top:2px}
@@ -28,10 +28,10 @@
.overlay{position:absolute;left:0;top:0}
.track-tile{position:absolute;background:white}.track-tile canvas{position:relative;z-index:1}
.tile-message{border-bottom:solid 1px red;text-align:center;color:red;background-color:white}
-.track{position:relative;border-bottom:1px solid #bbb}.track.error{background-color:#ECB4AF;background-image:none}
-.track.nodata .track-content{background-color:white;background-image:none}
-.track.pending .track-content{background-color:white;background-image:none}
-.track-content{text-align:center;position:relative;min-height:20px;padding:0px 0px 1px 0px}
+.track{position:relative;border-bottom:1px solid #888}.track.error{background:#ECB4AF}
+.track.nodata{background:#EEEEEE}
+.track.pending{background:#FFFFCC}
+.track-content{text-align:center;position:relative;min-height:20px;padding:0px 0px 1px 0px}.track-content .message{position:relative;top:4px}
.loading{min-height:100px}
.label-track{font-size:10px;border:none;padding:0;margin:0;height:1.5em;overflow:hidden}.label-track .label-container{position:relative;height:1.3em}
.label-track .label{position:absolute;border-left:solid #999 1px;padding:1px;padding-bottom:2px;display:inline-block}
diff -r 842d13d7925c67764a0099a445b133392a0b0e13 -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 static/style/src/less/trackster.less
--- a/static/style/src/less/trackster.less
+++ b/static/style/src/less/trackster.less
@@ -3,7 +3,7 @@
@overlay-index: @base-index + 1;
@track-header-height: 16px;
@min-track-height: (@track-header-height + 4);
-@separator-color: #BBB;
+@separator-color: #888;
.viewport-container {
overflow-x: hidden;
@@ -246,22 +246,15 @@
border-bottom: 1px solid @separator-color;
&.error {
- background-color: #ECB4AF;
- background-image: none;
+ background: #ECB4AF;
}
&.nodata {
- .track-content {
- background-color: white;
- background-image: none;
- }
+ background: #EEEEEE;
}
&.pending {
- .track-content {
- background-color: white;
- background-image: none;
- }
+ background: #FFFFCC;
}
}
@@ -270,6 +263,12 @@
position: relative;
min-height: @min-track-height;
padding: 0px 0px 1px 0px;
+
+ .message {
+ position: relative;
+ // To vertically center message in track:
+ top: 4px;
+ }
}
.loading {
https://bitbucket.org/galaxy/galaxy-central/commits/9a5a8d18cd16/
Changeset: 9a5a8d18cd16
User: jgoecks
Date: 2013-11-30 23:00:44
Summary: Automated merge of local changesets with default branch.
Affected #: 14 files
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -3471,6 +3471,29 @@
def can_reinstall_or_activate( self ):
return self.deleted
+ @property
+ def installing( self ):
+ """
+ Used to determine if tool dependencies can denote this repository as
+ installing.
+ """
+ return self.status not in [ self.installation_status.DEACTIVATED,
+ self.installation_status.UNINSTALLED,
+ self.installation_status.ERROR,
+ self.installation_status.INSTALLED,
+ self.installation_status.NEW,
+ ]
+
+ @property
+ def installation_complete( self ):
+ """
+ Used to determine if tool dependency installations can proceed.
+ Installed artifacts must be available on disk.
+ """
+ return self.status in [ self.installation_status.DEACTIVATED,
+ self.installation_status.INSTALLED,
+ ]
+
def to_dict( self, view='collection', value_mapper=None ):
if value_mapper is None:
value_mapper = {}
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 lib/galaxy/webapps/galaxy/api/provenance.py
--- a/lib/galaxy/webapps/galaxy/api/provenance.py
+++ b/lib/galaxy/webapps/galaxy/api/provenance.py
@@ -58,6 +58,8 @@
for p in job.parameters:
out[p.name] = p.value
for in_d in job.input_datasets:
+ if not in_d.dataset:
+ continue
if follow:
out[in_d.name] = self._get_record(trans, in_d.dataset, follow)
else:
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 lib/galaxy/webapps/tool_shed/api/repository_revisions.py
--- a/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
+++ b/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
@@ -107,10 +107,9 @@
skip_tool_test = kwd.get( 'skip_tool_test', None )
if skip_tool_test is not None:
skip_tool_test = util.string_as_bool( skip_tool_test )
+ if skip_tool_test:
skipped_metadata_ids_subquery = select( [ trans.app.model.SkipToolTest.table.c.repository_metadata_id ] )
- if skip_tool_test:
- clause_list.append( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) )
- else:
+ if skipped_metadata_ids_subquery:
clause_list.append( not_( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) ) )
# Generate and execute the query.
try:
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -390,7 +390,7 @@
with lcd( current_dir ):
with settings( warn_only=True ):
for tarball_name in tarball_names:
- cmd = '''export PATH=$PATH:$R_HOME/bin && export R_LIBS=$INSTALL_DIR &&
+ cmd = '''PATH=$PATH:$R_HOME/bin; export PATH; R_LIBS=$INSTALL_DIR; export R_LIBS; &&
Rscript -e "install.packages(c('%s'),lib='$INSTALL_DIR', repos=NULL, dependencies=FALSE)"''' % ( str( tarball_name ) )
cmd = install_environment.build_command( td_common_util.evaluate_template( cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
@@ -432,24 +432,24 @@
gem, gem_version = ruby_package_tup
if os.path.isfile( gem ):
# we assume a local shipped gem file
- cmd = '''export PATH=$PATH:$RUBY_HOME/bin && export GEM_HOME=$INSTALL_DIR &&
+ cmd = '''PATH=$PATH:$RUBY_HOME/bin; export PATH; GEM_HOME=$INSTALL_DIR; export GEM_HOME;
gem install --local %s''' % ( gem )
elif gem.find( '://' ) != -1:
# We assume a URL to a gem file.
url = gem
gem_name = url.split( '/' )[ -1 ]
td_common_util.url_download( work_dir, gem_name, url, extract=False )
- cmd = '''export PATH=$PATH:$RUBY_HOME/bin && export GEM_HOME=$INSTALL_DIR &&
+ cmd = '''PATH=$PATH:$RUBY_HOME/bin; export PATH; GEM_HOME=$INSTALL_DIR; export GEM_HOME;
gem install --local %s ''' % ( gem_name )
else:
# gem file from rubygems.org with or without version number
if gem_version:
# version number was specified
- cmd = '''export PATH=$PATH:$RUBY_HOME/bin && export GEM_HOME=$INSTALL_DIR &&
+ cmd = '''PATH=$PATH:$RUBY_HOME/bin; export PATH; GEM_HOME=$INSTALL_DIR; export GEM_HOME;
gem install %s --version "=%s"''' % ( gem, gem_version)
else:
# no version number given
- cmd = '''export PATH=$PATH:$RUBY_HOME/bin && export GEM_HOME=$INSTALL_DIR &&
+ cmd = '''PATH=$PATH:$RUBY_HOME/bin; export PATH; GEM_HOME=$INSTALL_DIR; export GEM_HOME;
gem install %s''' % ( gem )
cmd = install_environment.build_command( td_common_util.evaluate_template( cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
@@ -490,7 +490,7 @@
for perl_package in perl_packages:
# If set to a true value then MakeMaker's prompt function will always
# return the default without waiting for user input.
- cmd = '''export PERL_MM_USE_DEFAULT=1 && '''
+ cmd = '''PERL_MM_USE_DEFAULT=1; export PERL_MM_USE_DEFAULT; '''
if perl_package.find( '://' ) != -1:
# We assume a URL to a gem file.
url = perl_package
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
@@ -40,6 +40,70 @@
raise Exception( message )
return None
+
+def find_complex_dependency_package( app, dependent_install_dir, required_install_dir, tool_shed_repository, required_repository, package_name, package_version, tool_dependencies_config ):
+ """
+ """
+ tool_dependencies = []
+ if not os.path.exists( dependent_install_dir ):
+ os.makedirs( dependent_install_dir )
+ env_file = None
+ if tool_dependencies_config:
+ required_td_tree, error_message = xml_util.parse_xml( tool_dependencies_config )
+ if required_td_tree:
+ required_td_root = required_td_tree.getroot()
+ for required_td_elem in required_td_root:
+ # Find the appropriate package name and version.
+ if required_td_elem.tag == 'package':
+ # <package name="bwa" version="0.5.9">
+ required_td_package_name = required_td_elem.get( 'name', None )
+ required_td_package_version = required_td_elem.get( 'version', None )
+ if required_td_package_name == package_name and required_td_package_version == package_version:
+ tool_dependency = tool_dependency_util.create_or_update_tool_dependency( app=app,
+ tool_shed_repository=tool_shed_repository,
+ name=package_name,
+ version=package_version,
+ type='package',
+ status=app.model.ToolDependency.installation_status.NEVER_INSTALLED,
+ set_status=True )
+
+ if required_repository.installing:
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.INSTALLING )
+ # What happens if dependent repository fails or is
+ # uninstalled during this process.
+ env_file = required_repository_package_env( app, package_name, package_version, required_repository )
+ if required_repository.installation_complete:
+ if not os.path.exists( env_file ):
+                                error_message = 'env.sh file %s for package %s in dependent repository could not be found. Required repository has status %s.' % ( package_name, env_file, required_repository.status )
+ tool_dependency = tool_dependency_util.handle_tool_dependency_installation_error( app,
+ tool_dependency,
+ error_message,
+ remove_installation_path=False )
+ else:
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.INSTALLED )
+ else:
+                            # Eek - handling tool dependencies for a dependent
+                            # repository that is not installed.
+ #
+                            # Go ahead and return the env.sh file - Galaxy will
+ # proceed to create an invalid symbolic link.
+                            # This is subtly different than the previous
+                            # behavior which would have recreated an env.sh
+                            # from the required repository's
+ # tool_dependencies.xml but since it was not
+ # installed all of the values inside would be
+ # invalid path modifications. Either way, this file
+ # is junk until the required repository is
+ # installed properly.
+ pass
+ tool_dependencies.append( tool_dependency )
+ return tool_dependencies, env_file
+
+
def get_absolute_path_to_file_in_repository( repo_files_dir, file_name ):
"""Return the absolute path to a specified disk file contained in a repository."""
stripped_file_name = strip_path( file_name )
@@ -91,6 +155,7 @@
text = common_util.tool_shed_get( app, tool_shed_url, url )
return text
+
def handle_complex_repository_dependency_for_package( app, elem, package_name, package_version, tool_shed_repository ):
handled_tool_dependencies = []
tool_shed = elem.attrib[ 'toolshed' ]
@@ -125,8 +190,7 @@
tool_dependency_version=package_version )
# Set this dependent repository's tool dependency env.sh file with a path to the required repository's installed tool dependency package.
# We can get everything we need from the discovered installed required_repository.
- if required_repository.status in [ app.model.ToolShedRepository.installation_status.DEACTIVATED,
- app.model.ToolShedRepository.installation_status.INSTALLED ]:
+ if required_repository.installation_complete:
if not os.path.exists( required_repository_package_install_dir ):
print 'Missing required tool dependency directory %s' % str( required_repository_package_install_dir )
repo_files_dir = required_repository.repo_files_directory( app )
@@ -156,22 +220,26 @@
required_repository_owner,
required_repository_changeset_revision )
config_to_use = tmp_filename
- tool_dependencies, actions_dict = populate_actions_dict( app=app,
- dependent_install_dir=dependent_install_dir,
- required_install_dir=required_repository_package_install_dir,
- tool_shed_repository=tool_shed_repository,
- required_repository=required_repository,
- package_name=package_name,
- package_version=package_version,
- tool_dependencies_config=config_to_use )
+
+ tool_dependencies, package_env_sh_file = find_complex_dependency_package(
+ app=app,
+ dependent_install_dir=dependent_install_dir,
+ required_install_dir=required_repository_package_install_dir,
+ tool_shed_repository=tool_shed_repository,
+ required_repository=required_repository,
+ package_name=package_name,
+ package_version=package_version,
+ tool_dependencies_config=config_to_use
+ )
+ if package_env_sh_file:
+ os.symlink( package_env_sh_file, os.path.join( dependent_install_dir, "env.sh" ) )
if tmp_filename:
try:
os.remove( tmp_filename )
except:
pass
for tool_dependency in tool_dependencies:
- # Install and build the package via fabric and update the tool_dependency record accordingly..
- tool_dependency = install_and_build_package_via_fabric( app, tool_dependency, actions_dict )
+ tool_dependency = __mark_tool_dependency_installed( app, tool_dependency)
handled_tool_dependencies.append( tool_dependency )
else:
message = "Unable to locate required tool shed repository named %s owned by %s with revision %s." % \
@@ -179,113 +247,6 @@
raise Exception( message )
return handled_tool_dependencies
-def handle_set_environment_entry_for_package( app, install_dir, tool_shed_repository, package_name, package_version, elem, required_repository ):
- """
- Populate a list of actions for creating an env.sh file for a dependent repository. The received elem is the <package> tag set associated
- with the tool-dependencies.xml file for one of the received tool_shed_repository's repository dependency.
- """
- action_dict = {}
- actions = []
- tool_dependencies = []
- for package_elem in elem:
- if package_elem.tag == 'install':
- # Create the new tool_dependency record in the database.
- tool_dependency = tool_dependency_util.create_or_update_tool_dependency( app=app,
- tool_shed_repository=tool_shed_repository,
- name=package_name,
- version=package_version,
- type='package',
- status=app.model.ToolDependency.installation_status.NEVER_INSTALLED,
- set_status=True )
- # Get the installation method version from a tag like: <install version="1.0">
- package_install_version = package_elem.get( 'version', '1.0' )
- if package_install_version == '1.0':
- # Update the tool dependency's status.
- tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
- tool_dependency=tool_dependency,
- status=app.model.ToolDependency.installation_status.INSTALLING )
- # Since the required tool dependency is installed for a repository dependency, we first need to inspect the <actions> tag set to find
- # the <action type="set_environment"> tag.
- env_var_dicts = []
- for actions_elem in package_elem:
- for action_elem in actions_elem:
- action_type = action_elem.get( 'type', 'shell_command' )
- if action_type == 'set_environment':
- # <action type="set_environment">
- # <environment_variable name="PYTHONPATH" action="append_to">$INSTALL_DIR/lib/python</environment_variable>
- # <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable>
- # </action>
- for env_elem in action_elem:
- if env_elem.tag == 'environment_variable':
- env_var_dict = td_common_util.create_env_var_dict( env_elem, tool_dependency_install_dir=install_dir )
- if env_var_dict:
- if env_var_dict not in env_var_dicts:
- env_var_dicts.append( env_var_dict )
- elif action_type == 'setup_virtualenv':
- # Add the virtualenv's site-packages to PYTHONPATH and bin to PATH. This is a bit hackish.
- site_packages_command = "%s -c 'import os, sys; print os.path.join(sys.prefix, \"lib\", \"python\" + sys.version[:3], \"site-packages\")'" % os.path.join( install_dir, "venv", "bin", "python" )
- output = fabric_util.handle_command( app, tool_dependency, install_dir, site_packages_command, return_output=True )
- if output.return_code:
- log.error( 'Tool dependency %s includes a setup_virtualenv action but venv python is broken: ' % \
- ( str( tool_dependency.name ), str( output.stderr ) ) )
- elif not os.path.exists( output.stdout ):
- log.error( "virtualenv's site-packages directory '%s' does not exist", str( output.stdout ) )
- else:
- env_var_dicts.append( dict( name="PYTHONPATH", action="prepend_to", value=output.stdout ) )
- env_var_dicts.append( dict( name="PATH", action="prepend_to", value=os.path.join( install_dir, 'venv', 'bin' ) ) )
- if env_var_dicts:
- if required_repository.status in [ app.model.ToolShedRepository.installation_status.INSTALLED,
- app.model.ToolShedRepository.installation_status.DEACTIVATED ]:
- # Handle the case where we have an installed required repository due to the prior_installation_required = True
- # setting in the received tool_shed_repository's tool_dependencies.xml file and the required repository's
- # tool_dependencies.xml file may include the use of the $ENV[] variable inheritance feature. To handle this,
- # we will replace the current "value" entries in each env_var_dict with the actual path taken from the env.sh
- # file generated for the installed required repository. Each env_var_dict currently looks something like this:
- # {'action': 'append_to', 'name': 'LD_LIBRARY_PATH', 'value': '$BOOST_ROOT_DIR/lib/'}
- # We'll read the contents of the received required_repository's env.sh file and replace the 'value' entry of
- # each env_var_dict with the associated value in the env.sh file.
- new_env_var_dicts = []
- env_sh_file_dir = tool_dependency_util.get_tool_dependency_install_dir( app=app,
- repository_name=required_repository.name,
- repository_owner=required_repository.owner,
- repository_changeset_revision=required_repository.installed_changeset_revision,
- tool_dependency_type='package',
- tool_dependency_name=package_name,
- tool_dependency_version=package_version )
- env_sh_file_path = os.path.join( env_sh_file_dir, 'env.sh' )
- if os.path.exists( env_sh_file_path ):
- for i, line in enumerate( open( env_sh_file_path, 'r' ) ):
- env_var_dict = env_var_dicts[ i ]
- action = env_var_dict.get( 'action', None )
- name = env_var_dict.get( 'name', None )
- value = env_var_dict.get( 'value', None )
- if action and name and value:
- new_value = parse_env_shell_entry( action, name, value, line )
- env_var_dict[ 'value' ] = new_value
- new_env_var_dicts.append( env_var_dict )
- else:
- error_message = 'Invalid file %s specified, ignoring set_environment_for_install action.' % str( env_sh_file_path )
- tool_dependency = tool_dependency_util.handle_tool_dependency_installation_error( app,
- tool_dependency,
- error_message,
- remove_installation_path=False )
- action_dict[ 'environment_variable' ] = new_env_var_dicts
- else:
- action_dict[ 'environment_variable' ] = env_var_dicts
- actions.append( ( 'set_environment', action_dict ) )
- if tool_dependency.status not in [ app.model.ToolDependency.installation_status.ERROR,
- app.model.ToolDependency.installation_status.INSTALLED ]:
- # Update the tool dependency's status.
- tool_dependency = \
- tool_dependency_util.set_tool_dependency_attributes( app,
- tool_dependency=tool_dependency,
- status=app.model.ToolDependency.installation_status.INSTALLED )
- # Accumulate processed tool dependencies to return to the caller.
- tool_dependencies.append( tool_dependency )
- else:
- raise NotImplementedError( 'Only install version 1.0 is currently supported (i.e., change your tag to be <install version="1.0">).' )
- return tool_dependencies, actions
- return tool_dependencies, actions
def install_and_build_package_via_fabric( app, tool_dependency, actions_dict ):
sa_session = app.model.context.current
@@ -301,17 +262,10 @@
tool_dependency,
error_message,
remove_installation_path=False )
- if tool_dependency.status not in [ app.model.ToolDependency.installation_status.ERROR,
- app.model.ToolDependency.installation_status.INSTALLED ]:
- log.debug( 'Changing status for tool dependency %s from %s to %s.' % \
- ( str( tool_dependency.name ), str( tool_dependency.status ), str( app.model.ToolDependency.installation_status.INSTALLED ) ) )
- tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
- tool_dependency=tool_dependency,
- status=app.model.ToolDependency.installation_status.INSTALLED,
- error_message=None,
- remove_from_disk=False )
+ tool_dependency = __mark_tool_dependency_installed( app, tool_dependency)
return tool_dependency
+
def install_package( app, elem, tool_shed_repository, tool_dependencies=None ):
# The value of tool_dependencies is a partial or full list of ToolDependency records associated with the tool_shed_repository.
sa_session = app.model.context.current
@@ -821,60 +775,22 @@
tool_dependency = install_and_build_package_via_fabric( app, tool_dependency, actions_dict )
return tool_dependency
-def parse_env_shell_entry( action, name, value, line ):
- new_value = value
- var_name = '$%s' % name
- tmp_value = line.split( '=' )[ 1 ]
- if action == 'prepend_to':
- # PATH=/test/package_rdkit_2012_12/62ebd7bb637a/rdkit/bin:$PATH; export PATH
- new_value = tmp_value.split( ':%s' % var_name )[ 0 ]
- elif action == 'set_to':
- # RDBASE=test/package_rdkit_2012_12/62ebd7bb637a/rdkit; export RDBASE
- new_value = tmp_value.split( ';' )[ 0 ]
- elif action == 'append_to':
- # LD_LIBRARY_PATH=$LD_LIBRARY_PATH:test/package_rdkit_2012_12/62ebd7bb637a/rdkit/lib/; export LD_LIBRARY_PATH
- new_value = tmp_value.split( ':' )[ 1 ]
- new_value = new_value.split( ';' )[ 0 ]
- return new_value
-def populate_actions_dict( app, dependent_install_dir, required_install_dir, tool_shed_repository, required_repository, package_name, package_version, tool_dependencies_config ):
+# TODO: Move to tool_dependency_util?
+def required_repository_package_env( app, package_name, package_version, required_repository ):
"""
- Populate an actions dictionary that can be sent to fabric_util.install_and_build_package. This method handles the scenario where a tool_dependencies.xml
- file defines a complex repository dependency. In this case, the tool dependency package will be installed in a separate repository and the tool dependency
- defined for the dependent repository will use an environment_variable setting defined in it's env.sh file to locate the required package. This method
- basically does what the install_via_fabric method does, but restricts it's activity to the <action type="set_environment"> tag set within the required
- repository's tool_dependencies.xml file.
+ Return path to env.sh file in required repository if the required repository has been installed.
"""
- sa_session = app.model.context.current
- if not os.path.exists( dependent_install_dir ):
- os.makedirs( dependent_install_dir )
- actions_dict = dict( install_dir=dependent_install_dir )
- if package_name:
- actions_dict[ 'package_name' ] = package_name
- tool_dependencies = []
- action_dict = {}
- if tool_dependencies_config:
- required_td_tree, error_message = xml_util.parse_xml( tool_dependencies_config )
- if required_td_tree:
- required_td_root = required_td_tree.getroot()
- for required_td_elem in required_td_root:
- # Find the appropriate package name and version.
- if required_td_elem.tag == 'package':
- # <package name="bwa" version="0.5.9">
- required_td_package_name = required_td_elem.get( 'name', None )
- required_td_package_version = required_td_elem.get( 'version', None )
- if required_td_package_name==package_name and required_td_package_version==package_version:
- tool_dependencies, actions = handle_set_environment_entry_for_package( app=app,
- install_dir=required_install_dir,
- tool_shed_repository=tool_shed_repository,
- package_name=package_name,
- package_version=package_version,
- elem=required_td_elem,
- required_repository=required_repository )
- if actions:
- actions_dict[ 'actions' ] = actions
- break
- return tool_dependencies, actions_dict
+ env_sh_file_dir = tool_dependency_util.get_tool_dependency_install_dir( app=app,
+ repository_name=required_repository.name,
+ repository_owner=required_repository.owner,
+ repository_changeset_revision=required_repository.installed_changeset_revision,
+ tool_dependency_type='package',
+ tool_dependency_name=package_name,
+ tool_dependency_version=package_version )
+ env_sh_file_path = os.path.join( env_sh_file_dir, 'env.sh' )
+ return env_sh_file_path
+
def run_proprietary_fabric_method( app, elem, proprietary_fabfile_path, install_dir, package_name=None, **kwd ):
"""
@@ -1019,3 +935,17 @@
for arg in args:
parts.append( arg.strip( '/' ) )
return '/'.join( parts )
+
+
+# TODO: Move to tool_dependency_util?
+def __mark_tool_dependency_installed( app, tool_dependency ):
+ if tool_dependency.status not in [ app.model.ToolDependency.installation_status.ERROR,
+ app.model.ToolDependency.installation_status.INSTALLED ]:
+ log.debug( 'Changing status for tool dependency %s from %s to %s.' % \
+ ( str( tool_dependency.name ), str( tool_dependency.status ), str( app.model.ToolDependency.installation_status.INSTALLED ) ) )
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.INSTALLED,
+ error_message=None,
+ remove_from_disk=False )
+ return tool_dependency
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -95,18 +95,25 @@
invalid_revisions = 0
records_checked = 0
# Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway.
- skip_metadata_ids = select( [ app.model.SkipToolTest.table.c.repository_metadata_id ] )
+ print '# -------------------------------------------------------------------------------------------'
+ print '# The skip_tool_test setting has been set for the following repository revisions, so they will not be tested.'
+ skip_metadata_ids = []
+ for skip_tool_test in app.sa_session.query( app.model.SkipToolTest ):
+ print '# repository_metadata_id: %s, changeset_revision: %s' % \
+ ( str( skip_tool_test.repository_metadata_id ), str( skip_tool_test.initial_changeset_revision ) )
+ print '# Reason: %s' % str( skip_tool_test.comment )
+ skip_metadata_ids.append( skip_tool_test.repository_metadata_id )
# Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
# since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
# because it's redundant to test a revision that a user can't install.
for repository_metadata in app.sa_session.query( app.model.RepositoryMetadata ) \
.filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
app.model.RepositoryMetadata.table.c.includes_tools == True,
- app.model.RepositoryMetadata.table.c.do_not_test == False,
- not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
- # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows,
- # or tests incorrectly labeled as invalid.
+ app.model.RepositoryMetadata.table.c.do_not_test == False ) ):
+ # Initialize some items.
missing_test_components = []
+ revision_has_test_data = False
+ testable_revision = False
repository = repository_metadata.repository
records_checked += 1
# Check the next repository revision.
@@ -117,15 +124,14 @@
repository = repository_metadata.repository
if repository.id not in checked_repository_ids:
checked_repository_ids.append( repository.id )
- if verbosity >= 1:
- print '# -------------------------------------------------------------------------------------------'
- print '# Now checking revision %s of %s, owned by %s.' % ( changeset_revision, name, owner )
+ print '# -------------------------------------------------------------------------------------------'
+ print '# Checking revision %s of %s owned by %s.' % ( changeset_revision, name, owner )
+ if repository_metadata.id in skip_metadata_ids:
+ print '# Skipping revision %s of %s owned by %s because the skip_tool_test setting has been set.' % ( changeset_revision, name, owner )
# If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
# only repositories that contain tools.
tool_dicts = metadata.get( 'tools', None )
if tool_dicts is not None:
- has_test_data = False
- testable_revision = False
# Clone the repository up to the changeset revision we're checking.
repo_dir = repository.repo_path( app )
repo = hg.repository( suc.get_configured_ui(), repo_dir )
@@ -139,29 +145,30 @@
if '.hg' in dirs:
dirs.remove( '.hg' )
if 'test-data' in dirs:
- has_test_data = True
+ revision_has_test_data = True
test_data_path = os.path.join( root, dirs[ dirs.index( 'test-data' ) ] )
break
- if verbosity >= 1:
- if has_test_data:
- print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
- else:
- print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
- print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
- ( changeset_revision, name, owner )
- # Inspect each tool_dict for defined functional tests.
+ if revision_has_test_data:
+ print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ else:
+ print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
+ ( changeset_revision, name, owner )
+ # Inspect each tool_dict for defined functional tests. If there are no tests, this tool should not be tested, since the
+ # tool functional tests only report failure if the test itself fails, not if it's missing or undefined. Filtering out those
+ # repositories at this step will reduce the number of "false negatives" the automated functional test framework produces.
for tool_dict in tool_dicts:
+ failure_reason = ''
+ problem_found = False
+ tool_has_defined_tests = False
+ tool_has_test_files = False
+ missing_test_files = []
tool_count += 1
tool_id = tool_dict[ 'id' ]
tool_version = tool_dict[ 'version' ]
tool_guid = tool_dict[ 'guid' ]
- if verbosity >= 2:
- print "# Checking tool ID '%s' in changeset revision %s of %s." % \
- ( tool_id, changeset_revision, name )
- # If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
- # not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
- # automated functional test framework produces.
- tool_has_tests = False
+ if verbosity >= 1:
+ print "# Checking tool ID '%s' in changeset revision %s of %s." % ( tool_id, changeset_revision, name )
defined_test_dicts = tool_dict.get( 'tests', None )
if defined_test_dicts is not None:
# We need to inspect the <test> tags because the following tags...
@@ -182,33 +189,26 @@
outputs = defined_test_dict.get( 'outputs', [] )
if inputs and outputs:
# At least one tool within the repository has a valid <test> tag.
- tool_has_tests = True
+ tool_has_defined_tests = True
break
- if tool_has_tests:
- if verbosity >= 2:
- print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
- ( tool_id, changeset_revision, name )
+ if tool_has_defined_tests:
+ print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
+ ( tool_id, changeset_revision, name )
has_tests += 1
else:
- if verbosity >= 2:
- print '# No functional tests defined for %s.' % tool_id
+ print '# No functional tests defined for %s.' % tool_id
no_tests += 1
- failure_reason = ''
- problem_found = False
- missing_test_files = []
- has_test_files = False
- if tool_has_tests and has_test_data:
+ if tool_has_defined_tests and revision_has_test_data:
missing_test_files = check_for_missing_test_files( defined_test_dicts, test_data_path )
if missing_test_files:
- if verbosity >= 2:
- print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
- ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
+ print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
+ ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
else:
- has_test_files = True
- if not has_test_data:
+ tool_has_test_files = True
+ if not revision_has_test_data:
failure_reason += 'Repository does not have a test-data directory. '
problem_found = True
- if not tool_has_tests:
+ if not tool_has_defined_tests:
failure_reason += 'Functional test definitions missing for %s. ' % tool_id
problem_found = True
if missing_test_files:
@@ -219,23 +219,22 @@
if problem_found:
if test_errors not in missing_test_components:
missing_test_components.append( test_errors )
- if tool_has_tests and has_test_files:
+ if tool_has_defined_tests and tool_has_test_files:
+ print '# Revision %s of %s owned by %s is a testable revision.' % ( changeset_revision, name, owner )
testable_revision = True
# Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
if os.path.exists( work_dir ):
shutil.rmtree( work_dir )
if not missing_test_components:
valid_revisions += 1
- if verbosity >= 1:
- print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
else:
invalid_revisions += 1
+ print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
if verbosity >= 1:
- print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
- if verbosity >= 2:
- for invalid_test in missing_test_components:
- if 'missing_components' in invalid_test:
- print '# %s' % invalid_test[ 'missing_components' ]
+ for missing_test_component in missing_test_components:
+ if 'missing_components' in missing_test_component:
+ print '# %s' % missing_test_component[ 'missing_components' ]
if not info_only:
# Get or create the list of tool_test_results dictionaries.
if repository_metadata.tool_test_results is not None:
@@ -249,8 +248,9 @@
# install_and_test_tool_sed_repositories.sh script which will further populate the tool_test_results_dict.
tool_test_results_dict = tool_test_results_dicts[ 0 ]
if len( tool_test_results_dict ) <= 1:
- # We can re-use the mostly empty tool_test_results_dict for this run, but we need to eliminate it from
- # the list of tool_test_results_dicts since it will be re-inserted later.
+ # We can re-use the mostly empty tool_test_results_dict for this run because it is either empty or it contains only
+ # a test_environment entry. If we use it we need to temporarily eliminate it from the list of tool_test_results_dicts
+ # since it will be re-inserted later.
tool_test_results_dict = tool_test_results_dicts.pop( 0 )
else:
# The latest tool_test_results_dict has been populated with the results of a test run, so it cannot be used.
@@ -259,6 +259,11 @@
# Create a new dictionary for the most recent test run.
tool_test_results_dict = {}
test_environment_dict = tool_test_results_dict.get( 'test_environment', {} )
+ # Add the current time as the approximate time that this test run occurs. A similar value will also be
+ # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
+ # may be configured to store multiple test run results, so each must be associated with a time stamp.
+ now = time.strftime( "%Y-%m-%d %H:%M:%S" )
+ test_environment_dict[ 'time_tested' ] = now
test_environment_dict[ 'tool_shed_database_version' ] = get_database_version( app )
test_environment_dict[ 'tool_shed_mercurial_version' ] = __version__.version
test_environment_dict[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
@@ -281,7 +286,13 @@
print "# and it is not the latest downloadable revision."
repository_metadata.do_not_test = True
repository_metadata.tools_functionally_correct = False
- repository_metadata.missing_test_components = True
+ if not testable_revision:
+ # Even though some tools may be missing test components, it may be possible to test other tools. Since the
+ # install and test framework filters out repositories marked as missing test components, we'll set it only if
+ # no tools can be tested.
+ print '# Setting missing_test_components to True for revision %s of %s owned by %s because all tools are missing test components.' % \
+ ( changeset_revision, name, owner )
+ repository_metadata.missing_test_components = True
tool_test_results_dict[ 'missing_test_components' ] = missing_test_components
# Store only the configured number of test runs.
num_tool_test_results_saved = int( app.config.num_tool_test_results_saved )
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 lib/tool_shed/util/container_util.py
--- a/lib/tool_shed/util/container_util.py
+++ b/lib/tool_shed/util/container_util.py
@@ -1052,45 +1052,51 @@
"""Return a folder hierarchy containing tool dependencies."""
# This container is displayed only in the tool shed.
if tool_test_results_dicts:
- multiple_tool_test_results_dicts = len( tool_test_results_dicts ) > 1
test_results_dict_id = 0
folder_id += 1
tool_test_results_root_folder = Folder( id=folder_id, key='root', label='root', parent=None )
+ multiple_tool_test_results_dicts = len( tool_test_results_dicts ) > 1
+ if multiple_tool_test_results_dicts:
+ folder_id += 1
+ test_runs_folder = Folder( id=folder_id, key='test_runs', label='Test runs', parent=tool_test_results_root_folder )
+ tool_test_results_root_folder.folders.append( test_runs_folder )
for index, tool_test_results_dict in enumerate( tool_test_results_dicts ):
test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
- if test_environment_dict is not None:
- time_tested = test_environment_dict.get( 'time_tested', 'unknown_%d' % index )
- if multiple_tool_test_results_dicts:
- folder_id += 1
- containing_folder = Folder( id=folder_id, key='test_results', label=time_tested, parent=tool_test_results_root_folder )
- else:
- containing_folder = tool_test_results_root_folder
- test_results_dict_id += 1
- #folder_id += 1
- #test_results_folder = Folder( id=folder_id, key='test_results', label='Automated test environment', parent=containing_folder )
- #containing_folder.folders.append( test_results_folder )
+ if test_environment_dict is None:
+ # The test_environment entry will exist only if the preparation script check_repositories_for_functional_tests.py
+ # was executed prior to the ~/install_and_test_repositories/functional_tests.py script. If that did not occur,
+ # we'll display test results, but the test_environment entries will not be complete.
+ test_environment_dict = {}
+ time_tested = test_environment_dict.get( 'time_tested', 'unknown_%d' % index )
+ if multiple_tool_test_results_dicts:
folder_id += 1
- folder = Folder( id=folder_id, key='test_environment', label='Automated test environment', parent=containing_folder )
- containing_folder.folders.append( folder )
- architecture = test_environment_dict.get( 'architecture', '' )
- galaxy_database_version = test_environment_dict.get( 'galaxy_database_version', '' )
- galaxy_revision = test_environment_dict.get( 'galaxy_revision', '' )
- python_version = test_environment_dict.get( 'python_version', '' )
- system = test_environment_dict.get( 'system', '' )
- tool_shed_database_version = test_environment_dict.get( 'tool_shed_database_version', '' )
- tool_shed_mercurial_version = test_environment_dict.get( 'tool_shed_mercurial_version', '' )
- tool_shed_revision = test_environment_dict.get( 'tool_shed_revision', '' )
- test_environment = TestEnvironment( id=1,
- architecture=architecture,
- galaxy_database_version=galaxy_database_version,
- galaxy_revision=galaxy_revision,
- python_version=python_version,
- system=system,
- time_tested=time_tested,
- tool_shed_database_version=tool_shed_database_version,
- tool_shed_mercurial_version=tool_shed_mercurial_version,
- tool_shed_revision=tool_shed_revision )
- folder.test_environments.append( test_environment )
+ containing_folder = Folder( id=folder_id, key='test_results', label=time_tested, parent=test_runs_folder )
+ test_runs_folder.folders.append( containing_folder )
+ else:
+ containing_folder = tool_test_results_root_folder
+ test_results_dict_id += 1
+ folder_id += 1
+ folder = Folder( id=folder_id, key='test_environment', label='Automated test environment', parent=containing_folder )
+ containing_folder.folders.append( folder )
+ architecture = test_environment_dict.get( 'architecture', '' )
+ galaxy_database_version = test_environment_dict.get( 'galaxy_database_version', '' )
+ galaxy_revision = test_environment_dict.get( 'galaxy_revision', '' )
+ python_version = test_environment_dict.get( 'python_version', '' )
+ system = test_environment_dict.get( 'system', '' )
+ tool_shed_database_version = test_environment_dict.get( 'tool_shed_database_version', '' )
+ tool_shed_mercurial_version = test_environment_dict.get( 'tool_shed_mercurial_version', '' )
+ tool_shed_revision = test_environment_dict.get( 'tool_shed_revision', '' )
+ test_environment = TestEnvironment( id=1,
+ architecture=architecture,
+ galaxy_database_version=galaxy_database_version,
+ galaxy_revision=galaxy_revision,
+ python_version=python_version,
+ system=system,
+ time_tested=time_tested,
+ tool_shed_database_version=tool_shed_database_version,
+ tool_shed_mercurial_version=tool_shed_mercurial_version,
+ tool_shed_revision=tool_shed_revision )
+ folder.test_environments.append( test_environment )
not_tested_dict = tool_test_results_dict.get( 'not_tested', {} )
if not_tested_dict:
folder_id += 1
@@ -1120,6 +1126,9 @@
containing_folder.folders.append( folder )
failed_test_id = 0
for failed_tests_dict in failed_tests_dicts:
+ # TODO: Remove this when invalid test data is eliminated.
+ if isinstance( failed_tests_dict, list ):
+ failed_tests_dict = failed_tests_dict[ 0 ]
failed_test_id += 1
failed_test = FailedTest( id=failed_test_id,
stderr=failed_tests_dict.get( 'stderr', '' ),
@@ -1153,6 +1162,7 @@
key='installation_errors',
label='Installation errors',
parent=containing_folder )
+ containing_folder.installation_errors.append( installation_error_base_folder )
if current_repository_errors:
folder_id += 1
subfolder = Folder( id=folder_id,
@@ -1203,10 +1213,6 @@
error_message=tool_dependency_error_dict.get( 'error_message', '' ) )
subfolder.tool_dependency_installation_errors.append( tool_dependency_installation_error )
installation_error_base_folder.folders.append( subfolder )
- containing_folder.installation_errors.append( installation_error_base_folder )
- #containing_folder.folders.append( containing_folder )
- if multiple_tool_test_results_dicts:
- tool_test_results_root_folder.folders.append( containing_folder )
else:
tool_test_results_root_folder = None
return folder_id, tool_test_results_root_folder
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -363,6 +363,8 @@
"""
error_message = ''
latest_revision_only = '-check_all_revisions' not in sys.argv
+ if latest_revision_only:
+ log.debug( 'Testing is restricted to the latest downloadable revision in this test run.' )
repository_dicts = []
params = urllib.urlencode( dict( do_not_test='false',
downloadable='true',
@@ -374,13 +376,12 @@
baseline_repository_dicts, error_message = json_from_url( api_url )
if error_message:
return None, error_message
- log.debug( 'The Tool Shed API returned %d metadata revisions for installation and testing.' % len( baseline_repository_dicts ) )
for baseline_repository_dict in baseline_repository_dicts:
# We need to get some details from the tool shed API, such as repository name and owner, to pass on to the
# module that will generate the install methods.
repository_dict, error_message = get_repository_dict( galaxy_tool_shed_url, baseline_repository_dict )
if error_message:
- log.debug( 'Error getting additional details about repository %s from the API: %s' % ( str( name ), error_message ) )
+ log.debug( 'Error getting additional details from the API: %s' % str( error_message ) )
else:
# Don't test empty repositories.
changeset_revision = baseline_repository_dict[ 'changeset_revision' ]
@@ -393,10 +394,6 @@
repository_dicts.append( dict( repository_dict.items() + baseline_repository_dict.items() ) )
else:
repository_dicts.append( dict( repository_dict.items() + baseline_repository_dict.items() ) )
- if latest_revision_only:
- skipped_previous = ' and metadata revisions that are not the most recent'
- else:
- skipped_previous = ''
if testing_single_repository:
tsr_name = testing_single_repository[ 'name' ]
tsr_owner = testing_single_repository[ 'owner' ]
@@ -414,14 +411,15 @@
return repository_dicts, error_message
return repository_dicts, error_message
# Get a list of repositories to test from the tool shed specified in the GALAXY_INSTALL_TEST_TOOL_SHED_URL environment variable.
- log.debug( "Retrieved %d repositories from the API url: %s." % ( len( repository_dicts ), str( galaxy_tool_shed_url ) ) )
- if '-list_repositories' in sys.argv:
- log.debug( "Repositories for testing:" )
- for repository_dict in repository_dicts:
- log.debug( "Revision %s of repository %s owned by %s" % \
- ( str( repository_dict.get( 'changeset_revision', None ) ), \
- str( repository_dict.get( 'name', None ) ), \
- str( repository_dict.get( 'owner', None ) ) ) )
+ log.debug( "The Tool Shed's API url...\n%s" % str( api_url ) )
+ log.debug( "...retrieved %d repository revisions for testing." % len( repository_dicts ) )
+ #if '-list_repositories' in sys.argv:
+ log.debug( "Repository revisions for testing:" )
+ for repository_dict in repository_dicts:
+ log.debug( "Revision %s of repository %s owned by %s" % \
+ ( str( repository_dict.get( 'changeset_revision', None ) ), \
+ str( repository_dict.get( 'name', None ) ), \
+ str( repository_dict.get( 'owner', None ) ) ) )
return repository_dicts, error_message
def get_static_settings():
@@ -503,7 +501,7 @@
params = dict( tools_functionally_correct=False,
do_not_test=False,
test_install_error=True )
- # TODO: do something usefule with response_dict
+ # TODO: do something useful with response_dict
response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
# Since this repository is missing components, we do not want to test it, so deactivate it or uninstall it.
# The deactivate flag is set to True if the environment variable GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES
@@ -618,33 +616,39 @@
name = str( repository_dict[ 'name' ] )
owner = str( repository_dict[ 'owner' ] )
changeset_revision = str( repository_dict[ 'changeset_revision' ] )
+ log.debug( "Processing revision %s of repository %s owned by %s..." % ( changeset_revision, name, owner ) )
# Populate the tool_test_results_dict.
tool_test_results_dicts, error_message = get_tool_test_results_dicts( galaxy_tool_shed_url, encoded_repository_metadata_id )
- # The preparation script ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered
- # information in the 'test_environment' and possibly the 'missing_test_components' entries of the first
- # tool_test_results_dict in the list of tool_test_results_dicts. We need to be careful to not lose this
- # information.
- tool_test_results_dict = tool_test_results_dicts.pop( 0 )
- # See if this repository should be skipped for any reason.
- skip_this_repository = False
- skip_reason = None
- for exclude_dict in exclude_list:
- reason = exclude_dict[ 'reason' ]
- exclude_repositories = exclude_dict[ 'repositories' ]
- if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
- skip_this_repository = True
- skip_reason = reason
- break
- if skip_this_repository:
- tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
- params = dict( tools_functionally_correct=False,
- do_not_test=False )
- # TODO: do something usefule with response_dict
- response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
- log.debug( "Not testing revision %s of repository %s owned by %s." % ( changeset_revision, name, owner ) )
+ if error_message:
+ log.debug( error_message )
else:
- if error_message:
- log.debug( error_message )
+ # The preparation script ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered
+ # information in the 'test_environment' and possibly the 'missing_test_components' entries of the first
+ # tool_test_results_dict in the list of tool_test_results_dicts. We need to be careful to not lose this
+ # information.
+ try:
+ tool_test_results_dict = tool_test_results_dicts.pop( 0 )
+ except Exception, e:
+ log.exception( "Invalid list of tool_test_results_dicts %s: %s" % ( str( tool_test_results_dicts ), str( e ) ) )
+ continue
+ # See if this repository should be skipped for any reason.
+ this_repository_is_in_the_exclude_lost = False
+ skip_reason = None
+ for exclude_dict in exclude_list:
+ reason = exclude_dict[ 'reason' ]
+ exclude_repositories = exclude_dict[ 'repositories' ]
+ if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
+ this_repository_is_in_the_exclude_lost = True
+ skip_reason = reason
+ break
+ if this_repository_is_in_the_exclude_lost:
+ tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
+ params = dict( tools_functionally_correct=False,
+ do_not_test=False )
+ # TODO: do something useful with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
+ log.debug( "Not testing revision %s of repository %s owned by %s because it is in the exclude list for this test run." % \
+ ( changeset_revision, name, owner ) )
else:
test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
test_environment_dict = get_test_environment( test_environment_dict )
@@ -678,7 +682,7 @@
params = dict( tools_functionally_correct=False,
test_install_error=True,
do_not_test=False )
- # TODO: do something usefule with response_dict
+ # TODO: do something useful with response_dict
response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
try:
if deactivate:
@@ -736,7 +740,7 @@
params = dict( tools_functionally_correct=False,
do_not_test=False,
test_install_error=False )
- # TODO: do something usefule with response_dict
+ # TODO: do something useful with response_dict
response_dict = register_test_result( galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
@@ -1066,13 +1070,14 @@
exclude_count += 1
exclude_dict[ 'repositories' ].append( repository_tuple )
exclude_list.append( exclude_dict )
- log.debug( '%s repositories will be excluded from testing...' % str( exclude_count ) )
- if '-list_repositories' in sys.argv:
- for name, owner, changeset_revision in exclude_verbose:
- if changeset_revision:
- log.debug( 'Repository %s owned by %s, changeset revision %s.' % ( str( name ), str( owner ), str( changeset_revision ) ) )
- else:
- log.debug( 'Repository %s owned by %s, all revisions.' % ( str( name ), str( owner ) ) )
+ log.debug( 'The xml document %s containing the exclude list defines the following %s repositories to be excluded from testing...' % \
+ ( str( xml_filename ), str( exclude_count ) ) )
+ #if '-list_repositories' in sys.argv:
+ for name, owner, changeset_revision in exclude_verbose:
+ if changeset_revision:
+ log.debug( 'Repository %s owned by %s, changeset revision %s.' % ( str( name ), str( owner ), str( changeset_revision ) ) )
+ else:
+ log.debug( 'Repository %s owned by %s, all revisions.' % ( str( name ), str( owner ) ) )
return exclude_list
def register_test_result( url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params ):
@@ -1202,17 +1207,18 @@
params = dict( tools_functionally_correct=True,
do_not_test=False,
test_install_error=False )
- # TODO: do something usefule with response_dict
+ # TODO: do something useful with response_dict
response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
log.debug( 'Revision %s of repository %s installed and passed functional tests.' % ( str( changeset_revision ), str( name ) ) )
else:
- tool_test_results_dict[ 'failed_tests' ].append( extract_log_data( result, from_tool_test=True ) )
+ # The extract_log_data() netod returns a list.
+ tool_test_results_dict[ 'failed_tests' ] = extract_log_data( result, from_tool_test=True )
results_dict[ 'repositories_failed' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
set_do_not_test = not is_latest_downloadable_revision( galaxy_tool_shed_url, repository_dict )
params = dict( tools_functionally_correct=False,
test_install_error=False,
do_not_test=str( set_do_not_test ) )
- # TODO: do something usefule with response_dict
+ # TODO: do something useful with response_dict
response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
log.debug( 'Revision %s of repository %s installed successfully but did not pass functional tests.' % \
( str( changeset_revision ), str( name ) ) )
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 tool_conf.xml.main
--- a/tool_conf.xml.main
+++ b/tool_conf.xml.main
@@ -280,8 +280,6 @@
<section id="peak_calling" name="NGS: Peak Calling"><tool file="peak_calling/macs_wrapper.xml" /><tool file="peak_calling/sicer_wrapper.xml" />
- <tool file="genetrack/genetrack_indexer.xml" />
- <tool file="genetrack/genetrack_peak_prediction.xml" /></section><section id="ngs-rna-tools" name="NGS: RNA Analysis"><label id="rna_seq" text="RNA-seq" />
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 tool_conf.xml.sample
--- a/tool_conf.xml.sample
+++ b/tool_conf.xml.sample
@@ -340,8 +340,6 @@
<tool file="peak_calling/macs_wrapper.xml" /><tool file="peak_calling/sicer_wrapper.xml" /><tool file="peak_calling/ccat_wrapper.xml" />
- <tool file="genetrack/genetrack_indexer.xml" />
- <tool file="genetrack/genetrack_peak_prediction.xml" /></section><section id="ngs-simulation" name="NGS: Simulation"><tool file="ngs_simulation/ngs_simulation.xml" />
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 tools/genetrack/genetrack_indexer.py
--- a/tools/genetrack/genetrack_indexer.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Wraps genetrack.scripts.tabs2genetrack so the tool can be executed from Galaxy.
-
-usage: %prog input output shift
-"""
-
-import sys, shutil, os
-from galaxy import eggs
-import pkg_resources
-pkg_resources.require( "GeneTrack" )
-
-from genetrack.scripts import tabs2genetrack
-from genetrack import logger
-
-if __name__ == "__main__":
- import os
- os.environ[ 'LC_ALL' ] = 'C'
- #os.system( 'export' )
-
- parser = tabs2genetrack.option_parser()
-
- options, args = parser.parse_args()
-
- # uppercase the format
- options.format = options.format.upper()
-
- if options.format not in ('BED', 'GFF'):
- sys.stdout = sys.stderr
- parser.print_help()
- sys.exit(-1)
-
- logger.disable(options.verbosity)
-
- # missing file names
- if not (options.inpname and options.outname and options.format):
- parser.print_help()
- sys.exit(-1)
- else:
- tabs2genetrack.transform(inpname=options.inpname, outname=options.outname,\
- format=options.format, shift=options.shift, index=options.index, options=options)
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 tools/genetrack/genetrack_indexer.xml
--- a/tools/genetrack/genetrack_indexer.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<tool id="bed2genetrack" name="GeneTrack indexer" version="1.0.1">
-
- <description>on a BED file</description>
-
- <command interpreter="python">
- genetrack_indexer.py -i $input -o $output -s $shift -v 0 -f BED -x
- </command>
-
- <inputs>
-
- <param format="bed6" name="input" type="data" help="Input data">
- <label>Select input bed file</label>
- </param>
-
- <param name="shift" size="4" type="integer" value="0" help="distance in basepairs">
- <label>Shift at 5' end</label>
- </param>
-
- <!-- this parameter is currently not used, may not be feasible to use it
- <param name="coverage" type="select" label="Full coverage">
- <option value="no">NO</option>
- <option value="yes">YES</option>
- </param>
- -->
-
- </inputs>
-
- <outputs>
- <data format="genetrack" name="output" />
- </outputs>
-
-<help>
-**Help**
-
-This tool will create a visualization of the bed file that is selected.
-
-**Parameters**
-
-- **Shift at 5' end** should be used when the location of interest is at a fixed distance from
- the 5' end for **all sequenced fragments**!
-
- For example if the sequenced sample consists
- mono-nucleosomal DNA (146bp) we should expect that
- each nucleosome midpoint is located at 73 bp from the 5' end of the fragment.
- Therefore we would enter 73 as the shift parameter. Once corrected the reads
- on each strand will coincide and indicate the actual midpoints
- of the nucleosomes.
-
- When shifting the averaging process in GeneTrack is able correct for longer or shorter
- than expected fragment sizes as long as the errors are reasonably random.
-
-See http://genetrack.bx.psu.edu/ for more information on GeneTrack.
-
-------
-
-**Citation**
-
-For the underlying tool, please cite `Albert I, Wachi S, Jiang C, Pugh BF. GeneTrack--a genomic data processing and visualization framework. Bioinformatics. 2008 May 15;24(10):1305-6. <http://www.ncbi.nlm.nih.gov/pubmed/18388141>`_
-
-If you use this tool in Galaxy, please cite Blankenberg D, et al. *In preparation.*
-
-</help>
-
-</tool>
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 tools/genetrack/genetrack_peak_prediction.py
--- a/tools/genetrack/genetrack_peak_prediction.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Wraps genetrack.scripts.peakpred so the tool can be executed from Galaxy.
-
-usage: %prog input output level sigma mode exclusion strand
-"""
-
-import sys
-from galaxy import eggs
-import pkg_resources
-pkg_resources.require( "GeneTrack" )
-
-from genetrack.scripts import peakpred
-from genetrack import logger
-
-if __name__ == "__main__":
-
- parser = peakpred.option_parser()
-
- options, args = parser.parse_args()
-
- logger.disable(options.verbosity)
-
- from genetrack import conf
-
- # trigger test mode
- if options.test:
- options.inpname = conf.testdata('test-hdflib-input.gtrack')
- options.outname = conf.testdata('predictions.bed')
-
- # missing input file name
- if not options.inpname and not options.outname:
- parser.print_help()
- else:
- print 'Sigma = %s' % options.sigma
- print 'Minimum peak = %s' % options.level
- print 'Peak-to-peak = %s' % options.exclude
-
- peakpred.predict(options.inpname, options.outname, options)
diff -r c2d705f7b50a51a9a3e8153da634ea61e9c3fec7 -r 9a5a8d18cd16ed3331ba996de21ed4d4dfa93da9 tools/genetrack/genetrack_peak_prediction.xml
--- a/tools/genetrack/genetrack_peak_prediction.xml
+++ /dev/null
@@ -1,69 +0,0 @@
-<tool id="predict2genetrack" name="Peak predictor">
-
- <description>on GeneTrack index</description>
-
- <command interpreter="python">
- genetrack_peak_prediction.py -i $input -o $output --level=$level --sigma=$sigma --mode=$mode --exclusion=$exclusion --strand=$strand -v 0 -x
- </command>
-
- <inputs>
-
- <param format="genetrack" name="input" type="data" help="Input data" label="Select input data"/>
-
- <param name="method" type="select" label="Smoothing method" help="The function used to average nearby read values">
- <option value="gauss">Gaussian kernel</option>
- <!-- <option value="yes">Moving averages</option> -->
- </param>
-
- <param name="sigma" size="4" type="integer" value="10" label="Smoothing factor" help="The interval over which each read is averaged" />
-
-
- <param name="mode" type="select" label="Peak prediction" help="Peak prediction method">
- <option value="nolap">Maximal non-overlapping</option>
- <!-- <option value="above">Above a threshold</option> -->
- <option value="all">All peaks</option>
- </param>
-
- <param name="exclusion" type="integer" size="4" value="0" help="The minimal distance between peaks" label="Peak-to-peak distance">
- </param>
-
- <param name="level" size="4" type="float" value="1" label="Threshold" help="Return only peaks above this value" />
-
- <param name="strand" type="select" label="Strands" help="Combine strand data or predict on each strand separately">
- <option value="all">Merge strands</option>
- <!-- <option value="yes1">Above a threshold</option> -->
- <option value="two">Separate strands</option>
- </param>
-
- </inputs>
-
- <outputs>
- <data format="bed" name="output" />
- </outputs>
-
-<help>
-**Help**
-
-This tool will generate genome wide peak prediction from an index file.
-
-**Parameters**
-
-- **Smoothing method** the function used to average nearby values
-
-- **Smoothing value** the factor used in the method
-
-- **Prediction method** the function used to average nearby values
-
-See http://genetrack.bx.psu.edu/ for more information on GeneTrack.
-
-------
-
-**Citation**
-
-For the underlying tool, please cite `Albert I, Wachi S, Jiang C, Pugh BF. GeneTrack--a genomic data processing and visualization framework. Bioinformatics. 2008 May 15;24(10):1305-6. <http://www.ncbi.nlm.nih.gov/pubmed/18388141>`_
-
-If you use this tool in Galaxy, please cite Blankenberg D, et al. *In preparation.*
-
-</help>
-
-</tool>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/63006e23bf7d/
Changeset: 63006e23bf7d
Branch: stable
User: natefoo
Date: 2013-11-30 18:46:53
Summary: Remove broken GeneTrack tools.
Affected #: 6 files
diff -r 3b6cdcd9759595335cacd06b960efa6d96faaf72 -r 63006e23bf7d32e18033e0e59c0c194259935f6d tool_conf.xml.main
--- a/tool_conf.xml.main
+++ b/tool_conf.xml.main
@@ -284,8 +284,6 @@
<section id="peak_calling" name="NGS: Peak Calling"><tool file="peak_calling/macs_wrapper.xml" /><tool file="peak_calling/sicer_wrapper.xml" />
- <tool file="genetrack/genetrack_indexer.xml" />
- <tool file="genetrack/genetrack_peak_prediction.xml" /></section><section id="ngs-rna-tools" name="NGS: RNA Analysis"><label id="rna_seq" text="RNA-seq" />
diff -r 3b6cdcd9759595335cacd06b960efa6d96faaf72 -r 63006e23bf7d32e18033e0e59c0c194259935f6d tool_conf.xml.sample
--- a/tool_conf.xml.sample
+++ b/tool_conf.xml.sample
@@ -345,8 +345,6 @@
<tool file="peak_calling/macs_wrapper.xml" /><tool file="peak_calling/sicer_wrapper.xml" /><tool file="peak_calling/ccat_wrapper.xml" />
- <tool file="genetrack/genetrack_indexer.xml" />
- <tool file="genetrack/genetrack_peak_prediction.xml" /></section><section id="ngs-simulation" name="NGS: Simulation"><tool file="ngs_simulation/ngs_simulation.xml" />
diff -r 3b6cdcd9759595335cacd06b960efa6d96faaf72 -r 63006e23bf7d32e18033e0e59c0c194259935f6d tools/genetrack/genetrack_indexer.py
--- a/tools/genetrack/genetrack_indexer.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Wraps genetrack.scripts.tabs2genetrack so the tool can be executed from Galaxy.
-
-usage: %prog input output shift
-"""
-
-import sys, shutil, os
-from galaxy import eggs
-import pkg_resources
-pkg_resources.require( "GeneTrack" )
-
-from genetrack.scripts import tabs2genetrack
-from genetrack import logger
-
-if __name__ == "__main__":
- import os
- os.environ[ 'LC_ALL' ] = 'C'
- #os.system( 'export' )
-
- parser = tabs2genetrack.option_parser()
-
- options, args = parser.parse_args()
-
- # uppercase the format
- options.format = options.format.upper()
-
- if options.format not in ('BED', 'GFF'):
- sys.stdout = sys.stderr
- parser.print_help()
- sys.exit(-1)
-
- logger.disable(options.verbosity)
-
- # missing file names
- if not (options.inpname and options.outname and options.format):
- parser.print_help()
- sys.exit(-1)
- else:
- tabs2genetrack.transform(inpname=options.inpname, outname=options.outname,\
- format=options.format, shift=options.shift, index=options.index, options=options)
diff -r 3b6cdcd9759595335cacd06b960efa6d96faaf72 -r 63006e23bf7d32e18033e0e59c0c194259935f6d tools/genetrack/genetrack_indexer.xml
--- a/tools/genetrack/genetrack_indexer.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<tool id="bed2genetrack" name="GeneTrack indexer" version="1.0.1">
-
- <description>on a BED file</description>
-
- <command interpreter="python">
- genetrack_indexer.py -i $input -o $output -s $shift -v 0 -f BED -x
- </command>
-
- <inputs>
-
- <param format="bed6" name="input" type="data" help="Input data">
- <label>Select input bed file</label>
- </param>
-
- <param name="shift" size="4" type="integer" value="0" help="distance in basepairs">
- <label>Shift at 5' end</label>
- </param>
-
- <!-- this parameter is currently not used, may not be feasible to use it
- <param name="coverage" type="select" label="Full coverage">
- <option value="no">NO</option>
- <option value="yes">YES</option>
- </param>
- -->
-
- </inputs>
-
- <outputs>
- <data format="genetrack" name="output" />
- </outputs>
-
-<help>
-**Help**
-
-This tool will create a visualization of the bed file that is selected.
-
-**Parameters**
-
-- **Shift at 5' end** should be used when the location of interest is at a fixed distance from
- the 5' end for **all sequenced fragments**!
-
- For example if the sequenced sample consists
- mono-nucleosomal DNA (146bp) we should expect that
- each nucleosome midpoint is located at 73 bp from the 5' end of the fragment.
- Therefore we would enter 73 as the shift parameter. Once corrected the reads
- on each strand will coincide and indicate the actual midpoints
- of the nucleosomes.
-
- When shifting the averaging process in GeneTrack is able correct for longer or shorter
- than expected fragment sizes as long as the errors are reasonably random.
-
-See http://genetrack.bx.psu.edu/ for more information on GeneTrack.
-
-------
-
-**Citation**
-
-For the underlying tool, please cite `Albert I, Wachi S, Jiang C, Pugh BF. GeneTrack--a genomic data processing and visualization framework. Bioinformatics. 2008 May 15;24(10):1305-6. <http://www.ncbi.nlm.nih.gov/pubmed/18388141>`_
-
-If you use this tool in Galaxy, please cite Blankenberg D, et al. *In preparation.*
-
-</help>
-
-</tool>
diff -r 3b6cdcd9759595335cacd06b960efa6d96faaf72 -r 63006e23bf7d32e18033e0e59c0c194259935f6d tools/genetrack/genetrack_peak_prediction.py
--- a/tools/genetrack/genetrack_peak_prediction.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Wraps genetrack.scripts.peakpred so the tool can be executed from Galaxy.
-
-usage: %prog input output level sigma mode exclusion strand
-"""
-
-import sys
-from galaxy import eggs
-import pkg_resources
-pkg_resources.require( "GeneTrack" )
-
-from genetrack.scripts import peakpred
-from genetrack import logger
-
-if __name__ == "__main__":
-
- parser = peakpred.option_parser()
-
- options, args = parser.parse_args()
-
- logger.disable(options.verbosity)
-
- from genetrack import conf
-
- # trigger test mode
- if options.test:
- options.inpname = conf.testdata('test-hdflib-input.gtrack')
- options.outname = conf.testdata('predictions.bed')
-
- # missing input file name
- if not options.inpname and not options.outname:
- parser.print_help()
- else:
- print 'Sigma = %s' % options.sigma
- print 'Minimum peak = %s' % options.level
- print 'Peak-to-peak = %s' % options.exclude
-
- peakpred.predict(options.inpname, options.outname, options)
diff -r 3b6cdcd9759595335cacd06b960efa6d96faaf72 -r 63006e23bf7d32e18033e0e59c0c194259935f6d tools/genetrack/genetrack_peak_prediction.xml
--- a/tools/genetrack/genetrack_peak_prediction.xml
+++ /dev/null
@@ -1,69 +0,0 @@
-<tool id="predict2genetrack" name="Peak predictor">
-
- <description>on GeneTrack index</description>
-
- <command interpreter="python">
- genetrack_peak_prediction.py -i $input -o $output --level=$level --sigma=$sigma --mode=$mode --exclusion=$exclusion --strand=$strand -v 0 -x
- </command>
-
- <inputs>
-
- <param format="genetrack" name="input" type="data" help="Input data" label="Select input data"/>
-
- <param name="method" type="select" label="Smoothing method" help="The function used to average nearby read values">
- <option value="gauss">Gaussian kernel</option>
- <!-- <option value="yes">Moving averages</option> -->
- </param>
-
- <param name="sigma" size="4" type="integer" value="10" label="Smoothing factor" help="The interval over which each read is averaged" />
-
-
- <param name="mode" type="select" label="Peak prediction" help="Peak prediction method">
- <option value="nolap">Maximal non-overlapping</option>
- <!-- <option value="above">Above a threshold</option> -->
- <option value="all">All peaks</option>
- </param>
-
- <param name="exclusion" type="integer" size="4" value="0" help="The minimal distance between peaks" label="Peak-to-peak distance">
- </param>
-
- <param name="level" size="4" type="float" value="1" label="Threshold" help="Return only peaks above this value" />
-
- <param name="strand" type="select" label="Strands" help="Combine strand data or predict on each strand separately">
- <option value="all">Merge strands</option>
- <!-- <option value="yes1">Above a threshold</option> -->
- <option value="two">Separate strands</option>
- </param>
-
- </inputs>
-
- <outputs>
- <data format="bed" name="output" />
- </outputs>
-
-<help>
-**Help**
-
-This tool will generate genome wide peak prediction from an index file.
-
-**Parameters**
-
-- **Smoothing method** the function used to average nearby values
-
-- **Smoothing value** the factor used in the method
-
-- **Prediction method** the function used to average nearby values
-
-See http://genetrack.bx.psu.edu/ for more information on GeneTrack.
-
-------
-
-**Citation**
-
-For the underlying tool, please cite `Albert I, Wachi S, Jiang C, Pugh BF. GeneTrack--a genomic data processing and visualization framework. Bioinformatics. 2008 May 15;24(10):1305-6. <http://www.ncbi.nlm.nih.gov/pubmed/18388141>`_
-
-If you use this tool in Galaxy, please cite Blankenberg D, et al. *In preparation.*
-
-</help>
-
-</tool>
https://bitbucket.org/galaxy/galaxy-central/commits/a508799de645/
Changeset: a508799de645
User: natefoo
Date: 2013-11-30 18:47:19
Summary: merge stable.
Affected #: 6 files
diff -r 2b5329b6fba3a5d332e734139b30a14d48a680a3 -r a508799de64595113ad4d26b8f2f9df7b64f1f69 tool_conf.xml.main
--- a/tool_conf.xml.main
+++ b/tool_conf.xml.main
@@ -280,8 +280,6 @@
<section id="peak_calling" name="NGS: Peak Calling"><tool file="peak_calling/macs_wrapper.xml" /><tool file="peak_calling/sicer_wrapper.xml" />
- <tool file="genetrack/genetrack_indexer.xml" />
- <tool file="genetrack/genetrack_peak_prediction.xml" /></section><section id="ngs-rna-tools" name="NGS: RNA Analysis"><label id="rna_seq" text="RNA-seq" />
diff -r 2b5329b6fba3a5d332e734139b30a14d48a680a3 -r a508799de64595113ad4d26b8f2f9df7b64f1f69 tool_conf.xml.sample
--- a/tool_conf.xml.sample
+++ b/tool_conf.xml.sample
@@ -340,8 +340,6 @@
<tool file="peak_calling/macs_wrapper.xml" /><tool file="peak_calling/sicer_wrapper.xml" /><tool file="peak_calling/ccat_wrapper.xml" />
- <tool file="genetrack/genetrack_indexer.xml" />
- <tool file="genetrack/genetrack_peak_prediction.xml" /></section><section id="ngs-simulation" name="NGS: Simulation"><tool file="ngs_simulation/ngs_simulation.xml" />
diff -r 2b5329b6fba3a5d332e734139b30a14d48a680a3 -r a508799de64595113ad4d26b8f2f9df7b64f1f69 tools/genetrack/genetrack_indexer.py
--- a/tools/genetrack/genetrack_indexer.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Wraps genetrack.scripts.tabs2genetrack so the tool can be executed from Galaxy.
-
-usage: %prog input output shift
-"""
-
-import sys, shutil, os
-from galaxy import eggs
-import pkg_resources
-pkg_resources.require( "GeneTrack" )
-
-from genetrack.scripts import tabs2genetrack
-from genetrack import logger
-
-if __name__ == "__main__":
- import os
- os.environ[ 'LC_ALL' ] = 'C'
- #os.system( 'export' )
-
- parser = tabs2genetrack.option_parser()
-
- options, args = parser.parse_args()
-
- # uppercase the format
- options.format = options.format.upper()
-
- if options.format not in ('BED', 'GFF'):
- sys.stdout = sys.stderr
- parser.print_help()
- sys.exit(-1)
-
- logger.disable(options.verbosity)
-
- # missing file names
- if not (options.inpname and options.outname and options.format):
- parser.print_help()
- sys.exit(-1)
- else:
- tabs2genetrack.transform(inpname=options.inpname, outname=options.outname,\
- format=options.format, shift=options.shift, index=options.index, options=options)
diff -r 2b5329b6fba3a5d332e734139b30a14d48a680a3 -r a508799de64595113ad4d26b8f2f9df7b64f1f69 tools/genetrack/genetrack_indexer.xml
--- a/tools/genetrack/genetrack_indexer.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<tool id="bed2genetrack" name="GeneTrack indexer" version="1.0.1">
-
- <description>on a BED file</description>
-
- <command interpreter="python">
- genetrack_indexer.py -i $input -o $output -s $shift -v 0 -f BED -x
- </command>
-
- <inputs>
-
- <param format="bed6" name="input" type="data" help="Input data">
- <label>Select input bed file</label>
- </param>
-
- <param name="shift" size="4" type="integer" value="0" help="distance in basepairs">
- <label>Shift at 5' end</label>
- </param>
-
- <!-- this parameter is currently not used, may not be feasible to use it
- <param name="coverage" type="select" label="Full coverage">
- <option value="no">NO</option>
- <option value="yes">YES</option>
- </param>
- -->
-
- </inputs>
-
- <outputs>
- <data format="genetrack" name="output" />
- </outputs>
-
-<help>
-**Help**
-
-This tool will create a visualization of the bed file that is selected.
-
-**Parameters**
-
-- **Shift at 5' end** should be used when the location of interest is at a fixed distance from
- the 5' end for **all sequenced fragments**!
-
- For example if the sequenced sample consists
- mono-nucleosomal DNA (146bp) we should expect that
- each nucleosome midpoint is located at 73 bp from the 5' end of the fragment.
- Therefore we would enter 73 as the shift parameter. Once corrected the reads
- on each strand will coincide and indicate the actual midpoints
- of the nucleosomes.
-
- When shifting the averaging process in GeneTrack is able correct for longer or shorter
- than expected fragment sizes as long as the errors are reasonably random.
-
-See http://genetrack.bx.psu.edu/ for more information on GeneTrack.
-
-------
-
-**Citation**
-
-For the underlying tool, please cite `Albert I, Wachi S, Jiang C, Pugh BF. GeneTrack--a genomic data processing and visualization framework. Bioinformatics. 2008 May 15;24(10):1305-6. <http://www.ncbi.nlm.nih.gov/pubmed/18388141>`_
-
-If you use this tool in Galaxy, please cite Blankenberg D, et al. *In preparation.*
-
-</help>
-
-</tool>
diff -r 2b5329b6fba3a5d332e734139b30a14d48a680a3 -r a508799de64595113ad4d26b8f2f9df7b64f1f69 tools/genetrack/genetrack_peak_prediction.py
--- a/tools/genetrack/genetrack_peak_prediction.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Wraps genetrack.scripts.peakpred so the tool can be executed from Galaxy.
-
-usage: %prog input output level sigma mode exclusion strand
-"""
-
-import sys
-from galaxy import eggs
-import pkg_resources
-pkg_resources.require( "GeneTrack" )
-
-from genetrack.scripts import peakpred
-from genetrack import logger
-
-if __name__ == "__main__":
-
- parser = peakpred.option_parser()
-
- options, args = parser.parse_args()
-
- logger.disable(options.verbosity)
-
- from genetrack import conf
-
- # trigger test mode
- if options.test:
- options.inpname = conf.testdata('test-hdflib-input.gtrack')
- options.outname = conf.testdata('predictions.bed')
-
- # missing input file name
- if not options.inpname and not options.outname:
- parser.print_help()
- else:
- print 'Sigma = %s' % options.sigma
- print 'Minimum peak = %s' % options.level
- print 'Peak-to-peak = %s' % options.exclude
-
- peakpred.predict(options.inpname, options.outname, options)
diff -r 2b5329b6fba3a5d332e734139b30a14d48a680a3 -r a508799de64595113ad4d26b8f2f9df7b64f1f69 tools/genetrack/genetrack_peak_prediction.xml
--- a/tools/genetrack/genetrack_peak_prediction.xml
+++ /dev/null
@@ -1,69 +0,0 @@
-<tool id="predict2genetrack" name="Peak predictor">
-
- <description>on GeneTrack index</description>
-
- <command interpreter="python">
- genetrack_peak_prediction.py -i $input -o $output --level=$level --sigma=$sigma --mode=$mode --exclusion=$exclusion --strand=$strand -v 0 -x
- </command>
-
- <inputs>
-
- <param format="genetrack" name="input" type="data" help="Input data" label="Select input data"/>
-
- <param name="method" type="select" label="Smoothing method" help="The function used to average nearby read values">
- <option value="gauss">Gaussian kernel</option>
- <!-- <option value="yes">Moving averages</option> -->
- </param>
-
- <param name="sigma" size="4" type="integer" value="10" label="Smoothing factor" help="The interval over which each read is averaged" />
-
-
- <param name="mode" type="select" label="Peak prediction" help="Peak prediction method">
- <option value="nolap">Maximal non-overlapping</option>
- <!-- <option value="above">Above a threshold</option> -->
- <option value="all">All peaks</option>
- </param>
-
- <param name="exclusion" type="integer" size="4" value="0" help="The minimal distance between peaks" label="Peak-to-peak distance">
- </param>
-
- <param name="level" size="4" type="float" value="1" label="Threshold" help="Return only peaks above this value" />
-
- <param name="strand" type="select" label="Strands" help="Combine strand data or predict on each strand separately">
- <option value="all">Merge strands</option>
- <!-- <option value="yes1">Above a threshold</option> -->
- <option value="two">Separate strands</option>
- </param>
-
- </inputs>
-
- <outputs>
- <data format="bed" name="output" />
- </outputs>
-
-<help>
-**Help**
-
-This tool will generate genome wide peak prediction from an index file.
-
-**Parameters**
-
-- **Smoothing method** the function used to average nearby values
-
-- **Smoothing value** the factor used in the method
-
-- **Prediction method** the function used to average nearby values
-
-See http://genetrack.bx.psu.edu/ for more information on GeneTrack.
-
-------
-
-**Citation**
-
-For the underlying tool, please cite `Albert I, Wachi S, Jiang C, Pugh BF. GeneTrack--a genomic data processing and visualization framework. Bioinformatics. 2008 May 15;24(10):1305-6. <http://www.ncbi.nlm.nih.gov/pubmed/18388141>`_
-
-If you use this tool in Galaxy, please cite Blankenberg D, et al. *In preparation.*
-
-</help>
-
-</tool>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/3b6cdcd97595/
Changeset: 3b6cdcd97595
Branch: stable
User: jmchilton
Date: 2013-11-30 15:28:38
Summary: Fix from Jim Johnson for optional input handling in provenance API.
Affected #: 1 file
diff -r 51b4282dce3a411d02b982d1cca137884d5f6c2b -r 3b6cdcd9759595335cacd06b960efa6d96faaf72 lib/galaxy/webapps/galaxy/api/provenance.py
--- a/lib/galaxy/webapps/galaxy/api/provenance.py
+++ b/lib/galaxy/webapps/galaxy/api/provenance.py
@@ -54,6 +54,8 @@
for p in job.parameters:
out[p.name] = p.value
for in_d in job.input_datasets:
+ if not in_d.dataset:
+ continue
if follow:
out[in_d.name] = self._get_record(trans, in_d.dataset, follow)
else:
https://bitbucket.org/galaxy/galaxy-central/commits/2b5329b6fba3/
Changeset: 2b5329b6fba3
User: jmchilton
Date: 2013-11-30 15:29:06
Summary: Merge latest stable.
Affected #: 1 file
diff -r 228156daa575892f92d81c30a7e0f0ec4fa6b960 -r 2b5329b6fba3a5d332e734139b30a14d48a680a3 lib/galaxy/webapps/galaxy/api/provenance.py
--- a/lib/galaxy/webapps/galaxy/api/provenance.py
+++ b/lib/galaxy/webapps/galaxy/api/provenance.py
@@ -58,6 +58,8 @@
for p in job.parameters:
out[p.name] = p.value
for in_d in job.input_datasets:
+ if not in_d.dataset:
+ continue
if follow:
out[in_d.name] = self._get_record(trans, in_d.dataset, follow)
else:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Tool shed api fix for filtering tools that should not be tested via the tool shed's install and test framework.
by commits-noreply@bitbucket.org 29 Nov '13
by commits-noreply@bitbucket.org 29 Nov '13
29 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/228156daa575/
Changeset: 228156daa575
User: greg
Date: 2013-11-29 23:06:02
Summary: Tool shed api fix for filtering tools that should not be tested via the tool shed's install and test framework.
Affected #: 1 file
diff -r 820ff73e8aab805b7f4f4f50cb66d0f47adc5bc2 -r 228156daa575892f92d81c30a7e0f0ec4fa6b960 lib/galaxy/webapps/tool_shed/api/repository_revisions.py
--- a/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
+++ b/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
@@ -107,10 +107,9 @@
skip_tool_test = kwd.get( 'skip_tool_test', None )
if skip_tool_test is not None:
skip_tool_test = util.string_as_bool( skip_tool_test )
+ if skip_tool_test:
skipped_metadata_ids_subquery = select( [ trans.app.model.SkipToolTest.table.c.repository_metadata_id ] )
- if skip_tool_test:
- clause_list.append( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) )
- else:
+ if skipped_metadata_ids_subquery:
clause_list.append( not_( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) ) )
# Generate and execute the query.
try:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Attempt to display the results of the tool shed's install and test framework test run even if the preparation script was not executed before the test run.
by commits-noreply@bitbucket.org 29 Nov '13
by commits-noreply@bitbucket.org 29 Nov '13
29 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/820ff73e8aab/
Changeset: 820ff73e8aab
User: greg
Date: 2013-11-29 22:28:25
Summary: Attempt to display the results of the tool shed's install and test framework test run even if the preparation script was not executed before the test run.
Affected #: 1 file
diff -r 8544d14f89dd48d313384a22117f56fa07a99974 -r 820ff73e8aab805b7f4f4f50cb66d0f47adc5bc2 lib/tool_shed/util/container_util.py
--- a/lib/tool_shed/util/container_util.py
+++ b/lib/tool_shed/util/container_util.py
@@ -1062,37 +1062,41 @@
tool_test_results_root_folder.folders.append( test_runs_folder )
for index, tool_test_results_dict in enumerate( tool_test_results_dicts ):
test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
- if test_environment_dict is not None:
- time_tested = test_environment_dict.get( 'time_tested', 'unknown_%d' % index )
- if multiple_tool_test_results_dicts:
- folder_id += 1
- containing_folder = Folder( id=folder_id, key='test_results', label=time_tested, parent=test_runs_folder )
- test_runs_folder.folders.append( containing_folder )
- else:
- containing_folder = tool_test_results_root_folder
- test_results_dict_id += 1
+ if test_environment_dict is None:
+ # the test environment entry will exist only if the preparation script check_re;ositories_for_functional_tests.py
+ # was executed prior to the ~/install_and_test_repositories/functional_tests.py script. If that did not occur,
+ # we'll display test result, but the test_environment entries will not be complete.
+ test_environment_dict = {}
+ time_tested = test_environment_dict.get( 'time_tested', 'unknown_%d' % index )
+ if multiple_tool_test_results_dicts:
folder_id += 1
- folder = Folder( id=folder_id, key='test_environment', label='Automated test environment', parent=containing_folder )
- containing_folder.folders.append( folder )
- architecture = test_environment_dict.get( 'architecture', '' )
- galaxy_database_version = test_environment_dict.get( 'galaxy_database_version', '' )
- galaxy_revision = test_environment_dict.get( 'galaxy_revision', '' )
- python_version = test_environment_dict.get( 'python_version', '' )
- system = test_environment_dict.get( 'system', '' )
- tool_shed_database_version = test_environment_dict.get( 'tool_shed_database_version', '' )
- tool_shed_mercurial_version = test_environment_dict.get( 'tool_shed_mercurial_version', '' )
- tool_shed_revision = test_environment_dict.get( 'tool_shed_revision', '' )
- test_environment = TestEnvironment( id=1,
- architecture=architecture,
- galaxy_database_version=galaxy_database_version,
- galaxy_revision=galaxy_revision,
- python_version=python_version,
- system=system,
- time_tested=time_tested,
- tool_shed_database_version=tool_shed_database_version,
- tool_shed_mercurial_version=tool_shed_mercurial_version,
- tool_shed_revision=tool_shed_revision )
- folder.test_environments.append( test_environment )
+ containing_folder = Folder( id=folder_id, key='test_results', label=time_tested, parent=test_runs_folder )
+ test_runs_folder.folders.append( containing_folder )
+ else:
+ containing_folder = tool_test_results_root_folder
+ test_results_dict_id += 1
+ folder_id += 1
+ folder = Folder( id=folder_id, key='test_environment', label='Automated test environment', parent=containing_folder )
+ containing_folder.folders.append( folder )
+ architecture = test_environment_dict.get( 'architecture', '' )
+ galaxy_database_version = test_environment_dict.get( 'galaxy_database_version', '' )
+ galaxy_revision = test_environment_dict.get( 'galaxy_revision', '' )
+ python_version = test_environment_dict.get( 'python_version', '' )
+ system = test_environment_dict.get( 'system', '' )
+ tool_shed_database_version = test_environment_dict.get( 'tool_shed_database_version', '' )
+ tool_shed_mercurial_version = test_environment_dict.get( 'tool_shed_mercurial_version', '' )
+ tool_shed_revision = test_environment_dict.get( 'tool_shed_revision', '' )
+ test_environment = TestEnvironment( id=1,
+ architecture=architecture,
+ galaxy_database_version=galaxy_database_version,
+ galaxy_revision=galaxy_revision,
+ python_version=python_version,
+ system=system,
+ time_tested=time_tested,
+ tool_shed_database_version=tool_shed_database_version,
+ tool_shed_mercurial_version=tool_shed_mercurial_version,
+ tool_shed_revision=tool_shed_revision )
+ folder.test_environments.append( test_environment )
not_tested_dict = tool_test_results_dict.get( 'not_tested', {} )
if not_tested_dict:
folder_id += 1
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Minor fix for the tool shed's install and test framework.
by commits-noreply@bitbucket.org 29 Nov '13
by commits-noreply@bitbucket.org 29 Nov '13
29 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/8544d14f89dd/
Changeset: 8544d14f89dd
User: greg
Date: 2013-11-29 21:18:22
Summary: Minor fix for the tool shed's install and test framework.
Affected #: 1 file
diff -r 024e7ca6c1a6023d49f4f63014185d14760faecf -r 8544d14f89dd48d313384a22117f56fa07a99974 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -1070,7 +1070,7 @@
exclude_count += 1
exclude_dict[ 'repositories' ].append( repository_tuple )
exclude_list.append( exclude_dict )
- log.debug( 'The xml document %s containing the exclude list %s defines the following %s repositories to be excluded from testing...' % \
+ log.debug( 'The xml document %s containing the exclude list defines the following %s repositories to be excluded from testing...' % \
( str( xml_filename ), str( exclude_count ) ) )
#if '-list_repositories' in sys.argv:
for name, owner, changeset_revision in exclude_verbose:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Additional code cleanup, better logging and more minor fixes for the tool shed's install and test framework.
by commits-noreply@bitbucket.org 29 Nov '13
by commits-noreply@bitbucket.org 29 Nov '13
29 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/024e7ca6c1a6/
Changeset: 024e7ca6c1a6
User: greg
Date: 2013-11-29 20:34:35
Summary: Additional code cleanup, better logging and more minor fixes for the tool shed's install and test framework.
Affected #: 2 files
diff -r 864f8f4c466e4c031739549ea6a038dd70a4ed9a -r 024e7ca6c1a6023d49f4f63014185d14760faecf lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -95,17 +95,25 @@
invalid_revisions = 0
records_checked = 0
# Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway.
- skip_metadata_ids = select( [ app.model.SkipToolTest.table.c.repository_metadata_id ] )
+ print '# -------------------------------------------------------------------------------------------'
+ print '# The skip_tool_test setting has been set for the following repository revision, so they will not be tested.'
+ skip_metadata_ids = []
+ for skip_tool_test in app.sa_session.query( app.model.SkipToolTest ):
+ print '# repository_metadata_id: %s, changeset_revision: %s' % \
+ ( str( skip_tool_test.repository_metadata_id ), str( skip_tool_test.initial_changeset_revision ) )
+ print 'reason: %s' % str( skip_tool_test.comment )
+ skip_metadata_ids.append( skip_tool_test.repository_metadata_id )
# Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
# since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
# because it's redundant to test a revision that a user can't install.
for repository_metadata in app.sa_session.query( app.model.RepositoryMetadata ) \
.filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
app.model.RepositoryMetadata.table.c.includes_tools == True,
- app.model.RepositoryMetadata.table.c.do_not_test == False,
- not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
+ app.model.RepositoryMetadata.table.c.do_not_test == False ) ):
# Initialize some items.
missing_test_components = []
+ revision_has_test_data = False
+ testable_revision = False
repository = repository_metadata.repository
records_checked += 1
# Check the next repository revision.
@@ -117,13 +125,13 @@
if repository.id not in checked_repository_ids:
checked_repository_ids.append( repository.id )
print '# -------------------------------------------------------------------------------------------'
- print '# Now checking revision %s of %s, owned by %s.' % ( changeset_revision, name, owner )
+ print '# Checking revision %s of %s owned by %s.' % ( changeset_revision, name, owner )
+ if repository_metadata.id in skip_metadata_ids:
+ print'# Skipping revision %s of %s owned by %s because the skip_tool_test setting has been set.' % ( changeset_revision, name, owner )
# If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
# only repositories that contain tools.
tool_dicts = metadata.get( 'tools', None )
if tool_dicts is not None:
- has_test_data = False
- testable_revision = False
# Clone the repository up to the changeset revision we're checking.
repo_dir = repository.repo_path( app )
repo = hg.repository( suc.get_configured_ui(), repo_dir )
@@ -137,27 +145,30 @@
if '.hg' in dirs:
dirs.remove( '.hg' )
if 'test-data' in dirs:
- has_test_data = True
+ revision_has_test_data = True
test_data_path = os.path.join( root, dirs[ dirs.index( 'test-data' ) ] )
break
- if has_test_data:
+ if revision_has_test_data:
print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
else:
print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
( changeset_revision, name, owner )
- # Inspect each tool_dict for defined functional tests.
+ # Inspect each tool_dict for defined functional tests. If there are no tests, this tool should not be tested, since the
+ # tool functional tests only report failure if the test itself fails, not if it's missing or undefined. Filtering out those
+ # repositories at this step will reduce the number of "false negatives" the automated functional test framework produces.
for tool_dict in tool_dicts:
+ failure_reason = ''
+ problem_found = False
+ tool_has_defined_tests = False
+ tool_has_test_files = False
+ missing_test_files = []
tool_count += 1
tool_id = tool_dict[ 'id' ]
tool_version = tool_dict[ 'version' ]
tool_guid = tool_dict[ 'guid' ]
if verbosity >= 1:
print "# Checking tool ID '%s' in changeset revision %s of %s." % ( tool_id, changeset_revision, name )
- # If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
- # not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
- # automated functional test framework produces.
- tool_has_tests = False
defined_test_dicts = tool_dict.get( 'tests', None )
if defined_test_dicts is not None:
# We need to inspect the <test> tags because the following tags...
@@ -178,33 +189,26 @@
outputs = defined_test_dict.get( 'outputs', [] )
if inputs and outputs:
# At least one tool within the repository has a valid <test> tag.
- tool_has_tests = True
+ tool_has_defined_tests = True
break
- if tool_has_tests:
- if verbosity >= 1:
- print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
- ( tool_id, changeset_revision, name )
+ if tool_has_defined_tests:
+ print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
+ ( tool_id, changeset_revision, name )
has_tests += 1
else:
- if verbosity >= 1:
- print '# No functional tests defined for %s.' % tool_id
+ print '# No functional tests defined for %s.' % tool_id
no_tests += 1
- failure_reason = ''
- problem_found = False
- missing_test_files = []
- has_test_files = False
- if tool_has_tests and has_test_data:
+ if tool_has_defined_tests and revision_has_test_data:
missing_test_files = check_for_missing_test_files( defined_test_dicts, test_data_path )
if missing_test_files:
- if verbosity >= 1:
- print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
- ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
+ print "# Tool id '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
+ ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
else:
- has_test_files = True
- if not has_test_data:
+ tool_has_test_files = True
+ if not revision_has_test_data:
failure_reason += 'Repository does not have a test-data directory. '
problem_found = True
- if not tool_has_tests:
+ if not tool_has_defined_tests:
failure_reason += 'Functional test definitions missing for %s. ' % tool_id
problem_found = True
if missing_test_files:
@@ -215,7 +219,8 @@
if problem_found:
if test_errors not in missing_test_components:
missing_test_components.append( test_errors )
- if tool_has_tests and has_test_files:
+ if tool_has_defined_tests and tool_has_test_files:
+ print '# Revision %s of %s owned by %s is a testable revision.' % ( changeset_revision, name, owner )
testable_revision = True
# Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
if os.path.exists( work_dir ):
@@ -227,9 +232,9 @@
invalid_revisions += 1
print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
if verbosity >= 1:
- for invalid_test in missing_test_components:
- if 'missing_components' in invalid_test:
- print '# %s' % invalid_test[ 'missing_components' ]
+ for missing_test_component in missing_test_components:
+ if 'missing_components' in missing_test_component:
+ print '# %s' % missing_test_component[ 'missing_components' ]
if not info_only:
# Get or create the list of tool_test_results dictionaries.
if repository_metadata.tool_test_results is not None:
@@ -243,8 +248,9 @@
# install_and_test_tool_sed_repositories.sh script which will further populate the tool_test_results_dict.
tool_test_results_dict = tool_test_results_dicts[ 0 ]
if len( tool_test_results_dict ) <= 1:
- # We can re-use the mostly empty tool_test_results_dict for this run, but we need to eliminate it from
- # the list of tool_test_results_dicts since it will be re-inserted later.
+ # We can re-use the mostly empty tool_test_results_dict for this run because it is either empty or it contains only
+ # a test_environment entry. If we use it we need to temporarily eliminate it from the list of tool_test_results_dicts
+ # since it will be re-inserted later.
tool_test_results_dict = tool_test_results_dicts.pop( 0 )
else:
# The latest tool_test_results_dict has been populated with the results of a test run, so it cannot be used.
@@ -284,6 +290,8 @@
# Even though some tools may be missing test components, it may be possible to test other tools. Since the
# install and test framework filters out repositories marked as missing test components, we'll set it only if
# no tools can be tested.
+ print '# Setting missing_test_components to True for revision %s of %s owned by %s because all tools are missing test components.' % \
+ ( changeset_revision, name, owner )
repository_metadata.missing_test_components = True
tool_test_results_dict[ 'missing_test_components' ] = missing_test_components
# Store only the configured number of test runs.
diff -r 864f8f4c466e4c031739549ea6a038dd70a4ed9a -r 024e7ca6c1a6023d49f4f63014185d14760faecf test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -363,6 +363,8 @@
"""
error_message = ''
latest_revision_only = '-check_all_revisions' not in sys.argv
+ if latest_revision_only:
+ log.debug( 'Testing is restricted to the latest downloadable revision in this test run.' )
repository_dicts = []
params = urllib.urlencode( dict( do_not_test='false',
downloadable='true',
@@ -374,7 +376,6 @@
baseline_repository_dicts, error_message = json_from_url( api_url )
if error_message:
return None, error_message
- log.debug( 'The Tool Shed API returned %d metadata revisions for installation and testing.' % len( baseline_repository_dicts ) )
for baseline_repository_dict in baseline_repository_dicts:
# We need to get some details from the tool shed API, such as repository name and owner, to pass on to the
# module that will generate the install methods.
@@ -393,10 +394,6 @@
repository_dicts.append( dict( repository_dict.items() + baseline_repository_dict.items() ) )
else:
repository_dicts.append( dict( repository_dict.items() + baseline_repository_dict.items() ) )
- if latest_revision_only:
- skipped_previous = ' and metadata revisions that are not the most recent'
- else:
- skipped_previous = ''
if testing_single_repository:
tsr_name = testing_single_repository[ 'name' ]
tsr_owner = testing_single_repository[ 'owner' ]
@@ -414,14 +411,15 @@
return repository_dicts, error_message
return repository_dicts, error_message
# Get a list of repositories to test from the tool shed specified in the GALAXY_INSTALL_TEST_TOOL_SHED_URL environment variable.
- log.debug( "Retrieved %d repositories from the API url: %s." % ( len( repository_dicts ), str( galaxy_tool_shed_url ) ) )
- if '-list_repositories' in sys.argv:
- log.debug( "Repositories for testing:" )
- for repository_dict in repository_dicts:
- log.debug( "Revision %s of repository %s owned by %s" % \
- ( str( repository_dict.get( 'changeset_revision', None ) ), \
- str( repository_dict.get( 'name', None ) ), \
- str( repository_dict.get( 'owner', None ) ) ) )
+ log.debug( "The Tool Shed's API url...\n%s" % str( api_url ) )
+ log.debug( "...retrieved %d repository revisions for testing." % len( repository_dicts ) )
+ #if '-list_repositories' in sys.argv:
+ log.debug( "Repository revisions for testing:" )
+ for repository_dict in repository_dicts:
+ log.debug( "Revision %s of repository %s owned by %s" % \
+ ( str( repository_dict.get( 'changeset_revision', None ) ), \
+ str( repository_dict.get( 'name', None ) ), \
+ str( repository_dict.get( 'owner', None ) ) ) )
return repository_dicts, error_message
def get_static_settings():
@@ -503,7 +501,7 @@
params = dict( tools_functionally_correct=False,
do_not_test=False,
test_install_error=True )
- # TODO: do something usefule with response_dict
+ # TODO: do something useful with response_dict
response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
# Since this repository is missing components, we do not want to test it, so deactivate it or uninstall it.
# The deactivate flag is set to True if the environment variable GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES
@@ -618,6 +616,7 @@
name = str( repository_dict[ 'name' ] )
owner = str( repository_dict[ 'owner' ] )
changeset_revision = str( repository_dict[ 'changeset_revision' ] )
+ log.debug( "Processing revision %s of repository %s owned by %s..." % ( changeset_revision, name, owner ) )
# Populate the tool_test_results_dict.
tool_test_results_dicts, error_message = get_tool_test_results_dicts( galaxy_tool_shed_url, encoded_repository_metadata_id )
if error_message:
@@ -633,124 +632,122 @@
log.exception( "Invalid list of tool_test_results_dicts %s: %s" % ( str( tool_test_results_dicts ), str( e ) ) )
continue
# See if this repository should be skipped for any reason.
- skip_this_repository = False
+ this_repository_is_in_the_exclude_lost = False
skip_reason = None
for exclude_dict in exclude_list:
reason = exclude_dict[ 'reason' ]
exclude_repositories = exclude_dict[ 'repositories' ]
if ( name, owner, changeset_revision ) in exclude_repositories or ( name, owner, None ) in exclude_repositories:
- skip_this_repository = True
+ this_repository_is_in_the_exclude_lost = True
skip_reason = reason
break
- if skip_this_repository:
+ if this_repository_is_in_the_exclude_lost:
tool_test_results_dict[ 'not_tested' ] = dict( reason=skip_reason )
params = dict( tools_functionally_correct=False,
do_not_test=False )
- # TODO: do something usefule with response_dict
+ # TODO: do something useful with response_dict
response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
- log.debug( "Not testing revision %s of repository %s owned by %s." % ( changeset_revision, name, owner ) )
+ log.debug( "Not testing revision %s of repository %s owned by %s because it is in the exclude list for this test run." % \
+ ( changeset_revision, name, owner ) )
else:
+ test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
+ test_environment_dict = get_test_environment( test_environment_dict )
+ # Add the current time as the approximate time that this test run occurs. A similar value will also be
+ # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
+ # may be configured to store multiple test run results, so each must be associated with a time stamp.
+ now = time.strftime( "%Y-%m-%d %H:%M:%S" )
+ test_environment_dict[ 'time_tested' ] = now
+ test_environment_dict[ 'galaxy_database_version' ] = get_database_version( app )
+ test_environment_dict[ 'galaxy_revision' ] = get_repository_current_revision( os.getcwd() )
+ tool_test_results_dict[ 'test_environment' ] = test_environment_dict
+ tool_test_results_dict[ 'passed_tests' ] = []
+ tool_test_results_dict[ 'failed_tests' ] = []
+ tool_test_results_dict[ 'installation_errors' ] = dict( current_repository=[], repository_dependencies=[], tool_dependencies=[] )
+ repository, error_message = install_repository( app, repository_dict )
if error_message:
- log.debug( error_message )
+ tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = error_message
+ # Even if the repository failed to install, execute the uninstall method, in case a dependency did succeed.
+ log.debug( 'Attempting to uninstall repository %s owned by %s.' % ( name, owner ) )
+ try:
+ repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
+ except Exception, e:
+ error_message = 'Unable to find installed repository %s owned by %s: %s.' % ( name, owner, str( e ) )
+ log.exception( error_message )
+ test_result = dict( tool_shed=galaxy_tool_shed_url,
+ name=name,
+ owner=owner,
+ changeset_revision=changeset_revision,
+ error_message=error_message )
+ tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( test_result )
+ params = dict( tools_functionally_correct=False,
+ test_install_error=True,
+ do_not_test=False )
+ # TODO: do something useful with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
+ try:
+ if deactivate:
+ # We are deactivating this repository and all of its repository dependencies.
+ deactivate_repository( app, repository_dict )
+ else:
+ # We are uninstalling this repository and all of its repository dependencies.
+ uninstall_repository( app, repository_dict )
+ except:
+ log.exception( 'Encountered error attempting to deactivate or uninstall %s.', str( repository_dict[ 'name' ] ) )
+ results_dict[ 'repositories_failed_install' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
+ log.debug( 'Repository %s failed to install correctly.' % str( name ) )
else:
- test_environment_dict = tool_test_results_dict.get( 'test_environment', None )
- test_environment_dict = get_test_environment( test_environment_dict )
- # Add the current time as the approximate time that this test run occurs. A similar value will also be
- # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
- # may be configured to store multiple test run results, so each must be associated with a time stamp.
- now = time.strftime( "%Y-%m-%d %H:%M:%S" )
- test_environment_dict[ 'time_tested' ] = now
- test_environment_dict[ 'galaxy_database_version' ] = get_database_version( app )
- test_environment_dict[ 'galaxy_revision' ] = get_repository_current_revision( os.getcwd() )
- tool_test_results_dict[ 'test_environment' ] = test_environment_dict
- tool_test_results_dict[ 'passed_tests' ] = []
- tool_test_results_dict[ 'failed_tests' ] = []
- tool_test_results_dict[ 'installation_errors' ] = dict( current_repository=[], repository_dependencies=[], tool_dependencies=[] )
- repository, error_message = install_repository( app, repository_dict )
- if error_message:
- tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = error_message
- # Even if the repository failed to install, execute the uninstall method, in case a dependency did succeed.
- log.debug( 'Attempting to uninstall repository %s owned by %s.' % ( name, owner ) )
+ # Configure and run functional tests for this repository. This is equivalent to sh run_functional_tests.sh -installed
+ remove_install_tests()
+ log.debug( 'Installation of %s succeeded, running all defined functional tests.' % str( repository.name ) )
+ # Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
+ # does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
+ # and the tools_functionally_correct flag to False, as well as updating tool_test_results.
+ file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
+ has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file,
+ from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
+ # Add an empty 'missing_test_results' entry if it is missing from the tool_test_results_dict. The
+ # ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered information in the
+ # 'missing_test_components' entry of the tool_test_results_dict dictionary for repositories that are
+ # missing test components.
+ if 'missing_test_components' not in tool_test_results_dict:
+ tool_test_results_dict[ 'missing_test_components' ] = []
+ missing_tool_dependencies = get_missing_tool_dependencies( repository )
+ if missing_tool_dependencies or repository.missing_repository_dependencies:
+ results_dict = handle_missing_dependencies( app,
+ repository,
+ missing_tool_dependencies,
+ repository_dict,
+ tool_test_results_dicts,
+ tool_test_results_dict,
+ results_dict )
+ else:
+ # If the repository has a test-data directory we write the generated shed_tools_dict to a file, so the functional
+ # test framework can find it.
+ file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
+ log.debug( 'Saved generated shed_tools_dict to %s\nContents: %s' % ( str( galaxy_shed_tools_dict ), str( shed_tools_dict ) ) )
try:
- repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( name, owner, changeset_revision )
+ results_dict = test_repository_tools( app,
+ repository,
+ repository_dict,
+ tool_test_results_dicts,
+ tool_test_results_dict,
+ results_dict )
except Exception, e:
- error_message = 'Unable to find installed repository %s owned by %s: %s.' % ( name, owner, str( e ) )
- log.exception( error_message )
- test_result = dict( tool_shed=galaxy_tool_shed_url,
- name=name,
- owner=owner,
- changeset_revision=changeset_revision,
- error_message=error_message )
- tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ].append( test_result )
- params = dict( tools_functionally_correct=False,
- test_install_error=True,
- do_not_test=False )
- # TODO: do something usefule with response_dict
- response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
- try:
- if deactivate:
- # We are deactivating this repository and all of its repository dependencies.
- deactivate_repository( app, repository_dict )
- else:
- # We are uninstalling this repository and all of its repository dependencies.
- uninstall_repository( app, repository_dict )
- except:
- log.exception( 'Encountered error attempting to deactivate or uninstall %s.', str( repository_dict[ 'name' ] ) )
- results_dict[ 'repositories_failed_install' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
- log.debug( 'Repository %s failed to install correctly.' % str( name ) )
- else:
- # Configure and run functional tests for this repository. This is equivalent to sh run_functional_tests.sh -installed
- remove_install_tests()
- log.debug( 'Installation of %s succeeded, running all defined functional tests.' % str( repository.name ) )
- # Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
- # does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
- # and the tools_functionally_correct flag to False, as well as updating tool_test_results.
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
- has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file,
- from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
- # Add an empty 'missing_test_results' entry if it is missing from the tool_test_results_dict. The
- # ~/tool_shed/scripts/check_repositories_for_functional_tests.py will have entered information in the
- # 'missing_test_components' entry of the tool_test_results_dict dictionary for repositories that are
- # missing test components.
- if 'missing_test_components' not in tool_test_results_dict:
- tool_test_results_dict[ 'missing_test_components' ] = []
- missing_tool_dependencies = get_missing_tool_dependencies( repository )
- if missing_tool_dependencies or repository.missing_repository_dependencies:
- results_dict = handle_missing_dependencies( app,
- repository,
- missing_tool_dependencies,
- repository_dict,
- tool_test_results_dicts,
- tool_test_results_dict,
- results_dict )
- else:
- # If the repository has a test-data directory we write the generated shed_tools_dict to a file, so the functional
- # test framework can find it.
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
- log.debug( 'Saved generated shed_tools_dict to %s\nContents: %s' % ( str( galaxy_shed_tools_dict ), str( shed_tools_dict ) ) )
- try:
- results_dict = test_repository_tools( app,
- repository,
- repository_dict,
- tool_test_results_dicts,
- tool_test_results_dict,
- results_dict )
- except Exception, e:
- exception_message = 'Error executing tests for repository %s: %s' % ( name, str( e ) )
- log.exception( exception_message )
- tool_test_results_dict[ 'failed_tests' ].append( exception_message )
- # Record the status of this repository in the tool shed.
- params = dict( tools_functionally_correct=False,
- do_not_test=False,
- test_install_error=False )
- # TODO: do something usefule with response_dict
- response_dict = register_test_result( galaxy_tool_shed_url,
- tool_test_results_dicts,
- tool_test_results_dict,
- repository_dict,
- params )
- results_dict[ 'repositories_failed' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
- total_repositories_tested += 1
+ exception_message = 'Error executing tests for repository %s: %s' % ( name, str( e ) )
+ log.exception( exception_message )
+ tool_test_results_dict[ 'failed_tests' ].append( exception_message )
+ # Record the status of this repository in the tool shed.
+ params = dict( tools_functionally_correct=False,
+ do_not_test=False,
+ test_install_error=False )
+ # TODO: do something useful with response_dict
+ response_dict = register_test_result( galaxy_tool_shed_url,
+ tool_test_results_dicts,
+ tool_test_results_dict,
+ repository_dict,
+ params )
+ results_dict[ 'repositories_failed' ].append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
+ total_repositories_tested += 1
results_dict[ 'total_repositories_tested' ] = total_repositories_tested
return results_dict, error_message
@@ -1073,13 +1070,14 @@
exclude_count += 1
exclude_dict[ 'repositories' ].append( repository_tuple )
exclude_list.append( exclude_dict )
- log.debug( '%s repositories will be excluded from testing...' % str( exclude_count ) )
- if '-list_repositories' in sys.argv:
- for name, owner, changeset_revision in exclude_verbose:
- if changeset_revision:
- log.debug( 'Repository %s owned by %s, changeset revision %s.' % ( str( name ), str( owner ), str( changeset_revision ) ) )
- else:
- log.debug( 'Repository %s owned by %s, all revisions.' % ( str( name ), str( owner ) ) )
+ log.debug( 'The xml document %s containing the exclude list %s defines the following %s repositories to be excluded from testing...' % \
+ ( str( xml_filename ), str( exclude_count ) ) )
+ #if '-list_repositories' in sys.argv:
+ for name, owner, changeset_revision in exclude_verbose:
+ if changeset_revision:
+ log.debug( 'Repository %s owned by %s, changeset revision %s.' % ( str( name ), str( owner ), str( changeset_revision ) ) )
+ else:
+ log.debug( 'Repository %s owned by %s, all revisions.' % ( str( name ), str( owner ) ) )
return exclude_list
def register_test_result( url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params ):
@@ -1209,7 +1207,7 @@
params = dict( tools_functionally_correct=True,
do_not_test=False,
test_install_error=False )
- # TODO: do something usefule with response_dict
+ # TODO: do something useful with response_dict
response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
log.debug( 'Revision %s of repository %s installed and passed functional tests.' % ( str( changeset_revision ), str( name ) ) )
else:
@@ -1220,7 +1218,7 @@
params = dict( tools_functionally_correct=False,
test_install_error=False,
do_not_test=str( set_do_not_test ) )
- # TODO: do something usefule with response_dict
+ # TODO: do something useful with response_dict
response_dict = register_test_result( galaxy_tool_shed_url, tool_test_results_dicts, tool_test_results_dict, repository_dict, params )
log.debug( 'Revision %s of repository %s installed successfully but did not pass functional tests.' % \
( str( changeset_revision ), str( name ) ) )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/5b8c91e41836/
Changeset: 5b8c91e41836
User: jmchilton
Date: 2013-11-29 04:23:42
Summary: Rework handling of complex package dependencies.
Now symbolically linking to previously generated env.sh files instead of recreating them from scratch using install_and_build_package_via_fabric after inferring previous install parameters by parsing out existing env.sh file.
This has several advantages:
- Fixes problems where setup_* actions were not being handled properly as complex dependency packages (the previous code assumed only set_environment and setup_virtualenv actions would work in this case). Problem reported by Bjoern Gruening.
- Handles deployer-modified env.sh files and allows deployers to modify fewer env.sh files.
- The way install_util was pulling values out of existing env.sh files and recreating them was creating a dependency of each future version of the install code on the specific implementations of past install procedures - new environment variables could not be added to setup_ruby_environment, for instance, if it was realized that they were needed.
Turns out the biggest difference between this and the previous implementation is related to handling of complex package dependencies where the dependent repository is not installed. Previously, the Galaxy install code would have attempted to create an env.sh file with the path modifications as they would look if it had been installed - i.e. with completely invalid paths. My translation of this behavior is to write an invalid symbolic link to the env.sh file that would be installed. I am not sure this has any practical consequences.
I would like to understand better why handling complex tool dependencies on dependent repositories that are not installed is done at all. Most package management systems will prevent you from installing a package if dependent packages are not installed. This isn't to say what Galaxy is doing is wrong; it is just atypical, and I would like to understand the use cases that led to the decision better.
Affected #: 2 files
diff -r 9ff030d0daee11b2187d9784a8b0a57742a671c2 -r 5b8c91e4183664c90f4d95699ab971e674ffd67d lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -3471,6 +3471,29 @@
def can_reinstall_or_activate( self ):
return self.deleted
+ @property
+ def installing( self ):
+ """
+ Used to determine if tool dependencies can denote this repository as
+ installing.
+ """
+ return self.status not in [ self.installation_status.DEACTIVATED,
+ self.installation_status.UNINSTALLED,
+ self.installation_status.ERROR,
+ self.installation_status.INSTALLED,
+ self.installation_status.NEW,
+ ]
+
+ @property
+ def installation_complete( self ):
+ """
+ Used to determine if tool dependency installations can proceed.
+ Installed artifacts must be available on disk.
+ """
+ return self.status in [ self.installation_status.DEACTIVATED,
+ self.installation_status.INSTALLED,
+ ]
+
def to_dict( self, view='collection', value_mapper=None ):
if value_mapper is None:
value_mapper = {}
diff -r 9ff030d0daee11b2187d9784a8b0a57742a671c2 -r 5b8c91e4183664c90f4d95699ab971e674ffd67d lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
@@ -40,6 +40,70 @@
raise Exception( message )
return None
+
+def find_complex_dependency_package( app, dependent_install_dir, required_install_dir, tool_shed_repository, required_repository, package_name, package_version, tool_dependencies_config ):
+ """
+ """
+ tool_dependencies = []
+ if not os.path.exists( dependent_install_dir ):
+ os.makedirs( dependent_install_dir )
+ env_file = None
+ if tool_dependencies_config:
+ required_td_tree, error_message = xml_util.parse_xml( tool_dependencies_config )
+ if required_td_tree:
+ required_td_root = required_td_tree.getroot()
+ for required_td_elem in required_td_root:
+ # Find the appropriate package name and version.
+ if required_td_elem.tag == 'package':
+ # <package name="bwa" version="0.5.9">
+ required_td_package_name = required_td_elem.get( 'name', None )
+ required_td_package_version = required_td_elem.get( 'version', None )
+ if required_td_package_name == package_name and required_td_package_version == package_version:
+ tool_dependency = tool_dependency_util.create_or_update_tool_dependency( app=app,
+ tool_shed_repository=tool_shed_repository,
+ name=package_name,
+ version=package_version,
+ type='package',
+ status=app.model.ToolDependency.installation_status.NEVER_INSTALLED,
+ set_status=True )
+
+ if required_repository.installing:
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.INSTALLING )
+ # What happens if dependent repository fails or is
+ # uninstalled during this process.
+ env_file = required_repository_package_env( app, package_name, package_version, required_repository )
+ if required_repository.installation_complete:
+ if not os.path.exists( env_file ):
+ error_message = 'env.sh file %s for package %s in dependendent repository could not be found. Required repository has status %s.' % ( package_name, env_file, required_repository.status )
+ tool_dependency = tool_dependency_util.handle_tool_dependency_installation_error( app,
+ tool_dependency,
+ error_message,
+ remove_installation_path=False )
+ else:
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.INSTALLED )
+ else:
+ # Ekk - handling tool depednencies for a dependent
+ # repository that is not installed.
+ #
+ # Go ahead a return the env.sh file - Galaxy will
+ # proceed to create an invalid symbolic link.
+ # This is subtle-ly different than the previous
+ # behavior which would have recreated an env.sh
+ # from the the required repository's
+ # tool_dependencies.xml but since it was not
+ # installed all of the values inside would be
+ # invalid path modifications. Either way, this file
+ # is junk until the required repository is
+ # installed properly.
+ pass
+ tool_dependencies.append( tool_dependency )
+ return tool_dependencies, env_file
+
+
def get_absolute_path_to_file_in_repository( repo_files_dir, file_name ):
"""Return the absolute path to a specified disk file contained in a repository."""
stripped_file_name = strip_path( file_name )
@@ -91,6 +155,7 @@
text = common_util.tool_shed_get( app, tool_shed_url, url )
return text
+
def handle_complex_repository_dependency_for_package( app, elem, package_name, package_version, tool_shed_repository ):
handled_tool_dependencies = []
tool_shed = elem.attrib[ 'toolshed' ]
@@ -125,8 +190,7 @@
tool_dependency_version=package_version )
# Set this dependent repository's tool dependency env.sh file with a path to the required repository's installed tool dependency package.
# We can get everything we need from the discovered installed required_repository.
- if required_repository.status in [ app.model.ToolShedRepository.installation_status.DEACTIVATED,
- app.model.ToolShedRepository.installation_status.INSTALLED ]:
+ if required_repository.installation_complete:
if not os.path.exists( required_repository_package_install_dir ):
print 'Missing required tool dependency directory %s' % str( required_repository_package_install_dir )
repo_files_dir = required_repository.repo_files_directory( app )
@@ -156,22 +220,26 @@
required_repository_owner,
required_repository_changeset_revision )
config_to_use = tmp_filename
- tool_dependencies, actions_dict = populate_actions_dict( app=app,
- dependent_install_dir=dependent_install_dir,
- required_install_dir=required_repository_package_install_dir,
- tool_shed_repository=tool_shed_repository,
- required_repository=required_repository,
- package_name=package_name,
- package_version=package_version,
- tool_dependencies_config=config_to_use )
+
+ tool_dependencies, package_env_sh_file = find_complex_dependency_package(
+ app=app,
+ dependent_install_dir=dependent_install_dir,
+ required_install_dir=required_repository_package_install_dir,
+ tool_shed_repository=tool_shed_repository,
+ required_repository=required_repository,
+ package_name=package_name,
+ package_version=package_version,
+ tool_dependencies_config=config_to_use
+ )
+ if package_env_sh_file:
+ os.symlink( package_env_sh_file, os.path.join( dependent_install_dir, "env.sh" ) )
if tmp_filename:
try:
os.remove( tmp_filename )
except:
pass
for tool_dependency in tool_dependencies:
- # Install and build the package via fabric and update the tool_dependency record accordingly..
- tool_dependency = install_and_build_package_via_fabric( app, tool_dependency, actions_dict )
+ tool_dependency = __mark_tool_dependency_installed( app, tool_dependency)
handled_tool_dependencies.append( tool_dependency )
else:
message = "Unable to locate required tool shed repository named %s owned by %s with revision %s." % \
@@ -179,113 +247,6 @@
raise Exception( message )
return handled_tool_dependencies
-def handle_set_environment_entry_for_package( app, install_dir, tool_shed_repository, package_name, package_version, elem, required_repository ):
- """
- Populate a list of actions for creating an env.sh file for a dependent repository. The received elem is the <package> tag set associated
- with the tool-dependencies.xml file for one of the received tool_shed_repository's repository dependency.
- """
- action_dict = {}
- actions = []
- tool_dependencies = []
- for package_elem in elem:
- if package_elem.tag == 'install':
- # Create the new tool_dependency record in the database.
- tool_dependency = tool_dependency_util.create_or_update_tool_dependency( app=app,
- tool_shed_repository=tool_shed_repository,
- name=package_name,
- version=package_version,
- type='package',
- status=app.model.ToolDependency.installation_status.NEVER_INSTALLED,
- set_status=True )
- # Get the installation method version from a tag like: <install version="1.0">
- package_install_version = package_elem.get( 'version', '1.0' )
- if package_install_version == '1.0':
- # Update the tool dependency's status.
- tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
- tool_dependency=tool_dependency,
- status=app.model.ToolDependency.installation_status.INSTALLING )
- # Since the required tool dependency is installed for a repository dependency, we first need to inspect the <actions> tag set to find
- # the <action type="set_environment"> tag.
- env_var_dicts = []
- for actions_elem in package_elem:
- for action_elem in actions_elem:
- action_type = action_elem.get( 'type', 'shell_command' )
- if action_type == 'set_environment':
- # <action type="set_environment">
- # <environment_variable name="PYTHONPATH" action="append_to">$INSTALL_DIR/lib/python</environment_variable>
- # <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable>
- # </action>
- for env_elem in action_elem:
- if env_elem.tag == 'environment_variable':
- env_var_dict = td_common_util.create_env_var_dict( env_elem, tool_dependency_install_dir=install_dir )
- if env_var_dict:
- if env_var_dict not in env_var_dicts:
- env_var_dicts.append( env_var_dict )
- elif action_type == 'setup_virtualenv':
- # Add the virtualenv's site-packages to PYTHONPATH and bin to PATH. This is a bit hackish.
- site_packages_command = "%s -c 'import os, sys; print os.path.join(sys.prefix, \"lib\", \"python\" + sys.version[:3], \"site-packages\")'" % os.path.join( install_dir, "venv", "bin", "python" )
- output = fabric_util.handle_command( app, tool_dependency, install_dir, site_packages_command, return_output=True )
- if output.return_code:
- log.error( 'Tool dependency %s includes a setup_virtualenv action but venv python is broken: ' % \
- ( str( tool_dependency.name ), str( output.stderr ) ) )
- elif not os.path.exists( output.stdout ):
- log.error( "virtualenv's site-packages directory '%s' does not exist", str( output.stdout ) )
- else:
- env_var_dicts.append( dict( name="PYTHONPATH", action="prepend_to", value=output.stdout ) )
- env_var_dicts.append( dict( name="PATH", action="prepend_to", value=os.path.join( install_dir, 'venv', 'bin' ) ) )
- if env_var_dicts:
- if required_repository.status in [ app.model.ToolShedRepository.installation_status.INSTALLED,
- app.model.ToolShedRepository.installation_status.DEACTIVATED ]:
- # Handle the case where we have an installed required repository due to the prior_installation_required = True
- # setting in the received tool_shed_repository's tool_dependencies.xml file and the required repository's
- # tool_dependencies.xml file may include the use of the $ENV[] variable inheritance feature. To handle this,
- # we will replace the current "value" entries in each env_var_dict with the actual path taken from the env.sh
- # file generated for the installed required repository. Each env_var_dict currently looks something like this:
- # {'action': 'append_to', 'name': 'LD_LIBRARY_PATH', 'value': '$BOOST_ROOT_DIR/lib/'}
- # We'll read the contents of the received required_repository's env.sh file and replace the 'value' entry of
- # each env_var_dict with the associated value in the env.sh file.
- new_env_var_dicts = []
- env_sh_file_dir = tool_dependency_util.get_tool_dependency_install_dir( app=app,
- repository_name=required_repository.name,
- repository_owner=required_repository.owner,
- repository_changeset_revision=required_repository.installed_changeset_revision,
- tool_dependency_type='package',
- tool_dependency_name=package_name,
- tool_dependency_version=package_version )
- env_sh_file_path = os.path.join( env_sh_file_dir, 'env.sh' )
- if os.path.exists( env_sh_file_path ):
- for i, line in enumerate( open( env_sh_file_path, 'r' ) ):
- env_var_dict = env_var_dicts[ i ]
- action = env_var_dict.get( 'action', None )
- name = env_var_dict.get( 'name', None )
- value = env_var_dict.get( 'value', None )
- if action and name and value:
- new_value = parse_env_shell_entry( action, name, value, line )
- env_var_dict[ 'value' ] = new_value
- new_env_var_dicts.append( env_var_dict )
- else:
- error_message = 'Invalid file %s specified, ignoring set_environment_for_install action.' % str( env_sh_file_path )
- tool_dependency = tool_dependency_util.handle_tool_dependency_installation_error( app,
- tool_dependency,
- error_message,
- remove_installation_path=False )
- action_dict[ 'environment_variable' ] = new_env_var_dicts
- else:
- action_dict[ 'environment_variable' ] = env_var_dicts
- actions.append( ( 'set_environment', action_dict ) )
- if tool_dependency.status not in [ app.model.ToolDependency.installation_status.ERROR,
- app.model.ToolDependency.installation_status.INSTALLED ]:
- # Update the tool dependency's status.
- tool_dependency = \
- tool_dependency_util.set_tool_dependency_attributes( app,
- tool_dependency=tool_dependency,
- status=app.model.ToolDependency.installation_status.INSTALLED )
- # Accumulate processed tool dependencies to return to the caller.
- tool_dependencies.append( tool_dependency )
- else:
- raise NotImplementedError( 'Only install version 1.0 is currently supported (i.e., change your tag to be <install version="1.0">).' )
- return tool_dependencies, actions
- return tool_dependencies, actions
def install_and_build_package_via_fabric( app, tool_dependency, actions_dict ):
sa_session = app.model.context.current
@@ -301,17 +262,10 @@
tool_dependency,
error_message,
remove_installation_path=False )
- if tool_dependency.status not in [ app.model.ToolDependency.installation_status.ERROR,
- app.model.ToolDependency.installation_status.INSTALLED ]:
- log.debug( 'Changing status for tool dependency %s from %s to %s.' % \
- ( str( tool_dependency.name ), str( tool_dependency.status ), str( app.model.ToolDependency.installation_status.INSTALLED ) ) )
- tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
- tool_dependency=tool_dependency,
- status=app.model.ToolDependency.installation_status.INSTALLED,
- error_message=None,
- remove_from_disk=False )
+ tool_dependency = __mark_tool_dependency_installed( app, tool_dependency)
return tool_dependency
+
def install_package( app, elem, tool_shed_repository, tool_dependencies=None ):
# The value of tool_dependencies is a partial or full list of ToolDependency records associated with the tool_shed_repository.
sa_session = app.model.context.current
@@ -821,60 +775,22 @@
tool_dependency = install_and_build_package_via_fabric( app, tool_dependency, actions_dict )
return tool_dependency
-def parse_env_shell_entry( action, name, value, line ):
- new_value = value
- var_name = '$%s' % name
- tmp_value = line.split( '=' )[ 1 ]
- if action == 'prepend_to':
- # PATH=/test/package_rdkit_2012_12/62ebd7bb637a/rdkit/bin:$PATH; export PATH
- new_value = tmp_value.split( ':%s' % var_name )[ 0 ]
- elif action == 'set_to':
- # RDBASE=test/package_rdkit_2012_12/62ebd7bb637a/rdkit; export RDBASE
- new_value = tmp_value.split( ';' )[ 0 ]
- elif action == 'append_to':
- # LD_LIBRARY_PATH=$LD_LIBRARY_PATH:test/package_rdkit_2012_12/62ebd7bb637a/rdkit/lib/; export LD_LIBRARY_PATH
- new_value = tmp_value.split( ':' )[ 1 ]
- new_value = new_value.split( ';' )[ 0 ]
- return new_value
-def populate_actions_dict( app, dependent_install_dir, required_install_dir, tool_shed_repository, required_repository, package_name, package_version, tool_dependencies_config ):
+# TODO: Move to tool_dependency_util?
+def required_repository_package_env( app, package_name, package_version, required_repository ):
"""
- Populate an actions dictionary that can be sent to fabric_util.install_and_build_package. This method handles the scenario where a tool_dependencies.xml
- file defines a complex repository dependency. In this case, the tool dependency package will be installed in a separate repository and the tool dependency
- defined for the dependent repository will use an environment_variable setting defined in it's env.sh file to locate the required package. This method
- basically does what the install_via_fabric method does, but restricts it's activity to the <action type="set_environment"> tag set within the required
- repository's tool_dependencies.xml file.
+ Return path to env.sh file in required repository if the required repository has been installed.
"""
- sa_session = app.model.context.current
- if not os.path.exists( dependent_install_dir ):
- os.makedirs( dependent_install_dir )
- actions_dict = dict( install_dir=dependent_install_dir )
- if package_name:
- actions_dict[ 'package_name' ] = package_name
- tool_dependencies = []
- action_dict = {}
- if tool_dependencies_config:
- required_td_tree, error_message = xml_util.parse_xml( tool_dependencies_config )
- if required_td_tree:
- required_td_root = required_td_tree.getroot()
- for required_td_elem in required_td_root:
- # Find the appropriate package name and version.
- if required_td_elem.tag == 'package':
- # <package name="bwa" version="0.5.9">
- required_td_package_name = required_td_elem.get( 'name', None )
- required_td_package_version = required_td_elem.get( 'version', None )
- if required_td_package_name==package_name and required_td_package_version==package_version:
- tool_dependencies, actions = handle_set_environment_entry_for_package( app=app,
- install_dir=required_install_dir,
- tool_shed_repository=tool_shed_repository,
- package_name=package_name,
- package_version=package_version,
- elem=required_td_elem,
- required_repository=required_repository )
- if actions:
- actions_dict[ 'actions' ] = actions
- break
- return tool_dependencies, actions_dict
+ env_sh_file_dir = tool_dependency_util.get_tool_dependency_install_dir( app=app,
+ repository_name=required_repository.name,
+ repository_owner=required_repository.owner,
+ repository_changeset_revision=required_repository.installed_changeset_revision,
+ tool_dependency_type='package',
+ tool_dependency_name=package_name,
+ tool_dependency_version=package_version )
+ env_sh_file_path = os.path.join( env_sh_file_dir, 'env.sh' )
+ return env_sh_file_path
+
def run_proprietary_fabric_method( app, elem, proprietary_fabfile_path, install_dir, package_name=None, **kwd ):
"""
@@ -1019,3 +935,17 @@
for arg in args:
parts.append( arg.strip( '/' ) )
return '/'.join( parts )
+
+
+# TODO: Move to tool_dependency_util?
+def __mark_tool_dependency_installed( app, tool_dependency ):
+ if tool_dependency.status not in [ app.model.ToolDependency.installation_status.ERROR,
+ app.model.ToolDependency.installation_status.INSTALLED ]:
+ log.debug( 'Changing status for tool dependency %s from %s to %s.' % \
+ ( str( tool_dependency.name ), str( tool_dependency.status ), str( app.model.ToolDependency.installation_status.INSTALLED ) ) )
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.INSTALLED,
+ error_message=None,
+ remove_from_disk=False )
+ return tool_dependency
https://bitbucket.org/galaxy/galaxy-central/commits/864f8f4c466e/
Changeset: 864f8f4c466e
User: jmchilton
Date: 2013-11-29 04:23:42
Summary: Eliminate pre-bashisms from tool shed install action handlers.
Affected #: 1 file
diff -r 5b8c91e4183664c90f4d95699ab971e674ffd67d -r 864f8f4c466e4c031739549ea6a038dd70a4ed9a lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -390,7 +390,7 @@
with lcd( current_dir ):
with settings( warn_only=True ):
for tarball_name in tarball_names:
- cmd = '''export PATH=$PATH:$R_HOME/bin && export R_LIBS=$INSTALL_DIR &&
+ cmd = '''PATH=$PATH:$R_HOME/bin; export PATH; R_LIBS=$INSTALL_DIR; export R_LIBS; &&
Rscript -e "install.packages(c('%s'),lib='$INSTALL_DIR', repos=NULL, dependencies=FALSE)"''' % ( str( tarball_name ) )
cmd = install_environment.build_command( td_common_util.evaluate_template( cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
@@ -432,24 +432,24 @@
gem, gem_version = ruby_package_tup
if os.path.isfile( gem ):
# we assume a local shipped gem file
- cmd = '''export PATH=$PATH:$RUBY_HOME/bin && export GEM_HOME=$INSTALL_DIR &&
+ cmd = '''PATH=$PATH:$RUBY_HOME/bin; export PATH; GEM_HOME=$INSTALL_DIR; export GEM_HOME;
gem install --local %s''' % ( gem )
elif gem.find( '://' ) != -1:
# We assume a URL to a gem file.
url = gem
gem_name = url.split( '/' )[ -1 ]
td_common_util.url_download( work_dir, gem_name, url, extract=False )
- cmd = '''export PATH=$PATH:$RUBY_HOME/bin && export GEM_HOME=$INSTALL_DIR &&
+ cmd = '''PATH=$PATH:$RUBY_HOME/bin; export PATH; GEM_HOME=$INSTALL_DIR; export GEM_HOME;
gem install --local %s ''' % ( gem_name )
else:
# gem file from rubygems.org with or without version number
if gem_version:
# version number was specified
- cmd = '''export PATH=$PATH:$RUBY_HOME/bin && export GEM_HOME=$INSTALL_DIR &&
+ cmd = '''PATH=$PATH:$RUBY_HOME/bin; export PATH; GEM_HOME=$INSTALL_DIR; export GEM_HOME;
gem install %s --version "=%s"''' % ( gem, gem_version)
else:
# no version number given
- cmd = '''export PATH=$PATH:$RUBY_HOME/bin && export GEM_HOME=$INSTALL_DIR &&
+ cmd = '''PATH=$PATH:$RUBY_HOME/bin; export PATH; GEM_HOME=$INSTALL_DIR; export GEM_HOME;
gem install %s''' % ( gem )
cmd = install_environment.build_command( td_common_util.evaluate_template( cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
@@ -490,7 +490,7 @@
for perl_package in perl_packages:
# If set to a true value then MakeMaker's prompt function will always
# return the default without waiting for user input.
- cmd = '''export PERL_MM_USE_DEFAULT=1 && '''
+ cmd = '''PERL_MM_USE_DEFAULT=1; export PERL_MM_USE_DEFAULT; '''
if perl_package.find( '://' ) != -1:
# We assume a URL to a gem file.
url = perl_package
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Set a repository revision as missing test components only if test components are missing for all tools in the revision.
by commits-noreply@bitbucket.org 28 Nov '13
by commits-noreply@bitbucket.org 28 Nov '13
28 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/9ff030d0daee/
Changeset: 9ff030d0daee
User: greg
Date: 2013-11-28 23:38:38
Summary: Set a repository revision as missing test components only if test components are missing for all tools in the revision.
Affected #: 2 files
diff -r 5aede225ebef9801748b0f2948f5d1c387368ee7 -r 9ff030d0daee11b2187d9784a8b0a57742a671c2 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -104,8 +104,7 @@
app.model.RepositoryMetadata.table.c.includes_tools == True,
app.model.RepositoryMetadata.table.c.do_not_test == False,
not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
- # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows,
- # or tests incorrectly labeled as invalid.
+ # Initialize some items.
missing_test_components = []
repository = repository_metadata.repository
records_checked += 1
@@ -154,8 +153,7 @@
tool_version = tool_dict[ 'version' ]
tool_guid = tool_dict[ 'guid' ]
if verbosity >= 1:
- print "# Checking tool ID '%s' in changeset revision %s of %s." % \
- ( tool_id, changeset_revision, name )
+ print "# Checking tool ID '%s' in changeset revision %s of %s." % ( tool_id, changeset_revision, name )
# If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
# not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
# automated functional test framework produces.
@@ -282,7 +280,11 @@
print "# and it is not the latest downloadable revision."
repository_metadata.do_not_test = True
repository_metadata.tools_functionally_correct = False
- repository_metadata.missing_test_components = True
+ if not testable_revision:
+ # Even though some tools may be missing test components, it may be possible to test other tools. Since the
+ # install and test framework filters out repositories marked as missing test components, we'll set it only if
+ # no tools can be tested.
+ repository_metadata.missing_test_components = True
tool_test_results_dict[ 'missing_test_components' ] = missing_test_components
# Store only the configured number of test runs.
num_tool_test_results_saved = int( app.config.num_tool_test_results_saved )
diff -r 5aede225ebef9801748b0f2948f5d1c387368ee7 -r 9ff030d0daee11b2187d9784a8b0a57742a671c2 lib/tool_shed/util/container_util.py
--- a/lib/tool_shed/util/container_util.py
+++ b/lib/tool_shed/util/container_util.py
@@ -1122,6 +1122,9 @@
containing_folder.folders.append( folder )
failed_test_id = 0
for failed_tests_dict in failed_tests_dicts:
+ # TODO: Remove this when invalid test data is eliminated.
+ if isinstance( failed_tests_dict, list ):
+ failed_tests_dict = failed_tests_dict[ 0 ]
failed_test_id += 1
failed_test = FailedTest( id=failed_test_id,
stderr=failed_tests_dict.get( 'stderr', '' ),
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Provide more useful details when logging the results of the prep script for the Tool Shed's install and test framework.
by commits-noreply@bitbucket.org 28 Nov '13
by commits-noreply@bitbucket.org 28 Nov '13
28 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/5aede225ebef/
Changeset: 5aede225ebef
User: greg
Date: 2013-11-28 14:41:23
Summary: Provide more useful details when logging the results of the prep script for the Tool Shed's install and test framework.
Affected #: 1 file
diff -r 7756b29bea292228d3472a7f613f5be5adee38a2 -r 5aede225ebef9801748b0f2948f5d1c387368ee7 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -117,9 +117,8 @@
repository = repository_metadata.repository
if repository.id not in checked_repository_ids:
checked_repository_ids.append( repository.id )
- if verbosity >= 1:
- print '# -------------------------------------------------------------------------------------------'
- print '# Now checking revision %s of %s, owned by %s.' % ( changeset_revision, name, owner )
+ print '# -------------------------------------------------------------------------------------------'
+ print '# Now checking revision %s of %s, owned by %s.' % ( changeset_revision, name, owner )
# If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
# only repositories that contain tools.
tool_dicts = metadata.get( 'tools', None )
@@ -142,20 +141,19 @@
has_test_data = True
test_data_path = os.path.join( root, dirs[ dirs.index( 'test-data' ) ] )
break
- if verbosity >= 1:
- if has_test_data:
- print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
- else:
- print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
- print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
- ( changeset_revision, name, owner )
+ if has_test_data:
+ print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ else:
+ print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
+ ( changeset_revision, name, owner )
# Inspect each tool_dict for defined functional tests.
for tool_dict in tool_dicts:
tool_count += 1
tool_id = tool_dict[ 'id' ]
tool_version = tool_dict[ 'version' ]
tool_guid = tool_dict[ 'guid' ]
- if verbosity >= 2:
+ if verbosity >= 1:
print "# Checking tool ID '%s' in changeset revision %s of %s." % \
( tool_id, changeset_revision, name )
# If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
@@ -185,12 +183,12 @@
tool_has_tests = True
break
if tool_has_tests:
- if verbosity >= 2:
+ if verbosity >= 1:
print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
( tool_id, changeset_revision, name )
has_tests += 1
else:
- if verbosity >= 2:
+ if verbosity >= 1:
print '# No functional tests defined for %s.' % tool_id
no_tests += 1
failure_reason = ''
@@ -200,7 +198,7 @@
if tool_has_tests and has_test_data:
missing_test_files = check_for_missing_test_files( defined_test_dicts, test_data_path )
if missing_test_files:
- if verbosity >= 2:
+ if verbosity >= 1:
print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
else:
@@ -226,16 +224,14 @@
shutil.rmtree( work_dir )
if not missing_test_components:
valid_revisions += 1
- if verbosity >= 1:
- print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
else:
invalid_revisions += 1
+ print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
if verbosity >= 1:
- print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
- if verbosity >= 2:
- for invalid_test in missing_test_components:
- if 'missing_components' in invalid_test:
- print '# %s' % invalid_test[ 'missing_components' ]
+ for invalid_test in missing_test_components:
+ if 'missing_components' in invalid_test:
+ print '# %s' % invalid_test[ 'missing_components' ]
if not info_only:
# Get or create the list of tool_test_results dictionaries.
if repository_metadata.tool_test_results is not None:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0