galaxy-commits
February 2014
commit/galaxy-central: guerler: Upload: Fix popup destruction
by commits-noreply@bitbucket.org 04 Feb '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/62488676f55e/
Changeset: 62488676f55e
User: guerler
Date: 2014-02-04 21:24:35
Summary: Upload: Fix popup destruction
Affected #: 2 files
diff -r 30ae36e55f869920a7959cef0e166a00f0be3d2b -r 62488676f55e37589536bae49b684e3df4ac4b5a static/scripts/mvc/upload/upload-row.js
--- a/static/scripts/mvc/upload/upload-row.js
+++ b/static/scripts/mvc/upload/upload-row.js
@@ -97,9 +97,11 @@
self._refreshGenome();
});
this.model.on('remove', function() {
+ self._destroyExtensionInfo();
self.remove();
});
this.app.collection.on('reset', function() {
+ self._destroyExtensionInfo();
self.remove();
});
},
@@ -263,10 +265,16 @@
// remove popup
var self = this
this.popover_timeout = setTimeout(function() {
- self.$el.find('#extension_info').popover('destroy');
+ self._destroyExtensionInfo();
}, this.options.timeout);
},
+ // destroy file info popup
+ _destroyExtensionInfo : function()
+ {
+ this.$el.find('#extension_info').popover('destroy');
+ },
+
// template
_template: function(options)
{
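For context, a minimal sketch of the cleanup pattern this changeset applies: a Backbone view that owns a Bootstrap popover must destroy the popover before the view's element leaves the DOM, otherwise the popover markup (appended to the container) is left orphaned. Backbone, jQuery and Bootstrap 3's popover plugin are assumed; the names mirror the diff above, but the wiring is illustrative rather than the actual Galaxy module.
// Sketch: tear down the popover on every path that removes the row.
var RowView = Backbone.View.extend({
    initialize: function() {
        var self = this;
        // Both removal paths (model removed, collection reset) must
        // destroy the popover first, hence the shared helper.
        this.model.on('remove', function() {
            self._destroyExtensionInfo();
            self.remove();
        });
    },
    _destroyExtensionInfo: function() {
        // Bootstrap 3: 'destroy' hides the popover and removes its DOM node.
        this.$el.find('#extension_info').popover('destroy');
    }
});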
diff -r 30ae36e55f869920a7959cef0e166a00f0be3d2b -r 62488676f55e37589536bae49b684e3df4ac4b5a static/scripts/packed/mvc/upload/upload-row.js
--- a/static/scripts/packed/mvc/upload/upload-row.js
+++ b/static/scripts/packed/mvc/upload/upload-row.js
@@ -1,1 +1,1 @@
-define(["mvc/upload/upload-model","mvc/upload/upload-extensions"],function(a,b){return Backbone.View.extend({options:{padding:8,timeout:2000},status_classes:{init:"symbol fa fa-trash-o",queued:"symbol fa fa-spinner fa-spin",running:"symbol fa fa-spinner fa-spin",success:"symbol fa fa-check",error:"symbol fa fa-exclamation-triangle"},initialize:function(f,d){this.app=f;var c=this;this.model=new a.Model(d);this.setElement(this._template(d));var e=this.$el;e.find("#symbol").on("click",function(){var g=c.model.get("status");if(g=="init"||g=="success"||g=="error"){c.app.collection.remove(c.model)}});e.find("#extension_info").on("mouseover",function(){c._showExtensionInfo()}).on("mouseleave",function(){c._hideExtensionInfo()});e.find("#text-content").on("keyup",function(){var g=e.find("#text-content");var i=g.val();var h=i.length;e.find("#size").html(c._formatSize(h));c.model.set("url_paste",i);c.model.set("file_size",h)});e.find("#genome").on("change",function(g){c.model.set("genome",$(g.target).val())});e.find("#extension").on("change",function(g){c.model.set("extension",$(g.target).val());c.$el.find("#extension_info").popover("destroy")});e.find("#space_to_tabs").on("change",function(g){c.model.set("space_to_tabs",$(g.target).prop("checked"))});this.model.on("change:percentage",function(){c._refreshPercentage()});this.model.on("change:status",function(){c._refreshStatus()});this.model.on("change:info",function(){c._refreshInfo()});this.model.on("change:genome",function(){c._refreshGenome()});this.model.on("remove",function(){c.remove()});this.app.collection.on("reset",function(){c.remove()})},render:function(){var i=this.model.get("file_name");var d=this.model.get("file_size");var f=this.$el;f.find("#title").html(i);f.find("#size").html(this._formatSize(d));if(d==-1){var h=f.find("#text");var g=this.options.padding;var e=f.width()-2*g;var c=f.height()-g;h.css("width",e+"px");h.css("top",c+"px");f.height(c+h.height()+2*g);h.show()}},_refreshGenome:function(){var c=this.model.get("genome");this.$el.find("#genome").val(c)},_refreshInfo:function(){var c=this.model.get("info");if(c){this.$el.find("#info").html("<strong>Failed: </strong>"+c).show()}else{this.$el.find("#info").hide()}},_refreshPercentage:function(){var c=parseInt(this.model.get("percentage"));this.$el.find(".progress-bar").css({width:c+"%"});if(c!=100){this.$el.find("#percentage").html(c+"%")}else{this.$el.find("#percentage").html("Adding to history...")}},_refreshStatus:function(){var d=this.$el;var c=this.model.get("status");var f=this.status_classes[c];var e=this.$el.find("#symbol");e.removeClass();e.addClass(f);if(c=="init"){d.find("#text-content").attr("disabled",false);d.find("#genome").attr("disabled",false);d.find("#extension").attr("disabled",false);d.find("#space_to_tabs").attr("disabled",false)}else{d.find("#text-content").attr("disabled",true);d.find("#genome").attr("disabled",true);d.find("#extension").attr("disabled",true);d.find("#space_to_tabs").attr("disabled",true)}if(c=="success"){d.addClass("success");d.find("#percentage").html("100%")}if(c=="error"){d.addClass("danger");d.find(".progress").remove()}},_formatSize:function(c){var d="";if(c>=100000000000){c=c/100000000000;d="TB"}else{if(c>=100000000){c=c/100000000;d="GB"}else{if(c>=100000){c=c/100000;d="MB"}else{if(c>=100){c=c/100;d="KB"}else{if(c>0){c=c*10;d="b"}else{return"<strong>-</strong>"}}}}}return"<strong>"+(Math.round(c)/10)+"</strong> "+d},_showExtensionInfo:function(){var c=this;var d=$(this.el).find("#extension_info");var 
f=this.model.get("extension");var e=$(this.el).find("#extension").find("option:selected").text();d.popover({html:true,title:e,content:b(f),placement:"bottom",container:c.$el.parent()});d.popover("show");clearTimeout(this.popover_timeout)},_hideExtensionInfo:function(){var c=this;this.popover_timeout=setTimeout(function(){c.$el.find("#extension_info").popover("destroy")},this.options.timeout)},_template:function(e){var d=this;var c='<tr id="upload-item-'+e.id+'" class="upload-item"><td><div style="position: relative;"><div id="title" class="title"></div><div id="text" class="text"><div class="text-info">You can tell Galaxy to download data from web by entering URL in this box (one per line). You can also directly paste the contents of a file.</div><textarea id="text-content" class="text-content form-control"></textarea></div></div></td><td><div id="size" class="size"></div></td>';c+='<td><select id="extension" class="extension">';for(key in d.app.select_extension){c+='<option value="'+d.app.select_extension[key][1]+'">'+d.app.select_extension[key][0]+"</option>"}c+='</select> <i id="extension_info" class="fa fa-search" style="cursor: pointer;"/></td>';c+='<td><select id="genome" class="genome">';for(key in d.app.select_genome){c+='<option value="'+d.app.select_genome[key][1]+'">'+d.app.select_genome[key][0]+"</option>"}c+="</select></td>";c+='<td><input id="space_to_tabs" type="checkbox"></input></td><td><div id="info" class="info"><div class="progress"><div class="progress-bar progress-bar-success"></div><div id="percentage" class="percentage">0%</div></div></div></td><td><div id="symbol" class="'+this.status_classes.init+'"></div></td></tr>';return c}})});
\ No newline at end of file
+define(["mvc/upload/upload-model","mvc/upload/upload-extensions"],function(a,b){return Backbone.View.extend({options:{padding:8,timeout:2000},status_classes:{init:"symbol fa fa-trash-o",queued:"symbol fa fa-spinner fa-spin",running:"symbol fa fa-spinner fa-spin",success:"symbol fa fa-check",error:"symbol fa fa-exclamation-triangle"},initialize:function(f,d){this.app=f;var c=this;this.model=new a.Model(d);this.setElement(this._template(d));var e=this.$el;e.find("#symbol").on("click",function(){var g=c.model.get("status");if(g=="init"||g=="success"||g=="error"){c.app.collection.remove(c.model)}});e.find("#extension_info").on("mouseover",function(){c._showExtensionInfo()}).on("mouseleave",function(){c._hideExtensionInfo()});e.find("#text-content").on("keyup",function(){var g=e.find("#text-content");var i=g.val();var h=i.length;e.find("#size").html(c._formatSize(h));c.model.set("url_paste",i);c.model.set("file_size",h)});e.find("#genome").on("change",function(g){c.model.set("genome",$(g.target).val())});e.find("#extension").on("change",function(g){c.model.set("extension",$(g.target).val());c.$el.find("#extension_info").popover("destroy")});e.find("#space_to_tabs").on("change",function(g){c.model.set("space_to_tabs",$(g.target).prop("checked"))});this.model.on("change:percentage",function(){c._refreshPercentage()});this.model.on("change:status",function(){c._refreshStatus()});this.model.on("change:info",function(){c._refreshInfo()});this.model.on("change:genome",function(){c._refreshGenome()});this.model.on("remove",function(){c._destroyExtensionInfo();c.remove()});this.app.collection.on("reset",function(){c._destroyExtensionInfo();c.remove()})},render:function(){var i=this.model.get("file_name");var d=this.model.get("file_size");var f=this.$el;f.find("#title").html(i);f.find("#size").html(this._formatSize(d));if(d==-1){var h=f.find("#text");var g=this.options.padding;var e=f.width()-2*g;var c=f.height()-g;h.css("width",e+"px");h.css("top",c+"px");f.height(c+h.height()+2*g);h.show()}},_refreshGenome:function(){var c=this.model.get("genome");this.$el.find("#genome").val(c)},_refreshInfo:function(){var c=this.model.get("info");if(c){this.$el.find("#info").html("<strong>Failed: </strong>"+c).show()}else{this.$el.find("#info").hide()}},_refreshPercentage:function(){var c=parseInt(this.model.get("percentage"));this.$el.find(".progress-bar").css({width:c+"%"});if(c!=100){this.$el.find("#percentage").html(c+"%")}else{this.$el.find("#percentage").html("Adding to history...")}},_refreshStatus:function(){var d=this.$el;var c=this.model.get("status");var f=this.status_classes[c];var e=this.$el.find("#symbol");e.removeClass();e.addClass(f);if(c=="init"){d.find("#text-content").attr("disabled",false);d.find("#genome").attr("disabled",false);d.find("#extension").attr("disabled",false);d.find("#space_to_tabs").attr("disabled",false)}else{d.find("#text-content").attr("disabled",true);d.find("#genome").attr("disabled",true);d.find("#extension").attr("disabled",true);d.find("#space_to_tabs").attr("disabled",true)}if(c=="success"){d.addClass("success");d.find("#percentage").html("100%")}if(c=="error"){d.addClass("danger");d.find(".progress").remove()}},_formatSize:function(c){var d="";if(c>=100000000000){c=c/100000000000;d="TB"}else{if(c>=100000000){c=c/100000000;d="GB"}else{if(c>=100000){c=c/100000;d="MB"}else{if(c>=100){c=c/100;d="KB"}else{if(c>0){c=c*10;d="b"}else{return"<strong>-</strong>"}}}}}return"<strong>"+(Math.round(c)/10)+"</strong> "+d},_showExtensionInfo:function(){var c=this;var 
d=$(this.el).find("#extension_info");var f=this.model.get("extension");var e=$(this.el).find("#extension").find("option:selected").text();d.popover({html:true,title:e,content:b(f),placement:"bottom",container:c.$el.parent()});d.popover("show");clearTimeout(this.popover_timeout)},_hideExtensionInfo:function(){var c=this;this.popover_timeout=setTimeout(function(){c._destroyExtensionInfo()},this.options.timeout)},_destroyExtensionInfo:function(){this.$el.find("#extension_info").popover("destroy")},_template:function(e){var d=this;var c='<tr id="upload-item-'+e.id+'" class="upload-item"><td><div style="position: relative;"><div id="title" class="title"></div><div id="text" class="text"><div class="text-info">You can tell Galaxy to download data from web by entering URL in this box (one per line). You can also directly paste the contents of a file.</div><textarea id="text-content" class="text-content form-control"></textarea></div></div></td><td><div id="size" class="size"></div></td>';c+='<td><select id="extension" class="extension">';for(key in d.app.select_extension){c+='<option value="'+d.app.select_extension[key][1]+'">'+d.app.select_extension[key][0]+"</option>"}c+='</select> <i id="extension_info" class="fa fa-search" style="cursor: pointer;"/></td>';c+='<td><select id="genome" class="genome">';for(key in d.app.select_genome){c+='<option value="'+d.app.select_genome[key][1]+'">'+d.app.select_genome[key][0]+"</option>"}c+="</select></td>";c+='<td><input id="space_to_tabs" type="checkbox"></input></td><td><div id="info" class="info"><div class="progress"><div class="progress-bar progress-bar-success"></div><div id="percentage" class="percentage">0%</div></div></div></td><td><div id="symbol" class="'+this.status_classes.init+'"></div></td></tr>';return c}})});
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: guerler: Upload: Change icon
by commits-noreply@bitbucket.org 04 Feb '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/30ae36e55f86/
Changeset: 30ae36e55f86
User: guerler
Date: 2014-02-04 21:02:46
Summary: Upload: Change icon
Affected #: 2 files
diff -r e1ac3242786b055b8cc2b80ed701643f41c1e42e -r 30ae36e55f869920a7959cef0e166a00f0be3d2b static/scripts/mvc/upload/upload-row.js
--- a/static/scripts/mvc/upload/upload-row.js
+++ b/static/scripts/mvc/upload/upload-row.js
@@ -292,7 +292,7 @@
for (key in self.app.select_extension)
tmpl += '<option value="' + self.app.select_extension[key][1] + '">' + self.app.select_extension[key][0] + '</option>';
tmpl += '</select>' +
- ' <i id="extension_info" class="fa fa-question" style="cursor: pointer;"/>' +
+ ' <i id="extension_info" class="fa fa-search" style="cursor: pointer;"/>' +
'</td>';
// add genome selector
diff -r e1ac3242786b055b8cc2b80ed701643f41c1e42e -r 30ae36e55f869920a7959cef0e166a00f0be3d2b static/scripts/packed/mvc/upload/upload-row.js
--- a/static/scripts/packed/mvc/upload/upload-row.js
+++ b/static/scripts/packed/mvc/upload/upload-row.js
@@ -1,1 +1,1 @@
-define(["mvc/upload/upload-model","mvc/upload/upload-extensions"],function(a,b){return Backbone.View.extend({options:{padding:8,timeout:2000},status_classes:{init:"symbol fa fa-trash-o",queued:"symbol fa fa-spinner fa-spin",running:"symbol fa fa-spinner fa-spin",success:"symbol fa fa-check",error:"symbol fa fa-exclamation-triangle"},initialize:function(f,d){this.app=f;var c=this;this.model=new a.Model(d);this.setElement(this._template(d));var e=this.$el;e.find("#symbol").on("click",function(){var g=c.model.get("status");if(g=="init"||g=="success"||g=="error"){c.app.collection.remove(c.model)}});e.find("#extension_info").on("mouseover",function(){c._showExtensionInfo()}).on("mouseleave",function(){c._hideExtensionInfo()});e.find("#text-content").on("keyup",function(){var g=e.find("#text-content");var i=g.val();var h=i.length;e.find("#size").html(c._formatSize(h));c.model.set("url_paste",i);c.model.set("file_size",h)});e.find("#genome").on("change",function(g){c.model.set("genome",$(g.target).val())});e.find("#extension").on("change",function(g){c.model.set("extension",$(g.target).val());c.$el.find("#extension_info").popover("destroy")});e.find("#space_to_tabs").on("change",function(g){c.model.set("space_to_tabs",$(g.target).prop("checked"))});this.model.on("change:percentage",function(){c._refreshPercentage()});this.model.on("change:status",function(){c._refreshStatus()});this.model.on("change:info",function(){c._refreshInfo()});this.model.on("change:genome",function(){c._refreshGenome()});this.model.on("remove",function(){c.remove()});this.app.collection.on("reset",function(){c.remove()})},render:function(){var i=this.model.get("file_name");var d=this.model.get("file_size");var f=this.$el;f.find("#title").html(i);f.find("#size").html(this._formatSize(d));if(d==-1){var h=f.find("#text");var g=this.options.padding;var e=f.width()-2*g;var c=f.height()-g;h.css("width",e+"px");h.css("top",c+"px");f.height(c+h.height()+2*g);h.show()}},_refreshGenome:function(){var c=this.model.get("genome");this.$el.find("#genome").val(c)},_refreshInfo:function(){var c=this.model.get("info");if(c){this.$el.find("#info").html("<strong>Failed: </strong>"+c).show()}else{this.$el.find("#info").hide()}},_refreshPercentage:function(){var c=parseInt(this.model.get("percentage"));this.$el.find(".progress-bar").css({width:c+"%"});if(c!=100){this.$el.find("#percentage").html(c+"%")}else{this.$el.find("#percentage").html("Adding to history...")}},_refreshStatus:function(){var d=this.$el;var c=this.model.get("status");var f=this.status_classes[c];var e=this.$el.find("#symbol");e.removeClass();e.addClass(f);if(c=="init"){d.find("#text-content").attr("disabled",false);d.find("#genome").attr("disabled",false);d.find("#extension").attr("disabled",false);d.find("#space_to_tabs").attr("disabled",false)}else{d.find("#text-content").attr("disabled",true);d.find("#genome").attr("disabled",true);d.find("#extension").attr("disabled",true);d.find("#space_to_tabs").attr("disabled",true)}if(c=="success"){d.addClass("success");d.find("#percentage").html("100%")}if(c=="error"){d.addClass("danger");d.find(".progress").remove()}},_formatSize:function(c){var d="";if(c>=100000000000){c=c/100000000000;d="TB"}else{if(c>=100000000){c=c/100000000;d="GB"}else{if(c>=100000){c=c/100000;d="MB"}else{if(c>=100){c=c/100;d="KB"}else{if(c>0){c=c*10;d="b"}else{return"<strong>-</strong>"}}}}}return"<strong>"+(Math.round(c)/10)+"</strong> "+d},_showExtensionInfo:function(){var c=this;var d=$(this.el).find("#extension_info");var 
f=this.model.get("extension");var e=$(this.el).find("#extension").find("option:selected").text();d.popover({html:true,title:e,content:b(f),placement:"bottom",container:c.$el.parent()});d.popover("show");clearTimeout(this.popover_timeout)},_hideExtensionInfo:function(){var c=this;this.popover_timeout=setTimeout(function(){c.$el.find("#extension_info").popover("destroy")},this.options.timeout)},_template:function(e){var d=this;var c='<tr id="upload-item-'+e.id+'" class="upload-item"><td><div style="position: relative;"><div id="title" class="title"></div><div id="text" class="text"><div class="text-info">You can tell Galaxy to download data from web by entering URL in this box (one per line). You can also directly paste the contents of a file.</div><textarea id="text-content" class="text-content form-control"></textarea></div></div></td><td><div id="size" class="size"></div></td>';c+='<td><select id="extension" class="extension">';for(key in d.app.select_extension){c+='<option value="'+d.app.select_extension[key][1]+'">'+d.app.select_extension[key][0]+"</option>"}c+='</select> <i id="extension_info" class="fa fa-question" style="cursor: pointer;"/></td>';c+='<td><select id="genome" class="genome">';for(key in d.app.select_genome){c+='<option value="'+d.app.select_genome[key][1]+'">'+d.app.select_genome[key][0]+"</option>"}c+="</select></td>";c+='<td><input id="space_to_tabs" type="checkbox"></input></td><td><div id="info" class="info"><div class="progress"><div class="progress-bar progress-bar-success"></div><div id="percentage" class="percentage">0%</div></div></div></td><td><div id="symbol" class="'+this.status_classes.init+'"></div></td></tr>';return c}})});
\ No newline at end of file
+define(["mvc/upload/upload-model","mvc/upload/upload-extensions"],function(a,b){return Backbone.View.extend({options:{padding:8,timeout:2000},status_classes:{init:"symbol fa fa-trash-o",queued:"symbol fa fa-spinner fa-spin",running:"symbol fa fa-spinner fa-spin",success:"symbol fa fa-check",error:"symbol fa fa-exclamation-triangle"},initialize:function(f,d){this.app=f;var c=this;this.model=new a.Model(d);this.setElement(this._template(d));var e=this.$el;e.find("#symbol").on("click",function(){var g=c.model.get("status");if(g=="init"||g=="success"||g=="error"){c.app.collection.remove(c.model)}});e.find("#extension_info").on("mouseover",function(){c._showExtensionInfo()}).on("mouseleave",function(){c._hideExtensionInfo()});e.find("#text-content").on("keyup",function(){var g=e.find("#text-content");var i=g.val();var h=i.length;e.find("#size").html(c._formatSize(h));c.model.set("url_paste",i);c.model.set("file_size",h)});e.find("#genome").on("change",function(g){c.model.set("genome",$(g.target).val())});e.find("#extension").on("change",function(g){c.model.set("extension",$(g.target).val());c.$el.find("#extension_info").popover("destroy")});e.find("#space_to_tabs").on("change",function(g){c.model.set("space_to_tabs",$(g.target).prop("checked"))});this.model.on("change:percentage",function(){c._refreshPercentage()});this.model.on("change:status",function(){c._refreshStatus()});this.model.on("change:info",function(){c._refreshInfo()});this.model.on("change:genome",function(){c._refreshGenome()});this.model.on("remove",function(){c.remove()});this.app.collection.on("reset",function(){c.remove()})},render:function(){var i=this.model.get("file_name");var d=this.model.get("file_size");var f=this.$el;f.find("#title").html(i);f.find("#size").html(this._formatSize(d));if(d==-1){var h=f.find("#text");var g=this.options.padding;var e=f.width()-2*g;var c=f.height()-g;h.css("width",e+"px");h.css("top",c+"px");f.height(c+h.height()+2*g);h.show()}},_refreshGenome:function(){var c=this.model.get("genome");this.$el.find("#genome").val(c)},_refreshInfo:function(){var c=this.model.get("info");if(c){this.$el.find("#info").html("<strong>Failed: </strong>"+c).show()}else{this.$el.find("#info").hide()}},_refreshPercentage:function(){var c=parseInt(this.model.get("percentage"));this.$el.find(".progress-bar").css({width:c+"%"});if(c!=100){this.$el.find("#percentage").html(c+"%")}else{this.$el.find("#percentage").html("Adding to history...")}},_refreshStatus:function(){var d=this.$el;var c=this.model.get("status");var f=this.status_classes[c];var e=this.$el.find("#symbol");e.removeClass();e.addClass(f);if(c=="init"){d.find("#text-content").attr("disabled",false);d.find("#genome").attr("disabled",false);d.find("#extension").attr("disabled",false);d.find("#space_to_tabs").attr("disabled",false)}else{d.find("#text-content").attr("disabled",true);d.find("#genome").attr("disabled",true);d.find("#extension").attr("disabled",true);d.find("#space_to_tabs").attr("disabled",true)}if(c=="success"){d.addClass("success");d.find("#percentage").html("100%")}if(c=="error"){d.addClass("danger");d.find(".progress").remove()}},_formatSize:function(c){var d="";if(c>=100000000000){c=c/100000000000;d="TB"}else{if(c>=100000000){c=c/100000000;d="GB"}else{if(c>=100000){c=c/100000;d="MB"}else{if(c>=100){c=c/100;d="KB"}else{if(c>0){c=c*10;d="b"}else{return"<strong>-</strong>"}}}}}return"<strong>"+(Math.round(c)/10)+"</strong> "+d},_showExtensionInfo:function(){var c=this;var d=$(this.el).find("#extension_info");var 
f=this.model.get("extension");var e=$(this.el).find("#extension").find("option:selected").text();d.popover({html:true,title:e,content:b(f),placement:"bottom",container:c.$el.parent()});d.popover("show");clearTimeout(this.popover_timeout)},_hideExtensionInfo:function(){var c=this;this.popover_timeout=setTimeout(function(){c.$el.find("#extension_info").popover("destroy")},this.options.timeout)},_template:function(e){var d=this;var c='<tr id="upload-item-'+e.id+'" class="upload-item"><td><div style="position: relative;"><div id="title" class="title"></div><div id="text" class="text"><div class="text-info">You can tell Galaxy to download data from web by entering URL in this box (one per line). You can also directly paste the contents of a file.</div><textarea id="text-content" class="text-content form-control"></textarea></div></div></td><td><div id="size" class="size"></div></td>';c+='<td><select id="extension" class="extension">';for(key in d.app.select_extension){c+='<option value="'+d.app.select_extension[key][1]+'">'+d.app.select_extension[key][0]+"</option>"}c+='</select> <i id="extension_info" class="fa fa-search" style="cursor: pointer;"/></td>';c+='<td><select id="genome" class="genome">';for(key in d.app.select_genome){c+='<option value="'+d.app.select_genome[key][1]+'">'+d.app.select_genome[key][0]+"</option>"}c+="</select></td>";c+='<td><input id="space_to_tabs" type="checkbox"></input></td><td><div id="info" class="info"><div class="progress"><div class="progress-bar progress-bar-success"></div><div id="percentage" class="percentage">0%</div></div></div></td><td><div id="symbol" class="'+this.status_classes.init+'"></div></td></tr>';return c}})});
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: guerler: Upload: Add file extension descriptions
by commits-noreply@bitbucket.org 04 Feb '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e1ac3242786b/
Changeset: e1ac3242786b
User: guerler
Date: 2014-02-04 20:53:26
Summary: Upload: Add file extension descriptions
Affected #: 8 files
diff -r 82d2c2109792d098ddc5ede7992ae132e596584a -r e1ac3242786b055b8cc2b80ed701643f41c1e42e static/scripts/mvc/upload/upload-extensions.js
--- /dev/null
+++ b/static/scripts/mvc/upload/upload-extensions.js
@@ -0,0 +1,110 @@
+// dependencies
+define([], function() {
+
+var dictionary = {
+
+ auto : {
+ text: 'The system will attempt to detect Axt, Fasta, Fastqsolexa, Gff, Gff3, Html, Lav, Maf, Tabular, Wiggle, Bed and Interval (Bed with headers) formats. If your file is not detected properly as one of the known formats, it most likely means that it has some format problems (e.g., different number of columns on different rows). You can still coerce the system to set your data to the format you think it should be. You can also upload compressed files, which will automatically be decompressed'
+ },
+
+ ab1 : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#Ab1',
+ text: 'A binary sequence file in \'ab1\' format with a \'.ab1\' file extension. You must manually select this \'File Format\' when uploading the file.'
+ },
+
+ axt : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#Axt',
+ text: 'blastz pairwise alignment format. Each alignment block in an axt file contains three lines: a summary line and 2 sequence lines. Blocks are separated from one another by blank lines. The summary line contains chromosomal position and size information about the alignment. It consists of 9 required fields.'
+ },
+
+ bam : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#BAM',
+ text: 'A binary file compressed in the BGZF format with a \'.bam\' file extension.'
+ },
+
+ bed : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#Bed',
+ text: 'BED format provides a flexible way to define the data lines that are displayed in an annotation track. BED lines have three required columns and nine additional optional columns. The three required columns are chrom, chromStart and chromEnd.'
+ },
+
+ fasta : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#Fasta',
+ text: 'A sequence in FASTA format consists of a single-line description, followed by lines of sequence data. The first character of the description line is a greater-than (">") symbol in the first column. All lines should be shorter than 80 characters.'
+ },
+
+ fastq : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#Fastq',
+ text: 'FASTQ format is a text-based format for storing both a biological sequence (usually nucleotide sequence) and its corresponding quality scores. '
+ },
+
+ fastqsolexa : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#FastqSolexa',
+ text: 'FastqSolexa is the Illumina (Solexa) variant of the Fastq format, which stores sequences and quality scores in a single file.'
+ },
+
+ gff : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#GFF',
+ text: 'GFF lines have nine required fields that must be tab-separated.'
+ },
+
+ gff3 : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#GFF3',
+ text: 'The GFF3 format addresses the most common extensions to GFF, while preserving backward compatibility with previous formats.'
+ },
+
+ interval : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#GFF3',
+ text: 'File must start with definition line in the following format (columns may be in any order).'
+ },
+
+ lav : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#GFF3',
+ text: 'Lav is the primary output format for BLASTZ. The first line of a .lav file begins with #:lav..'
+ },
+
+ maf : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#MAF',
+ text: 'TBA and multiz multiple alignment format. The first line of a .maf file begins with ##maf. This word is followed by white-space-separated "variable=value" pairs. There should be no white space surrounding the "=".'
+ },
+
+ scf : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#Scf',
+ text: 'A binary sequence file in \'scf\' format with a \'.scf\' file extension. You must manually select this \'File Format\' when uploading the file.'
+ },
+
+ sff : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#Sff',
+ text: 'A binary file in \'Standard Flowgram Format\' with a \'.sff\' file extension.'
+ },
+
+ tabular : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#Tabular_.28tab_delimited.29',
+ text: 'Any data in tab delimited format (tabular).'
+ },
+
+ wig : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#Wig',
+ text: 'The wiggle format is line-oriented. Wiggle data is preceded by a track definition line, which adds a number of options for controlling the default display of this track.'
+ },
+
+ txt : {
+ url : 'https://wiki.galaxyproject.org/Learn/Datatypes#Plain_text',
+ text: 'Any text file.'
+ },
+
+};
+
+// create description content
+return function(key) {
+ var description = dictionary[key];
+ if (description) {
+ var tmpl = description.text;
+ if (description.url) {
+ tmpl += ' (<a href="' + description.url + '" target="_blank">read more</a>)';
+ }
+ return tmpl;
+ } else {
+ return 'There is no description available for this file extension.';
+ }
+}
+});
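The module above resolves an extension key to an HTML description string, appending a "read more" link when a wiki URL is available and falling back to a generic message for unknown keys. A hypothetical usage sketch, assuming an AMD loader (require.js) as used by Galaxy's static scripts:
require(['mvc/upload/upload-extensions'], function(UploadExtensions) {
    // Known key: description text plus a "(read more)" link.
    console.log(UploadExtensions('bed'));
    // Unknown key: the generic fallback.
    console.log(UploadExtensions('foo'));
    // => "There is no description available for this file extension."
});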
diff -r 82d2c2109792d098ddc5ede7992ae132e596584a -r e1ac3242786b055b8cc2b80ed701643f41c1e42e static/scripts/mvc/upload/upload-row.js
--- a/static/scripts/mvc/upload/upload-row.js
+++ b/static/scripts/mvc/upload/upload-row.js
@@ -1,11 +1,12 @@
// dependencies
-define(['mvc/upload/upload-model'], function(UploadModel) {
+define(['mvc/upload/upload-model', 'mvc/upload/upload-extensions'], function(UploadModel, UploadExtensions) {
// item view
return Backbone.View.extend({
// options
options: {
- padding : 8
+ padding : 8,
+ timeout : 2000
},
// states
@@ -46,6 +47,10 @@
self.app.collection.remove(self.model);
}
});
+
+ // handle mouse over
+ it.find('#extension_info').on('mouseover' , function() { self._showExtensionInfo(); })
+ .on('mouseleave', function() { self._hideExtensionInfo(); });
// handle text editing event
it.find('#text-content').on('keyup', function() {
@@ -70,6 +75,7 @@
// handle extension selection
it.find('#extension').on('change', function(e) {
self.model.set('extension', $(e.target).val());
+ self.$el.find('#extension_info').popover('destroy');
});
// handle space to tabs button
@@ -139,7 +145,7 @@
// genome
_refreshGenome: function()
{
- // write error message
+ // update genome info on screen
var genome = this.model.get('genome');
this.$el.find('#genome').val(genome);
},
@@ -226,6 +232,41 @@
return '<strong>' + (Math.round(size) / 10) + '</strong> ' + unit;
},
+ // attach file info popup
+ _showExtensionInfo : function()
+ {
+ // initialize
+ var self = this;
+ var $el = $(this.el).find('#extension_info');
+ var extension = this.model.get('extension');
+ var title = $(this.el).find('#extension').find('option:selected').text();
+
+ // create popup
+ $el.popover({
+ html: true,
+ title: title,
+ content: UploadExtensions(extension),
+ placement: 'bottom',
+ container: self.$el.parent()
+ });
+
+ // show popup
+ $el.popover('show');
+
+ // clear previous timers
+ clearTimeout(this.popover_timeout);
+ },
+
+ // hide file info popup after a delay
+ _hideExtensionInfo : function()
+ {
+ // remove popup
+ var self = this
+ this.popover_timeout = setTimeout(function() {
+ self.$el.find('#extension_info').popover('destroy');
+ }, this.options.timeout);
+ },
+
// template
_template: function(options)
{
@@ -251,6 +292,7 @@
for (key in self.app.select_extension)
tmpl += '<option value="' + self.app.select_extension[key][1] + '">' + self.app.select_extension[key][0] + '</option>';
tmpl += '</select>' +
+ ' <i id="extension_info" class="fa fa-question" style="cursor: pointer;"/>' +
'</td>';
// add genome selector
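The show/hide pair above implements a delayed-dismissal pattern: hovering the icon shows the popover and cancels any pending teardown, while leaving schedules the teardown after a grace period (the new 'timeout' option, 2000 ms) so the pointer can travel onto the popover without it vanishing. A minimal standalone sketch, assuming jQuery and Bootstrap 3; the selector and timeout are taken from the diff:
var popoverTimeout = null;
$('#extension_info')
    .on('mouseover', function() {
        $(this).popover('show');
        clearTimeout(popoverTimeout);   // keep it open while hovered
    })
    .on('mouseleave', function() {
        var $icon = $(this);
        popoverTimeout = setTimeout(function() {
            $icon.popover('destroy');   // tear down after the grace period
        }, 2000);                       // matches options.timeout above
    });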
diff -r 82d2c2109792d098ddc5ede7992ae132e596584a -r e1ac3242786b055b8cc2b80ed701643f41c1e42e static/scripts/packed/mvc/upload/upload-extensions.js
--- /dev/null
+++ b/static/scripts/packed/mvc/upload/upload-extensions.js
@@ -0,0 +1,1 @@
+define([],function(){var a={auto:{text:"The system will attempt to detect Axt, Fasta, Fastqsolexa, Gff, Gff3, Html, Lav, Maf, Tabular, Wiggle, Bed and Interval (Bed with headers) formats. If your file is not detected properly as one of the known formats, it most likely means that it has some format problems (e.g., different number of columns on different rows). You can still coerce the system to set your data to the format you think it should be. You can also upload compressed files, which will automatically be decompressed"},ab1:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#Ab1",text:"A binary sequence file in 'ab1' format with a '.ab1' file extension. You must manually select this 'File Format' when uploading the file."},axt:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#Axt",text:"blastz pairwise alignment format. Each alignment block in an axt file contains three lines: a summary line and 2 sequence lines. Blocks are separated from one another by blank lines. The summary line contains chromosomal position and size information about the alignment. It consists of 9 required fields."},bam:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#BAM",text:"A binary file compressed in the BGZF format with a '.bam' file extension."},bed:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#Bed",text:"BED format provides a flexible way to define the data lines that are displayed in an annotation track. BED lines have three required columns and nine additional optional columns. The three required columns are chrom, chromStart and chromEnd."},fasta:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#Fasta",text:'A sequence in FASTA format consists of a single-line description, followed by lines of sequence data. The first character of the description line is a greater-than (">") symbol in the first column. All lines should be shorter than 80 characters.'},fastq:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#Fastq",text:"FASTQ format is a text-based format for storing both a biological sequence (usually nucleotide sequence) and its corresponding quality scores. "},fastqsolexa:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#FastqSolexa",text:"FastqSolexa is the Illumina (Solexa) variant of the Fastq format, which stores sequences and quality scores in a single file."},gff:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#GFF",text:"GFF lines have nine required fields that must be tab-separated."},gff3:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#GFF3",text:"The GFF3 format addresses the most common extensions to GFF, while preserving backward compatibility with previous formats."},interval:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#GFF3",text:"File must start with definition line in the following format (columns may be in any order)."},lav:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#GFF3",text:"Lav is the primary output format for BLASTZ. The first line of a .lav file begins with #:lav.."},maf:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#MAF",text:'TBA and multiz multiple alignment format. The first line of a .maf file begins with ##maf. This word is followed by white-space-separated "variable=value" pairs. There should be no white space surrounding the "=".'},scf:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#Scf",text:"A binary sequence file in 'scf' format with a '.scf' file extension. 
You must manually select this 'File Format' when uploading the file."},sff:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#Sff",text:"A binary file in 'Standard Flowgram Format' with a '.sff' file extension."},tabular:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#Tabular_.28tab_delimited.29",text:"Any data in tab delimited format (tabular)."},wig:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#Wig",text:"The wiggle format is line-oriented. Wiggle data is preceded by a track definition line, which adds a number of options for controlling the default display of this track."},txt:{url:"https://wiki.galaxyproject.org/Learn/Datatypes#Plain_text",text:"Any text file."},};return function(c){var d=a[c];if(d){var b=d.text;if(d.url){b+=' (<a href="'+d.url+'" target="_blank">read more</a>)'}return b}else{return"There is no description available for this file extension."}}});
\ No newline at end of file
diff -r 82d2c2109792d098ddc5ede7992ae132e596584a -r e1ac3242786b055b8cc2b80ed701643f41c1e42e static/scripts/packed/mvc/upload/upload-row.js
--- a/static/scripts/packed/mvc/upload/upload-row.js
+++ b/static/scripts/packed/mvc/upload/upload-row.js
@@ -1,1 +1,1 @@
-define(["mvc/upload/upload-model"],function(a){return Backbone.View.extend({options:{padding:8},status_classes:{init:"symbol fa fa-trash-o",queued:"symbol fa fa-spinner fa-spin",running:"symbol fa fa-spinner fa-spin",success:"symbol fa fa-check",error:"symbol fa fa-exclamation-triangle"},initialize:function(e,c){this.app=e;var b=this;this.model=new a.Model(c);this.setElement(this._template(c));var d=this.$el;d.find("#symbol").on("click",function(){var f=b.model.get("status");if(f=="init"||f=="success"||f=="error"){b.app.collection.remove(b.model)}});d.find("#text-content").on("keyup",function(){var f=d.find("#text-content");var h=f.val();var g=h.length;d.find("#size").html(b._formatSize(g));b.model.set("url_paste",h);b.model.set("file_size",g)});d.find("#genome").on("change",function(f){b.model.set("genome",$(f.target).val())});d.find("#extension").on("change",function(f){b.model.set("extension",$(f.target).val())});d.find("#space_to_tabs").on("change",function(f){b.model.set("space_to_tabs",$(f.target).prop("checked"))});this.model.on("change:percentage",function(){b._refreshPercentage()});this.model.on("change:status",function(){b._refreshStatus()});this.model.on("change:info",function(){b._refreshInfo()});this.model.on("change:genome",function(){b._refreshGenome()});this.model.on("remove",function(){b.remove()});this.app.collection.on("reset",function(){b.remove()})},render:function(){var h=this.model.get("file_name");var c=this.model.get("file_size");var e=this.$el;e.find("#title").html(h);e.find("#size").html(this._formatSize(c));if(c==-1){var g=e.find("#text");var f=this.options.padding;var d=e.width()-2*f;var b=e.height()-f;g.css("width",d+"px");g.css("top",b+"px");e.height(b+g.height()+2*f);g.show()}},_refreshGenome:function(){var b=this.model.get("genome");this.$el.find("#genome").val(b)},_refreshInfo:function(){var b=this.model.get("info");if(b){this.$el.find("#info").html("<strong>Failed: </strong>"+b).show()}else{this.$el.find("#info").hide()}},_refreshPercentage:function(){var b=parseInt(this.model.get("percentage"));this.$el.find(".progress-bar").css({width:b+"%"});if(b!=100){this.$el.find("#percentage").html(b+"%")}else{this.$el.find("#percentage").html("Adding to history...")}},_refreshStatus:function(){var c=this.$el;var b=this.model.get("status");var e=this.status_classes[b];var d=this.$el.find("#symbol");d.removeClass();d.addClass(e);if(b=="init"){c.find("#text-content").attr("disabled",false);c.find("#genome").attr("disabled",false);c.find("#extension").attr("disabled",false);c.find("#space_to_tabs").attr("disabled",false)}else{c.find("#text-content").attr("disabled",true);c.find("#genome").attr("disabled",true);c.find("#extension").attr("disabled",true);c.find("#space_to_tabs").attr("disabled",true)}if(b=="success"){c.addClass("success");c.find("#percentage").html("100%")}if(b=="error"){c.addClass("danger");c.find(".progress").remove()}},_formatSize:function(b){var c="";if(b>=100000000000){b=b/100000000000;c="TB"}else{if(b>=100000000){b=b/100000000;c="GB"}else{if(b>=100000){b=b/100000;c="MB"}else{if(b>=100){b=b/100;c="KB"}else{if(b>0){b=b*10;c="b"}else{return"<strong>-</strong>"}}}}}return"<strong>"+(Math.round(b)/10)+"</strong> "+c},_template:function(d){var c=this;var b='<tr id="upload-item-'+d.id+'" class="upload-item"><td><div style="position: relative;"><div id="title" class="title"></div><div id="text" class="text"><div class="text-info">You can tell Galaxy to download data from web by entering URL in this box (one per line). 
You can also directly paste the contents of a file.</div><textarea id="text-content" class="text-content form-control"></textarea></div></div></td><td><div id="size" class="size"></div></td>';b+='<td><select id="extension" class="extension">';for(key in c.app.select_extension){b+='<option value="'+c.app.select_extension[key][1]+'">'+c.app.select_extension[key][0]+"</option>"}b+="</select></td>";b+='<td><select id="genome" class="genome">';for(key in c.app.select_genome){b+='<option value="'+c.app.select_genome[key][1]+'">'+c.app.select_genome[key][0]+"</option>"}b+="</select></td>";b+='<td><input id="space_to_tabs" type="checkbox"></input></td><td><div id="info" class="info"><div class="progress"><div class="progress-bar progress-bar-success"></div><div id="percentage" class="percentage">0%</div></div></div></td><td><div id="symbol" class="'+this.status_classes.init+'"></div></td></tr>';return b}})});
\ No newline at end of file
+define(["mvc/upload/upload-model","mvc/upload/upload-extensions"],function(a,b){return Backbone.View.extend({options:{padding:8,timeout:2000},status_classes:{init:"symbol fa fa-trash-o",queued:"symbol fa fa-spinner fa-spin",running:"symbol fa fa-spinner fa-spin",success:"symbol fa fa-check",error:"symbol fa fa-exclamation-triangle"},initialize:function(f,d){this.app=f;var c=this;this.model=new a.Model(d);this.setElement(this._template(d));var e=this.$el;e.find("#symbol").on("click",function(){var g=c.model.get("status");if(g=="init"||g=="success"||g=="error"){c.app.collection.remove(c.model)}});e.find("#extension_info").on("mouseover",function(){c._showExtensionInfo()}).on("mouseleave",function(){c._hideExtensionInfo()});e.find("#text-content").on("keyup",function(){var g=e.find("#text-content");var i=g.val();var h=i.length;e.find("#size").html(c._formatSize(h));c.model.set("url_paste",i);c.model.set("file_size",h)});e.find("#genome").on("change",function(g){c.model.set("genome",$(g.target).val())});e.find("#extension").on("change",function(g){c.model.set("extension",$(g.target).val());c.$el.find("#extension_info").popover("destroy")});e.find("#space_to_tabs").on("change",function(g){c.model.set("space_to_tabs",$(g.target).prop("checked"))});this.model.on("change:percentage",function(){c._refreshPercentage()});this.model.on("change:status",function(){c._refreshStatus()});this.model.on("change:info",function(){c._refreshInfo()});this.model.on("change:genome",function(){c._refreshGenome()});this.model.on("remove",function(){c.remove()});this.app.collection.on("reset",function(){c.remove()})},render:function(){var i=this.model.get("file_name");var d=this.model.get("file_size");var f=this.$el;f.find("#title").html(i);f.find("#size").html(this._formatSize(d));if(d==-1){var h=f.find("#text");var g=this.options.padding;var e=f.width()-2*g;var c=f.height()-g;h.css("width",e+"px");h.css("top",c+"px");f.height(c+h.height()+2*g);h.show()}},_refreshGenome:function(){var c=this.model.get("genome");this.$el.find("#genome").val(c)},_refreshInfo:function(){var c=this.model.get("info");if(c){this.$el.find("#info").html("<strong>Failed: </strong>"+c).show()}else{this.$el.find("#info").hide()}},_refreshPercentage:function(){var c=parseInt(this.model.get("percentage"));this.$el.find(".progress-bar").css({width:c+"%"});if(c!=100){this.$el.find("#percentage").html(c+"%")}else{this.$el.find("#percentage").html("Adding to history...")}},_refreshStatus:function(){var d=this.$el;var c=this.model.get("status");var f=this.status_classes[c];var e=this.$el.find("#symbol");e.removeClass();e.addClass(f);if(c=="init"){d.find("#text-content").attr("disabled",false);d.find("#genome").attr("disabled",false);d.find("#extension").attr("disabled",false);d.find("#space_to_tabs").attr("disabled",false)}else{d.find("#text-content").attr("disabled",true);d.find("#genome").attr("disabled",true);d.find("#extension").attr("disabled",true);d.find("#space_to_tabs").attr("disabled",true)}if(c=="success"){d.addClass("success");d.find("#percentage").html("100%")}if(c=="error"){d.addClass("danger");d.find(".progress").remove()}},_formatSize:function(c){var d="";if(c>=100000000000){c=c/100000000000;d="TB"}else{if(c>=100000000){c=c/100000000;d="GB"}else{if(c>=100000){c=c/100000;d="MB"}else{if(c>=100){c=c/100;d="KB"}else{if(c>0){c=c*10;d="b"}else{return"<strong>-</strong>"}}}}}return"<strong>"+(Math.round(c)/10)+"</strong> "+d},_showExtensionInfo:function(){var c=this;var d=$(this.el).find("#extension_info");var 
f=this.model.get("extension");var e=$(this.el).find("#extension").find("option:selected").text();d.popover({html:true,title:e,content:b(f),placement:"bottom",container:c.$el.parent()});d.popover("show");clearTimeout(this.popover_timeout)},_hideExtensionInfo:function(){var c=this;this.popover_timeout=setTimeout(function(){c.$el.find("#extension_info").popover("destroy")},this.options.timeout)},_template:function(e){var d=this;var c='<tr id="upload-item-'+e.id+'" class="upload-item"><td><div style="position: relative;"><div id="title" class="title"></div><div id="text" class="text"><div class="text-info">You can tell Galaxy to download data from web by entering URL in this box (one per line). You can also directly paste the contents of a file.</div><textarea id="text-content" class="text-content form-control"></textarea></div></div></td><td><div id="size" class="size"></div></td>';c+='<td><select id="extension" class="extension">';for(key in d.app.select_extension){c+='<option value="'+d.app.select_extension[key][1]+'">'+d.app.select_extension[key][0]+"</option>"}c+='</select> <i id="extension_info" class="fa fa-question" style="cursor: pointer;"/></td>';c+='<td><select id="genome" class="genome">';for(key in d.app.select_genome){c+='<option value="'+d.app.select_genome[key][1]+'">'+d.app.select_genome[key][0]+"</option>"}c+="</select></td>";c+='<td><input id="space_to_tabs" type="checkbox"></input></td><td><div id="info" class="info"><div class="progress"><div class="progress-bar progress-bar-success"></div><div id="percentage" class="percentage">0%</div></div></div></td><td><div id="symbol" class="'+this.status_classes.init+'"></div></td></tr>';return c}})});
\ No newline at end of file
diff -r 82d2c2109792d098ddc5ede7992ae132e596584a -r e1ac3242786b055b8cc2b80ed701643f41c1e42e static/scripts/packed/utils/uploadbox.js
--- a/static/scripts/packed/utils/uploadbox.js
+++ b/static/scripts/packed/utils/uploadbox.js
@@ -1,1 +1,1 @@
-(function(c){jQuery.event.props.push("dataTransfer");var h={url:"",paramname:"content",maxfilesize:2048,maxfilenumber:20,dragover:function(){},dragleave:function(){},announce:function(){},initialize:function(){},progress:function(){},success:function(){},error:function(k,l,m){alert(m)},complete:function(){},error_filesize:"File exceeds 2GB. Please use an FTP client.",error_default:"Please make sure the file is available.",error_server:"Upload request failed.",error_login:"Uploads require you to log in.",error_missing:"No upload content available."};var a={};var e={};var f=0;var j=0;var d=false;var g=false;var b=null;var i=null;c.fn.uploadbox=function(A){a=c.extend({},h,A);b=this;b.append('<input id="uploadbox_input" type="file" style="display: none" multiple>');b.on("drop",o);b.on("dragover",p);b.on("dragleave",x);c("#uploadbox_input").change(function(B){z(B.target.files);c(this).val("")});function o(B){if(!B.dataTransfer){return}z(B.dataTransfer.files);B.preventDefault();return false}function p(B){B.preventDefault();a.dragover.call(B)}function x(B){B.stopPropagation();a.dragleave.call(B)}function k(B){if(B.lengthComputable){a.progress(this.index,this.file,Math.round((B.loaded*100)/B.total))}}function z(D){if(d){return}for(var C=0;C<D.length;C++){if(j>=a.maxfilenumber){break}var B=String(f++);e[B]=D[C];a.announce(B,e[B],"");j++}}function r(B){if(e[B]){delete e[B];j--}}function m(){if(j==0||g){g=false;d=false;a.complete();return}else{d=true}var D=-1;for(var F in e){D=F;break}var E=e[D];r(D);var C=E.size;var B=1048576*a.maxfilesize;if(C<B){var G=a.initialize(D,E);if(G){q(D,E,G)}else{u(D,E,a.error_missing)}}else{u(D,E,a.error_filesize)}}function q(B,D,E){var F=new FormData();for(var C in E){F.append(C,E[C])}if(D.size>0){F.append(a.paramname,D,D.name)}i=new XMLHttpRequest();i.open("POST",a.url,true);i.setRequestHeader("Accept","application/json");i.setRequestHeader("Cache-Control","no-cache");i.setRequestHeader("X-Requested-With","XMLHttpRequest");i.onreadystatechange=function(){if(i.readyState!=i.DONE){return}var G=null;if(i.responseText){try{G=jQuery.parseJSON(i.responseText)}catch(H){G=i.responseText}}if(i.status<200||i.status>299){var I=i.statusText;if(i.status==403){I=a.error_login}else{if(i.status==0){I=a.error_server}else{if(!I){I=a.error_default}}}u(B,D,I+" ("+i.status+")")}else{y(B,D,G)}};i.upload.index=B;i.upload.file=D;i.upload.addEventListener("progress",k,false);i.send(F)}function y(B,C,D){a.success(B,C,D);m()}function u(B,C,D){a.error(B,C,D);m()}function w(){c("#uploadbox_input").trigger("click")}function t(B){for(B in e){r(B)}}function l(){if(!d){d=true;m()}}function v(){g=true}function n(B){a=c.extend({},a,B);return a}function s(){return window.File&&window.FormData&&window.XMLHttpRequest&&window.FileList}return{select:w,add:z,remove:r,start:l,stop:v,reset:t,configure:n,compatible:s}}})(jQuery);
\ No newline at end of file
+(function(c){jQuery.event.props.push("dataTransfer");var h={url:"",paramname:"content",maxfilesize:2048,maxfilenumber:50,dragover:function(){},dragleave:function(){},announce:function(){},initialize:function(){},progress:function(){},success:function(){},error:function(k,l,m){alert(m)},complete:function(){},error_filesize:"File exceeds 2GB. Please use an FTP client.",error_default:"Please make sure the file is available.",error_server:"Upload request failed.",error_login:"Uploads require you to log in.",error_missing:"No upload content available."};var a={};var e={};var f=0;var j=0;var d=false;var g=false;var b=null;var i=null;c.fn.uploadbox=function(A){a=c.extend({},h,A);b=this;b.append('<input id="uploadbox_input" type="file" style="display: none" multiple>');b.on("drop",o);b.on("dragover",p);b.on("dragleave",x);c("#uploadbox_input").change(function(B){z(B.target.files);c(this).val("")});function o(B){if(!B.dataTransfer){return}z(B.dataTransfer.files);B.preventDefault();return false}function p(B){B.preventDefault();a.dragover.call(B)}function x(B){B.stopPropagation();a.dragleave.call(B)}function k(B){if(B.lengthComputable){a.progress(this.index,this.file,Math.round((B.loaded*100)/B.total))}}function z(D){if(d){return}for(var C=0;C<D.length;C++){if(j>=a.maxfilenumber){break}var B=String(f++);e[B]=D[C];a.announce(B,e[B],"");j++}}function r(B){if(e[B]){delete e[B];j--}}function m(){if(j==0||g){g=false;d=false;a.complete();return}else{d=true}var D=-1;for(var F in e){D=F;break}var E=e[D];r(D);var C=E.size;var B=1048576*a.maxfilesize;if(C<B){var G=a.initialize(D,E);if(G){q(D,E,G)}else{u(D,E,a.error_missing)}}else{u(D,E,a.error_filesize)}}function q(B,D,E){var F=new FormData();for(var C in E){F.append(C,E[C])}if(D.size>0){F.append(a.paramname,D,D.name)}i=new XMLHttpRequest();i.open("POST",a.url,true);i.setRequestHeader("Accept","application/json");i.setRequestHeader("Cache-Control","no-cache");i.setRequestHeader("X-Requested-With","XMLHttpRequest");i.onreadystatechange=function(){if(i.readyState!=i.DONE){return}var G=null;if(i.responseText){try{G=jQuery.parseJSON(i.responseText)}catch(H){G=i.responseText}}if(i.status<200||i.status>299){var I=i.statusText;if(i.status==403){I=a.error_login}else{if(i.status==0){I=a.error_server}else{if(!I){I=a.error_default}}}u(B,D,I+" ("+i.status+")")}else{y(B,D,G)}};i.upload.index=B;i.upload.file=D;i.upload.addEventListener("progress",k,false);i.send(F)}function y(B,C,D){a.success(B,C,D);m()}function u(B,C,D){a.error(B,C,D);m()}function w(){c("#uploadbox_input").trigger("click")}function t(B){for(B in e){r(B)}}function l(){if(!d){d=true;m()}}function v(){g=true}function n(B){a=c.extend({},a,B);return a}function s(){return window.File&&window.FormData&&window.XMLHttpRequest&&window.FileList}return{select:w,add:z,remove:r,start:l,stop:v,reset:t,configure:n,compatible:s}}})(jQuery);
\ No newline at end of file
diff -r 82d2c2109792d098ddc5ede7992ae132e596584a -r e1ac3242786b055b8cc2b80ed701643f41c1e42e static/scripts/utils/uploadbox.js
--- a/static/scripts/utils/uploadbox.js
+++ b/static/scripts/utils/uploadbox.js
@@ -12,7 +12,7 @@
url : '',
paramname : 'content',
maxfilesize : 2048,
- maxfilenumber : 20,
+ maxfilenumber : 50,
dragover : function() {},
dragleave : function() {},
announce : function() {},
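Beyond these defaults, $.fn.uploadbox merges caller-supplied options over them (via $.extend in the packed source above). A hypothetical configuration sketch using only option names visible in this diff; the endpoint URL and callback body are assumptions, not Galaxy's actual values:
$('#upload-box').uploadbox({
    url           : '/api/tools',     // assumed upload endpoint
    maxfilesize   : 2048,             // MB; matches the 2GB error text
    maxfilenumber : 50,               // raised from 20 in this changeset
    announce      : function(index, file, message) {
        console.log('queued file', file.name);
    }
});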
diff -r 82d2c2109792d098ddc5ede7992ae132e596584a -r e1ac3242786b055b8cc2b80ed701643f41c1e42e static/style/blue/base.css
--- a/static/style/blue/base.css
+++ b/static/style/blue/base.css
@@ -1257,7 +1257,8 @@
.progress-button .progress-bar-notransition{-webkit-transition:none;-moz-transition:none;-ms-transition:none;-o-transition:none;transition:none}
.progress-button .label{position:absolute;top:0px;width:inherit;text-align:center;line-height:19px}
.upload-info{font-weight:normal;text-align:center}
-.upload-box{width:100%;height:95%;text-align:center;overflow:scroll;font-size:12px;line-height:1.33;-moz-border-radius:5px;border-radius:5px;border:1px dashed #bfbfbf;padding:10px;overflow-x:hidden;-ms-overflow-style:none}.upload-box .table{width:100%}
+.upload-box{width:100%;height:95%;text-align:center;overflow:scroll;font-size:12px;line-height:1.33;-moz-border-radius:5px;border-radius:5px;border:1px dashed #bfbfbf;padding:10px;overflow-x:hidden;-ms-overflow-style:none}.upload-box .popover{max-width:600px;width:auto}
+.upload-box .table{width:100%}
.upload-box .table th{text-align:center;white-space:nowrap}
.upload-box .table td{margin:0px;padding:0px}
.upload-box .title{width:130px;word-wrap:break-word;font-size:11px}
diff -r 82d2c2109792d098ddc5ede7992ae132e596584a -r e1ac3242786b055b8cc2b80ed701643f41c1e42e static/style/src/less/upload.less
--- a/static/style/src/less/upload.less
+++ b/static/style/src/less/upload.less
@@ -61,7 +61,12 @@
padding : 10px;
overflow-x : hidden;
-ms-overflow-style: none;
-
+
+ .popover {
+ max-width: 500px;
+ width: auto;
+ }
+
.table {
width : 100%;
}
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/935942afdf70/
Changeset: 935942afdf70
User: nsoranzo
Date: 2014-01-27 19:51:46
Summary: Remove unused imports and unused variables. Fix spacing.
Affected #: 17 files
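
A note on the pattern: nearly every hunk in this changeset applies one of two mechanical cleanups, removing imports and variables that are never read, and adding the conventional space after the Python 2 "print >>" chevron and around operators. A minimal before/after sketch (a hypothetical module, not taken from this changeset):

    # Before: unused import, cramped spacing
    import sys, string            # 'string' is never used below
    total=0
    print >>sys.stderr, "done: %d" %(total)

    # After: import only what is used, space out the operators
    import sys
    total = 0
    print >> sys.stderr, "done: %d" % (total)
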
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/WeightedAverage.py
--- a/tools/regVariation/WeightedAverage.py
+++ b/tools/regVariation/WeightedAverage.py
@@ -1,20 +1,16 @@
#!/usr/bin/env python
"""
-
usage: %prog bed_file_1 bed_file_2 out_file
-1, --cols1=N,N,N,N: Columns for chr, start, end, strand in first file
-2, --cols2=N,N,N,N,N: Columns for chr, start, end, strand, name/value in second file
"""
-from galaxy import eggs
import collections
-import sys, string
+import sys
#import numpy
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
-import sys, traceback, fileinput
-from warnings import warn
from galaxy.tools.util.galaxyops import *
from bx.cookbook import doc_optparse
@@ -27,77 +23,72 @@
sys.exit()
-def FindRate(chromosome,start_stop,dictType):
- OverlapList=[]
+def FindRate(chromosome, start_stop, dictType):
+ OverlapList = []
for tempO in dictType[chromosome]:
- DatabaseInterval=[tempO[0],tempO[1]]
- Overlap=GetOverlap(start_stop,DatabaseInterval)
- if Overlap>0:
- OverlapList.append([Overlap,tempO[2]])
-
- if len(OverlapList)>0:
-
- SumRecomb=0
- SumOverlap=0
+ DatabaseInterval = [tempO[0], tempO[1]]
+ Overlap = GetOverlap( start_stop, DatabaseInterval )
+ if Overlap > 0:
+ OverlapList.append([Overlap, tempO[2]])
+
+ if len(OverlapList) > 0:
+ SumRecomb = 0
+ SumOverlap = 0
for member in OverlapList:
- SumRecomb+=member[0]*member[1]
- SumOverlap+=member[0]
- averageRate=SumRecomb/SumOverlap
-
+ SumRecomb += member[0]*member[1]
+ SumOverlap += member[0]
+ averageRate = SumRecomb/SumOverlap
return averageRate
-
else:
return 'NA'
-
-
-
-def GetOverlap(a,b):
- return min(a[1],b[1])-max(a[0],b[0])
+
+
+def GetOverlap(a, b):
+ return min(a[1], b[1])-max(a[0], b[0])
+
options, args = doc_optparse.parse( __doc__ )
try:
chr_col_1, start_col_1, end_col_1, strand_col1 = parse_cols_arg( options.cols1 )
- chr_col_2, start_col_2, end_col_2, strand_col2, name_col_2 = parse_cols_arg( options.cols2 )
+ chr_col_2, start_col_2, end_col_2, strand_col2, name_col_2 = parse_cols_arg( options.cols2 )
input1, input2, input3 = args
except Exception, eee:
print eee
stop_err( "Data issue: click the pencil icon in the history item to correct the metadata attributes." )
-
+fd2 = open(input2)
+lines2 = fd2.readlines()
+RecombChrDict = collections.defaultdict(list)
-fd2=open(input2)
-lines2=fd2.readlines()
-RecombChrDict=collections.defaultdict(list)
-
-skipped=0
+skipped = 0
for line in lines2:
- temp=line.strip().split()
+ temp = line.strip().split()
try:
assert float(temp[int(name_col_2)])
except:
- skipped+=1
+ skipped += 1
continue
- tempIndex=[int(temp[int(start_col_2)]),int(temp[int(end_col_2)]),float(temp[int(name_col_2)])]
+ tempIndex = [int(temp[int(start_col_2)]), int(temp[int(end_col_2)]), float(temp[int(name_col_2)])]
RecombChrDict[temp[int(chr_col_2)]].append(tempIndex)
-print "Skipped %d features with invalid values" %(skipped)
+print "Skipped %d features with invalid values" % (skipped)
-fd1=open(input1)
-lines=fd1.readlines()
-finalProduct=''
+fd1 = open(input1)
+lines = fd1.readlines()
+finalProduct = ''
for line in lines:
- temp=line.strip().split('\t')
- chromosome=temp[int(chr_col_1)]
- start=int(temp[int(start_col_1)])
- stop=int(temp[int(end_col_1)])
- start_stop=[start,stop]
- RecombRate=FindRate(chromosome,start_stop,RecombChrDict)
+ temp = line.strip().split('\t')
+ chromosome = temp[int(chr_col_1)]
+ start = int(temp[int(start_col_1)])
+ stop = int(temp[int(end_col_1)])
+ start_stop = [start, stop]
+ RecombRate = FindRate( chromosome, start_stop, RecombChrDict )
try:
- RecombRate="%.4f" %(float(RecombRate))
+ RecombRate = "%.4f" % (float(RecombRate))
except:
- RecombRate=RecombRate
- finalProduct+=line.strip()+'\t'+str(RecombRate)+'\n'
-fdd=open(input3,'w')
+ RecombRate = RecombRate
+ finalProduct += line.strip()+'\t'+str(RecombRate)+'\n'
+fdd = open(input3, 'w')
fdd.writelines(finalProduct)
fdd.close()
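
For reference, the core of WeightedAverage.py being restyled above is an overlap-weighted mean: GetOverlap returns min(end) - max(start), which is negative for disjoint intervals, so only positive overlaps contribute, each weighted by its length. A self-contained sketch of that calculation (simplified; the real tool parses the intervals out of BED columns):

    def get_overlap(a, b):
        # negative when the two intervals do not intersect
        return min(a[1], b[1]) - max(a[0], b[0])

    def weighted_rate(query, features):
        # features: (start, end, rate) tuples on the query's chromosome
        pairs = [(get_overlap(query, (s, e)), r) for s, e, r in features]
        pairs = [(o, r) for o, r in pairs if o > 0]
        if not pairs:
            return 'NA'
        return sum(o * r for o, r in pairs) / float(sum(o for o, r in pairs))

    # weighted_rate((10, 20), [(0, 15, 2.0), (15, 30, 4.0)]) -> 3.0
    # (two 5 bp overlaps, so the 2.0 and 4.0 rates average evenly)
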
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/best_regression_subsets.py
--- a/tools/regVariation/best_regression_subsets.py
+++ b/tools/regVariation/best_regression_subsets.py
@@ -2,7 +2,7 @@
from galaxy import eggs
-import sys, string
+import sys
from rpy import *
import numpy
@@ -10,19 +10,20 @@
sys.stderr.write(msg)
sys.exit()
+
infile = sys.argv[1]
y_col = int(sys.argv[2])-1
x_cols = sys.argv[3].split(',')
outfile = sys.argv[4]
outfile2 = sys.argv[5]
-print "Predictor columns: %s; Response column: %d" %(x_cols,y_col+1)
+print "Predictor columns: %s; Response column: %d" % ( x_cols, y_col+1 )
fout = open(outfile,'w')
for i, line in enumerate( file ( infile )):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
elems = line.split( '\t' )
- break
+ break
if i == 30:
break # Hopefully we'll never get here...
@@ -32,12 +33,12 @@
y_vals = []
x_vals = []
-for k,col in enumerate(x_cols):
+for k, col in enumerate(x_cols):
x_cols[k] = int(col)-1
x_vals.append([])
NA = 'NA'
-for ind,line in enumerate( file( infile )):
+for ind, line in enumerate( file( infile ) ):
if line and not line.startswith( '#' ):
try:
fields = line.split("\t")
@@ -46,7 +47,7 @@
except Exception, ey:
yval = r('NA')
y_vals.append(yval)
- for k,col in enumerate(x_cols):
+ for k, col in enumerate(x_cols):
try:
xval = float(fields[col])
except Exception, ex:
@@ -59,10 +60,10 @@
x_vals1 = numpy.asarray(x_vals).transpose()
-dat= r.list(x=array(x_vals1), y=y_vals)
+dat = r.list(x=array(x_vals1), y=y_vals)
r.library("leaps")
-
+
set_default_mode(NO_CONVERSION)
try:
leaps = r.regsubsets(r("y ~ x"), data= r.na_exclude(dat))
@@ -75,10 +76,10 @@
pattern = "["
for i in range(tot):
pattern = pattern + 'c' + str(int(x_cols[int(i)]) + 1) + ' '
-pattern = pattern.strip() + ']'
-print >>fout, "#Vars\t%s\tR-sq\tAdj. R-sq\tC-p\tbic" %(pattern)
-for ind,item in enumerate(summary['outmat']):
- print >>fout, "%s\t%s\t%s\t%s\t%s\t%s" %(str(item).count('*'), item, summary['rsq'][ind], summary['adjr2'][ind], summary['cp'][ind], summary['bic'][ind])
+pattern = pattern.strip() + ']'
+print >> fout, "#Vars\t%s\tR-sq\tAdj. R-sq\tC-p\tbic" % (pattern)
+for ind, item in enumerate(summary['outmat']):
+ print >> fout, "%s\t%s\t%s\t%s\t%s\t%s" % (str(item).count('*'), item, summary['rsq'][ind], summary['adjr2'][ind], summary['cp'][ind], summary['bic'][ind])
r.pdf( outfile2, 8, 8 )
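
The pattern-building loop above simply renders the selected predictor columns as a bracketed, 1-based header such as "[c2 c4 c5]". Equivalent in brief (illustrative values):

    x_cols = [1, 3, 4]   # 0-based predictor columns
    pattern = "[" + " ".join("c%d" % (c + 1) for c in x_cols) + "]"
    # pattern == "[c2 c4 c5]"
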
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/featureCounter.py
--- a/tools/regVariation/featureCounter.py
+++ b/tools/regVariation/featureCounter.py
@@ -11,8 +11,7 @@
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
-import sys, traceback, fileinput
-from warnings import warn
+import sys, fileinput
from bx.intervals.io import *
from bx.cookbook import doc_optparse
from bx.intervals.operations import quicksect
@@ -33,7 +32,7 @@
partial += 1
if node.left and node.left.maxend > start:
counter(node.left, start, end)
- if node.right:
+ if node.right:
counter(node.right, start, end)
elif start < node.start < end:
if node.end <= end:
@@ -42,10 +41,10 @@
partial += 1
if node.left and node.left.maxend > start:
counter(node.left, start, end)
- if node.right:
+ if node.right:
counter(node.right, start, end)
else:
- if node.left:
+ if node.left:
counter(node.left, start, end)
def count_coverage( readers, comments=True ):
@@ -58,8 +57,8 @@
if type( item ) is GenomicInterval:
rightTree.insert( item, secondary.linenum, item.fields )
- bitsets = secondary_copy.binned_bitsets()
-
+ bitsets = secondary_copy.binned_bitsets()
+
global full, partial
for interval in primary:
@@ -82,7 +81,7 @@
bases_covered = bitsets[ chrom ].count_range( start, end-start )
if (end - start) == 0:
percent = 0
- else:
+ else:
percent = float(bases_covered) / float(end - start)
if bases_covered:
root = rightTree.chroms[chrom] #root node for the chrom tree
@@ -92,13 +91,14 @@
interval.fields.append(str(full))
interval.fields.append(str(partial))
yield interval
-
+
+
def main():
options, args = doc_optparse.parse( __doc__ )
try:
chr_col_1, start_col_1, end_col_1, strand_col_1 = parse_cols_arg( options.cols1 )
- chr_col_2, start_col_2, end_col_2, strand_col_2 = parse_cols_arg( options.cols2 )
+ chr_col_2, start_col_2, end_col_2, strand_col_2 = parse_cols_arg( options.cols2 )
in1_fname, in2_fname, out_fname = args
except:
stop_err( "Data issue: click the pencil icon in the history item to correct the metadata attributes." )
@@ -126,7 +126,7 @@
out_file = open( out_fname, "w" )
try:
- for line in count_coverage([g1,g2,g2_copy]):
+ for line in count_coverage([g1, g2, g2_copy]):
if type( line ) is GenomicInterval:
out_file.write( "%s\n" % "\t".join( line.fields ) )
else:
@@ -143,6 +143,7 @@
print skipped( g2, filedesc=" of 2nd dataset" )
elif g2_copy.skipped > 0:
print skipped( g2_copy, filedesc=" of 2nd dataset" )
-
+
+
if __name__ == "__main__":
main()
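
The counter() reindented above walks a bx-python quicksect tree, tallying features that sit entirely inside a window ("full") against those that merely cross one of its boundaries ("partial"). The classification itself, flattened out of the tree walk (names are illustrative, not the tool's API):

    def classify(win_start, win_end, feat_start, feat_end):
        # a feature wholly inside the window counts as 'full';
        # one that crosses a window edge counts as 'partial'
        if win_start <= feat_start and feat_end <= win_end:
            return 'full'
        if feat_start < win_end and feat_end > win_start:
            return 'partial'
        return None   # no overlap at all
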
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/getIndelRates_3way.py
--- a/tools/regVariation/getIndelRates_3way.py
+++ b/tools/regVariation/getIndelRates_3way.py
@@ -6,7 +6,6 @@
pkg_resources.require( "bx-python" )
import sys, os, tempfile
-import traceback
import fileinput
from warnings import warn
@@ -18,7 +17,8 @@
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
-
+
+
def counter(node, start, end, sort_col):
global full, blk_len, blk_list
if node.start < start:
@@ -31,14 +31,14 @@
blk_len += int(node.other[sort_col+2])
if node.left and node.left.maxend > start:
counter(node.left, start, end, sort_col)
- if node.right:
+ if node.right:
counter(node.right, start, end, sort_col)
elif node.start > end:
- if node.left:
+ if node.left:
counter(node.left, start, end, sort_col)
-
-infile = sys.argv[1]
+
+infile = sys.argv[1]
fout = open(sys.argv[2],'w')
int_file = sys.argv[3]
if int_file != "None": #User has specified an interval file
@@ -48,9 +48,9 @@
chr_col_i, start_col_i, end_col_i, strand_col_i = parse_cols_arg( sys.argv[5] )
except:
stop_err("Unable to open input Interval file")
-
+
+
def main():
-
for i, line in enumerate( file ( infile )):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
@@ -86,8 +86,7 @@
break
except:
continue
-
-
+
fin = open(infile, 'r')
skipped = 0
@@ -98,7 +97,7 @@
os.system(cmdline)
except:
stop_err("Encountered error while sorting the input file.")
- print >>fout, "#Block\t%s_InsRate\t%s_InsRate\t%s_InsRate\t%s_DelRate\t%s_DelRate\t%s_DelRate" %(species[0],species[1],species[2],species[0],species[1],species[2])
+ print >> fout, "#Block\t%s_InsRate\t%s_InsRate\t%s_InsRate\t%s_DelRate\t%s_DelRate\t%s_DelRate" % ( species[0], species[1], species[2], species[0], species[1], species[2] )
prev_bnum = -1
sorted_infile.seek(0)
for line in sorted_infile.readlines():
@@ -112,16 +111,16 @@
if prev_bnum != -1:
irate = []
drate = []
- for i,elem in enumerate(inserts):
+ for i, elem in enumerate(inserts):
try:
- irate.append(str("%.2e" %(inserts[i]/blen[i])))
+ irate.append(str("%.2e" % (inserts[i]/blen[i])))
except:
irate.append('0')
try:
- drate.append(str("%.2e" %(deletes[i]/blen[i])))
+ drate.append(str("%.2e" % (deletes[i]/blen[i])))
except:
drate.append('0')
- print >>fout, "%s\t%s\t%s" %(prev_bnum, '\t'.join(irate) , '\t'.join(drate))
+ print >> fout, "%s\t%s\t%s" % ( prev_bnum, '\t'.join(irate) , '\t'.join(drate) )
inserts = [0.0, 0.0, 0.0]
deletes = [0.0, 0.0, 0.0]
blen = []
@@ -134,25 +133,24 @@
inserts[sp_ind] += 1
elif elems[1].endswith('delete'):
deletes[sp_ind] += 1
- prev_bnum = new_bnum
+ prev_bnum = new_bnum
except Exception, ei:
#print >>sys.stderr, ei
continue
irate = []
drate = []
- for i,elem in enumerate(inserts):
+ for i, elem in enumerate(inserts):
try:
- irate.append(str("%.2e" %(inserts[i]/blen[i])))
+ irate.append(str("%.2e" % (inserts[i]/blen[i])))
except:
irate.append('0')
try:
- drate.append(str("%.2e" %(deletes[i]/blen[i])))
+ drate.append(str("%.2e" % (deletes[i]/blen[i])))
except:
drate.append('0')
- print >>fout, "%s\t%s\t%s" %(prev_bnum, '\t'.join(irate) , '\t'.join(drate))
+ print >> fout, "%s\t%s\t%s" % ( prev_bnum, '\t'.join(irate) , '\t'.join(drate) )
sys.exit()
-
inf = open(infile, 'r')
start_met = False
end_met = False
@@ -163,14 +161,14 @@
try:
assert int(elems[0])
assert len(elems) == 18
- if dbkey_i not in elems[1]:
- if not(start_met):
+ if dbkey_i not in elems[1]:
+ if not(start_met):
continue
else:
sp_end = n
break
else:
- print >>sp_file, line
+ print >> sp_file, line
if not(start_met):
start_met = True
sp_start = n
@@ -201,7 +199,7 @@
for item in indel:
if type( item ) is GenomicInterval:
indelTree.insert( item, indel.linenum, item.fields )
- result=[]
+ result = []
global full, blk_len, blk_list
for interval in win:
@@ -213,14 +211,14 @@
chrom = interval.chrom
start = int(interval.start)
end = int(interval.end)
- if start > end:
+ if start > end:
warn( "Interval start after end!" )
- ins_chr = "%s.%s_insert" %(dbkey_i,chrom)
- del_chr = "%s.%s_delete" %(dbkey_i,chrom)
+ ins_chr = "%s.%s_insert" % ( dbkey_i, chrom )
+ del_chr = "%s.%s_delete" % ( dbkey_i, chrom )
irate = 0
drate = 0
if ins_chr not in indelTree.chroms and del_chr not in indelTree.chroms:
- pass
+ pass
else:
if ins_chr in indelTree.chroms:
full = 0.0
@@ -242,8 +240,9 @@
interval.fields.append(str("%.2e" %irate))
interval.fields.append(str("%.2e" %drate))
- print >>fout, "\t".join(interval.fields)
+ print >> fout, "\t".join(interval.fields)
fout.flush()
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
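
The repeated irate/drate blocks reformatted above all compute a per-block rate, events divided by aligned block length, formatted "%.2e", with the bare except falling back to '0' when the length is missing or zero. The guarded division, extracted for reference (illustrative):

    def safe_rate(count, length):
        # mirrors the tool's try/except around "%.2e" % (count / length):
        # a missing or zero block length falls back to '0'
        try:
            return "%.2e" % (float(count) / length)
        except ZeroDivisionError:
            return '0'

    # safe_rate(7, 3500) -> '2.00e-03';  safe_rate(7, 0) -> '0'
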
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/getIndels.py
--- a/tools/regVariation/getIndels.py
+++ b/tools/regVariation/getIndels.py
@@ -8,13 +8,12 @@
from __future__ import division
from galaxy import eggs
-import pkg_resources
+import pkg_resources
pkg_resources.require( "bx-python" )
try:
pkg_resources.require("numpy")
except:
pass
-import psyco_full
import sys
from bx.cookbook import doc_optparse
from galaxy.tools.exception_handling import *
@@ -22,24 +21,24 @@
assert sys.version_info[:2] >= ( 2, 4 )
-def main():
+def main():
# Parsing Command Line here
options, args = doc_optparse.parse( __doc__ )
try:
- inp_file, out_file1 = args
+ inp_file, out_file1 = args
except:
print >> sys.stderr, "Tool initialization error."
sys.exit()
try:
- fin = open(inp_file,'r')
+ open(inp_file, 'r')
except:
print >> sys.stderr, "Unable to open input file"
sys.exit()
try:
- fout1 = open(out_file1,'w')
- #fout2 = open(out_file2,'w')
+ fout1 = open(out_file1, 'w')
+ #fout2 = open(out_file2, 'w')
except:
print >> sys.stderr, "Unable to open output file"
sys.exit()
@@ -47,11 +46,10 @@
try:
maf_reader = bx.align.maf.Reader( open(inp_file, 'r') )
except:
- print >>sys.stderr, "Your MAF file appears to be malformed."
+ print >> sys.stderr, "Your MAF file appears to be malformed."
sys.exit()
- maf_count = 0
- print >>fout1, "#Block\tSource\tSeq1_Start\tSeq1_End\tSeq2_Start\tSeq2_End\tIndel_length"
+ print >> fout1, "#Block\tSource\tSeq1_Start\tSeq1_End\tSeq2_Start\tSeq2_End\tIndel_length"
for block_ind, block in enumerate(maf_reader):
if len(block.components) < 2:
continue
@@ -84,19 +82,19 @@
#write 2
if prev_pos_gap2 == 1:
prev_pos_gap2 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src2,nt_pos1,nt_pos1+1,nt_pos2-1,nt_pos2-1+gaplen2,gaplen2)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src2, nt_pos1, nt_pos1+1, nt_pos2-1, nt_pos2-1+gaplen2, gaplen2 )
if pos == len(seq1)-1:
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src1,nt_pos1,nt_pos1+1,nt_pos2+1-gaplen1,nt_pos2+1,gaplen1)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src1, nt_pos1, nt_pos1+1, nt_pos2+1-gaplen1, nt_pos2+1, gaplen1 )
else:
prev_pos_gap1 = 0
prev_pos_gap2 = 0
"""
if prev_pos_gap1 == 1:
prev_pos_gap1 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s" %(block_ind+1,src1,nt_pos1-1,nt_pos1,gaplen1)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s" % ( block_ind+1, src1, nt_pos1-1, nt_pos1, gaplen1 )
elif prev_pos_gap2 == 1:
prev_pos_gap2 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s" %(block_ind+1,src2,nt_pos2-1,nt_pos2,gaplen2)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s" % ( block_ind+1, src2, nt_pos2-1, nt_pos2, gaplen2 )
"""
else:
nt_pos1 += 1
@@ -105,19 +103,21 @@
#write both
if prev_pos_gap1 == 1:
prev_pos_gap1 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src1,nt_pos1-1,nt_pos1,nt_pos2-gaplen1,nt_pos2,gaplen1)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src1, nt_pos1-1, nt_pos1, nt_pos2-gaplen1, nt_pos2, gaplen1 )
elif prev_pos_gap2 == 1:
prev_pos_gap2 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src2,nt_pos1-gaplen2,nt_pos1,nt_pos2-1,nt_pos2,gaplen2)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src2, nt_pos1-gaplen2, nt_pos1, nt_pos2-1, nt_pos2, gaplen2 )
else:
gaplen2 += 1
prev_pos_gap2 = 1
#write 1
if prev_pos_gap1 == 1:
prev_pos_gap1 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src1,nt_pos1-1,nt_pos1,nt_pos2,nt_pos2+gaplen1,gaplen1)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src1, nt_pos1-1, nt_pos1, nt_pos2, nt_pos2+gaplen1, gaplen1 )
if pos == len(seq1)-1:
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src2,nt_pos1+1-gaplen2,nt_pos1+1,nt_pos2,nt_pos2+1,gaplen2)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src2, nt_pos1+1-gaplen2, nt_pos1+1, nt_pos2, nt_pos2+1, gaplen2 )
pos += 1
+
+
if __name__ == "__main__":
main()
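
getIndels.py scans each pairwise MAF block column by column, growing gap-run counters (gaplen1/gaplen2) while one sequence shows '-' and emitting an indel record when a run ends. A compact sketch of that state machine for one gapped sequence pair (simplified; the real tool also tracks per-species nucleotide coordinates):

    def gap_runs(seq1, seq2):
        # returns (which_sequence, start_column, run_length) per gap run
        runs = []
        for which, seq in ((1, seq1), (2, seq2)):
            start = None
            for pos, ch in enumerate(seq):
                if ch == '-':
                    if start is None:
                        start = pos
                elif start is not None:
                    runs.append((which, start, pos - start))
                    start = None
            if start is not None:
                runs.append((which, start, len(seq) - start))
        return runs

    # gap_runs('AC--GT', 'ACTTG-') -> [(1, 2, 2), (2, 5, 1)]
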
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/linear_regression.py
--- a/tools/regVariation/linear_regression.py
+++ b/tools/regVariation/linear_regression.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
from galaxy import eggs
-import sys, string
+import sys
from rpy import *
import numpy
@@ -15,14 +15,14 @@
outfile = sys.argv[4]
outfile2 = sys.argv[5]
-print "Predictor columns: %s; Response column: %d" %(x_cols,y_col+1)
+print "Predictor columns: %s; Response column: %d" % ( x_cols, y_col+1 )
fout = open(outfile,'w')
elems = []
for i, line in enumerate( file ( infile )):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
elems = line.split( '\t' )
- break
+ break
if i == 30:
break # Hopefully we'll never get here...
@@ -32,12 +32,12 @@
y_vals = []
x_vals = []
-for k,col in enumerate(x_cols):
+for k, col in enumerate(x_cols):
x_cols[k] = int(col)-1
x_vals.append([])
NA = 'NA'
-for ind,line in enumerate( file( infile )):
+for ind, line in enumerate( file( infile )):
if line and not line.startswith( '#' ):
try:
fields = line.split("\t")
@@ -46,7 +46,7 @@
except:
yval = r('NA')
y_vals.append(yval)
- for k,col in enumerate(x_cols):
+ for k, col in enumerate(x_cols):
try:
xval = float(fields[col])
except:
@@ -57,7 +57,7 @@
x_vals1 = numpy.asarray(x_vals).transpose()
-dat= r.list(x=array(x_vals1), y=y_vals)
+dat = r.list(x=array(x_vals1), y=y_vals)
set_default_mode(NO_CONVERSION)
try:
@@ -66,8 +66,8 @@
stop_err("Error performing linear regression on the input data.\nEither the response column or one of the predictor columns contain only non-numeric or invalid values.")
set_default_mode(BASIC_CONVERSION)
-coeffs=linear_model.as_py()['coefficients']
-yintercept= coeffs['(Intercept)']
+coeffs = linear_model.as_py()['coefficients']
+yintercept = coeffs['(Intercept)']
summary = r.summary(linear_model)
co = summary.get('coefficients', 'NA')
@@ -82,8 +82,8 @@
except:
pass
-print >>fout, "Y-intercept\t%s" %(yintercept)
-print >>fout, "p-value (Y-intercept)\t%s" %(pvaly)
+print >> fout, "Y-intercept\t%s" % (yintercept)
+print >> fout, "p-value (Y-intercept)\t%s" % (pvaly)
if len(x_vals) == 1: #Simple linear regression case with 1 predictor variable
try:
@@ -94,22 +94,22 @@
pval = r.round(float(co[1][3]), digits=10)
except:
pval = 'NA'
- print >>fout, "Slope (c%d)\t%s" %(x_cols[0]+1,slope)
- print >>fout, "p-value (c%d)\t%s" %(x_cols[0]+1,pval)
+ print >> fout, "Slope (c%d)\t%s" % ( x_cols[0]+1, slope )
+ print >> fout, "p-value (c%d)\t%s" % ( x_cols[0]+1, pval )
else: #Multiple regression case with >1 predictors
- ind=1
+ ind = 1
while ind < len(coeffs.keys()):
try:
slope = r.round(float(coeffs['x'+str(ind)]), digits=10)
except:
slope = 'NA'
- print >>fout, "Slope (c%d)\t%s" %(x_cols[ind-1]+1,slope)
+ print >> fout, "Slope (c%d)\t%s" % ( x_cols[ind-1]+1, slope )
try:
pval = r.round(float(co[ind][3]), digits=10)
except:
pval = 'NA'
- print >>fout, "p-value (c%d)\t%s" %(x_cols[ind-1]+1,pval)
- ind+=1
+ print >> fout, "p-value (c%d)\t%s" % ( x_cols[ind-1]+1, pval )
+ ind += 1
rsq = summary.get('r.squared','NA')
adjrsq = summary.get('adj.r.squared','NA')
@@ -125,14 +125,14 @@
except:
pass
-print >>fout, "R-squared\t%s" %(rsq)
-print >>fout, "Adjusted R-squared\t%s" %(adjrsq)
-print >>fout, "F-statistic\t%s" %(fstat)
-print >>fout, "Sigma\t%s" %(sigma)
+print >> fout, "R-squared\t%s" % (rsq)
+print >> fout, "Adjusted R-squared\t%s" % (adjrsq)
+print >> fout, "F-statistic\t%s" % (fstat)
+print >> fout, "Sigma\t%s" % (sigma)
r.pdf( outfile2, 8, 8 )
if len(x_vals) == 1: #Simple linear regression case with 1 predictor variable
- sub_title = "Slope = %s; Y-int = %s" %(slope,yintercept)
+ sub_title = "Slope = %s; Y-int = %s" % ( slope, yintercept )
try:
r.plot(x=x_vals[0], y=y_vals, xlab="X", ylab="Y", sub=sub_title, main="Scatterplot with regression")
r.abline(a=yintercept, b=slope, col="red")
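
linear_regression.py delegates the fit to R via rpy, but for the single-predictor case the slope and Y-intercept it reports are ordinary least squares. The same math without R, for reference (not the tool's code path):

    def ols(xs, ys):
        # slope = cov(x, y) / var(x); intercept = mean(y) - slope * mean(x)
        n = float(len(xs))
        mx, my = sum(xs) / n, sum(ys) / n
        sxy = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
        sxx = sum((x - mx) ** 2 for x in xs)
        slope = sxy / sxx
        return slope, my - slope * mx

    # ols([1, 2, 3], [2, 4, 6]) -> (2.0, 0.0)
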
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/logistic_regression_vif.py
--- a/tools/regVariation/logistic_regression_vif.py
+++ b/tools/regVariation/logistic_regression_vif.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
from galaxy import eggs
-import sys, string
+import sys
from rpy import *
import numpy
@@ -15,14 +15,14 @@
outfile = sys.argv[4]
-print "Predictor columns: %s; Response column: %d" %(x_cols,y_col+1)
+print "Predictor columns: %s; Response column: %d" % ( x_cols, y_col+1 )
fout = open(outfile,'w')
elems = []
-for i, line in enumerate( file ( infile )):
+for i, line in enumerate( file( infile ) ):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
elems = line.split( '\t' )
- break
+ break
if i == 30:
break # Hopefully we'll never get here...
@@ -32,12 +32,12 @@
y_vals = []
x_vals = []
-for k,col in enumerate(x_cols):
+for k, col in enumerate(x_cols):
x_cols[k] = int(col)-1
x_vals.append([])
NA = 'NA'
-for ind,line in enumerate( file( infile )):
+for ind, line in enumerate( file( infile )):
if line and not line.startswith( '#' ):
try:
fields = line.split("\t")
@@ -46,7 +46,7 @@
except:
yval = r('NA')
y_vals.append(yval)
- for k,col in enumerate(x_cols):
+ for k, col in enumerate(x_cols):
try:
xval = float(fields[col])
except:
@@ -57,46 +57,45 @@
x_vals1 = numpy.asarray(x_vals).transpose()
-check1=0
-check0=0
+check1 = 0
+check0 = 0
for i in y_vals:
if i == 1:
- check1=1
+ check1 = 1
if i == 0:
- check0=1
-if check1==0 or check0==0:
+ check0 = 1
+if check1 == 0 or check0 == 0:
sys.exit("Warning: logistic regression must have at least two classes")
for i in y_vals:
- if i not in [1,0,r('NA')]:
- print >>fout, str(i)
+ if i not in [1, 0, r('NA')]:
+ print >> fout, str(i)
sys.exit("Warning: the current version of this tool can run only with two classes and need to be labeled as 0 and 1.")
-
-
-dat= r.list(x=array(x_vals1), y=y_vals)
-novif=0
+
+dat = r.list(x=array(x_vals1), y=y_vals)
+novif = 0
set_default_mode(NO_CONVERSION)
try:
- linear_model = r.glm(r("y ~ x"), data = r.na_exclude(dat),family="binomial")
+ linear_model = r.glm(r("y ~ x"), data=r.na_exclude(dat), family="binomial")
except RException, rex:
stop_err("Error performing logistic regression on the input data.\nEither the response column or one of the predictor columns contain only non-numeric or invalid values.")
if len(x_cols)>1:
try:
r('suppressPackageStartupMessages(library(car))')
- r.assign('dat',dat)
- r.assign('ncols',len(x_cols))
- vif=r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx),family="binomial")'))
+ r.assign('dat', dat)
+ r.assign('ncols', len(x_cols))
+ vif = r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx), family="binomial")'))
except RException, rex:
print rex
else:
- novif=1
-
+ novif = 1
+
set_default_mode(BASIC_CONVERSION)
-coeffs=linear_model.as_py()['coefficients']
-null_deviance=linear_model.as_py()['null.deviance']
-residual_deviance=linear_model.as_py()['deviance']
-yintercept= coeffs['(Intercept)']
+coeffs = linear_model.as_py()['coefficients']
+null_deviance = linear_model.as_py()['null.deviance']
+residual_deviance = linear_model.as_py()['deviance']
+yintercept = coeffs['(Intercept)']
summary = r.summary(linear_model)
co = summary.get('coefficients', 'NA')
"""
@@ -109,14 +108,14 @@
pvaly = r.round(float(co[0][3]), digits=10)
except:
pass
-print >>fout, "response column\tc%d" %(y_col+1)
-tempP=[]
+print >> fout, "response column\tc%d" % (y_col+1)
+tempP = []
for i in x_cols:
tempP.append('c'+str(i+1))
-tempP=','.join(tempP)
-print >>fout, "predictor column(s)\t%s" %(tempP)
-print >>fout, "Y-intercept\t%s" %(yintercept)
-print >>fout, "p-value (Y-intercept)\t%s" %(pvaly)
+tempP = ','.join(tempP)
+print >> fout, "predictor column(s)\t%s" % (tempP)
+print >> fout, "Y-intercept\t%s" % (yintercept)
+print >> fout, "p-value (Y-intercept)\t%s" % (pvaly)
if len(x_vals) == 1: #Simple linear regression case with 1 predictor variable
try:
@@ -127,44 +126,43 @@
pval = r.round(float(co[1][3]), digits=10)
except:
pval = 'NA'
- print >>fout, "Slope (c%d)\t%s" %(x_cols[0]+1,slope)
- print >>fout, "p-value (c%d)\t%s" %(x_cols[0]+1,pval)
+ print >> fout, "Slope (c%d)\t%s" % ( x_cols[0]+1, slope )
+ print >> fout, "p-value (c%d)\t%s" % ( x_cols[0]+1, pval )
else: #Multiple regression case with >1 predictors
- ind=1
+ ind = 1
while ind < len(coeffs.keys()):
try:
slope = r.round(float(coeffs['x'+str(ind)]), digits=10)
except:
slope = 'NA'
- print >>fout, "Slope (c%d)\t%s" %(x_cols[ind-1]+1,slope)
+ print >> fout, "Slope (c%d)\t%s" % ( x_cols[ind-1]+1, slope )
try:
pval = r.round(float(co[ind][3]), digits=10)
except:
pval = 'NA'
- print >>fout, "p-value (c%d)\t%s" %(x_cols[ind-1]+1,pval)
- ind+=1
+ print >> fout, "p-value (c%d)\t%s" % ( x_cols[ind-1]+1, pval )
+ ind += 1
rsq = summary.get('r.squared','NA')
-
try:
- rsq= r.round(float((null_deviance-residual_deviance)/null_deviance), digits=5)
- null_deviance= r.round(float(null_deviance), digits=5)
- residual_deviance= r.round(float(residual_deviance), digits=5)
+ rsq = r.round(float((null_deviance-residual_deviance)/null_deviance), digits=5)
+ null_deviance = r.round(float(null_deviance), digits=5)
+ residual_deviance = r.round(float(residual_deviance), digits=5)
except:
pass
-print >>fout, "Null deviance\t%s" %(null_deviance)
-print >>fout, "Residual deviance\t%s" %(residual_deviance)
-print >>fout, "pseudo R-squared\t%s" %(rsq)
-print >>fout, "\n"
-print >>fout, 'vif'
+print >> fout, "Null deviance\t%s" % (null_deviance)
+print >> fout, "Residual deviance\t%s" % (residual_deviance)
+print >> fout, "pseudo R-squared\t%s" % (rsq)
+print >> fout, "\n"
+print >> fout, 'vif'
-if novif==0:
- py_vif=vif.as_py()
- count=0
+if novif == 0:
+ py_vif = vif.as_py()
+ count = 0
for i in sorted(py_vif.keys()):
- print >>fout,'c'+str(x_cols[count]+1) ,str(py_vif[i])
- count+=1
-elif novif==1:
- print >>fout, "vif can calculate only when model have more than 1 predictor"
+ print >> fout, 'c'+str(x_cols[count]+1), str(py_vif[i])
+ count += 1
+elif novif == 1:
+ print >> fout, "vif can calculate only when model have more than 1 predictor"
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/maf_cpg_filter.py
--- a/tools/regVariation/maf_cpg_filter.py
+++ b/tools/regVariation/maf_cpg_filter.py
@@ -10,7 +10,7 @@
"""
from galaxy import eggs
-import pkg_resources
+import pkg_resources
pkg_resources.require( "bx-python" )
try:
pkg_resources.require( "numpy" )
@@ -54,7 +54,7 @@
defn = "non-CpG"
cpgfilter.run( reader, writer.write )
- print "%2.2f percent bases masked; Mask character = %s, Definition = %s" %(float(cpgfilter.masked)/float(cpgfilter.total) * 100, mask, defn)
+ print "%2.2f percent bases masked; Mask character = %s, Definition = %s" % ( float(cpgfilter.masked)/float(cpgfilter.total) * 100, mask, defn )
if __name__ == "__main__":
main()
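
The one substantive line touched in maf_cpg_filter.py is the summary print; the quantity is just the masked fraction expressed as a percentage. For reference (made-up counts):

    masked, total = 1234, 56789
    print "%2.2f percent bases masked" % (float(masked) / float(total) * 100)
    # prints "2.17 percent bases masked"
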
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/microsats_alignment_level.py
--- a/tools/regVariation/microsats_alignment_level.py
+++ b/tools/regVariation/microsats_alignment_level.py
@@ -4,7 +4,11 @@
Uses SPUTNIK to fetch microsatellites and extracts orthologous repeats from the sputnik output.
"""
from galaxy import eggs
-import sys, os, tempfile, string, math, re
+import os
+import re
+import string
+import sys
+import tempfile
def reverse_complement(text):
DNA_COMP = string.maketrans( "ACGTacgt", "TGCAtgca" )
@@ -12,31 +16,26 @@
comp.reverse()
return "".join(comp)
+
def main():
if len(sys.argv) != 8:
- print >>sys.stderr, "Insufficient number of arguments."
+ print >> sys.stderr, "Insufficient number of arguments."
sys.exit()
infile = open(sys.argv[1],'r')
separation = int(sys.argv[2])
outfile = sys.argv[3]
- align_type = sys.argv[4]
- if align_type == "2way":
- align_type_len = 2
- elif align_type == "3way":
- align_type_len = 3
mono_threshold = int(sys.argv[5])
non_mono_threshold = int(sys.argv[6])
allow_different_units = int(sys.argv[7])
- print "Min distance = %d bp; Min threshold for mono repeats = %d; Min threshold for non-mono repeats = %d; Allow different motifs = %s" %(separation, mono_threshold, non_mono_threshold, allow_different_units==1)
+ print "Min distance = %d bp; Min threshold for mono repeats = %d; Min threshold for non-mono repeats = %d; Allow different motifs = %s" % ( separation, mono_threshold, non_mono_threshold, allow_different_units==1 )
try:
fout = open(outfile, "w")
- print >>fout, "#Block\tSeq1_Name\tSeq1_Start\tSeq1_End\tSeq1_Type\tSeq1_Length\tSeq1_RepeatNumber\tSeq1_Unit\tSeq2_Name\tSeq2_Start\tSeq2_End\tSeq2_Type\tSeq2_Length\tSeq2_RepeatNumber\tSeq2_Unit"
+ print >> fout, "#Block\tSeq1_Name\tSeq1_Start\tSeq1_End\tSeq1_Type\tSeq1_Length\tSeq1_RepeatNumber\tSeq1_Unit\tSeq2_Name\tSeq2_Start\tSeq2_End\tSeq2_Type\tSeq2_Length\tSeq2_RepeatNumber\tSeq2_Unit"
#sputnik_cmd = os.path.join(os.path.split(sys.argv[0])[0], "sputnik")
sputnik_cmd = "sputnik"
input = infile.read()
- skipped = 0
block_num = 0
input = input.replace('\r','\n')
for block in input.split('\n\n'):
@@ -44,26 +43,24 @@
tmpin = tempfile.NamedTemporaryFile()
tmpout = tempfile.NamedTemporaryFile()
tmpin.write(block.strip())
- blk = tmpin.read()
cmdline = sputnik_cmd + " " + tmpin.name + " > /dev/null 2>&1 >> " + tmpout.name
try:
os.system(cmdline)
- except Exception, es:
+ except Exception:
continue
sputnik_out = tmpout.read()
tmpin.close()
tmpout.close()
if sputnik_out != "":
if len(block.split('>')[1:]) != 2: #len(sputnik_out.split('>')):
- skipped += 1
continue
align_block = block.strip().split('>')
lendict = {'mononucleotide':1, 'dinucleotide':2, 'trinucleotide':3, 'tetranucleotide':4, 'pentanucleotide':5, 'hexanucleotide':6}
- blockdict={}
- r=0
- namelist=[]
- for k,sput_block in enumerate(sputnik_out.split('>')[1:]):
+ blockdict = {}
+ r = 0
+ namelist = []
+ for k, sput_block in enumerate(sputnik_out.split('>')[1:]):
whole_seq = ''.join(align_block[k+1].split('\n')[1:]).replace('\n','').strip()
p = re.compile('\n(\S*nucleotide)')
repeats = p.split(sput_block.strip())
@@ -71,13 +68,12 @@
j = 1
name = repeats[0].strip()
try:
- coords = re.search('\d+[-_:]\d+',name).group()
- coords = coords.replace('_','-').replace(':','-')
- except Exception, e:
+ coords = re.search('\d+[-_:]\d+', name).group()
+ coords = coords.replace('_', '-').replace(':', '-')
+ except Exception:
coords = '0-0'
- pass
r += 1
- blockdict[r]={}
+ blockdict[r] = {}
try:
sp_name = name[:name.index('.')]
chr_name = name[name.index('.'):name.index('(')]
@@ -91,11 +87,10 @@
continue
if blockdict[r].has_key('types'):
- blockdict[r]['types'].append(repeats[j].strip()) #type of microsat
+ blockdict[r]['types'].append(repeats[j].strip()) #type of microsat
else:
- blockdict[r]['types'] = [repeats[j].strip()] #type of microsat
+ blockdict[r]['types'] = [repeats[j].strip()] #type of microsat
- sequence = ''.join(align_block[r].split('\n')[1:]).replace('\n','').strip()
start = int(repeats[j+1].split('--')[0].split(':')[0].strip())
#check to see if there are gaps before the start of the repeat, and change the start accordingly
sgaps = 0
@@ -107,7 +102,7 @@
break #break at the 1st non-gap character
ch_pos -= 1
if blockdict[r].has_key('starts'):
- blockdict[r]['starts'].append(start+sgaps) #start co-ords adjusted with alignment co-ords to include GAPS
+ blockdict[r]['starts'].append(start+sgaps) #start co-ords adjusted with alignment co-ords to include GAPS
else:
blockdict[r]['starts'] = [start+sgaps]
@@ -120,7 +115,7 @@
else:
break #break at the 1st non-gap character
if blockdict[r].has_key('ends'):
- blockdict[r]['ends'].append(end+egaps) #end co-ords adjusted with alignment co-ords to include GAPS
+ blockdict[r]['ends'].append(end+egaps) #end co-ords adjusted with alignment co-ords to include GAPS
else:
blockdict[r]['ends'] = [end+egaps]
@@ -134,20 +129,20 @@
gaps_before_start = whole_seq[:rel_start].count('-')
if blockdict[r].has_key('gaps_before_start'):
- blockdict[r]['gaps_before_start'].append(gaps_before_start) #lengths
+ blockdict[r]['gaps_before_start'].append(gaps_before_start) #lengths
else:
- blockdict[r]['gaps_before_start'] = [gaps_before_start] #lengths
+ blockdict[r]['gaps_before_start'] = [gaps_before_start] #lengths
- whole_seq_start= int(coords.split('-')[0])
+ whole_seq_start = int(coords.split('-')[0])
if blockdict[r].has_key('whole_seq_start'):
- blockdict[r]['whole_seq_start'].append(whole_seq_start) #lengths
+ blockdict[r]['whole_seq_start'].append(whole_seq_start) #lengths
else:
- blockdict[r]['whole_seq_start'] = [whole_seq_start] #lengths
+ blockdict[r]['whole_seq_start'] = [whole_seq_start] #lengths
if blockdict[r].has_key('lengths'):
- blockdict[r]['lengths'].append(repeat_len) #lengths
+ blockdict[r]['lengths'].append(repeat_len) #lengths
else:
- blockdict[r]['lengths'] = [repeat_len] #lengths
+ blockdict[r]['lengths'] = [repeat_len] #lengths
if blockdict[r].has_key('counts'):
blockdict[r]['counts'].append(str(int(repeat_len)/lendict[repeats[j].strip()])) #Repeat Unit
@@ -159,10 +154,10 @@
else:
blockdict[r]['units'] = [repeat_seq[:lendict[repeats[j].strip()]]] #Repeat Unit
- except Exception, eh:
+ except Exception:
pass
- j+=2
- #check the co-ords of all repeats corresponding to a sequence and remove adjacent repeats separated by less than the user-specified 'separation'.
+ j += 2
+ #check the co-ords of all repeats corresponding to a sequence and remove adjacent repeats separated by less than the user-specified 'separation'.
delete_index_list = []
for ind, item in enumerate(blockdict[r]['ends']):
try:
@@ -171,7 +166,7 @@
delete_index_list.append(ind)
if ind+1 not in delete_index_list:
delete_index_list.append(ind+1)
- except Exception, ek:
+ except Exception:
pass
for index in delete_index_list: #mark them for deletion
try:
@@ -183,7 +178,7 @@
blockdict[r]['lengths'][index] = 'marked'
blockdict[r]['counts'][index] = 'marked'
blockdict[r]['units'][index] = 'marked'
- except Exception, ej:
+ except Exception:
pass
#remove 'marked' elements from all the lists
"""
@@ -192,19 +187,19 @@
if elem == 'marked':
blockdict[r][key].remove(elem)
"""
- #print blockdict
+ #print blockdict
- #make sure that the blockdict has keys for both the species
+ #make sure that the blockdict has keys for both the species
if (1 not in blockdict) or (2 not in blockdict):
continue
visited_2 = [0 for x in range(len(blockdict[2]['starts']))]
- for ind1,coord_s1 in enumerate(blockdict[1]['starts']):
+ for ind1, coord_s1 in enumerate(blockdict[1]['starts']):
if coord_s1 == 'marked':
continue
coord_e1 = blockdict[1]['ends'][ind1]
out = []
- for ind2,coord_s2 in enumerate(blockdict[2]['starts']):
+ for ind2, coord_s2 in enumerate(blockdict[2]['starts']):
if coord_s2 == 'marked':
visited_2[ind2] = 1
continue
@@ -216,7 +211,7 @@
else:
if (blockdict[1]['units'][ind1] not in blockdict[2]['units'][ind2]*2) and (reverse_complement(blockdict[1]['units'][ind1]) not in blockdict[2]['units'][ind2]*2):
continue
- #print >>sys.stderr, (reverse_complement(blockdict[1]['units'][ind1]) not in blockdict[2]['units'][ind2]*2)
+ #print >> sys.stderr, (reverse_complement(blockdict[1]['units'][ind1]) not in blockdict[2]['units'][ind2]*2)
#skip if the repeat number thresholds are not met
if blockdict[1]['types'][ind1] == 'mononucleotide':
if (int(blockdict[1]['counts'][ind1]) < mono_threshold):
@@ -231,12 +226,12 @@
else:
if (int(blockdict[2]['counts'][ind2]) < non_mono_threshold):
continue
- #print "s1,e1=%s,%s; s2,e2=%s,%s" %(coord_s1,coord_e1,coord_s2,coord_e2)
- if (coord_s1 in range(coord_s2,coord_e2)) or (coord_e1 in range(coord_s2,coord_e2)):
+ #print "s1,e1=%s,%s; s2,e2=%s,%s" % ( coord_s1, coord_e1, coord_s2, coord_e2 )
+ if (coord_s1 in range(coord_s2, coord_e2)) or (coord_e1 in range(coord_s2, coord_e2)):
out.append(str(block_num))
out.append(namelist[0])
rel_start = blockdict[1]['whole_seq_start'][ind1] + coord_s1 - blockdict[1]['gaps_before_start'][ind1]
- rel_end = rel_start + int(blockdict[1]['lengths'][ind1])
+ rel_end = rel_start + int(blockdict[1]['lengths'][ind1])
out.append(str(rel_start))
out.append(str(rel_end))
out.append(blockdict[1]['types'][ind1])
@@ -245,16 +240,16 @@
out.append(blockdict[1]['units'][ind1])
out.append(namelist[1])
rel_start = blockdict[2]['whole_seq_start'][ind2] + coord_s2 - blockdict[2]['gaps_before_start'][ind2]
- rel_end = rel_start + int(blockdict[2]['lengths'][ind2])
+ rel_end = rel_start + int(blockdict[2]['lengths'][ind2])
out.append(str(rel_start))
out.append(str(rel_end))
out.append(blockdict[2]['types'][ind2])
out.append(blockdict[2]['lengths'][ind2])
out.append(blockdict[2]['counts'][ind2])
out.append(blockdict[2]['units'][ind2])
- print >>fout, '\t'.join(out)
+ print >> fout, '\t'.join(out)
visited_2[ind2] = 1
- out=[]
+ out = []
if 0 in visited_2: #there are still some elements in 2nd set which haven't found orthologs yet.
for ind2, coord_s2 in enumerate(blockdict[2]['starts']):
@@ -264,7 +259,7 @@
continue
coord_e2 = blockdict[2]['ends'][ind2]
out = []
- for ind1,coord_s1 in enumerate(blockdict[1]['starts']):
+ for ind1, coord_s1 in enumerate(blockdict[1]['starts']):
if coord_s1 == 'marked':
continue
coord_e1 = blockdict[1]['ends'][ind1]
@@ -290,11 +285,11 @@
if (int(blockdict[2]['counts'][ind2]) < non_mono_threshold):
continue
- if (coord_s2 in range(coord_s1,coord_e1)) or (coord_e2 in range(coord_s1,coord_e1)):
- out.append(str(block_num))
+ if (coord_s2 in range(coord_s1, coord_e1)) or (coord_e2 in range(coord_s1, coord_e1)):
+ out.append(str(block_num))
out.append(namelist[0])
rel_start = blockdict[1]['whole_seq_start'][ind1] + coord_s1 - blockdict[1]['gaps_before_start'][ind1]
- rel_end = rel_start + int(blockdict[1]['lengths'][ind1])
+ rel_end = rel_start + int(blockdict[1]['lengths'][ind1])
out.append(str(rel_start))
out.append(str(rel_end))
out.append(blockdict[1]['types'][ind1])
@@ -303,21 +298,21 @@
out.append(blockdict[1]['units'][ind1])
out.append(namelist[1])
rel_start = blockdict[2]['whole_seq_start'][ind2] + coord_s2 - blockdict[2]['gaps_before_start'][ind2]
- rel_end = rel_start + int(blockdict[2]['lengths'][ind2])
+ rel_end = rel_start + int(blockdict[2]['lengths'][ind2])
out.append(str(rel_start))
out.append(str(rel_end))
out.append(blockdict[2]['types'][ind2])
out.append(blockdict[2]['lengths'][ind2])
out.append(blockdict[2]['counts'][ind2])
out.append(blockdict[2]['units'][ind2])
- print >>fout, '\t'.join(out)
+ print >> fout, '\t'.join(out)
visited_2[ind2] = 1
- out=[]
+ out = []
- #print >>fout, blockdict
+ #print >> fout, blockdict
except Exception, exc:
- print >>sys.stderr, "type(exc),args,exc: %s, %s, %s" %(type(exc), exc.args, exc)
+ print >> sys.stderr, "type(exc),args,exc: %s, %s, %s" % ( type(exc), exc.args, exc )
+
if __name__ == "__main__":
main()
-
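
reverse_complement() above is the standard string.maketrans idiom from Python 2's string module: complement each base through a translation table, then reverse. Condensed, with the list-and-reverse replaced by a slice (same result):

    import string

    DNA_COMP = string.maketrans("ACGTacgt", "TGCAtgca")

    def reverse_complement(text):
        # complement every base, then reverse the whole string
        return text.translate(DNA_COMP)[::-1]

    # reverse_complement('AAGC') -> 'GCTT'
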
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/microsats_mutability.py
--- a/tools/regVariation/microsats_mutability.py
+++ b/tools/regVariation/microsats_mutability.py
@@ -4,7 +4,10 @@
This tool computes microsatellite mutability for the orthologous microsatellites fetched from 'Extract Orthologous Microsatellites from pair-wise alignments' tool.
"""
from galaxy import eggs
-import sys, string, re, commands, tempfile, os, fileinput
+import fileinput
+import string
+import sys
+import tempfile
from galaxy.tools.util.galaxyops import *
from bx.intervals.io import *
from bx.intervals.operations import quicksect
@@ -19,7 +22,7 @@
p_group_cols = [p_group, p_group+7]
s_group_cols = [s_group, s_group+7]
num_generations = int(sys.argv[7])
-region = sys.argv[8]
+region = sys.argv[8]
int_file = sys.argv[9]
if int_file != "None": #User has specified an interval file
try:
@@ -28,31 +31,35 @@
chr_col_i, start_col_i, end_col_i, strand_col_i = parse_cols_arg( sys.argv[11] )
except:
stop_err("Unable to open input Interval file")
-
+
+
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
+
def reverse_complement(text):
DNA_COMP = string.maketrans( "ACGTacgt", "TGCAtgca" )
comp = [ch for ch in text.translate(DNA_COMP)]
comp.reverse()
return "".join(comp)
+
def get_unique_elems(elems):
- seen=set()
+ seen = set()
return[x for x in elems if x not in seen and not seen.add(x)]
+
def get_binned_lists(uniqlist, binsize):
- binnedlist=[]
+ binnedlist = []
uniqlist.sort()
start = int(uniqlist[0])
- bin_ind=0
- l_ind=0
+ bin_ind = 0
+ l_ind = 0
binnedlist.append([])
while l_ind < len(uniqlist):
elem = int(uniqlist[l_ind])
- if elem in range(start,start+binsize):
+ if elem in range(start, start+binsize):
binnedlist[bin_ind].append(elem)
else:
start += binsize
@@ -62,39 +69,38 @@
l_ind += 1
return binnedlist
-def fetch_weight(H,C,t):
+
+def fetch_weight(H, C, t):
if (H-(C-H)) < t:
return 2.0
else:
return 1.0
-def mutabilityEstimator(repeats1,repeats2,thresholds):
+
+def mutabilityEstimator(repeats1, repeats2, thresholds):
mut_num = 0.0 #Mutability Numerator
mut_den = 0.0 #Mutability denominator
- for ind,H in enumerate(repeats1):
+ for ind, H in enumerate(repeats1):
C = repeats2[ind]
t = thresholds[ind]
- w = fetch_weight(H,C,t)
+ w = fetch_weight(H, C, t)
mut_num += ((H-C)*(H-C)*w)
mut_den += w
return [mut_num, mut_den]
+
def output_writer(blk, blk_lines):
global winspecies, speciesind
- all_elems_1=[]
- all_elems_2=[]
- all_s_elems_1=[]
- all_s_elems_2=[]
+ all_elems_1 = []
+ all_elems_2 = []
+ all_s_elems_1 = []
+ all_s_elems_2 = []
for bline in blk_lines:
if not(bline):
continue
items = bline.split('\t')
seq1 = items[1]
- start1 = items[2]
- end1 = items[3]
seq2 = items[8]
- start2 = items[9]
- end2 = items[10]
if p_group_cols[0] == 6:
items[p_group_cols[0]] = int(items[p_group_cols[0]])
items[p_group_cols[1]] = int(items[p_group_cols[1]])
@@ -111,8 +117,8 @@
if s_group_cols[0] != -1:
uniq_s_elems_1 = get_unique_elems(all_s_elems_1)
uniq_s_elems_2 = get_unique_elems(all_s_elems_2)
- mut1={}
- mut2={}
+ mut1 = {}
+ mut2 = {}
count1 = {}
count2 = {}
"""
@@ -120,12 +126,12 @@
uniq_elems_1 = get_unique_units(j.sort(lambda x, y: len(x)-len(y)))
"""
if p_group_cols[0] == 6: #i.e. the option chosen is group-by repeat number.
- uniq_elems_1 = get_binned_lists(uniq_elems_1,p_bin_size)
- uniq_elems_2 = get_binned_lists(uniq_elems_2,p_bin_size)
+ uniq_elems_1 = get_binned_lists( uniq_elems_1, p_bin_size )
+ uniq_elems_2 = get_binned_lists( uniq_elems_2, p_bin_size )
if s_group_cols[0] == 6: #i.e. the option chosen is subgroup-by repeat number.
- uniq_s_elems_1 = get_binned_lists(uniq_s_elems_1,s_bin_size)
- uniq_s_elems_2 = get_binned_lists(uniq_s_elems_2,s_bin_size)
+ uniq_s_elems_1 = get_binned_lists( uniq_s_elems_1, s_bin_size )
+ uniq_s_elems_2 = get_binned_lists( uniq_s_elems_2, s_bin_size )
for pitem1 in uniq_elems_1:
#repeats1 = []
@@ -143,61 +149,61 @@
if p_group_cols[0] == 6:
belems[p_group_cols[0]] = int(belems[p_group_cols[0]])
if belems[p_group_cols[0]] in pitem1:
- if belems[s_group_cols[0]]==sitem1:
+ if belems[s_group_cols[0]] == sitem1:
repeats1.append(int(belems[6]))
repeats2.append(int(belems[13]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut1[str(pitem1)+'\t'+str(sitem1)]=mutabilityEstimator(repeats1,repeats2,thresholds)
+ mut1[str(pitem1)+'\t'+str(sitem1)] = mutabilityEstimator( repeats1, repeats2, thresholds )
if region == 'align':
- count1[str(pitem1)+'\t'+str(sitem1)]=min(sum(repeats1),sum(repeats2))
- else:
+ count1[str(pitem1)+'\t'+str(sitem1)] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count1["%s\t%s" %(pitem1,sitem1)]=sum(repeats1)
+ count1["%s\t%s" % ( pitem1, sitem1 )] = sum(repeats1)
elif winspecies == 2:
- count1["%s\t%s" %(pitem1,sitem1)]=sum(repeats2)
+ count1["%s\t%s" % ( pitem1, sitem1 )] = sum(repeats2)
else:
if type(sitem1) == list:
if s_group_cols[0] == 6:
belems[s_group_cols[0]] = int(belems[s_group_cols[0]])
- if belems[p_group_cols[0]]==pitem1 and belems[s_group_cols[0]] in sitem1:
+ if belems[p_group_cols[0]] == pitem1 and belems[s_group_cols[0]] in sitem1:
repeats1.append(int(belems[6]))
repeats2.append(int(belems[13]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut1["%s\t%s" %(pitem1,sitem1)]=mutabilityEstimator(repeats1,repeats2,thresholds)
+ mut1["%s\t%s" % ( pitem1, sitem1 )] = mutabilityEstimator( repeats1, repeats2, thresholds )
if region == 'align':
- count1[str(pitem1)+'\t'+str(sitem1)]=min(sum(repeats1),sum(repeats2))
- else:
+ count1[str(pitem1)+'\t'+str(sitem1)] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count1[str(pitem1)+'\t'+str(sitem1)]=sum(repeats1)
+ count1[str(pitem1)+'\t'+str(sitem1)] = sum(repeats1)
elif winspecies == 2:
- count1[str(pitem1)+'\t'+str(sitem1)]=sum(repeats2)
+ count1[str(pitem1)+'\t'+str(sitem1)] = sum(repeats2)
else:
- if belems[p_group_cols[0]]==pitem1 and belems[s_group_cols[0]]==sitem1:
+ if belems[p_group_cols[0]] == pitem1 and belems[s_group_cols[0]] == sitem1:
repeats1.append(int(belems[6]))
repeats2.append(int(belems[13]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut1["%s\t%s" %(pitem1,sitem1)]=mutabilityEstimator(repeats1,repeats2,thresholds)
+ mut1["%s\t%s" % ( pitem1, sitem1 )] = mutabilityEstimator( repeats1, repeats2, thresholds )
if region == 'align':
- count1[str(pitem1)+'\t'+str(sitem1)]=min(sum(repeats1),sum(repeats2))
- else:
+ count1[str(pitem1)+'\t'+str(sitem1)] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count1["%s\t%s" %(pitem1,sitem1)]=sum(repeats1)
+ count1["%s\t%s" % ( pitem1, sitem1 )] = sum(repeats1)
elif winspecies == 2:
- count1["%s\t%s" %(pitem1,sitem1)]=sum(repeats2)
+ count1["%s\t%s" % ( pitem1, sitem1 )] = sum(repeats2)
else: #Sub-group by feature is None
for bline in blk_lines:
belems = bline.split('\t')
if type(pitem1) == list:
- #print >>sys.stderr, "item: " + str(item1)
+ #print >> sys.stderr, "item: " + str(item1)
if p_group_cols[0] == 6:
belems[p_group_cols[0]] = int(belems[p_group_cols[0]])
if belems[p_group_cols[0]] in pitem1:
@@ -208,21 +214,21 @@
else:
thresholds.append(non_mono_threshold)
else:
- if belems[p_group_cols[0]]==pitem1:
+ if belems[p_group_cols[0]] == pitem1:
repeats1.append(int(belems[6]))
repeats2.append(int(belems[13]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut1["%s" %(pitem1)]=mutabilityEstimator(repeats1,repeats2,thresholds)
+ mut1["%s" % (pitem1)] = mutabilityEstimator( repeats1, repeats2, thresholds )
if region == 'align':
- count1["%s" %(pitem1)]=min(sum(repeats1),sum(repeats2))
- else:
+ count1["%s" % (pitem1)] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count1[str(pitem1)]=sum(repeats1)
+ count1[str(pitem1)] = sum(repeats1)
elif winspecies == 2:
- count1[str(pitem1)]=sum(repeats2)
+ count1[str(pitem1)] = sum(repeats2)
for pitem2 in uniq_elems_2:
#repeats1 = []
@@ -239,57 +245,57 @@
if type(pitem2) == list:
if p_group_cols[0] == 6:
belems[p_group_cols[1]] = int(belems[p_group_cols[1]])
- if belems[p_group_cols[1]] in pitem2 and belems[s_group_cols[1]]==sitem2:
+ if belems[p_group_cols[1]] in pitem2 and belems[s_group_cols[1]] == sitem2:
repeats2.append(int(belems[13]))
repeats1.append(int(belems[6]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut2["%s\t%s" %(pitem2,sitem2)]=mutabilityEstimator(repeats2,repeats1,thresholds)
+ mut2["%s\t%s" % ( pitem2, sitem2 )] = mutabilityEstimator( repeats2, repeats1, thresholds )
#count2[str(pitem2)+'\t'+str(sitem2)]=len(repeats2)
if region == 'align':
- count2["%s\t%s" %(pitem2,sitem2)]=min(sum(repeats1),sum(repeats2))
- else:
+ count2["%s\t%s" % ( pitem2, sitem2 )] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats2)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats2)
elif winspecies == 2:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats1)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats1)
else:
if type(sitem2) == list:
if s_group_cols[0] == 6:
belems[s_group_cols[1]] = int(belems[s_group_cols[1]])
- if belems[p_group_cols[1]]==pitem2 and belems[s_group_cols[1]] in sitem2:
+ if belems[p_group_cols[1]] == pitem2 and belems[s_group_cols[1]] in sitem2:
repeats2.append(int(belems[13]))
repeats1.append(int(belems[6]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut2["%s\t%s" %(pitem2,sitem2)]=mutabilityEstimator(repeats2,repeats1,thresholds)
+ mut2["%s\t%s" % ( pitem2, sitem2 )] = mutabilityEstimator( repeats2, repeats1, thresholds )
if region == 'align':
- count2["%s\t%s" %(pitem2,sitem2)]=min(sum(repeats1),sum(repeats2))
- else:
+ count2["%s\t%s" % ( pitem2, sitem2 )] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats2)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats2)
elif winspecies == 2:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats1)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats1)
else:
- if belems[p_group_cols[1]]==pitem2 and belems[s_group_cols[1]]==sitem2:
+ if belems[p_group_cols[1]] == pitem2 and belems[s_group_cols[1]] == sitem2:
repeats1.append(int(belems[13]))
repeats2.append(int(belems[6]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut2["%s\t%s" %(pitem2,sitem2)]=mutabilityEstimator(repeats2,repeats1,thresholds)
+ mut2["%s\t%s" % ( pitem2, sitem2 )] = mutabilityEstimator( repeats2, repeats1, thresholds )
if region == 'align':
- count2["%s\t%s" %(pitem2,sitem2)]=min(sum(repeats1),sum(repeats2))
- else:
+ count2["%s\t%s" % ( pitem2, sitem2 )] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats2)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats2)
elif winspecies == 2:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats1)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats1)
else: #Sub-group by feature is None
for bline in blk_lines:
belems = bline.split('\t')
@@ -304,21 +310,21 @@
else:
thresholds.append(non_mono_threshold)
else:
- if belems[p_group_cols[1]]==pitem2:
+ if belems[p_group_cols[1]] == pitem2:
repeats2.append(int(belems[13]))
repeats1.append(int(belems[6]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut2["%s" %(pitem2)]=mutabilityEstimator(repeats2,repeats1,thresholds)
+ mut2["%s" % (pitem2)] = mutabilityEstimator( repeats2, repeats1, thresholds )
if region == 'align':
- count2["%s" %(pitem2)]=min(sum(repeats1),sum(repeats2))
+ count2["%s" % (pitem2)] = min( sum(repeats1), sum(repeats2) )
else:
if winspecies == 1:
- count2["%s" %(pitem2)]=sum(repeats2)
+ count2["%s" % (pitem2)] = sum(repeats2)
elif winspecies == 2:
- count2["%s" %(pitem2)]=sum(repeats1)
+ count2["%s" % (pitem2)] = sum(repeats1)
for key in mut1.keys():
if key in mut2.keys():
mut = (mut1[key][0]+mut2[key][0])/(mut1[key][1]+mut2[key][1])
@@ -328,9 +334,9 @@
unit_found = False
if p_group_cols[0] == 7 or s_group_cols[0] == 7: #if it is Repeat Unit (AG, GCT etc.) check for reverse-complements too
if p_group_cols[0] == 7:
- this,other = 0,1
+ this, other = 0, 1
else:
- this,other = 1,0
+ this, other = 1, 0
groups1 = key.split('\t')
mutn = mut1[key][0]
mutd = mut1[key][1]
@@ -351,28 +357,29 @@
else:
mut = mut1[key][0]/mut1[key][1]
count = count1[key]
- mut = "%.2e" %(mut/num_generations)
+ mut = "%.2e" % (mut/num_generations)
if region == 'align':
- print >>fout, str(blk) + '\t'+seq1 + '\t' + seq2 + '\t' +key.strip()+ '\t'+str(mut) + '\t'+ str(count)
+ print >> fout, str(blk) + '\t'+seq1 + '\t' + seq2 + '\t' +key.strip()+ '\t'+str(mut) + '\t'+ str(count)
elif region == 'win':
- fout.write("%s\t%s\t%s\t%s\n" %(blk,key.strip(),mut,count))
+ fout.write("%s\t%s\t%s\t%s\n" % ( blk, key.strip(), mut, count ))
fout.flush()
#catch any remaining repeats, for instance if the orthologous position contained different repeat units
for remaining_key in mut2.keys():
mut = mut2[remaining_key][0]/mut2[remaining_key][1]
- mut = "%.2e" %(mut/num_generations)
+ mut = "%.2e" % (mut/num_generations)
count = count2[remaining_key]
if region == 'align':
- print >>fout, str(blk) + '\t'+seq1 + '\t'+seq2 + '\t'+remaining_key.strip()+ '\t'+str(mut)+ '\t'+ str(count)
+ print >> fout, str(blk) + '\t'+seq1 + '\t'+seq2 + '\t'+remaining_key.strip()+ '\t'+str(mut)+ '\t'+ str(count)
elif region == 'win':
- fout.write("%s\t%s\t%s\t%s\n" %(blk,remaining_key.strip(),mut,count))
+ fout.write("%s\t%s\t%s\t%s\n" % ( blk, remaining_key.strip(), mut, count ))
fout.flush()
- #print >>fout, blk + '\t'+remaining_key.strip()+ '\t'+str(mut)+ '\t'+ str(count)
+ #print >> fout, blk + '\t'+remaining_key.strip()+ '\t'+str(mut)+ '\t'+ str(count)
+
def counter(node, start, end, report_func):
if start <= node.start < end and start < node.end <= end:
- report_func(node)
+ report_func(node)
if node.right:
counter(node.right, start, end, report_func)
if node.left:
@@ -381,8 +388,8 @@
counter(node.right, start, end, report_func)
elif node.start >= end and node.left and node.left.maxend > start:
counter(node.left, start, end, report_func)
-
-
+
+
def main():
infile = sys.argv[1]
@@ -400,21 +407,18 @@
if region == 'win':
if dbkey_i in elems[1]:
winspecies = 1
- speciesind = 1
+ speciesind = 1
elif dbkey_i in elems[8]:
winspecies = 2
speciesind = 8
else:
- stop_err("The species build corresponding to your interval file is not present in the Microsatellite file.")
+ stop_err("The species build corresponding to your interval file is not present in the Microsatellite file.")
fin = open(infile, 'r')
skipped = 0
- blk=0
- win=0
- linestr=""
+ linestr = ""
if region == 'win':
-
msats = NiceReaderWrapper( fileinput.FileInput( infile ),
chrom_col = speciesind,
start_col = speciesind+1,
@@ -435,7 +439,7 @@
ichr = ielems[chr_col_i]
istart = int(ielems[start_col_i])
iend = int(ielems[end_col_i])
- isrc = "%s.%s" %(dbkey_i,ichr)
+ isrc = "%s.%s" % ( dbkey_i, ichr )
if isrc not in msatTree.chroms:
continue
result = []
@@ -450,14 +454,14 @@
tmpfile1.seek(0)
output_writer(iline, tmpfile1.readlines())
except:
- skipped+=1
+ skipped += 1
if skipped:
- print "Skipped %d intervals as invalid." %(skipped)
+ print "Skipped %d intervals as invalid." % (skipped)
elif region == 'align':
if s_group_cols[0] != -1:
- print >>fout, "#Window\tSpecies_1\tSpecies_2\tGroupby_Feature\tSubGroupby_Feature\tMutability\tCount"
+ print >> fout, "#Window\tSpecies_1\tSpecies_2\tGroupby_Feature\tSubGroupby_Feature\tMutability\tCount"
else:
- print >>fout, "#Window\tSpecies_1\tWindow_Start\tWindow_End\tSpecies_2\tGroupby_Feature\tMutability\tCount"
+ print >> fout, "#Window\tSpecies_1\tWindow_Start\tWindow_End\tSpecies_2\tGroupby_Feature\tMutability\tCount"
prev_bnum = -1
try:
for line in fin:
@@ -481,9 +485,11 @@
prev_bnum = new_bnum
output_writer(prev_bnum, linestr.strip().replace('\r','\n').split('\n'))
except Exception, ea:
- print >>sys.stderr, ea
+ print >> sys.stderr, ea
skipped += 1
if skipped:
- print "Skipped %d lines as invalid." %(skipped)
+ print "Skipped %d lines as invalid." % (skipped)
+
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
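A note for readers skimming the hunk above: counter() is a containment query
over bx-python's quicksect interval tree. It reports every node whose interval
lies fully inside [start, end) and uses each subtree's maxend to prune. A
minimal self-contained sketch of the same traversal (the Node class here is a
hypothetical stand-in for the quicksect node, not code from this commit):

    class Node(object):
        # assumed shape of a quicksect-style node: a BST keyed on start
        def __init__(self, start, end, left=None, right=None):
            self.start, self.end = start, end
            self.left, self.right = left, right
            # maxend: largest interval end anywhere in this subtree
            self.maxend = max([end] + [c.maxend for c in (left, right) if c])

    def contained(node, start, end, report_func):
        if start <= node.start < end and start < node.end <= end:
            report_func(node)
            if node.right:
                contained(node.right, start, end, report_func)
            if node.left:
                contained(node.left, start, end, report_func)
        elif node.start < start and node.right:
            contained(node.right, start, end, report_func)
        elif node.start >= end and node.left and node.left.maxend > start:
            contained(node.left, start, end, report_func)

    tree = Node(10, 20, left=Node(2, 30), right=Node(25, 28))
    contained(tree, 5, 40, lambda n: None)  # reports (10, 20) and (25, 28)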
diff -r 37cf56c3f0e4605e15f5988764f147bcb186f40f -r 935942afdf706951fb45faf5468e3e12c25fb28f tools/regVariation/partialR_square.py
--- a/tools/regVariation/partialR_square.py
+++ b/tools/regVariation/partialR_square.py
@@ -2,7 +2,7 @@
from galaxy import eggs
-import sys, string
+import sys
from rpy import *
import numpy
@@ -13,6 +13,7 @@
sys.stderr.write(msg)
sys.exit()
+
def sscombs(s):
if len(s) == 1:
return [s]
@@ -26,14 +27,14 @@
x_cols = sys.argv[3].split(',')
outfile = sys.argv[4]
-print "Predictor columns: %s; Response column: %d" %(x_cols,y_col+1)
+print "Predictor columns: %s; Response column: %d" % ( x_cols, y_col+1 )
fout = open(outfile,'w')
for i, line in enumerate( file ( infile )):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
elems = line.split( '\t' )
- break
+ break
if i == 30:
break # Hopefully we'll never get here...
@@ -43,7 +44,7 @@
y_vals = []
x_vals = []
-for k,col in enumerate(x_cols):
+for k, col in enumerate(x_cols):
x_cols[k] = int(col)-1
x_vals.append([])
"""
@@ -51,13 +52,13 @@
float( elems[x_cols[k]] )
except:
try:
- msg = "This operation cannot be performed on non-numeric column %d containing value '%s'." %( col, elems[x_cols[k]] )
+ msg = "This operation cannot be performed on non-numeric column %d containing value '%s'." % ( col, elems[x_cols[k]] )
except:
msg = "This operation cannot be performed on non-numeric data."
stop_err( msg )
"""
NA = 'NA'
-for ind,line in enumerate( file( infile )):
+for ind, line in enumerate( file( infile )):
if line and not line.startswith( '#' ):
try:
fields = line.split("\t")
@@ -65,20 +66,20 @@
yval = float(fields[y_col])
except Exception, ey:
yval = r('NA')
- #print >>sys.stderr, "ey = %s" %ey
+ #print >> sys.stderr, "ey = %s" %ey
y_vals.append(yval)
- for k,col in enumerate(x_cols):
+ for k, col in enumerate(x_cols):
try:
xval = float(fields[col])
except Exception, ex:
xval = r('NA')
- #print >>sys.stderr, "ex = %s" %ex
+ #print >> sys.stderr, "ex = %s" %ex
x_vals[k].append(xval)
except:
pass
x_vals1 = numpy.asarray(x_vals).transpose()
-dat= r.list(x=array(x_vals1), y=y_vals)
+dat = r.list(x=array(x_vals1), y=y_vals)
set_default_mode(NO_CONVERSION)
try:
@@ -91,7 +92,7 @@
fullr2 = summary.get('r.squared','NA')
if fullr2 == 'NA':
- stop_error("Error in linear regression")
+ stop_err("Error in linear regression")
if len(x_vals) < 10:
s = ""
@@ -100,10 +101,10 @@
else:
stop_err("This tool only works with less than 10 predictors.")
-print >>fout, "#Model\tR-sq\tpartial_R_Terms\tpartial_R_Value"
+print >> fout, "#Model\tR-sq\tpartial_R_Terms\tpartial_R_Value"
all_combos = sorted(sscombs(s), key=len)
all_combos.reverse()
-for j,cols in enumerate(all_combos):
+for j, cols in enumerate(all_combos):
#if len(cols) == len(s): #Same as the full model above
# continue
if len(cols) == 1:
@@ -113,7 +114,7 @@
for col in cols:
x_v.append(x_vals[int(col)])
x_vals1 = numpy.asarray(x_v).transpose()
- dat= r.list(x=array(x_vals1), y=y_vals)
+ dat = r.list(x=array(x_vals1), y=y_vals)
set_default_mode(NO_CONVERSION)
red = r.lm(r("y ~ x"), data= dat) #Reduced model
set_default_mode(BASIC_CONVERSION)
@@ -136,11 +137,11 @@
partial_R_col_str = "-"
partial_R = "-"
try:
- redr2 = "%.4f" %(float(redr2))
+ redr2 = "%.4f" % (float(redr2))
except:
pass
try:
- partial_R = "%.4f" %(float(partial_R))
+ partial_R = "%.4f" % (float(partial_R))
except:
pass
- print >>fout, "%s\t%s\t%s\t%s" %(col_str,redr2,partial_R_col_str,partial_R)
+ print >> fout, "%s\t%s\t%s\t%s" % ( col_str, redr2, partial_R_col_str, partial_R )
This diff is so big that we needed to truncate the remainder.
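Context for the partialR_square.py change: sscombs() recursively enumerates
every non-empty subset of the predictor string, and the loop further down
refits a reduced model per subset so its R-squared can be compared with the
full model's. A rough sketch of the idea; itertools stands in for the
recursion here, and partial_r_square() shows one common definition rather
than necessarily the tool's exact formula:

    from itertools import combinations

    def predictor_subsets(cols):
        # every non-empty subset, largest first,
        # like sorted(sscombs(s), key=len) followed by reverse()
        for size in range(len(cols), 0, -1):
            for combo in combinations(cols, size):
                yield list(combo)

    def partial_r_square(full_r2, reduced_r2):
        # fraction of the full model's explained variance
        # lost when the dropped terms are removed
        return (full_r2 - reduced_r2) / full_r2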
https://bitbucket.org/galaxy/galaxy-central/commits/82d2c2109792/
Changeset: 82d2c2109792
User: dannon
Date: 2014-02-04 16:33:40
Summary: Merged in nsoranzo/galaxy-central (pull request #312)
Remove unused imports and unused variables. Fix spacing.
Affected #: 17 files
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/WeightedAverage.py
--- a/tools/regVariation/WeightedAverage.py
+++ b/tools/regVariation/WeightedAverage.py
@@ -1,20 +1,16 @@
#!/usr/bin/env python
"""
-
usage: %prog bed_file_1 bed_file_2 out_file
-1, --cols1=N,N,N,N: Columns for chr, start, end, strand in first file
-2, --cols2=N,N,N,N,N: Columns for chr, start, end, strand, name/value in second file
"""
-from galaxy import eggs
import collections
-import sys, string
+import sys
#import numpy
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
-import sys, traceback, fileinput
-from warnings import warn
from galaxy.tools.util.galaxyops import *
from bx.cookbook import doc_optparse
@@ -27,77 +23,72 @@
sys.exit()
-def FindRate(chromosome,start_stop,dictType):
- OverlapList=[]
+def FindRate(chromosome, start_stop, dictType):
+ OverlapList = []
for tempO in dictType[chromosome]:
- DatabaseInterval=[tempO[0],tempO[1]]
- Overlap=GetOverlap(start_stop,DatabaseInterval)
- if Overlap>0:
- OverlapList.append([Overlap,tempO[2]])
-
- if len(OverlapList)>0:
-
- SumRecomb=0
- SumOverlap=0
+ DatabaseInterval = [tempO[0], tempO[1]]
+ Overlap = GetOverlap( start_stop, DatabaseInterval )
+ if Overlap > 0:
+ OverlapList.append([Overlap, tempO[2]])
+
+ if len(OverlapList) > 0:
+ SumRecomb = 0
+ SumOverlap = 0
for member in OverlapList:
- SumRecomb+=member[0]*member[1]
- SumOverlap+=member[0]
- averageRate=SumRecomb/SumOverlap
-
+ SumRecomb += member[0]*member[1]
+ SumOverlap += member[0]
+ averageRate = SumRecomb/SumOverlap
return averageRate
-
else:
return 'NA'
-
-
-
-def GetOverlap(a,b):
- return min(a[1],b[1])-max(a[0],b[0])
+
+
+def GetOverlap(a, b):
+ return min(a[1], b[1])-max(a[0], b[0])
+
options, args = doc_optparse.parse( __doc__ )
try:
chr_col_1, start_col_1, end_col_1, strand_col1 = parse_cols_arg( options.cols1 )
- chr_col_2, start_col_2, end_col_2, strand_col2, name_col_2 = parse_cols_arg( options.cols2 )
+ chr_col_2, start_col_2, end_col_2, strand_col2, name_col_2 = parse_cols_arg( options.cols2 )
input1, input2, input3 = args
except Exception, eee:
print eee
stop_err( "Data issue: click the pencil icon in the history item to correct the metadata attributes." )
-
+fd2 = open(input2)
+lines2 = fd2.readlines()
+RecombChrDict = collections.defaultdict(list)
-fd2=open(input2)
-lines2=fd2.readlines()
-RecombChrDict=collections.defaultdict(list)
-
-skipped=0
+skipped = 0
for line in lines2:
- temp=line.strip().split()
+ temp = line.strip().split()
try:
assert float(temp[int(name_col_2)])
except:
- skipped+=1
+ skipped += 1
continue
- tempIndex=[int(temp[int(start_col_2)]),int(temp[int(end_col_2)]),float(temp[int(name_col_2)])]
+ tempIndex = [int(temp[int(start_col_2)]), int(temp[int(end_col_2)]), float(temp[int(name_col_2)])]
RecombChrDict[temp[int(chr_col_2)]].append(tempIndex)
-print "Skipped %d features with invalid values" %(skipped)
+print "Skipped %d features with invalid values" % (skipped)
-fd1=open(input1)
-lines=fd1.readlines()
-finalProduct=''
+fd1 = open(input1)
+lines = fd1.readlines()
+finalProduct = ''
for line in lines:
- temp=line.strip().split('\t')
- chromosome=temp[int(chr_col_1)]
- start=int(temp[int(start_col_1)])
- stop=int(temp[int(end_col_1)])
- start_stop=[start,stop]
- RecombRate=FindRate(chromosome,start_stop,RecombChrDict)
+ temp = line.strip().split('\t')
+ chromosome = temp[int(chr_col_1)]
+ start = int(temp[int(start_col_1)])
+ stop = int(temp[int(end_col_1)])
+ start_stop = [start, stop]
+ RecombRate = FindRate( chromosome, start_stop, RecombChrDict )
try:
- RecombRate="%.4f" %(float(RecombRate))
+ RecombRate = "%.4f" % (float(RecombRate))
except:
- RecombRate=RecombRate
- finalProduct+=line.strip()+'\t'+str(RecombRate)+'\n'
-fdd=open(input3,'w')
+ RecombRate = RecombRate
+ finalProduct += line.strip()+'\t'+str(RecombRate)+'\n'
+fdd = open(input3, 'w')
fdd.writelines(finalProduct)
fdd.close()
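The reworked FindRate()/GetOverlap() pair computes an overlap-weighted mean:
each database interval contributes its value weighted by the number of bases
it shares with the query. The same computation as a compact illustrative
sketch (not the commit's code):

    def get_overlap(a, b):
        # (start, end) pairs; a positive result means the intervals intersect
        return min(a[1], b[1]) - max(a[0], b[0])

    def weighted_rate(query, intervals):
        # intervals: iterable of (start, end, value); 'NA' when nothing overlaps
        num = den = 0.0
        for start, end, value in intervals:
            ov = get_overlap(query, (start, end))
            if ov > 0:
                num += ov * value
                den += ov
        return num / den if den else 'NA'

    weighted_rate((0, 10), [(0, 5, 2.0), (5, 10, 4.0)])  # -> 3.0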
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/best_regression_subsets.py
--- a/tools/regVariation/best_regression_subsets.py
+++ b/tools/regVariation/best_regression_subsets.py
@@ -2,7 +2,7 @@
from galaxy import eggs
-import sys, string
+import sys
from rpy import *
import numpy
@@ -10,19 +10,20 @@
sys.stderr.write(msg)
sys.exit()
+
infile = sys.argv[1]
y_col = int(sys.argv[2])-1
x_cols = sys.argv[3].split(',')
outfile = sys.argv[4]
outfile2 = sys.argv[5]
-print "Predictor columns: %s; Response column: %d" %(x_cols,y_col+1)
+print "Predictor columns: %s; Response column: %d" % ( x_cols, y_col+1 )
fout = open(outfile,'w')
for i, line in enumerate( file ( infile )):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
elems = line.split( '\t' )
- break
+ break
if i == 30:
break # Hopefully we'll never get here...
@@ -32,12 +33,12 @@
y_vals = []
x_vals = []
-for k,col in enumerate(x_cols):
+for k, col in enumerate(x_cols):
x_cols[k] = int(col)-1
x_vals.append([])
NA = 'NA'
-for ind,line in enumerate( file( infile )):
+for ind, line in enumerate( file( infile ) ):
if line and not line.startswith( '#' ):
try:
fields = line.split("\t")
@@ -46,7 +47,7 @@
except Exception, ey:
yval = r('NA')
y_vals.append(yval)
- for k,col in enumerate(x_cols):
+ for k, col in enumerate(x_cols):
try:
xval = float(fields[col])
except Exception, ex:
@@ -59,10 +60,10 @@
x_vals1 = numpy.asarray(x_vals).transpose()
-dat= r.list(x=array(x_vals1), y=y_vals)
+dat = r.list(x=array(x_vals1), y=y_vals)
r.library("leaps")
-
+
set_default_mode(NO_CONVERSION)
try:
leaps = r.regsubsets(r("y ~ x"), data= r.na_exclude(dat))
@@ -75,10 +76,10 @@
pattern = "["
for i in range(tot):
pattern = pattern + 'c' + str(int(x_cols[int(i)]) + 1) + ' '
-pattern = pattern.strip() + ']'
-print >>fout, "#Vars\t%s\tR-sq\tAdj. R-sq\tC-p\tbic" %(pattern)
-for ind,item in enumerate(summary['outmat']):
- print >>fout, "%s\t%s\t%s\t%s\t%s\t%s" %(str(item).count('*'), item, summary['rsq'][ind], summary['adjr2'][ind], summary['cp'][ind], summary['bic'][ind])
+pattern = pattern.strip() + ']'
+print >> fout, "#Vars\t%s\tR-sq\tAdj. R-sq\tC-p\tbic" % (pattern)
+for ind, item in enumerate(summary['outmat']):
+ print >> fout, "%s\t%s\t%s\t%s\t%s\t%s" % (str(item).count('*'), item, summary['rsq'][ind], summary['adjr2'][ind], summary['cp'][ind], summary['bic'][ind])
r.pdf( outfile2, 8, 8 )
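best_regression_subsets.py hands the actual search to R's leaps::regsubsets
through rpy. The idea can be emulated in plain numpy by scoring every
predictor subset, for example by adjusted R-squared; the sketch below is a
hypothetical illustration, not the tool's implementation:

    import numpy as np
    from itertools import combinations

    def adj_r2(X, y):
        # ordinary least squares with an intercept, then adjusted R-squared
        A = np.column_stack([np.ones(len(y)), X])
        beta = np.linalg.lstsq(A, y)[0]
        resid = y - A.dot(beta)
        r2 = 1.0 - resid.dot(resid) / ((y - y.mean()) ** 2).sum()
        n, p = A.shape
        return 1.0 - (1.0 - r2) * (n - 1.0) / (n - p)

    def best_subsets(X, y):
        # exhaustively score every non-empty column subset, best first
        scored = []
        for k in range(1, X.shape[1] + 1):
            for cols in combinations(range(X.shape[1]), k):
                scored.append((adj_r2(X[:, list(cols)], y), cols))
        return sorted(scored, reverse=True)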
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/featureCounter.py
--- a/tools/regVariation/featureCounter.py
+++ b/tools/regVariation/featureCounter.py
@@ -11,8 +11,7 @@
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
-import sys, traceback, fileinput
-from warnings import warn
+import sys, fileinput
from bx.intervals.io import *
from bx.cookbook import doc_optparse
from bx.intervals.operations import quicksect
@@ -33,7 +32,7 @@
partial += 1
if node.left and node.left.maxend > start:
counter(node.left, start, end)
- if node.right:
+ if node.right:
counter(node.right, start, end)
elif start < node.start < end:
if node.end <= end:
@@ -42,10 +41,10 @@
partial += 1
if node.left and node.left.maxend > start:
counter(node.left, start, end)
- if node.right:
+ if node.right:
counter(node.right, start, end)
else:
- if node.left:
+ if node.left:
counter(node.left, start, end)
def count_coverage( readers, comments=True ):
@@ -58,8 +57,8 @@
if type( item ) is GenomicInterval:
rightTree.insert( item, secondary.linenum, item.fields )
- bitsets = secondary_copy.binned_bitsets()
-
+ bitsets = secondary_copy.binned_bitsets()
+
global full, partial
for interval in primary:
@@ -82,7 +81,7 @@
bases_covered = bitsets[ chrom ].count_range( start, end-start )
if (end - start) == 0:
percent = 0
- else:
+ else:
percent = float(bases_covered) / float(end - start)
if bases_covered:
root = rightTree.chroms[chrom] #root node for the chrom tree
@@ -92,13 +91,14 @@
interval.fields.append(str(full))
interval.fields.append(str(partial))
yield interval
-
+
+
def main():
options, args = doc_optparse.parse( __doc__ )
try:
chr_col_1, start_col_1, end_col_1, strand_col_1 = parse_cols_arg( options.cols1 )
- chr_col_2, start_col_2, end_col_2, strand_col_2 = parse_cols_arg( options.cols2 )
+ chr_col_2, start_col_2, end_col_2, strand_col_2 = parse_cols_arg( options.cols2 )
in1_fname, in2_fname, out_fname = args
except:
stop_err( "Data issue: click the pencil icon in the history item to correct the metadata attributes." )
@@ -126,7 +126,7 @@
out_file = open( out_fname, "w" )
try:
- for line in count_coverage([g1,g2,g2_copy]):
+ for line in count_coverage([g1, g2, g2_copy]):
if type( line ) is GenomicInterval:
out_file.write( "%s\n" % "\t".join( line.fields ) )
else:
@@ -143,6 +143,7 @@
print skipped( g2, filedesc=" of 2nd dataset" )
elif g2_copy.skipped > 0:
print skipped( g2_copy, filedesc=" of 2nd dataset" )
-
+
+
if __name__ == "__main__":
main()
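For orientation, the full/partial bookkeeping in featureCounter's counter()
boils down to a three-way classification of each feature against the window
(illustrative sketch, not the commit's code):

    def classify(window, feature):
        # window, feature: (start, end) half-open intervals
        wstart, wend = window
        fstart, fend = feature
        if fend <= wstart or fstart >= wend:
            return None        # no overlap at all
        if wstart <= fstart and fend <= wend:
            return 'full'      # feature entirely inside the window
        return 'partial'       # feature crosses a window boundary

    classify((100, 200), (150, 180))  # -> 'full'
    classify((100, 200), (180, 250))  # -> 'partial'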
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/getIndelRates_3way.py
--- a/tools/regVariation/getIndelRates_3way.py
+++ b/tools/regVariation/getIndelRates_3way.py
@@ -6,7 +6,6 @@
pkg_resources.require( "bx-python" )
import sys, os, tempfile
-import traceback
import fileinput
from warnings import warn
@@ -18,7 +17,8 @@
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
-
+
+
def counter(node, start, end, sort_col):
global full, blk_len, blk_list
if node.start < start:
@@ -31,14 +31,14 @@
blk_len += int(node.other[sort_col+2])
if node.left and node.left.maxend > start:
counter(node.left, start, end, sort_col)
- if node.right:
+ if node.right:
counter(node.right, start, end, sort_col)
elif node.start > end:
- if node.left:
+ if node.left:
counter(node.left, start, end, sort_col)
-
-infile = sys.argv[1]
+
+infile = sys.argv[1]
fout = open(sys.argv[2],'w')
int_file = sys.argv[3]
if int_file != "None": #User has specified an interval file
@@ -48,9 +48,9 @@
chr_col_i, start_col_i, end_col_i, strand_col_i = parse_cols_arg( sys.argv[5] )
except:
stop_err("Unable to open input Interval file")
-
+
+
def main():
-
for i, line in enumerate( file ( infile )):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
@@ -86,8 +86,7 @@
break
except:
continue
-
-
+
fin = open(infile, 'r')
skipped = 0
@@ -98,7 +97,7 @@
os.system(cmdline)
except:
stop_err("Encountered error while sorting the input file.")
- print >>fout, "#Block\t%s_InsRate\t%s_InsRate\t%s_InsRate\t%s_DelRate\t%s_DelRate\t%s_DelRate" %(species[0],species[1],species[2],species[0],species[1],species[2])
+ print >> fout, "#Block\t%s_InsRate\t%s_InsRate\t%s_InsRate\t%s_DelRate\t%s_DelRate\t%s_DelRate" % ( species[0], species[1], species[2], species[0], species[1], species[2] )
prev_bnum = -1
sorted_infile.seek(0)
for line in sorted_infile.readlines():
@@ -112,16 +111,16 @@
if prev_bnum != -1:
irate = []
drate = []
- for i,elem in enumerate(inserts):
+ for i, elem in enumerate(inserts):
try:
- irate.append(str("%.2e" %(inserts[i]/blen[i])))
+ irate.append(str("%.2e" % (inserts[i]/blen[i])))
except:
irate.append('0')
try:
- drate.append(str("%.2e" %(deletes[i]/blen[i])))
+ drate.append(str("%.2e" % (deletes[i]/blen[i])))
except:
drate.append('0')
- print >>fout, "%s\t%s\t%s" %(prev_bnum, '\t'.join(irate) , '\t'.join(drate))
+ print >> fout, "%s\t%s\t%s" % ( prev_bnum, '\t'.join(irate) , '\t'.join(drate) )
inserts = [0.0, 0.0, 0.0]
deletes = [0.0, 0.0, 0.0]
blen = []
@@ -134,25 +133,24 @@
inserts[sp_ind] += 1
elif elems[1].endswith('delete'):
deletes[sp_ind] += 1
- prev_bnum = new_bnum
+ prev_bnum = new_bnum
except Exception, ei:
#print >>sys.stderr, ei
continue
irate = []
drate = []
- for i,elem in enumerate(inserts):
+ for i, elem in enumerate(inserts):
try:
- irate.append(str("%.2e" %(inserts[i]/blen[i])))
+ irate.append(str("%.2e" % (inserts[i]/blen[i])))
except:
irate.append('0')
try:
- drate.append(str("%.2e" %(deletes[i]/blen[i])))
+ drate.append(str("%.2e" % (deletes[i]/blen[i])))
except:
drate.append('0')
- print >>fout, "%s\t%s\t%s" %(prev_bnum, '\t'.join(irate) , '\t'.join(drate))
+ print >> fout, "%s\t%s\t%s" % ( prev_bnum, '\t'.join(irate) , '\t'.join(drate) )
sys.exit()
-
inf = open(infile, 'r')
start_met = False
end_met = False
@@ -163,14 +161,14 @@
try:
assert int(elems[0])
assert len(elems) == 18
- if dbkey_i not in elems[1]:
- if not(start_met):
+ if dbkey_i not in elems[1]:
+ if not(start_met):
continue
else:
sp_end = n
break
else:
- print >>sp_file, line
+ print >> sp_file, line
if not(start_met):
start_met = True
sp_start = n
@@ -201,7 +199,7 @@
for item in indel:
if type( item ) is GenomicInterval:
indelTree.insert( item, indel.linenum, item.fields )
- result=[]
+ result = []
global full, blk_len, blk_list
for interval in win:
@@ -213,14 +211,14 @@
chrom = interval.chrom
start = int(interval.start)
end = int(interval.end)
- if start > end:
+ if start > end:
warn( "Interval start after end!" )
- ins_chr = "%s.%s_insert" %(dbkey_i,chrom)
- del_chr = "%s.%s_delete" %(dbkey_i,chrom)
+ ins_chr = "%s.%s_insert" % ( dbkey_i, chrom )
+ del_chr = "%s.%s_delete" % ( dbkey_i, chrom )
irate = 0
drate = 0
if ins_chr not in indelTree.chroms and del_chr not in indelTree.chroms:
- pass
+ pass
else:
if ins_chr in indelTree.chroms:
full = 0.0
@@ -242,8 +240,9 @@
interval.fields.append(str("%.2e" %irate))
interval.fields.append(str("%.2e" %drate))
- print >>fout, "\t".join(interval.fields)
+ print >> fout, "\t".join(interval.fields)
fout.flush()
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
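The repeated irate/drate blocks above perform one small step per block:
divide each species' insertion and deletion counts by the aligned block
length, print with "%.2e", and fall back to '0' when a length is bad. As an
illustrative sketch (the tool's bare except also swallows missing lengths):

    def indel_rates(counts, block_lengths):
        rates = []
        for count, length in zip(counts, block_lengths):
            try:
                rates.append("%.2e" % (count / float(length)))
            except ZeroDivisionError:
                rates.append('0')
        return rates

    indel_rates([3, 0, 5], [1200.0, 1200.0, 0.0])
    # -> ['2.50e-03', '0.00e+00', '0']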
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/getIndels.py
--- a/tools/regVariation/getIndels.py
+++ b/tools/regVariation/getIndels.py
@@ -8,13 +8,12 @@
from __future__ import division
from galaxy import eggs
-import pkg_resources
+import pkg_resources
pkg_resources.require( "bx-python" )
try:
pkg_resources.require("numpy")
except:
pass
-import psyco_full
import sys
from bx.cookbook import doc_optparse
from galaxy.tools.exception_handling import *
@@ -22,24 +21,24 @@
assert sys.version_info[:2] >= ( 2, 4 )
-def main():
+def main():
# Parsing Command Line here
options, args = doc_optparse.parse( __doc__ )
try:
- inp_file, out_file1 = args
+ inp_file, out_file1 = args
except:
print >> sys.stderr, "Tool initialization error."
sys.exit()
try:
- fin = open(inp_file,'r')
+ open(inp_file, 'r')
except:
print >> sys.stderr, "Unable to open input file"
sys.exit()
try:
- fout1 = open(out_file1,'w')
- #fout2 = open(out_file2,'w')
+ fout1 = open(out_file1, 'w')
+ #fout2 = open(out_file2, 'w')
except:
print >> sys.stderr, "Unable to open output file"
sys.exit()
@@ -47,11 +46,10 @@
try:
maf_reader = bx.align.maf.Reader( open(inp_file, 'r') )
except:
- print >>sys.stderr, "Your MAF file appears to be malformed."
+ print >> sys.stderr, "Your MAF file appears to be malformed."
sys.exit()
- maf_count = 0
- print >>fout1, "#Block\tSource\tSeq1_Start\tSeq1_End\tSeq2_Start\tSeq2_End\tIndel_length"
+ print >> fout1, "#Block\tSource\tSeq1_Start\tSeq1_End\tSeq2_Start\tSeq2_End\tIndel_length"
for block_ind, block in enumerate(maf_reader):
if len(block.components) < 2:
continue
@@ -84,19 +82,19 @@
#write 2
if prev_pos_gap2 == 1:
prev_pos_gap2 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src2,nt_pos1,nt_pos1+1,nt_pos2-1,nt_pos2-1+gaplen2,gaplen2)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src2, nt_pos1, nt_pos1+1, nt_pos2-1, nt_pos2-1+gaplen2, gaplen2 )
if pos == len(seq1)-1:
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src1,nt_pos1,nt_pos1+1,nt_pos2+1-gaplen1,nt_pos2+1,gaplen1)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src1, nt_pos1, nt_pos1+1, nt_pos2+1-gaplen1, nt_pos2+1, gaplen1 )
else:
prev_pos_gap1 = 0
prev_pos_gap2 = 0
"""
if prev_pos_gap1 == 1:
prev_pos_gap1 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s" %(block_ind+1,src1,nt_pos1-1,nt_pos1,gaplen1)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s" % ( block_ind+1, src1, nt_pos1-1, nt_pos1, gaplen1 )
elif prev_pos_gap2 == 1:
prev_pos_gap2 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s" %(block_ind+1,src2,nt_pos2-1,nt_pos2,gaplen2)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s" % ( block_ind+1, src2, nt_pos2-1, nt_pos2, gaplen2 )
"""
else:
nt_pos1 += 1
@@ -105,19 +103,21 @@
#write both
if prev_pos_gap1 == 1:
prev_pos_gap1 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src1,nt_pos1-1,nt_pos1,nt_pos2-gaplen1,nt_pos2,gaplen1)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src1, nt_pos1-1, nt_pos1, nt_pos2-gaplen1, nt_pos2, gaplen1 )
elif prev_pos_gap2 == 1:
prev_pos_gap2 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src2,nt_pos1-gaplen2,nt_pos1,nt_pos2-1,nt_pos2,gaplen2)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src2, nt_pos1-gaplen2, nt_pos1, nt_pos2-1, nt_pos2, gaplen2 )
else:
gaplen2 += 1
prev_pos_gap2 = 1
#write 1
if prev_pos_gap1 == 1:
prev_pos_gap1 = 0
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src1,nt_pos1-1,nt_pos1,nt_pos2,nt_pos2+gaplen1,gaplen1)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src1, nt_pos1-1, nt_pos1, nt_pos2, nt_pos2+gaplen1, gaplen1 )
if pos == len(seq1)-1:
- print >>fout1,"%d\t%s\t%s\t%s\t%s\t%s\t%s" %(block_ind+1,src2,nt_pos1+1-gaplen2,nt_pos1+1,nt_pos2,nt_pos2+1,gaplen2)
+ print >> fout1, "%d\t%s\t%s\t%s\t%s\t%s\t%s" % ( block_ind+1, src2, nt_pos1+1-gaplen2, nt_pos1+1, nt_pos2, nt_pos2+1, gaplen2 )
pos += 1
+
+
if __name__ == "__main__":
main()
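getIndels' per-column bookkeeping (nt_pos1/nt_pos2, gaplen1/gaplen2 and the
prev_pos_gap flags) implements one idea: walk the two aligned rows in
lockstep and emit each maximal gap run as a single indel event. A simplified
sketch that keeps alignment columns and drops the commit's conversion to
sequence coordinates:

    def gap_runs(seq1, seq2):
        # yields (row, alignment_column, run_length) per maximal '-' run
        for row, seq in ((1, seq1), (2, seq2)):
            run = 0
            for col, ch in enumerate(seq):
                if ch == '-':
                    run += 1
                elif run:
                    yield (row, col - run, run)
                    run = 0
            if run:
                yield (row, len(seq) - run, run)

    list(gap_runs("AC--GT", "ACTAG-"))  # -> [(1, 2, 2), (2, 5, 1)]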
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/linear_regression.py
--- a/tools/regVariation/linear_regression.py
+++ b/tools/regVariation/linear_regression.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
from galaxy import eggs
-import sys, string
+import sys
from rpy import *
import numpy
@@ -15,14 +15,14 @@
outfile = sys.argv[4]
outfile2 = sys.argv[5]
-print "Predictor columns: %s; Response column: %d" %(x_cols,y_col+1)
+print "Predictor columns: %s; Response column: %d" % ( x_cols, y_col+1 )
fout = open(outfile,'w')
elems = []
for i, line in enumerate( file ( infile )):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
elems = line.split( '\t' )
- break
+ break
if i == 30:
break # Hopefully we'll never get here...
@@ -32,12 +32,12 @@
y_vals = []
x_vals = []
-for k,col in enumerate(x_cols):
+for k, col in enumerate(x_cols):
x_cols[k] = int(col)-1
x_vals.append([])
NA = 'NA'
-for ind,line in enumerate( file( infile )):
+for ind, line in enumerate( file( infile )):
if line and not line.startswith( '#' ):
try:
fields = line.split("\t")
@@ -46,7 +46,7 @@
except:
yval = r('NA')
y_vals.append(yval)
- for k,col in enumerate(x_cols):
+ for k, col in enumerate(x_cols):
try:
xval = float(fields[col])
except:
@@ -57,7 +57,7 @@
x_vals1 = numpy.asarray(x_vals).transpose()
-dat= r.list(x=array(x_vals1), y=y_vals)
+dat = r.list(x=array(x_vals1), y=y_vals)
set_default_mode(NO_CONVERSION)
try:
@@ -66,8 +66,8 @@
stop_err("Error performing linear regression on the input data.\nEither the response column or one of the predictor columns contain only non-numeric or invalid values.")
set_default_mode(BASIC_CONVERSION)
-coeffs=linear_model.as_py()['coefficients']
-yintercept= coeffs['(Intercept)']
+coeffs = linear_model.as_py()['coefficients']
+yintercept = coeffs['(Intercept)']
summary = r.summary(linear_model)
co = summary.get('coefficients', 'NA')
@@ -82,8 +82,8 @@
except:
pass
-print >>fout, "Y-intercept\t%s" %(yintercept)
-print >>fout, "p-value (Y-intercept)\t%s" %(pvaly)
+print >> fout, "Y-intercept\t%s" % (yintercept)
+print >> fout, "p-value (Y-intercept)\t%s" % (pvaly)
if len(x_vals) == 1: #Simple linear regression case with 1 predictor variable
try:
@@ -94,22 +94,22 @@
pval = r.round(float(co[1][3]), digits=10)
except:
pval = 'NA'
- print >>fout, "Slope (c%d)\t%s" %(x_cols[0]+1,slope)
- print >>fout, "p-value (c%d)\t%s" %(x_cols[0]+1,pval)
+ print >> fout, "Slope (c%d)\t%s" % ( x_cols[0]+1, slope )
+ print >> fout, "p-value (c%d)\t%s" % ( x_cols[0]+1, pval )
else: #Multiple regression case with >1 predictors
- ind=1
+ ind = 1
while ind < len(coeffs.keys()):
try:
slope = r.round(float(coeffs['x'+str(ind)]), digits=10)
except:
slope = 'NA'
- print >>fout, "Slope (c%d)\t%s" %(x_cols[ind-1]+1,slope)
+ print >> fout, "Slope (c%d)\t%s" % ( x_cols[ind-1]+1, slope )
try:
pval = r.round(float(co[ind][3]), digits=10)
except:
pval = 'NA'
- print >>fout, "p-value (c%d)\t%s" %(x_cols[ind-1]+1,pval)
- ind+=1
+ print >> fout, "p-value (c%d)\t%s" % ( x_cols[ind-1]+1, pval )
+ ind += 1
rsq = summary.get('r.squared','NA')
adjrsq = summary.get('adj.r.squared','NA')
@@ -125,14 +125,14 @@
except:
pass
-print >>fout, "R-squared\t%s" %(rsq)
-print >>fout, "Adjusted R-squared\t%s" %(adjrsq)
-print >>fout, "F-statistic\t%s" %(fstat)
-print >>fout, "Sigma\t%s" %(sigma)
+print >> fout, "R-squared\t%s" % (rsq)
+print >> fout, "Adjusted R-squared\t%s" % (adjrsq)
+print >> fout, "F-statistic\t%s" % (fstat)
+print >> fout, "Sigma\t%s" % (sigma)
r.pdf( outfile2, 8, 8 )
if len(x_vals) == 1: #Simple linear regression case with 1 predictor variable
- sub_title = "Slope = %s; Y-int = %s" %(slope,yintercept)
+ sub_title = "Slope = %s; Y-int = %s" % ( slope, yintercept )
try:
r.plot(x=x_vals[0], y=y_vals, xlab="X", ylab="Y", sub=sub_title, main="Scatterplot with regression")
r.abline(a=yintercept, b=slope, col="red")
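For the single-predictor branch above, the slope and Y-intercept that r.lm()
reports are plain ordinary least squares, so the closed form is handy when
sanity-checking the tool's output (a sketch, not the tool's code):

    def simple_ols(xs, ys):
        # returns (slope, y_intercept) of the least-squares line
        n = float(len(xs))
        mx = sum(xs) / n
        my = sum(ys) / n
        sxy = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
        sxx = sum((x - mx) ** 2 for x in xs)
        slope = sxy / sxx
        return slope, my - slope * mx

    simple_ols([0, 1, 2], [1, 3, 5])  # -> (2.0, 1.0)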
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/logistic_regression_vif.py
--- a/tools/regVariation/logistic_regression_vif.py
+++ b/tools/regVariation/logistic_regression_vif.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
from galaxy import eggs
-import sys, string
+import sys
from rpy import *
import numpy
@@ -15,14 +15,14 @@
outfile = sys.argv[4]
-print "Predictor columns: %s; Response column: %d" %(x_cols,y_col+1)
+print "Predictor columns: %s; Response column: %d" % ( x_cols, y_col+1 )
fout = open(outfile,'w')
elems = []
-for i, line in enumerate( file ( infile )):
+for i, line in enumerate( file( infile ) ):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
elems = line.split( '\t' )
- break
+ break
if i == 30:
break # Hopefully we'll never get here...
@@ -32,12 +32,12 @@
y_vals = []
x_vals = []
-for k,col in enumerate(x_cols):
+for k, col in enumerate(x_cols):
x_cols[k] = int(col)-1
x_vals.append([])
NA = 'NA'
-for ind,line in enumerate( file( infile )):
+for ind, line in enumerate( file( infile )):
if line and not line.startswith( '#' ):
try:
fields = line.split("\t")
@@ -46,7 +46,7 @@
except:
yval = r('NA')
y_vals.append(yval)
- for k,col in enumerate(x_cols):
+ for k, col in enumerate(x_cols):
try:
xval = float(fields[col])
except:
@@ -57,46 +57,45 @@
x_vals1 = numpy.asarray(x_vals).transpose()
-check1=0
-check0=0
+check1 = 0
+check0 = 0
for i in y_vals:
if i == 1:
- check1=1
+ check1 = 1
if i == 0:
- check0=1
-if check1==0 or check0==0:
+ check0 = 1
+if check1 == 0 or check0 == 0:
sys.exit("Warning: logistic regression must have at least two classes")
for i in y_vals:
- if i not in [1,0,r('NA')]:
- print >>fout, str(i)
+ if i not in [1, 0, r('NA')]:
+ print >> fout, str(i)
sys.exit("Warning: the current version of this tool can run only with two classes and need to be labeled as 0 and 1.")
-
-
-dat= r.list(x=array(x_vals1), y=y_vals)
-novif=0
+
+dat = r.list(x=array(x_vals1), y=y_vals)
+novif = 0
set_default_mode(NO_CONVERSION)
try:
- linear_model = r.glm(r("y ~ x"), data = r.na_exclude(dat),family="binomial")
+ linear_model = r.glm(r("y ~ x"), data=r.na_exclude(dat), family="binomial")
except RException, rex:
stop_err("Error performing logistic regression on the input data.\nEither the response column or one of the predictor columns contain only non-numeric or invalid values.")
if len(x_cols)>1:
try:
r('suppressPackageStartupMessages(library(car))')
- r.assign('dat',dat)
- r.assign('ncols',len(x_cols))
- vif=r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx),family="binomial")'))
+ r.assign('dat', dat)
+ r.assign('ncols', len(x_cols))
+ vif = r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx), family="binomial")'))
except RException, rex:
print rex
else:
- novif=1
-
+ novif = 1
+
set_default_mode(BASIC_CONVERSION)
-coeffs=linear_model.as_py()['coefficients']
-null_deviance=linear_model.as_py()['null.deviance']
-residual_deviance=linear_model.as_py()['deviance']
-yintercept= coeffs['(Intercept)']
+coeffs = linear_model.as_py()['coefficients']
+null_deviance = linear_model.as_py()['null.deviance']
+residual_deviance = linear_model.as_py()['deviance']
+yintercept = coeffs['(Intercept)']
summary = r.summary(linear_model)
co = summary.get('coefficients', 'NA')
"""
@@ -109,14 +108,14 @@
pvaly = r.round(float(co[0][3]), digits=10)
except:
pass
-print >>fout, "response column\tc%d" %(y_col+1)
-tempP=[]
+print >> fout, "response column\tc%d" % (y_col+1)
+tempP = []
for i in x_cols:
tempP.append('c'+str(i+1))
-tempP=','.join(tempP)
-print >>fout, "predictor column(s)\t%s" %(tempP)
-print >>fout, "Y-intercept\t%s" %(yintercept)
-print >>fout, "p-value (Y-intercept)\t%s" %(pvaly)
+tempP = ','.join(tempP)
+print >> fout, "predictor column(s)\t%s" % (tempP)
+print >> fout, "Y-intercept\t%s" % (yintercept)
+print >> fout, "p-value (Y-intercept)\t%s" % (pvaly)
if len(x_vals) == 1: #Simple linear regression case with 1 predictor variable
try:
@@ -127,44 +126,43 @@
pval = r.round(float(co[1][3]), digits=10)
except:
pval = 'NA'
- print >>fout, "Slope (c%d)\t%s" %(x_cols[0]+1,slope)
- print >>fout, "p-value (c%d)\t%s" %(x_cols[0]+1,pval)
+ print >> fout, "Slope (c%d)\t%s" % ( x_cols[0]+1, slope )
+ print >> fout, "p-value (c%d)\t%s" % ( x_cols[0]+1, pval )
else: #Multiple regression case with >1 predictors
- ind=1
+ ind = 1
while ind < len(coeffs.keys()):
try:
slope = r.round(float(coeffs['x'+str(ind)]), digits=10)
except:
slope = 'NA'
- print >>fout, "Slope (c%d)\t%s" %(x_cols[ind-1]+1,slope)
+ print >> fout, "Slope (c%d)\t%s" % ( x_cols[ind-1]+1, slope )
try:
pval = r.round(float(co[ind][3]), digits=10)
except:
pval = 'NA'
- print >>fout, "p-value (c%d)\t%s" %(x_cols[ind-1]+1,pval)
- ind+=1
+ print >> fout, "p-value (c%d)\t%s" % ( x_cols[ind-1]+1, pval )
+ ind += 1
rsq = summary.get('r.squared','NA')
-
try:
- rsq= r.round(float((null_deviance-residual_deviance)/null_deviance), digits=5)
- null_deviance= r.round(float(null_deviance), digits=5)
- residual_deviance= r.round(float(residual_deviance), digits=5)
+ rsq = r.round(float((null_deviance-residual_deviance)/null_deviance), digits=5)
+ null_deviance = r.round(float(null_deviance), digits=5)
+ residual_deviance = r.round(float(residual_deviance), digits=5)
except:
pass
-print >>fout, "Null deviance\t%s" %(null_deviance)
-print >>fout, "Residual deviance\t%s" %(residual_deviance)
-print >>fout, "pseudo R-squared\t%s" %(rsq)
-print >>fout, "\n"
-print >>fout, 'vif'
+print >> fout, "Null deviance\t%s" % (null_deviance)
+print >> fout, "Residual deviance\t%s" % (residual_deviance)
+print >> fout, "pseudo R-squared\t%s" % (rsq)
+print >> fout, "\n"
+print >> fout, 'vif'
-if novif==0:
- py_vif=vif.as_py()
- count=0
+if novif == 0:
+ py_vif = vif.as_py()
+ count = 0
for i in sorted(py_vif.keys()):
- print >>fout,'c'+str(x_cols[count]+1) ,str(py_vif[i])
- count+=1
-elif novif==1:
- print >>fout, "vif can calculate only when model have more than 1 predictor"
+ print >> fout, 'c'+str(x_cols[count]+1), str(py_vif[i])
+ count += 1
+elif novif == 1:
+    print >> fout, "VIF can be calculated only when the model has more than one predictor"
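The "pseudo R-squared" printed above is the deviance-based ratio computed
from the two quantities the glm fit returns; as a one-line sketch:

    def pseudo_r_squared(null_deviance, residual_deviance):
        # share of the null deviance explained by the fitted predictors
        return (null_deviance - residual_deviance) / null_deviance

    pseudo_r_squared(138.6, 102.2)  # -> roughly 0.2626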
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/maf_cpg_filter.py
--- a/tools/regVariation/maf_cpg_filter.py
+++ b/tools/regVariation/maf_cpg_filter.py
@@ -10,7 +10,7 @@
"""
from galaxy import eggs
-import pkg_resources
+import pkg_resources
pkg_resources.require( "bx-python" )
try:
pkg_resources.require( "numpy" )
@@ -54,7 +54,7 @@
defn = "non-CpG"
cpgfilter.run( reader, writer.write )
- print "%2.2f percent bases masked; Mask character = %s, Definition = %s" %(float(cpgfilter.masked)/float(cpgfilter.total) * 100, mask, defn)
+ print "%2.2f percent bases masked; Mask character = %s, Definition = %s" % ( float(cpgfilter.masked)/float(cpgfilter.total) * 100, mask, defn )
if __name__ == "__main__":
main()
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/microsats_alignment_level.py
--- a/tools/regVariation/microsats_alignment_level.py
+++ b/tools/regVariation/microsats_alignment_level.py
@@ -4,7 +4,11 @@
Uses SPUTNIK to fetch microsatellites and extracts orthologous repeats from the sputnik output.
"""
from galaxy import eggs
-import sys, os, tempfile, string, math, re
+import os
+import re
+import string
+import sys
+import tempfile
def reverse_complement(text):
DNA_COMP = string.maketrans( "ACGTacgt", "TGCAtgca" )
@@ -12,31 +16,26 @@
comp.reverse()
return "".join(comp)
+
def main():
if len(sys.argv) != 8:
- print >>sys.stderr, "Insufficient number of arguments."
+ print >> sys.stderr, "Insufficient number of arguments."
sys.exit()
infile = open(sys.argv[1],'r')
separation = int(sys.argv[2])
outfile = sys.argv[3]
- align_type = sys.argv[4]
- if align_type == "2way":
- align_type_len = 2
- elif align_type == "3way":
- align_type_len = 3
mono_threshold = int(sys.argv[5])
non_mono_threshold = int(sys.argv[6])
allow_different_units = int(sys.argv[7])
- print "Min distance = %d bp; Min threshold for mono repeats = %d; Min threshold for non-mono repeats = %d; Allow different motifs = %s" %(separation, mono_threshold, non_mono_threshold, allow_different_units==1)
+ print "Min distance = %d bp; Min threshold for mono repeats = %d; Min threshold for non-mono repeats = %d; Allow different motifs = %s" % ( separation, mono_threshold, non_mono_threshold, allow_different_units==1 )
try:
fout = open(outfile, "w")
- print >>fout, "#Block\tSeq1_Name\tSeq1_Start\tSeq1_End\tSeq1_Type\tSeq1_Length\tSeq1_RepeatNumber\tSeq1_Unit\tSeq2_Name\tSeq2_Start\tSeq2_End\tSeq2_Type\tSeq2_Length\tSeq2_RepeatNumber\tSeq2_Unit"
+ print >> fout, "#Block\tSeq1_Name\tSeq1_Start\tSeq1_End\tSeq1_Type\tSeq1_Length\tSeq1_RepeatNumber\tSeq1_Unit\tSeq2_Name\tSeq2_Start\tSeq2_End\tSeq2_Type\tSeq2_Length\tSeq2_RepeatNumber\tSeq2_Unit"
#sputnik_cmd = os.path.join(os.path.split(sys.argv[0])[0], "sputnik")
sputnik_cmd = "sputnik"
input = infile.read()
- skipped = 0
block_num = 0
input = input.replace('\r','\n')
for block in input.split('\n\n'):
@@ -44,26 +43,24 @@
tmpin = tempfile.NamedTemporaryFile()
tmpout = tempfile.NamedTemporaryFile()
tmpin.write(block.strip())
- blk = tmpin.read()
cmdline = sputnik_cmd + " " + tmpin.name + " > /dev/null 2>&1 >> " + tmpout.name
try:
os.system(cmdline)
- except Exception, es:
+ except Exception:
continue
sputnik_out = tmpout.read()
tmpin.close()
tmpout.close()
if sputnik_out != "":
if len(block.split('>')[1:]) != 2: #len(sputnik_out.split('>')):
- skipped += 1
continue
align_block = block.strip().split('>')
lendict = {'mononucleotide':1, 'dinucleotide':2, 'trinucleotide':3, 'tetranucleotide':4, 'pentanucleotide':5, 'hexanucleotide':6}
- blockdict={}
- r=0
- namelist=[]
- for k,sput_block in enumerate(sputnik_out.split('>')[1:]):
+ blockdict = {}
+ r = 0
+ namelist = []
+ for k, sput_block in enumerate(sputnik_out.split('>')[1:]):
whole_seq = ''.join(align_block[k+1].split('\n')[1:]).replace('\n','').strip()
p = re.compile('\n(\S*nucleotide)')
repeats = p.split(sput_block.strip())
@@ -71,13 +68,12 @@
j = 1
name = repeats[0].strip()
try:
- coords = re.search('\d+[-_:]\d+',name).group()
- coords = coords.replace('_','-').replace(':','-')
- except Exception, e:
+ coords = re.search('\d+[-_:]\d+', name).group()
+ coords = coords.replace('_', '-').replace(':', '-')
+ except Exception:
coords = '0-0'
- pass
r += 1
- blockdict[r]={}
+ blockdict[r] = {}
try:
sp_name = name[:name.index('.')]
chr_name = name[name.index('.'):name.index('(')]
@@ -91,11 +87,10 @@
continue
if blockdict[r].has_key('types'):
- blockdict[r]['types'].append(repeats[j].strip()) #type of microsat
+ blockdict[r]['types'].append(repeats[j].strip()) #type of microsat
else:
- blockdict[r]['types'] = [repeats[j].strip()] #type of microsat
+ blockdict[r]['types'] = [repeats[j].strip()] #type of microsat
- sequence = ''.join(align_block[r].split('\n')[1:]).replace('\n','').strip()
start = int(repeats[j+1].split('--')[0].split(':')[0].strip())
#check to see if there are gaps before the start of the repeat, and change the start accordingly
sgaps = 0
@@ -107,7 +102,7 @@
break #break at the 1st non-gap character
ch_pos -= 1
if blockdict[r].has_key('starts'):
- blockdict[r]['starts'].append(start+sgaps) #start co-ords adjusted with alignment co-ords to include GAPS
+ blockdict[r]['starts'].append(start+sgaps) #start co-ords adjusted with alignment co-ords to include GAPS
else:
blockdict[r]['starts'] = [start+sgaps]
@@ -120,7 +115,7 @@
else:
break #break at the 1st non-gap character
if blockdict[r].has_key('ends'):
- blockdict[r]['ends'].append(end+egaps) #end co-ords adjusted with alignment co-ords to include GAPS
+ blockdict[r]['ends'].append(end+egaps) #end co-ords adjusted with alignment co-ords to include GAPS
else:
blockdict[r]['ends'] = [end+egaps]
@@ -134,20 +129,20 @@
gaps_before_start = whole_seq[:rel_start].count('-')
if blockdict[r].has_key('gaps_before_start'):
- blockdict[r]['gaps_before_start'].append(gaps_before_start) #lengths
+ blockdict[r]['gaps_before_start'].append(gaps_before_start) #lengths
else:
- blockdict[r]['gaps_before_start'] = [gaps_before_start] #lengths
+ blockdict[r]['gaps_before_start'] = [gaps_before_start] #lengths
- whole_seq_start= int(coords.split('-')[0])
+ whole_seq_start = int(coords.split('-')[0])
if blockdict[r].has_key('whole_seq_start'):
- blockdict[r]['whole_seq_start'].append(whole_seq_start) #lengths
+ blockdict[r]['whole_seq_start'].append(whole_seq_start) #lengths
else:
- blockdict[r]['whole_seq_start'] = [whole_seq_start] #lengths
+ blockdict[r]['whole_seq_start'] = [whole_seq_start] #lengths
if blockdict[r].has_key('lengths'):
- blockdict[r]['lengths'].append(repeat_len) #lengths
+ blockdict[r]['lengths'].append(repeat_len) #lengths
else:
- blockdict[r]['lengths'] = [repeat_len] #lengths
+ blockdict[r]['lengths'] = [repeat_len] #lengths
if blockdict[r].has_key('counts'):
blockdict[r]['counts'].append(str(int(repeat_len)/lendict[repeats[j].strip()])) #Repeat Unit
@@ -159,10 +154,10 @@
else:
blockdict[r]['units'] = [repeat_seq[:lendict[repeats[j].strip()]]] #Repeat Unit
- except Exception, eh:
+ except Exception:
pass
- j+=2
- #check the co-ords of all repeats corresponding to a sequence and remove adjacent repeats separated by less than the user-specified 'separation'.
+ j += 2
+ #check the co-ords of all repeats corresponding to a sequence and remove adjacent repeats separated by less than the user-specified 'separation'.
delete_index_list = []
for ind, item in enumerate(blockdict[r]['ends']):
try:
@@ -171,7 +166,7 @@
delete_index_list.append(ind)
if ind+1 not in delete_index_list:
delete_index_list.append(ind+1)
- except Exception, ek:
+ except Exception:
pass
for index in delete_index_list: #mark them for deletion
try:
@@ -183,7 +178,7 @@
blockdict[r]['lengths'][index] = 'marked'
blockdict[r]['counts'][index] = 'marked'
blockdict[r]['units'][index] = 'marked'
- except Exception, ej:
+ except Exception:
pass
#remove 'marked' elements from all the lists
"""
@@ -192,19 +187,19 @@
if elem == 'marked':
blockdict[r][key].remove(elem)
"""
- #print blockdict
+ #print blockdict
- #make sure that the blockdict has keys for both the species
+ #make sure that the blockdict has keys for both the species
if (1 not in blockdict) or (2 not in blockdict):
continue
visited_2 = [0 for x in range(len(blockdict[2]['starts']))]
- for ind1,coord_s1 in enumerate(blockdict[1]['starts']):
+ for ind1, coord_s1 in enumerate(blockdict[1]['starts']):
if coord_s1 == 'marked':
continue
coord_e1 = blockdict[1]['ends'][ind1]
out = []
- for ind2,coord_s2 in enumerate(blockdict[2]['starts']):
+ for ind2, coord_s2 in enumerate(blockdict[2]['starts']):
if coord_s2 == 'marked':
visited_2[ind2] = 1
continue
@@ -216,7 +211,7 @@
else:
if (blockdict[1]['units'][ind1] not in blockdict[2]['units'][ind2]*2) and (reverse_complement(blockdict[1]['units'][ind1]) not in blockdict[2]['units'][ind2]*2):
continue
- #print >>sys.stderr, (reverse_complement(blockdict[1]['units'][ind1]) not in blockdict[2]['units'][ind2]*2)
+ #print >> sys.stderr, (reverse_complement(blockdict[1]['units'][ind1]) not in blockdict[2]['units'][ind2]*2)
#skip if the repeat number thresholds are not met
if blockdict[1]['types'][ind1] == 'mononucleotide':
if (int(blockdict[1]['counts'][ind1]) < mono_threshold):
@@ -231,12 +226,12 @@
else:
if (int(blockdict[2]['counts'][ind2]) < non_mono_threshold):
continue
- #print "s1,e1=%s,%s; s2,e2=%s,%s" %(coord_s1,coord_e1,coord_s2,coord_e2)
- if (coord_s1 in range(coord_s2,coord_e2)) or (coord_e1 in range(coord_s2,coord_e2)):
+ #print "s1,e1=%s,%s; s2,e2=%s,%s" % ( coord_s1, coord_e1, coord_s2, coord_e2 )
+ if (coord_s1 in range(coord_s2, coord_e2)) or (coord_e1 in range(coord_s2, coord_e2)):
out.append(str(block_num))
out.append(namelist[0])
rel_start = blockdict[1]['whole_seq_start'][ind1] + coord_s1 - blockdict[1]['gaps_before_start'][ind1]
- rel_end = rel_start + int(blockdict[1]['lengths'][ind1])
+ rel_end = rel_start + int(blockdict[1]['lengths'][ind1])
out.append(str(rel_start))
out.append(str(rel_end))
out.append(blockdict[1]['types'][ind1])
@@ -245,16 +240,16 @@
out.append(blockdict[1]['units'][ind1])
out.append(namelist[1])
rel_start = blockdict[2]['whole_seq_start'][ind2] + coord_s2 - blockdict[2]['gaps_before_start'][ind2]
- rel_end = rel_start + int(blockdict[2]['lengths'][ind2])
+ rel_end = rel_start + int(blockdict[2]['lengths'][ind2])
out.append(str(rel_start))
out.append(str(rel_end))
out.append(blockdict[2]['types'][ind2])
out.append(blockdict[2]['lengths'][ind2])
out.append(blockdict[2]['counts'][ind2])
out.append(blockdict[2]['units'][ind2])
- print >>fout, '\t'.join(out)
+ print >> fout, '\t'.join(out)
visited_2[ind2] = 1
- out=[]
+ out = []
if 0 in visited_2: #there are still some elements in 2nd set which haven't found orthologs yet.
for ind2, coord_s2 in enumerate(blockdict[2]['starts']):
@@ -264,7 +259,7 @@
continue
coord_e2 = blockdict[2]['ends'][ind2]
out = []
- for ind1,coord_s1 in enumerate(blockdict[1]['starts']):
+ for ind1, coord_s1 in enumerate(blockdict[1]['starts']):
if coord_s1 == 'marked':
continue
coord_e1 = blockdict[1]['ends'][ind1]
@@ -290,11 +285,11 @@
if (int(blockdict[2]['counts'][ind2]) < non_mono_threshold):
continue
- if (coord_s2 in range(coord_s1,coord_e1)) or (coord_e2 in range(coord_s1,coord_e1)):
- out.append(str(block_num))
+ if (coord_s2 in range(coord_s1, coord_e1)) or (coord_e2 in range(coord_s1, coord_e1)):
+ out.append(str(block_num))
out.append(namelist[0])
rel_start = blockdict[1]['whole_seq_start'][ind1] + coord_s1 - blockdict[1]['gaps_before_start'][ind1]
- rel_end = rel_start + int(blockdict[1]['lengths'][ind1])
+ rel_end = rel_start + int(blockdict[1]['lengths'][ind1])
out.append(str(rel_start))
out.append(str(rel_end))
out.append(blockdict[1]['types'][ind1])
@@ -303,21 +298,21 @@
out.append(blockdict[1]['units'][ind1])
out.append(namelist[1])
rel_start = blockdict[2]['whole_seq_start'][ind2] + coord_s2 - blockdict[2]['gaps_before_start'][ind2]
- rel_end = rel_start + int(blockdict[2]['lengths'][ind2])
+ rel_end = rel_start + int(blockdict[2]['lengths'][ind2])
out.append(str(rel_start))
out.append(str(rel_end))
out.append(blockdict[2]['types'][ind2])
out.append(blockdict[2]['lengths'][ind2])
out.append(blockdict[2]['counts'][ind2])
out.append(blockdict[2]['units'][ind2])
- print >>fout, '\t'.join(out)
+ print >> fout, '\t'.join(out)
visited_2[ind2] = 1
- out=[]
+ out = []
- #print >>fout, blockdict
+ #print >> fout, blockdict
except Exception, exc:
- print >>sys.stderr, "type(exc),args,exc: %s, %s, %s" %(type(exc), exc.args, exc)
+ print >> sys.stderr, "type(exc),args,exc: %s, %s, %s" % ( type(exc), exc.args, exc )
+
if __name__ == "__main__":
main()
-
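Two details of microsats_alignment_level.py are worth spelling out: repeat
units are compared up to rotation and reverse complement, and the rotation
test is the classic "doubled string" trick visible above as unit*2. An
illustrative sketch (Python 2 string.maketrans, matching the tool; the
length check is an added safeguard, not in the commit):

    import string

    DNA_COMP = string.maketrans("ACGTacgt", "TGCAtgca")

    def reverse_complement(text):
        return text.translate(DNA_COMP)[::-1]

    def units_match(u1, u2):
        # u1 matches u2 if it is a rotation of u2 or of its reverse
        # complement; every rotation of u2 is a substring of u2 doubled
        if len(u1) != len(u2):
            return False
        doubled = u2 * 2
        return u1 in doubled or reverse_complement(u1) in doubled

    units_match("AG", "GA")  # -> True: GA is a rotation of AG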
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/microsats_mutability.py
--- a/tools/regVariation/microsats_mutability.py
+++ b/tools/regVariation/microsats_mutability.py
@@ -4,7 +4,10 @@
This tool computes microsatellite mutability for the orthologous microsatellites fetched from 'Extract Orthologous Microsatellites from pair-wise alignments' tool.
"""
from galaxy import eggs
-import sys, string, re, commands, tempfile, os, fileinput
+import fileinput
+import string
+import sys
+import tempfile
from galaxy.tools.util.galaxyops import *
from bx.intervals.io import *
from bx.intervals.operations import quicksect
@@ -19,7 +22,7 @@
p_group_cols = [p_group, p_group+7]
s_group_cols = [s_group, s_group+7]
num_generations = int(sys.argv[7])
-region = sys.argv[8]
+region = sys.argv[8]
int_file = sys.argv[9]
if int_file != "None": #User has specified an interval file
try:
@@ -28,31 +31,35 @@
chr_col_i, start_col_i, end_col_i, strand_col_i = parse_cols_arg( sys.argv[11] )
except:
stop_err("Unable to open input Interval file")
-
+
+
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
+
def reverse_complement(text):
DNA_COMP = string.maketrans( "ACGTacgt", "TGCAtgca" )
comp = [ch for ch in text.translate(DNA_COMP)]
comp.reverse()
return "".join(comp)
+
def get_unique_elems(elems):
- seen=set()
+ seen = set()
return[x for x in elems if x not in seen and not seen.add(x)]
+
def get_binned_lists(uniqlist, binsize):
- binnedlist=[]
+ binnedlist = []
uniqlist.sort()
start = int(uniqlist[0])
- bin_ind=0
- l_ind=0
+ bin_ind = 0
+ l_ind = 0
binnedlist.append([])
while l_ind < len(uniqlist):
elem = int(uniqlist[l_ind])
- if elem in range(start,start+binsize):
+ if elem in range(start, start+binsize):
binnedlist[bin_ind].append(elem)
else:
start += binsize
@@ -62,39 +69,38 @@
l_ind += 1
return binnedlist
-def fetch_weight(H,C,t):
+
+def fetch_weight(H, C, t):
if (H-(C-H)) < t:
return 2.0
else:
return 1.0
-def mutabilityEstimator(repeats1,repeats2,thresholds):
+
+def mutabilityEstimator(repeats1, repeats2, thresholds):
mut_num = 0.0 #Mutability Numerator
mut_den = 0.0 #Mutability denominator
- for ind,H in enumerate(repeats1):
+ for ind, H in enumerate(repeats1):
C = repeats2[ind]
t = thresholds[ind]
- w = fetch_weight(H,C,t)
+ w = fetch_weight(H, C, t)
mut_num += ((H-C)*(H-C)*w)
mut_den += w
return [mut_num, mut_den]
+
def output_writer(blk, blk_lines):
global winspecies, speciesind
- all_elems_1=[]
- all_elems_2=[]
- all_s_elems_1=[]
- all_s_elems_2=[]
+ all_elems_1 = []
+ all_elems_2 = []
+ all_s_elems_1 = []
+ all_s_elems_2 = []
for bline in blk_lines:
if not(bline):
continue
items = bline.split('\t')
seq1 = items[1]
- start1 = items[2]
- end1 = items[3]
seq2 = items[8]
- start2 = items[9]
- end2 = items[10]
if p_group_cols[0] == 6:
items[p_group_cols[0]] = int(items[p_group_cols[0]])
items[p_group_cols[1]] = int(items[p_group_cols[1]])
@@ -111,8 +117,8 @@
if s_group_cols[0] != -1:
uniq_s_elems_1 = get_unique_elems(all_s_elems_1)
uniq_s_elems_2 = get_unique_elems(all_s_elems_2)
- mut1={}
- mut2={}
+ mut1 = {}
+ mut2 = {}
count1 = {}
count2 = {}
"""
@@ -120,12 +126,12 @@
uniq_elems_1 = get_unique_units(j.sort(lambda x, y: len(x)-len(y)))
"""
if p_group_cols[0] == 6: #i.e. the option chosen is group-by repeat number.
- uniq_elems_1 = get_binned_lists(uniq_elems_1,p_bin_size)
- uniq_elems_2 = get_binned_lists(uniq_elems_2,p_bin_size)
+ uniq_elems_1 = get_binned_lists( uniq_elems_1, p_bin_size )
+ uniq_elems_2 = get_binned_lists( uniq_elems_2, p_bin_size )
if s_group_cols[0] == 6: #i.e. the option chosen is subgroup-by repeat number.
- uniq_s_elems_1 = get_binned_lists(uniq_s_elems_1,s_bin_size)
- uniq_s_elems_2 = get_binned_lists(uniq_s_elems_2,s_bin_size)
+ uniq_s_elems_1 = get_binned_lists( uniq_s_elems_1, s_bin_size )
+ uniq_s_elems_2 = get_binned_lists( uniq_s_elems_2, s_bin_size )
for pitem1 in uniq_elems_1:
#repeats1 = []
@@ -143,61 +149,61 @@
if p_group_cols[0] == 6:
belems[p_group_cols[0]] = int(belems[p_group_cols[0]])
if belems[p_group_cols[0]] in pitem1:
- if belems[s_group_cols[0]]==sitem1:
+ if belems[s_group_cols[0]] == sitem1:
repeats1.append(int(belems[6]))
repeats2.append(int(belems[13]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut1[str(pitem1)+'\t'+str(sitem1)]=mutabilityEstimator(repeats1,repeats2,thresholds)
+ mut1[str(pitem1)+'\t'+str(sitem1)] = mutabilityEstimator( repeats1, repeats2, thresholds )
if region == 'align':
- count1[str(pitem1)+'\t'+str(sitem1)]=min(sum(repeats1),sum(repeats2))
- else:
+ count1[str(pitem1)+'\t'+str(sitem1)] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count1["%s\t%s" %(pitem1,sitem1)]=sum(repeats1)
+ count1["%s\t%s" % ( pitem1, sitem1 )] = sum(repeats1)
elif winspecies == 2:
- count1["%s\t%s" %(pitem1,sitem1)]=sum(repeats2)
+ count1["%s\t%s" % ( pitem1, sitem1 )] = sum(repeats2)
else:
if type(sitem1) == list:
if s_group_cols[0] == 6:
belems[s_group_cols[0]] = int(belems[s_group_cols[0]])
- if belems[p_group_cols[0]]==pitem1 and belems[s_group_cols[0]] in sitem1:
+ if belems[p_group_cols[0]] == pitem1 and belems[s_group_cols[0]] in sitem1:
repeats1.append(int(belems[6]))
repeats2.append(int(belems[13]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut1["%s\t%s" %(pitem1,sitem1)]=mutabilityEstimator(repeats1,repeats2,thresholds)
+ mut1["%s\t%s" % ( pitem1, sitem1 )] = mutabilityEstimator( repeats1, repeats2, thresholds )
if region == 'align':
- count1[str(pitem1)+'\t'+str(sitem1)]=min(sum(repeats1),sum(repeats2))
- else:
+ count1[str(pitem1)+'\t'+str(sitem1)] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count1[str(pitem1)+'\t'+str(sitem1)]=sum(repeats1)
+ count1[str(pitem1)+'\t'+str(sitem1)] = sum(repeats1)
elif winspecies == 2:
- count1[str(pitem1)+'\t'+str(sitem1)]=sum(repeats2)
+ count1[str(pitem1)+'\t'+str(sitem1)] = sum(repeats2)
else:
- if belems[p_group_cols[0]]==pitem1 and belems[s_group_cols[0]]==sitem1:
+ if belems[p_group_cols[0]] == pitem1 and belems[s_group_cols[0]] == sitem1:
repeats1.append(int(belems[6]))
repeats2.append(int(belems[13]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut1["%s\t%s" %(pitem1,sitem1)]=mutabilityEstimator(repeats1,repeats2,thresholds)
+ mut1["%s\t%s" % ( pitem1, sitem1 )] = mutabilityEstimator( repeats1, repeats2, thresholds )
if region == 'align':
- count1[str(pitem1)+'\t'+str(sitem1)]=min(sum(repeats1),sum(repeats2))
- else:
+ count1[str(pitem1)+'\t'+str(sitem1)] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count1["%s\t%s" %(pitem1,sitem1)]=sum(repeats1)
+ count1["%s\t%s" % ( pitem1, sitem1 )] = sum(repeats1)
elif winspecies == 2:
- count1["%s\t%s" %(pitem1,sitem1)]=sum(repeats2)
+ count1["%s\t%s" % ( pitem1, sitem1 )] = sum(repeats2)
else: #Sub-group by feature is None
for bline in blk_lines:
belems = bline.split('\t')
if type(pitem1) == list:
- #print >>sys.stderr, "item: " + str(item1)
+ #print >> sys.stderr, "item: " + str(item1)
if p_group_cols[0] == 6:
belems[p_group_cols[0]] = int(belems[p_group_cols[0]])
if belems[p_group_cols[0]] in pitem1:
@@ -208,21 +214,21 @@
else:
thresholds.append(non_mono_threshold)
else:
- if belems[p_group_cols[0]]==pitem1:
+ if belems[p_group_cols[0]] == pitem1:
repeats1.append(int(belems[6]))
repeats2.append(int(belems[13]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut1["%s" %(pitem1)]=mutabilityEstimator(repeats1,repeats2,thresholds)
+ mut1["%s" % (pitem1)] = mutabilityEstimator( repeats1, repeats2, thresholds )
if region == 'align':
- count1["%s" %(pitem1)]=min(sum(repeats1),sum(repeats2))
- else:
+ count1["%s" % (pitem1)] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count1[str(pitem1)]=sum(repeats1)
+ count1[str(pitem1)] = sum(repeats1)
elif winspecies == 2:
- count1[str(pitem1)]=sum(repeats2)
+ count1[str(pitem1)] = sum(repeats2)
for pitem2 in uniq_elems_2:
#repeats1 = []
@@ -239,57 +245,57 @@
if type(pitem2) == list:
if p_group_cols[0] == 6:
belems[p_group_cols[1]] = int(belems[p_group_cols[1]])
- if belems[p_group_cols[1]] in pitem2 and belems[s_group_cols[1]]==sitem2:
+ if belems[p_group_cols[1]] in pitem2 and belems[s_group_cols[1]] == sitem2:
repeats2.append(int(belems[13]))
repeats1.append(int(belems[6]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut2["%s\t%s" %(pitem2,sitem2)]=mutabilityEstimator(repeats2,repeats1,thresholds)
+ mut2["%s\t%s" % ( pitem2, sitem2 )] = mutabilityEstimator( repeats2, repeats1, thresholds )
#count2[str(pitem2)+'\t'+str(sitem2)]=len(repeats2)
if region == 'align':
- count2["%s\t%s" %(pitem2,sitem2)]=min(sum(repeats1),sum(repeats2))
- else:
+ count2["%s\t%s" % ( pitem2, sitem2 )] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats2)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats2)
elif winspecies == 2:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats1)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats1)
else:
if type(sitem2) == list:
if s_group_cols[0] == 6:
belems[s_group_cols[1]] = int(belems[s_group_cols[1]])
- if belems[p_group_cols[1]]==pitem2 and belems[s_group_cols[1]] in sitem2:
+ if belems[p_group_cols[1]] == pitem2 and belems[s_group_cols[1]] in sitem2:
repeats2.append(int(belems[13]))
repeats1.append(int(belems[6]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut2["%s\t%s" %(pitem2,sitem2)]=mutabilityEstimator(repeats2,repeats1,thresholds)
+ mut2["%s\t%s" % ( pitem2, sitem2 )] = mutabilityEstimator( repeats2, repeats1, thresholds )
if region == 'align':
- count2["%s\t%s" %(pitem2,sitem2)]=min(sum(repeats1),sum(repeats2))
- else:
+ count2["%s\t%s" % ( pitem2, sitem2 )] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats2)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats2)
elif winspecies == 2:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats1)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats1)
else:
- if belems[p_group_cols[1]]==pitem2 and belems[s_group_cols[1]]==sitem2:
+ if belems[p_group_cols[1]] == pitem2 and belems[s_group_cols[1]] == sitem2:
repeats1.append(int(belems[13]))
repeats2.append(int(belems[6]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut2["%s\t%s" %(pitem2,sitem2)]=mutabilityEstimator(repeats2,repeats1,thresholds)
+ mut2["%s\t%s" % ( pitem2, sitem2 )] = mutabilityEstimator( repeats2, repeats1, thresholds )
if region == 'align':
- count2["%s\t%s" %(pitem2,sitem2)]=min(sum(repeats1),sum(repeats2))
- else:
+ count2["%s\t%s" % ( pitem2, sitem2 )] = min( sum(repeats1), sum(repeats2) )
+ else:
if winspecies == 1:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats2)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats2)
elif winspecies == 2:
- count2["%s\t%s" %(pitem2,sitem2)]=len(repeats1)
+ count2["%s\t%s" % ( pitem2, sitem2 )] = len(repeats1)
else: #Sub-group by feature is None
for bline in blk_lines:
belems = bline.split('\t')
@@ -304,21 +310,21 @@
else:
thresholds.append(non_mono_threshold)
else:
- if belems[p_group_cols[1]]==pitem2:
+ if belems[p_group_cols[1]] == pitem2:
repeats2.append(int(belems[13]))
repeats1.append(int(belems[6]))
if belems[4] == 'mononucleotide':
thresholds.append(mono_threshold)
else:
thresholds.append(non_mono_threshold)
- mut2["%s" %(pitem2)]=mutabilityEstimator(repeats2,repeats1,thresholds)
+ mut2["%s" % (pitem2)] = mutabilityEstimator( repeats2, repeats1, thresholds )
if region == 'align':
- count2["%s" %(pitem2)]=min(sum(repeats1),sum(repeats2))
+ count2["%s" % (pitem2)] = min( sum(repeats1), sum(repeats2) )
else:
if winspecies == 1:
- count2["%s" %(pitem2)]=sum(repeats2)
+ count2["%s" % (pitem2)] = sum(repeats2)
elif winspecies == 2:
- count2["%s" %(pitem2)]=sum(repeats1)
+ count2["%s" % (pitem2)] = sum(repeats1)
for key in mut1.keys():
if key in mut2.keys():
mut = (mut1[key][0]+mut2[key][0])/(mut1[key][1]+mut2[key][1])
@@ -328,9 +334,9 @@
unit_found = False
if p_group_cols[0] == 7 or s_group_cols[0] == 7: #if it is Repeat Unit (AG, GCT etc.) check for reverse-complements too
if p_group_cols[0] == 7:
- this,other = 0,1
+ this, other = 0, 1
else:
- this,other = 1,0
+ this, other = 1, 0
groups1 = key.split('\t')
mutn = mut1[key][0]
mutd = mut1[key][1]
@@ -351,28 +357,29 @@
else:
mut = mut1[key][0]/mut1[key][1]
count = count1[key]
- mut = "%.2e" %(mut/num_generations)
+ mut = "%.2e" % (mut/num_generations)
if region == 'align':
- print >>fout, str(blk) + '\t'+seq1 + '\t' + seq2 + '\t' +key.strip()+ '\t'+str(mut) + '\t'+ str(count)
+ print >> fout, str(blk) + '\t'+seq1 + '\t' + seq2 + '\t' +key.strip()+ '\t'+str(mut) + '\t'+ str(count)
elif region == 'win':
- fout.write("%s\t%s\t%s\t%s\n" %(blk,key.strip(),mut,count))
+ fout.write("%s\t%s\t%s\t%s\n" % ( blk, key.strip(), mut, count ))
fout.flush()
#catch any remaining repeats, for instance if the orthologous position contained different repeat units
for remaining_key in mut2.keys():
mut = mut2[remaining_key][0]/mut2[remaining_key][1]
- mut = "%.2e" %(mut/num_generations)
+ mut = "%.2e" % (mut/num_generations)
count = count2[remaining_key]
if region == 'align':
- print >>fout, str(blk) + '\t'+seq1 + '\t'+seq2 + '\t'+remaining_key.strip()+ '\t'+str(mut)+ '\t'+ str(count)
+ print >> fout, str(blk) + '\t'+seq1 + '\t'+seq2 + '\t'+remaining_key.strip()+ '\t'+str(mut)+ '\t'+ str(count)
elif region == 'win':
- fout.write("%s\t%s\t%s\t%s\n" %(blk,remaining_key.strip(),mut,count))
+ fout.write("%s\t%s\t%s\t%s\n" % ( blk, remaining_key.strip(), mut, count ))
fout.flush()
- #print >>fout, blk + '\t'+remaining_key.strip()+ '\t'+str(mut)+ '\t'+ str(count)
+ #print >> fout, blk + '\t'+remaining_key.strip()+ '\t'+str(mut)+ '\t'+ str(count)
+
def counter(node, start, end, report_func):
if start <= node.start < end and start < node.end <= end:
- report_func(node)
+ report_func(node)
if node.right:
counter(node.right, start, end, report_func)
if node.left:
@@ -381,8 +388,8 @@
counter(node.right, start, end, report_func)
elif node.start >= end and node.left and node.left.maxend > start:
counter(node.left, start, end, report_func)
-
-
+
+
def main():
infile = sys.argv[1]
@@ -400,21 +407,18 @@
if region == 'win':
if dbkey_i in elems[1]:
winspecies = 1
- speciesind = 1
+ speciesind = 1
elif dbkey_i in elems[8]:
winspecies = 2
speciesind = 8
else:
- stop_err("The species build corresponding to your interval file is not present in the Microsatellite file.")
+ stop_err("The species build corresponding to your interval file is not present in the Microsatellite file.")
fin = open(infile, 'r')
skipped = 0
- blk=0
- win=0
- linestr=""
+ linestr = ""
if region == 'win':
-
msats = NiceReaderWrapper( fileinput.FileInput( infile ),
chrom_col = speciesind,
start_col = speciesind+1,
@@ -435,7 +439,7 @@
ichr = ielems[chr_col_i]
istart = int(ielems[start_col_i])
iend = int(ielems[end_col_i])
- isrc = "%s.%s" %(dbkey_i,ichr)
+ isrc = "%s.%s" % ( dbkey_i, ichr )
if isrc not in msatTree.chroms:
continue
result = []
@@ -450,14 +454,14 @@
tmpfile1.seek(0)
output_writer(iline, tmpfile1.readlines())
except:
- skipped+=1
+ skipped += 1
if skipped:
- print "Skipped %d intervals as invalid." %(skipped)
+ print "Skipped %d intervals as invalid." % (skipped)
elif region == 'align':
if s_group_cols[0] != -1:
- print >>fout, "#Window\tSpecies_1\tSpecies_2\tGroupby_Feature\tSubGroupby_Feature\tMutability\tCount"
+ print >> fout, "#Window\tSpecies_1\tSpecies_2\tGroupby_Feature\tSubGroupby_Feature\tMutability\tCount"
else:
- print >>fout, "#Window\tSpecies_1\tWindow_Start\tWindow_End\tSpecies_2\tGroupby_Feature\tMutability\tCount"
+ print >> fout, "#Window\tSpecies_1\tWindow_Start\tWindow_End\tSpecies_2\tGroupby_Feature\tMutability\tCount"
prev_bnum = -1
try:
for line in fin:
@@ -481,9 +485,11 @@
prev_bnum = new_bnum
output_writer(prev_bnum, linestr.strip().replace('\r','\n').split('\n'))
except Exception, ea:
- print >>sys.stderr, ea
+ print >> sys.stderr, ea
skipped += 1
if skipped:
- print "Skipped %d lines as invalid." %(skipped)
+ print "Skipped %d lines as invalid." % (skipped)
+
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
diff -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 -r 82d2c2109792d098ddc5ede7992ae132e596584a tools/regVariation/partialR_square.py
--- a/tools/regVariation/partialR_square.py
+++ b/tools/regVariation/partialR_square.py
@@ -2,7 +2,7 @@
from galaxy import eggs
-import sys, string
+import sys
from rpy import *
import numpy
@@ -13,6 +13,7 @@
sys.stderr.write(msg)
sys.exit()
+
def sscombs(s):
if len(s) == 1:
return [s]
@@ -26,14 +27,14 @@
x_cols = sys.argv[3].split(',')
outfile = sys.argv[4]
-print "Predictor columns: %s; Response column: %d" %(x_cols,y_col+1)
+print "Predictor columns: %s; Response column: %d" % ( x_cols, y_col+1 )
fout = open(outfile,'w')
for i, line in enumerate( file ( infile )):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
elems = line.split( '\t' )
- break
+ break
if i == 30:
break # Hopefully we'll never get here...
@@ -43,7 +44,7 @@
y_vals = []
x_vals = []
-for k,col in enumerate(x_cols):
+for k, col in enumerate(x_cols):
x_cols[k] = int(col)-1
x_vals.append([])
"""
@@ -51,13 +52,13 @@
float( elems[x_cols[k]] )
except:
try:
- msg = "This operation cannot be performed on non-numeric column %d containing value '%s'." %( col, elems[x_cols[k]] )
+ msg = "This operation cannot be performed on non-numeric column %d containing value '%s'." % ( col, elems[x_cols[k]] )
except:
msg = "This operation cannot be performed on non-numeric data."
stop_err( msg )
"""
NA = 'NA'
-for ind,line in enumerate( file( infile )):
+for ind, line in enumerate( file( infile )):
if line and not line.startswith( '#' ):
try:
fields = line.split("\t")
@@ -65,20 +66,20 @@
yval = float(fields[y_col])
except Exception, ey:
yval = r('NA')
- #print >>sys.stderr, "ey = %s" %ey
+ #print >> sys.stderr, "ey = %s" %ey
y_vals.append(yval)
- for k,col in enumerate(x_cols):
+ for k, col in enumerate(x_cols):
try:
xval = float(fields[col])
except Exception, ex:
xval = r('NA')
- #print >>sys.stderr, "ex = %s" %ex
+ #print >> sys.stderr, "ex = %s" %ex
x_vals[k].append(xval)
except:
pass
x_vals1 = numpy.asarray(x_vals).transpose()
-dat= r.list(x=array(x_vals1), y=y_vals)
+dat = r.list(x=array(x_vals1), y=y_vals)
set_default_mode(NO_CONVERSION)
try:
@@ -91,7 +92,7 @@
fullr2 = summary.get('r.squared','NA')
if fullr2 == 'NA':
- stop_error("Error in linear regression")
+ stop_err("Error in linear regression")
if len(x_vals) < 10:
s = ""
@@ -100,10 +101,10 @@
else:
stop_err("This tool only works with less than 10 predictors.")
-print >>fout, "#Model\tR-sq\tpartial_R_Terms\tpartial_R_Value"
+print >> fout, "#Model\tR-sq\tpartial_R_Terms\tpartial_R_Value"
all_combos = sorted(sscombs(s), key=len)
all_combos.reverse()
-for j,cols in enumerate(all_combos):
+for j, cols in enumerate(all_combos):
#if len(cols) == len(s): #Same as the full model above
# continue
if len(cols) == 1:
@@ -113,7 +114,7 @@
for col in cols:
x_v.append(x_vals[int(col)])
x_vals1 = numpy.asarray(x_v).transpose()
- dat= r.list(x=array(x_vals1), y=y_vals)
+ dat = r.list(x=array(x_vals1), y=y_vals)
set_default_mode(NO_CONVERSION)
red = r.lm(r("y ~ x"), data= dat) #Reduced model
set_default_mode(BASIC_CONVERSION)
@@ -136,11 +137,11 @@
partial_R_col_str = "-"
partial_R = "-"
try:
- redr2 = "%.4f" %(float(redr2))
+ redr2 = "%.4f" % (float(redr2))
except:
pass
try:
- partial_R = "%.4f" %(float(partial_R))
+ partial_R = "%.4f" % (float(partial_R))
except:
pass
- print >>fout, "%s\t%s\t%s\t%s" %(col_str,redr2,partial_R_col_str,partial_R)
+ print >> fout, "%s\t%s\t%s\t%s" % ( col_str, redr2, partial_R_col_str, partial_R )
This diff is so big that we needed to truncate the remainder.
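The partial R-squared loop above walks every subset of the predictor columns, fitting a reduced model per subset. A minimal standalone sketch of that subset enumeration, using itertools instead of the recursive sscombs from the diff (illustrative only, not the shipped code):

    from itertools import combinations

    def sscombs(s):
        # Every non-empty subset of the predictor labels, e.g. "012".
        return [''.join(c) for r in range(1, len(s) + 1)
                for c in combinations(s, r)]

    all_combos = sorted(sscombs("012"), key=len)
    all_combos.reverse()  # longest models first, as in the tool
    print(all_combos)     # ['012', '12', '02', '01', '2', '1', '0']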
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: guerler: Grids: Fix filter column separator
by commits-noreply@bitbucket.org 03 Feb '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c043a2ca8051/
Changeset: c043a2ca8051
User: guerler
Date: 2014-02-03 22:23:53
Summary: Grids: Fix filter column separator
Affected #: 1 file
diff -r 519797105a694ac9044e5e6139380a51c5c5fc52 -r c043a2ca8051de612d4e895ba55e6dce0697a8d3 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py
+++ b/lib/galaxy/web/framework/helpers/grids.py
@@ -132,7 +132,7 @@
if len( column_filter ) == 1:
column_filter = column_filter[0]
# Interpret ',' as a separator for multiple terms.
- elif isinstance( column_filter, basestring ) and column_filter.find(',') != -1:
+ if isinstance( column_filter, basestring ) and column_filter.find(',') != -1:
column_filter = column_filter.split(',')
# Check if filter is empty
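A minimal sketch (Python 3, standalone, not the actual grids module) of why changing 'elif' back to 'if' matters here: a filter arriving as a one-element list whose sole item is comma-separated must be unwrapped first and then still re-checked for the ',' separator:

    def normalize_filter(column_filter):
        # Unwrap single-element lists.
        if isinstance(column_filter, list) and len(column_filter) == 1:
            column_filter = column_filter[0]
        # With 'elif' here, the unwrapped string was never split on ','.
        if isinstance(column_filter, str) and ',' in column_filter:
            column_filter = column_filter.split(',')
        return column_filter

    print(normalize_filter(["a,b"]))  # ['a', 'b'] after the fix; 'a,b' before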
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: guerler: Grids: Fix grids with preserved states
by commits-noreply@bitbucket.org 03 Feb '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/519797105a69/
Changeset: 519797105a69
User: guerler
Date: 2014-02-03 22:21:12
Summary: Grids: Fix grids with preserved states
Affected #: 3 files
diff -r 37544732f61c32202d134c263c1257f0920e36a0 -r 519797105a694ac9044e5e6139380a51c5c5fc52 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py
+++ b/lib/galaxy/web/framework/helpers/grids.py
@@ -105,6 +105,7 @@
column_filter = kwargs.get( "f-" + column.key )
elif column.key in base_filter:
column_filter = base_filter.get( column.key )
+
# Method (1) combines a mix of strings and lists of strings into a single string and (2) attempts to de-jsonify all strings.
def from_json_string_recurse(item):
decoded_list = []
@@ -131,15 +132,24 @@
if len( column_filter ) == 1:
column_filter = column_filter[0]
# Interpret ',' as a separator for multiple terms.
- if isinstance( column_filter, basestring ) and column_filter.find(',') != -1:
+ elif isinstance( column_filter, basestring ) and column_filter.find(',') != -1:
column_filter = column_filter.split(',')
- # If filter criterion is empty, do nothing.
- if column_filter == '':
- continue
+
+ # Check if filter is empty
+ if isinstance( column_filter, list ):
+ # Remove empty strings from filter list
+ column_filter = [x for x in column_filter if x != '']
+ if len(column_filter) == 0:
+ continue;
+ elif isinstance(column_filter, basestring):
+ # If filter criterion is empty, do nothing.
+ if column_filter == '':
+ continue
+
# Update query.
query = column.filter( trans, trans.user, query, column_filter )
        # Update current filter dict.
- #Column filters are rendered in various places, sanitize them all here.
+ # Column filters are rendered in various places, sanitize them all here.
cur_filter_dict[ column.key ] = sanitize_text(column_filter)
# Carry filter along to newly generated urls; make sure filter is a string so
# that we can encode to UTF-8 and thus handle user input to filters.
@@ -200,10 +210,7 @@
if page_num == 0:
# Show all rows in page.
total_num_rows = query.count()
- # persistent page='all'
page_num = 1
- #page_num = 'all'
- #extra_url_args['page'] = page_num
num_pages = 1
else:
# Show a limited number of rows. Before modifying query, get the total number of rows that query
diff -r 37544732f61c32202d134c263c1257f0920e36a0 -r 519797105a694ac9044e5e6139380a51c5c5fc52 static/scripts/mvc/grid/grid-model.js
--- a/static/scripts/mvc/grid/grid-model.js
+++ b/static/scripts/mvc/grid/grid-model.js
@@ -37,8 +37,8 @@
if (cur_val === null || cur_val === undefined) {
new_val = value;
}
- else if (typeof(cur_val) == "string") {
- if (cur_val == "All") {
+ else if (typeof(cur_val) == 'string') {
+ if (cur_val == 'All') {
new_val = value;
} else {
// Replace string with array.
@@ -70,29 +70,16 @@
return false;
}
- var removed = true;
- if (typeof(cur_val) === "string") {
- if (cur_val == "All") {
- // Unexpected. Throw error?
- removed = false;
- }
- else {
- // Remove condition.
- delete this.attributes.filters[key];
+ if (typeof(cur_val) === 'string') {
+ // overwrite/remove condition.
+ this.attributes.filters[key] = '';
+ } else {
+ // filter contains an array of conditions.
+ var condition_index = _.indexOf(cur_val, condition);
+ if (condition_index !== -1) {
+ cur_val[condition_index] = '';
}
}
- else {
- // Filter contains an array of conditions.
- var condition_index = _.indexOf(cur_val, condition);
- if (condition_index !== -1) {
- cur_val.splice(condition_index, 1);
- }
- else {
- removed = false;
- }
- }
-
- return removed;
},
/**
diff -r 37544732f61c32202d134c263c1257f0920e36a0 -r 519797105a694ac9044e5e6139380a51c5c5fc52 static/scripts/packed/mvc/grid/grid-model.js
--- a/static/scripts/packed/mvc/grid/grid-model.js
+++ b/static/scripts/packed/mvc/grid/grid-model.js
@@ -1,1 +1,1 @@
-define([],function(){return Backbone.Model.extend({defaults:{url_base:"",async:false,async_ops:[],categorical_filters:[],filters:{},sort_key:null,show_item_checkboxes:false,advanced_search:false,cur_page:1,num_pages:1,operation:undefined,item_ids:undefined},can_async_op:function(a){return _.indexOf(this.attributes.async_ops,a)!==-1},add_filter:function(e,f,b){if(b){var c=this.attributes.filters[e],a;if(c===null||c===undefined){a=f}else{if(typeof(c)=="string"){if(c=="All"){a=f}else{var d=[];d[0]=c;d[1]=f;a=d}}else{a=c;a.push(f)}}this.attributes.filters[e]=a}else{this.attributes.filters[e]=f}},remove_filter:function(b,e){var a=this.attributes.filters[b];if(a===null||a===undefined){return false}var d=true;if(typeof(a)==="string"){if(a=="All"){d=false}else{delete this.attributes.filters[b]}}else{var c=_.indexOf(a,e);if(c!==-1){a.splice(c,1)}else{d=false}}return d},get_url_data:function(){var a={async:this.attributes.async,sort:this.attributes.sort_key,page:this.attributes.cur_page,show_item_checkboxes:this.attributes.show_item_checkboxes,advanced_search:this.attributes.advanced_search};if(this.attributes.operation){a.operation=this.attributes.operation}if(this.attributes.item_ids){a.id=this.attributes.item_ids}var b=this;_.each(_.pairs(b.attributes.filters),function(c){a["f-"+c[0]]=c[1]});return a},get_url:function(a){return this.get("url_base")+"?"+$.param(this.get_url_data())+"&"+$.param(a)}})});
\ No newline at end of file
+define([],function(){return Backbone.Model.extend({defaults:{url_base:"",async:false,async_ops:[],categorical_filters:[],filters:{},sort_key:null,show_item_checkboxes:false,advanced_search:false,cur_page:1,num_pages:1,operation:undefined,item_ids:undefined},can_async_op:function(a){return _.indexOf(this.attributes.async_ops,a)!==-1},add_filter:function(e,f,b){if(b){var c=this.attributes.filters[e],a;if(c===null||c===undefined){a=f}else{if(typeof(c)=="string"){if(c=="All"){a=f}else{var d=[];d[0]=c;d[1]=f;a=d}}else{a=c;a.push(f)}}this.attributes.filters[e]=a}else{this.attributes.filters[e]=f}},remove_filter:function(b,d){var a=this.attributes.filters[b];if(a===null||a===undefined){return false}if(typeof(a)==="string"){this.attributes.filters[b]=""}else{var c=_.indexOf(a,d);if(c!==-1){a[c]=""}}},get_url_data:function(){var a={async:this.attributes.async,sort:this.attributes.sort_key,page:this.attributes.cur_page,show_item_checkboxes:this.attributes.show_item_checkboxes,advanced_search:this.attributes.advanced_search};if(this.attributes.operation){a.operation=this.attributes.operation}if(this.attributes.item_ids){a.id=this.attributes.item_ids}var b=this;_.each(_.pairs(b.attributes.filters),function(c){a["f-"+c[0]]=c[1]});return a},get_url:function(a){return this.get("url_base")+"?"+$.param(this.get_url_data())+"&"+$.param(a)}})});
\ No newline at end of file
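The client and server halves of this fix work together: remove_filter now blanks a condition (empty string) instead of deleting it, so the key still round-trips in generated URLs, and the grids.py hunk above strips those empty strings server-side. A hedged Python sketch of the new client semantics (illustrative names; the real code is the Backbone model above):

    def remove_filter(filters, key, condition):
        cur_val = filters.get(key)
        if cur_val is None:
            return
        if isinstance(cur_val, str):
            # Overwrite rather than delete so the emptied filter is
            # still sent to the server, clearing any preserved state.
            filters[key] = ''
        elif condition in cur_val:
            # cur_val is a list of conditions.
            cur_val[cur_val.index(condition)] = ''

    filters = {'name': ['foo', 'bar']}
    remove_filter(filters, 'name', 'foo')
    print(filters)  # {'name': ['', 'bar']}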
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: jmchilton: Improve examples of newer tool functional test features.
by commits-noreply@bitbucket.org 03 Feb '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/37544732f61c/
Changeset: 37544732f61c
User: jmchilton
Date: 2014-02-03 17:11:31
Summary: Improve examples of newer tool functional test features.
Affected #: 3 files
diff -r 0f636b29e4f58d1568e8658dcae57e3c45a14563 -r 37544732f61c32202d134c263c1257f0920e36a0 test/functional/tools/README.txt
--- a/test/functional/tools/README.txt
+++ b/test/functional/tools/README.txt
@@ -1,3 +1,3 @@
This directory contains tools only useful for testing the tool test framework
and demonstrating its features. Run the driver script 'run_functional_tests.sh'
-with '-installed' as first argument to run through these tests.
+with '-framework' as first argument to run through these tests.
diff -r 0f636b29e4f58d1568e8658dcae57e3c45a14563 -r 37544732f61c32202d134c263c1257f0920e36a0 test/functional/tools/disambiguate_cond.xml
--- a/test/functional/tools/disambiguate_cond.xml
+++ b/test/functional/tools/disambiguate_cond.xml
@@ -46,19 +46,8 @@
<data name="out_file1" format="txt" /></outputs><tests>
- <test>
- <param name="p1|use" value="True"/>
- <param name="p2|use" value="False"/>
- <param name="p3|use" value="True"/>
- <param name="p4|use" value="True" />
- <param name="p4|file" value="simple_line.txt" />
- <output name="out_file1">
- <assert_contents>
- <has_line line="4 7 4" />
- <has_line line="This is a line of text." />
- </assert_contents>
- </output>
- </test>
+ <!-- Can use nested conditional blocks as shown below to disambiguate
+ various nested parameters. --><test><conditional name="p1"><param name="use" value="False"/>
@@ -83,5 +72,23 @@
</assert_contents></output></test>
+ <!-- Can also use a more concise, flat version of this, but the
+ above version is clearer and should be considered preferable.
+ -->
+ <test>
+ <param name="p1|use" value="True"/>
+ <param name="p2|use" value="False"/>
+ <param name="p3|use" value="True"/>
+ <param name="p4|use" value="True" />
+ <!-- Only need to specify enough of a suffix to disambiguate,
+ but don't do this - it is too clever. -->
+ <param name="p4|file" value="simple_line.txt" />
+ <output name="out_file1">
+ <assert_contents>
+ <has_line line="4 7 4" />
+ <has_line line="This is a line of text." />
+ </assert_contents>
+ </output>
+ </test></tests></tool>
diff -r 0f636b29e4f58d1568e8658dcae57e3c45a14563 -r 37544732f61c32202d134c263c1257f0920e36a0 test/functional/tools/disambiguate_repeats.xml
--- a/test/functional/tools/disambiguate_repeats.xml
+++ b/test/functional/tools/disambiguate_repeats.xml
@@ -14,17 +14,24 @@
<data name="out_file1" format="txt" /></outputs><tests>
- <!-- Can use prefixes to disambiguate inputs or force order. -->
+ <!-- Can disambiguate repeats and specify multiple blocks using a
+ nested structure. --><test>
- <param name="queries_1|input" value="simple_line_alternative.txt"/>
- <param name="queries_0|input" value="simple_line.txt"/>
-
- <param name="more_queries_1|input" value="simple_line_alternative.txt" />
- <param name="more_queries_0|input" value="simple_line.txt"/>
-
- <output name="out_file1" file="simple_lines_interleaved.txt"/>
+ <repeat name="queries">
+ <param name="input" value="simple_line.txt"/>
+ </repeat>
+ <repeat name="more_queries">
+ <param name="input" value="simple_line_alternative.txt"/>
+ </repeat>
+ <output name="out_file1">
+ <assert_contents>
+ <has_line line="This is a line of text." />
+ <has_line line="This is a different line of text." />
+ </assert_contents>
+ </output></test>
-
+ <!-- Multiple such blocks can be specified, but only with the newer
+ API-driven tests. --><test><repeat name="queries"><param name="input" value="simple_line.txt"/>
@@ -40,6 +47,17 @@
</repeat><output name="out_file1" file="simple_lines_interleaved.txt"/></test>
+ <!-- Can also use prefixes to disambiguate inputs or force order, but
+ the above nested structure is preferable. -->
+ <test>
+ <param name="queries_1|input" value="simple_line_alternative.txt"/>
+ <param name="queries_0|input" value="simple_line.txt"/>
+
+ <param name="more_queries_1|input" value="simple_line_alternative.txt" />
+ <param name="more_queries_0|input" value="simple_line.txt"/>
+
+ <output name="out_file1" file="simple_lines_interleaved.txt"/>
+ </test></tests></tool>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
7 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/045da9050e44/
Changeset: 045da9050e44
Branch: workflow_params
User: simleo
Date: 2014-01-30 12:15:55
Summary: Created new branch workflow_params
Affected #: 0 files
https://bitbucket.org/galaxy/galaxy-central/commits/cb881765cb99/
Changeset: cb881765cb99
Branch: workflow_params
User: simleo
Date: 2014-01-30 12:18:58
Summary: changed step_id passing convention in the workflows API
Affected #: 1 file
diff -r 045da9050e4489f7f559ec0e106790410856e35d -r cb881765cb992eb4aff08d8d62a74f2ca6dfa5de lib/galaxy/webapps/galaxy/api/workflows.py
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -214,19 +214,12 @@
step.state = step.module.state
# Update step parameters as directed by payload's parameter mapping.
- if step.tool_id in param_map:
- param_dict = param_map[ step.tool_id ]
- step_id = param_dict.get( 'step_id', '' )
-
+ param_dict = param_map.get(str(step.id), param_map.get(step.tool_id))
+ if param_dict is not None:
# Backward compatibility: convert param/value dict to new 'name': 'value' format.
if 'param' in param_dict and 'value' in param_dict:
param_dict[ param_dict['param'] ] = param_dict['value']
-
- # Update step if there's no step id (i.e. all steps with tool are
- # updated) or update if step ids match.
- if not step_id or ( step_id and int( step_id ) == step.id ):
- for name, value in param_dict.items():
- step.state.inputs[ name ] = value
+ step.state.inputs.update(param_dict)
if step.tool_errors:
trans.response.status = 400
https://bitbucket.org/galaxy/galaxy-central/commits/713b2604e8ce/
Changeset: 713b2604e8ce
Branch: workflow_params
User: simleo
Date: 2014-02-02 14:21:49
Summary: workflow API: more flexible step parameter updating
Affected #: 1 file
diff -r cb881765cb992eb4aff08d8d62a74f2ca6dfa5de -r 713b2604e8cec21b762222cc193b17d057eb680e lib/galaxy/webapps/galaxy/api/workflows.py
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -19,6 +19,39 @@
log = logging.getLogger(__name__)
+def _update_step_parameters(step, param_map):
+ """
+ Update ``step`` parameters based on the user-provided ``param_map`` dict.
+
+ ``param_map`` should be structured as follows::
+
+ PARAM_MAP = {STEP_ID: PARAM_DICT, ...}
+ PARAM_DICT = {NAME: VALUE, ...}
+
+ For backwards compatibility, the following (deprecated) formats is
+ also supported for ``param_map``::
+
+ PARAM_MAP = {TOOL_ID: PARAM_DICT, ...}
+
+ in which case PARAM_DICT affects all steps with the given tool id.
+ If both by-tool-id and by-step-id specifications are used, the
+ latter takes precedence.
+
+ Finally (again, for backwards compatibility), PARAM_DICT can also
+ be specified as::
+
+ PARAM_DICT = {'param': NAME, 'value': VALUE}
+
+ Note that this format allows only one parameter to be set per step.
+ """
+ param_dict = param_map.get(step.tool_id, {}).copy()
+ param_dict.update(param_map.get(str(step.id), {}))
+ if param_dict:
+ if 'param' in param_dict and 'value' in param_dict:
+ param_dict[param_dict['param']] = param_dict['value']
+ step.state.inputs.update(param_dict)
+
+
class WorkflowsAPIController(BaseAPIController, UsesStoredWorkflowMixin):
@web.expose_api
@@ -212,15 +245,7 @@
# are not persisted so we need to do it every time)
step.module.add_dummy_datasets( connections=step.input_connections )
step.state = step.module.state
-
- # Update step parameters as directed by payload's parameter mapping.
- param_dict = param_map.get(str(step.id), param_map.get(step.tool_id))
- if param_dict is not None:
- # Backward compatibility: convert param/value dict to new 'name': 'value' format.
- if 'param' in param_dict and 'value' in param_dict:
- param_dict[ param_dict['param'] ] = param_dict['value']
- step.state.inputs.update(param_dict)
-
+ _update_step_parameters(step, param_map)
if step.tool_errors:
trans.response.status = 400
return "Workflow cannot be run because of validation errors in some steps: %s" % step_errors
https://bitbucket.org/galaxy/galaxy-central/commits/d8a1b16c94e2/
Changeset: d8a1b16c94e2
Branch: workflow_params
User: simleo
Date: 2014-02-02 14:35:57
Summary: fixed a typo
Affected #: 1 file
diff -r 713b2604e8cec21b762222cc193b17d057eb680e -r d8a1b16c94e26e89be0ce0d1b3b696be4ac7957e lib/galaxy/webapps/galaxy/api/workflows.py
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -28,7 +28,7 @@
PARAM_MAP = {STEP_ID: PARAM_DICT, ...}
PARAM_DICT = {NAME: VALUE, ...}
- For backwards compatibility, the following (deprecated) formats is
+ For backwards compatibility, the following (deprecated) format is
also supported for ``param_map``::
PARAM_MAP = {TOOL_ID: PARAM_DICT, ...}
https://bitbucket.org/galaxy/galaxy-central/commits/19bff78d5c91/
Changeset: 19bff78d5c91
User: jmchilton
Date: 2014-02-03 00:44:50
Summary: Merge pull request #317.
Affected #: 1 file
diff -r 6788cffb792bc88bb6cfb3a15aff1d29a8c4a700 -r 19bff78d5c91db39a34cc18ea467faf7e5129736 lib/galaxy/webapps/galaxy/api/workflows.py
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -19,6 +19,39 @@
log = logging.getLogger(__name__)
+def _update_step_parameters(step, param_map):
+ """
+ Update ``step`` parameters based on the user-provided ``param_map`` dict.
+
+ ``param_map`` should be structured as follows::
+
+ PARAM_MAP = {STEP_ID: PARAM_DICT, ...}
+ PARAM_DICT = {NAME: VALUE, ...}
+
+ For backwards compatibility, the following (deprecated) format is
+ also supported for ``param_map``::
+
+ PARAM_MAP = {TOOL_ID: PARAM_DICT, ...}
+
+ in which case PARAM_DICT affects all steps with the given tool id.
+ If both by-tool-id and by-step-id specifications are used, the
+ latter takes precedence.
+
+ Finally (again, for backwards compatibility), PARAM_DICT can also
+ be specified as::
+
+ PARAM_DICT = {'param': NAME, 'value': VALUE}
+
+ Note that this format allows only one parameter to be set per step.
+ """
+ param_dict = param_map.get(step.tool_id, {}).copy()
+ param_dict.update(param_map.get(str(step.id), {}))
+ if param_dict:
+ if 'param' in param_dict and 'value' in param_dict:
+ param_dict[param_dict['param']] = param_dict['value']
+ step.state.inputs.update(param_dict)
+
+
class WorkflowsAPIController(BaseAPIController, UsesStoredWorkflowMixin):
@web.expose_api
@@ -212,22 +245,7 @@
# are not persisted so we need to do it every time)
step.module.add_dummy_datasets( connections=step.input_connections )
step.state = step.module.state
-
- # Update step parameters as directed by payload's parameter mapping.
- if step.tool_id in param_map:
- param_dict = param_map[ step.tool_id ]
- step_id = param_dict.get( 'step_id', '' )
-
- # Backward compatibility: convert param/value dict to new 'name': 'value' format.
- if 'param' in param_dict and 'value' in param_dict:
- param_dict[ param_dict['param'] ] = param_dict['value']
-
- # Update step if there's no step id (i.e. all steps with tool are
- # updated) or update if step ids match.
- if not step_id or ( step_id and int( step_id ) == step.id ):
- for name, value in param_dict.items():
- step.state.inputs[ name ] = value
-
+ _update_step_parameters(step, param_map)
if step.tool_errors:
trans.response.status = 400
return "Workflow cannot be run because of validation errors in some steps: %s" % step_errors
https://bitbucket.org/galaxy/galaxy-central/commits/0f636b29e4f5/
Changeset: 0f636b29e4f5
User: jmchilton
Date: 2014-02-03 00:54:28
Summary: Workflow API functional testing - parameter replacement.
Test parameter replacement while running workflows - both by tool id (deprecated) and new post pull request #317 replacement by step id.
Affected #: 3 files
diff -r 19bff78d5c91db39a34cc18ea467faf7e5129736 -r 0f636b29e4f58d1568e8658dcae57e3c45a14563 test/functional/api/helpers.py
--- a/test/functional/api/helpers.py
+++ b/test/functional/api/helpers.py
@@ -3,7 +3,11 @@
from json import loads
from pkg_resources import resource_string
+# Simple workflow that takes an input and calls the cat wrapper on it.
workflow_str = resource_string( __name__, "test_workflow_1.ga" )
+# Simple workflow that takes an input and filters it with random lines twice in a
+# row - first grabbing 8 lines at random and then 6.
+workflow_random_x2_str = resource_string( __name__, "test_workflow_2.ga" )
class TestsDatasets:
@@ -68,8 +72,8 @@
def __init__( self, api_test_case ):
self.api_test_case = api_test_case
- def load_workflow( self, name, add_pja=False ):
- workflow = loads( workflow_str )
+ def load_workflow( self, name, content=workflow_str, add_pja=False ):
+ workflow = loads( content )
workflow[ "name" ] = name
if add_pja:
tool_step = workflow[ "steps" ][ "2" ]
@@ -80,6 +84,9 @@
)
return workflow
+ def load_random_x2_workflow( self, name ):
+ return self.load_workflow( name, content=workflow_random_x2_str )
+
def simple_workflow( self, name, **create_kwds ):
workflow = self.load_workflow( name )
return self.create_workflow( workflow, **create_kwds )
diff -r 19bff78d5c91db39a34cc18ea467faf7e5129736 -r 0f636b29e4f58d1568e8658dcae57e3c45a14563 test/functional/api/test_workflow_2.ga
--- /dev/null
+++ b/test/functional/api/test_workflow_2.ga
@@ -0,0 +1,89 @@
+{
+ "a_galaxy_workflow": "true",
+ "annotation": "",
+ "format-version": "0.1",
+ "name": "random_lines_x2",
+ "steps": {
+ "0": {
+ "annotation": "",
+ "id": 0,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "",
+ "name": "Input Dataset"
+ }
+ ],
+ "name": "Input dataset",
+ "outputs": [],
+ "position": {
+ "left": 10,
+ "top": 10
+ },
+ "tool_errors": null,
+ "tool_id": null,
+ "tool_state": "{\"name\": \"Input Dataset\"}",
+ "tool_version": null,
+ "type": "data_input",
+ "user_outputs": []
+ },
+ "1": {
+ "annotation": "",
+ "id": 1,
+ "input_connections": {
+ "input": {
+ "id": 0,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "name": "Select random lines",
+ "outputs": [
+ {
+ "name": "out_file1",
+ "type": "input"
+ }
+ ],
+ "position": {
+ "left": 230,
+ "top": 10
+ },
+ "post_job_actions": {},
+ "tool_errors": null,
+ "tool_id": "random_lines1",
+ "tool_state": "{\"__page__\": 0, \"num_lines\": \"\\\"8\\\"\", \"seed_source\": \"{\\\"__current_case__\\\": 0, \\\"seed_source_selector\\\": \\\"no_seed\\\"}\", \"input\": \"null\", \"chromInfo\": \"\\\"/home/john/workspace/galaxy-central-workflows-params/tool-data/shared/ucsc/chrom/?.len\\\"\", \"__rerun_remap_job_id__\": null}",
+ "tool_version": null,
+ "type": "tool",
+ "user_outputs": []
+ },
+ "2": {
+ "annotation": "",
+ "id": 2,
+ "input_connections": {
+ "input": {
+ "id": 1,
+ "output_name": "out_file1"
+ }
+ },
+ "inputs": [],
+ "name": "Select random lines",
+ "outputs": [
+ {
+ "name": "out_file1",
+ "type": "input"
+ }
+ ],
+ "position": {
+ "left": 450,
+ "top": 10
+ },
+ "post_job_actions": {},
+ "tool_errors": null,
+ "tool_id": "random_lines1",
+ "tool_state": "{\"__page__\": 0, \"num_lines\": \"\\\"6\\\"\", \"seed_source\": \"{\\\"__current_case__\\\": 0, \\\"seed_source_selector\\\": \\\"no_seed\\\"}\", \"input\": \"null\", \"chromInfo\": \"\\\"/home/john/workspace/galaxy-central-workflows-params/tool-data/shared/ucsc/chrom/?.len\\\"\", \"__rerun_remap_job_id__\": null}",
+ "tool_version": null,
+ "type": "tool",
+ "user_outputs": []
+ }
+ }
+}
\ No newline at end of file
diff -r 19bff78d5c91db39a34cc18ea467faf7e5129736 -r 0f636b29e4f58d1568e8658dcae57e3c45a14563 test/functional/api/test_workflows.py
--- a/test/functional/api/test_workflows.py
+++ b/test/functional/api/test_workflows.py
@@ -67,6 +67,31 @@
self._assert_status_code_is( run_workflow_response, 200 )
self._wait_for_history( history_id, assert_ok=True )
+ def test_run_replace_params_by_tool( self ):
+ workflow_request, history_id = self._setup_random_x2_workflow( "test_for_replace_tool_params" )
+ workflow_request[ "parameters" ] = dumps( dict( random_lines1=dict( num_lines=5 ) ) )
+ run_workflow_response = self._post( "workflows", data=workflow_request )
+ self._assert_status_code_is( run_workflow_response, 200 )
+ self._wait_for_history( history_id, assert_ok=True )
+ # Would be 8 and 6 without modification
+ self.__assert_lines_hid_line_count_is( history_id, 2, 5 )
+ self.__assert_lines_hid_line_count_is( history_id, 3, 5 )
+
+ def test_run_replace_params_by_steps( self ):
+ workflow_request, history_id = self._setup_random_x2_workflow( "test_for_replace_step_params" )
+ workflow_summary_response = self._get( "workflows/%s" % workflow_request[ "workflow_id" ] )
+ self._assert_status_code_is( workflow_summary_response, 200 )
+ steps = workflow_summary_response.json()[ "steps" ]
+ last_step_id = str( max( map( int, steps.keys() ) ) )
+ params = dumps( { last_step_id: dict( num_lines=5 ) } )
+ workflow_request[ "parameters" ] = params
+ run_workflow_response = self._post( "workflows", data=workflow_request )
+ self._assert_status_code_is( run_workflow_response, 200 )
+ self._wait_for_history( history_id, assert_ok=True )
+ # Would be 8 and 6 without modification
+ self.__assert_lines_hid_line_count_is( history_id, 2, 8 )
+ self.__assert_lines_hid_line_count_is( history_id, 3, 5 )
+
def test_pja_import_export( self ):
workflow = self.workflow_populator.load_workflow( name="test_for_pja_import", add_pja=True )
uploaded_workflow_id = self.workflow_populator.create_workflow( workflow )
@@ -94,9 +119,7 @@
def _setup_workflow_run( self, workflow ):
uploaded_workflow_id = self.workflow_populator.create_workflow( workflow )
- workflow_show_resposne = self._get( "workflows/%s" % uploaded_workflow_id )
- self._assert_status_code_is( workflow_show_resposne, 200 )
- workflow_inputs = workflow_show_resposne.json()[ "inputs" ]
+ workflow_inputs = self._workflow_inputs( uploaded_workflow_id )
step_1 = step_2 = None
for key, value in workflow_inputs.iteritems():
label = value[ "label" ]
@@ -117,6 +140,29 @@
)
return workflow_request, history_id
+ def _setup_random_x2_workflow( self, name ):
+ workflow = self.workflow_populator.load_random_x2_workflow( name )
+ uploaded_workflow_id = self.workflow_populator.create_workflow( workflow )
+ workflow_inputs = self._workflow_inputs( uploaded_workflow_id )
+ key = workflow_inputs.keys()[ 0 ]
+ history_id = self._new_history()
+ ten_lines = "\n".join( map( str, range( 10 ) ) )
+ hda1 = self._new_dataset( history_id, content=ten_lines )
+ workflow_request = dict(
+ history="hist_id=%s" % history_id,
+ workflow_id=uploaded_workflow_id,
+ ds_map=dumps( {
+ key: self._ds_entry(hda1),
+ } ),
+ )
+ return workflow_request, history_id
+
+ def _workflow_inputs( self, uploaded_workflow_id ):
+ workflow_show_resposne = self._get( "workflows/%s" % uploaded_workflow_id )
+ self._assert_status_code_is( workflow_show_resposne, 200 )
+ workflow_inputs = workflow_show_resposne.json()[ "inputs" ]
+ return workflow_inputs
+
def _ds_entry( self, hda ):
return dict( src="hda", id=hda[ "id" ] )
@@ -124,6 +170,15 @@
names = self.__workflow_names()
assert name in names, "No workflows with name %s in users workflows <%s>" % ( name, names )
+ def __assert_lines_hid_line_count_is( self, history, hid, lines ):
+ contents_url = "histories/%s/contents" % history
+ history_contents_response = self._get( contents_url )
+ self._assert_status_code_is( history_contents_response, 200 )
+ hda_summary = filter( lambda hc: hc[ "hid" ] == hid, history_contents_response.json() )[ 0 ]
+ hda_info_response = self._get( "%s/%s" % ( contents_url, hda_summary[ "id" ] ) )
+ self._assert_status_code_is( hda_info_response, 200 )
+ self.assertEquals( hda_info_response.json()[ "metadata_data_lines" ], lines )
+
def __workflow_names( self ):
index_response = self._get( "workflows" )
self._assert_status_code_is( index_response, 200 )
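A hedged sketch of exercising this parameter replacement from outside the test framework (host, key, and ids are placeholders; the payload shape matches the tests above):

    import json
    import requests

    base, key = "http://localhost:8080/api", "YOUR_API_KEY"
    data = {
        "workflow_id": "WORKFLOW_ID",            # hypothetical encoded id
        "history": "hist_id=HISTORY_ID",         # hypothetical encoded id
        "ds_map": json.dumps({"0": {"src": "hda", "id": "HDA_ID"}}),
        "parameters": json.dumps({"2": {"num_lines": 5}}),  # by step id
    }
    response = requests.post("%s/workflows?key=%s" % (base, key), data=data)
    assert response.status_code == 200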
https://bitbucket.org/galaxy/galaxy-central/commits/0617610e18a6/
Changeset: 0617610e18a6
Branch: workflow_params
User: jmchilton
Date: 2014-02-03 00:55:31
Summary: Close branch workflow_params.
Affected #: 0 files
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
7 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/5f373922d077/
Changeset: 5f373922d077
User: jmchilton
Date: 2014-01-29 15:42:39
Summary: Move some history export logic out of controller into model...
For reuse by API.
Affected #: 3 files
diff -r 6b627e15f18b13a489ca4e4343703931d9333021 -r 5f373922d0773eccab37f51145b8e880760e86b4 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -647,6 +647,7 @@
return self.library_dataset_dataset_association
return None
+
class JobExportHistoryArchive( object ):
def __init__( self, job=None, history=None, dataset=None, compressed=False, \
history_attrs_filename=None, datasets_attrs_filename=None,
@@ -659,6 +660,35 @@
self.datasets_attrs_filename = datasets_attrs_filename
self.jobs_attrs_filename = jobs_attrs_filename
+ @property
+ def up_to_date( self ):
+ """ Return False, if a new export should be generated for corresponding
+ history.
+ """
+ job = self.job
+ return job.state not in [ Job.states.ERROR, Job.states.DELETED ] \
+ and job.update_time > self.history.update_time
+
+ @property
+ def ready( self ):
+ return self.job.state == Job.states.OK
+
+ @property
+ def preparing( self ):
+ return self.job.state in [ Job.states.RUNNING, Job.states.QUEUED, Job.states.WAITING ]
+
+ @property
+ def export_name( self ):
+ # Stream archive.
+ valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ hname = self.history.name
+ hname = ''.join(c in valid_chars and c or '_' for c in hname)[0:150]
+ hname = "Galaxy-History-%s.tar" % ( hname )
+ if self.compressed:
+ hname += ".gz"
+ return hname
+
+
class JobImportHistoryArchive( object ):
def __init__( self, job=None, history=None, archive_dir=None ):
self.job = job
@@ -875,6 +905,11 @@
return changed
@property
+ def latest_export( self ):
+ exports = self.exports
+ return exports and exports[ 0 ]
+
+ @property
def get_disk_size_bytes( self ):
return self.get_disk_size( nice_size=False )
diff -r 6b627e15f18b13a489ca4e4343703931d9333021 -r 5f373922d0773eccab37f51145b8e880760e86b4 lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py
+++ b/lib/galaxy/model/mapping.py
@@ -1295,6 +1295,7 @@
mapper( model.History, model.History.table,
properties=dict( galaxy_sessions=relation( model.GalaxySessionToHistoryAssociation ),
datasets=relation( model.HistoryDatasetAssociation, backref="history", order_by=asc(model.HistoryDatasetAssociation.table.c.hid) ),
+ exports=relation( model.JobExportHistoryArchive, primaryjoin=( model.JobExportHistoryArchive.table.c.history_id == model.History.table.c.id ), order_by=desc( model.JobExportHistoryArchive.table.c.id ) ),
active_datasets=relation( model.HistoryDatasetAssociation, primaryjoin=( ( model.HistoryDatasetAssociation.table.c.history_id == model.History.table.c.id ) & not_( model.HistoryDatasetAssociation.table.c.deleted ) ), order_by=asc( model.HistoryDatasetAssociation.table.c.hid ), viewonly=True ),
visible_datasets=relation( model.HistoryDatasetAssociation, primaryjoin=( ( model.HistoryDatasetAssociation.table.c.history_id == model.History.table.c.id ) & not_( model.HistoryDatasetAssociation.table.c.deleted ) & model.HistoryDatasetAssociation.table.c.visible ),
order_by=asc( model.HistoryDatasetAssociation.table.c.hid ), viewonly=True ),
diff -r 6b627e15f18b13a489ca4e4343703931d9333021 -r 5f373922d0773eccab37f51145b8e880760e86b4 lib/galaxy/webapps/galaxy/controllers/history.py
--- a/lib/galaxy/webapps/galaxy/controllers/history.py
+++ b/lib/galaxy/webapps/galaxy/controllers/history.py
@@ -704,24 +704,17 @@
#
# If history has already been exported and it has not changed since export, stream it.
#
- jeha = trans.sa_session.query( model.JobExportHistoryArchive ).filter_by( history=history ) \
- .order_by( model.JobExportHistoryArchive.id.desc() ).first()
- if jeha and ( jeha.job.state not in [ model.Job.states.ERROR, model.Job.states.DELETED ] ) \
- and jeha.job.update_time > history.update_time:
- if jeha.job.state == model.Job.states.OK:
- # Stream archive.
- valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
- hname = history.name
- hname = ''.join(c in valid_chars and c or '_' for c in hname)[0:150]
- hname = "Galaxy-History-%s.tar" % ( hname )
+ jeha = history.latest_export
+ if jeha and jeha.up_to_date:
+ if jeha.ready:
if jeha.compressed:
- hname += ".gz"
trans.response.set_content_type( 'application/x-gzip' )
else:
trans.response.set_content_type( 'application/x-tar' )
- trans.response.headers["Content-Disposition"] = 'attachment; filename="%s"' % ( hname )
+ disposition = 'attachment; filename="%s"' % jeha.export_name
+ trans.response.headers["Content-Disposition"] = disposition
return open( trans.app.object_store.get_filename( jeha.dataset ) )
- elif jeha.job.state in [ model.Job.states.RUNNING, model.Job.states.QUEUED, model.Job.states.WAITING ]:
+ elif jeha.preparing:
return trans.show_message( "Still exporting history %(n)s; please check back soon. Link: <a href='%(s)s'>%(s)s</a>" \
% ( { 'n' : history.name, 's' : url_for( controller='history', action="export_archive", id=id, qualified=True ) } ) )
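A standalone sketch of the filename sanitization that export_name centralizes above (written as a plain function for illustration; the shipped code is a model property):

    def export_name(history_name, compressed=False):
        valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyz' \
                      'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        # Replace anything outside the whitelist and cap at 150 chars.
        hname = ''.join(c if c in valid_chars else '_' for c in history_name)[0:150]
        hname = "Galaxy-History-%s.tar" % hname
        if compressed:
            hname += ".gz"
        return hname

    print(export_name("My analysis: run 3", compressed=True))
    # Galaxy-History-My_analysis__run_3.tar.gz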
https://bitbucket.org/galaxy/galaxy-central/commits/e0a4bdd5b89f/
Changeset: e0a4bdd5b89f
User: jmchilton
Date: 2014-01-29 15:42:39
Summary: Refactor history export functionality into ExportsHistoryMixin...
... for reuse in API.
Affected #: 2 files
diff -r 5f373922d0773eccab37f51145b8e880760e86b4 -r e0a4bdd5b89f326b6b1c8ed17aec084b89c5e1ac lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -562,6 +562,40 @@
return changed
+class ExportsHistoryMixin:
+
+ def serve_ready_history_export( self, trans, jeha ):
+ assert jeha.ready
+ if jeha.compressed:
+ trans.response.set_content_type( 'application/x-gzip' )
+ else:
+ trans.response.set_content_type( 'application/x-tar' )
+ disposition = 'attachment; filename="%s"' % jeha.export_name
+ trans.response.headers["Content-Disposition"] = disposition
+ return open( trans.app.object_store.get_filename( jeha.dataset ) )
+
+ def queue_history_export( self, trans, history, gzip=True, include_hidden=False, include_deleted=False ):
+ # Convert options to booleans.
+ #
+ if isinstance( gzip, basestring ):
+ gzip = ( gzip in [ 'True', 'true', 'T', 't' ] )
+ if isinstance( include_hidden, basestring ):
+ include_hidden = ( include_hidden in [ 'True', 'true', 'T', 't' ] )
+ if isinstance( include_deleted, basestring ):
+ include_deleted = ( include_deleted in [ 'True', 'true', 'T', 't' ] )
+
+ # Run job to do export.
+ history_exp_tool = trans.app.toolbox.get_tool( '__EXPORT_HISTORY__' )
+ params = {
+ 'history_to_export': history,
+ 'compress': gzip,
+ 'include_hidden': include_hidden,
+ 'include_deleted': include_deleted
+ }
+
+ history_exp_tool.execute( trans, incoming=params, set_output_hid=True )
+
+
class UsesHistoryDatasetAssociationMixin:
"""
Mixin for controllers that use HistoryDatasetAssociation objects.
diff -r 5f373922d0773eccab37f51145b8e880760e86b4 -r e0a4bdd5b89f326b6b1c8ed17aec084b89c5e1ac lib/galaxy/webapps/galaxy/controllers/history.py
--- a/lib/galaxy/webapps/galaxy/controllers/history.py
+++ b/lib/galaxy/webapps/galaxy/controllers/history.py
@@ -13,6 +13,7 @@
from galaxy.util.sanitize_html import sanitize_html
from galaxy.web import error, url_for
from galaxy.web.base.controller import BaseUIController, SharableMixin, UsesHistoryDatasetAssociationMixin, UsesHistoryMixin
+from galaxy.web.base.controller import ExportsHistoryMixin
from galaxy.web.base.controller import ERROR, INFO, SUCCESS, WARNING
from galaxy.web.framework.helpers import grids, iff, time_ago
@@ -187,7 +188,7 @@
return query.filter( self.model_class.published == True ).filter( self.model_class.slug != None ).filter( self.model_class.deleted == False )
class HistoryController( BaseUIController, SharableMixin, UsesAnnotations, UsesItemRatings,
- UsesHistoryMixin, UsesHistoryDatasetAssociationMixin ):
+ UsesHistoryMixin, UsesHistoryDatasetAssociationMixin, ExportsHistoryMixin ):
@web.expose
def index( self, trans ):
return ""
@@ -679,16 +680,6 @@
def export_archive( self, trans, id=None, gzip=True, include_hidden=False, include_deleted=False ):
""" Export a history to an archive. """
#
- # Convert options to booleans.
- #
- if isinstance( gzip, basestring ):
- gzip = ( gzip in [ 'True', 'true', 'T', 't' ] )
- if isinstance( include_hidden, basestring ):
- include_hidden = ( include_hidden in [ 'True', 'true', 'T', 't' ] )
- if isinstance( include_deleted, basestring ):
- include_deleted = ( include_deleted in [ 'True', 'true', 'T', 't' ] )
-
- #
# Get history to export.
#
if id:
@@ -707,25 +698,12 @@
jeha = history.latest_export
if jeha and jeha.up_to_date:
if jeha.ready:
- if jeha.compressed:
- trans.response.set_content_type( 'application/x-gzip' )
- else:
- trans.response.set_content_type( 'application/x-tar' )
- disposition = 'attachment; filename="%s"' % jeha.export_name
- trans.response.headers["Content-Disposition"] = disposition
- return open( trans.app.object_store.get_filename( jeha.dataset ) )
+ return self.serve_ready_history_export( trans, jeha )
elif jeha.preparing:
return trans.show_message( "Still exporting history %(n)s; please check back soon. Link: <a href='%(s)s'>%(s)s</a>" \
% ( { 'n' : history.name, 's' : url_for( controller='history', action="export_archive", id=id, qualified=True ) } ) )
- # Run job to do export.
- history_exp_tool = trans.app.toolbox.get_tool( '__EXPORT_HISTORY__' )
- params = {
- 'history_to_export' : history,
- 'compress' : gzip,
- 'include_hidden' : include_hidden,
- 'include_deleted' : include_deleted }
- history_exp_tool.execute( trans, incoming = params, set_output_hid = True )
+ self.queue_history_export( trans, history, gzip=gzip, include_hidden=include_hidden, include_deleted=include_deleted )
url = url_for( controller='history', action="export_archive", id=id, qualified=True )
return trans.show_message( "Exporting History '%(n)s'. Use this link to download \
the archive or import it to another Galaxy server: \
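The kwarg coercion moved into queue_history_export matters because web-layer parameters arrive as strings. A tiny hedged sketch of that string-to-boolean convention (standalone; mirrors the hunk above):

    def as_bool(value):
        # Values that are already boolean pass through untouched.
        if isinstance(value, str):
            return value in ['True', 'true', 'T', 't']
        return value

    print(as_bool('true'), as_bool('False'), as_bool(True))  # True False True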
https://bitbucket.org/galaxy/galaxy-central/commits/3a3561fa8df3/
Changeset: 3a3561fa8df3
User: jmchilton
Date: 2014-01-29 15:42:41
Summary: Refactor history import functionality into ImportsHistoryMixin...
... for reuse in API.
Affected #: 2 files
diff -r e0a4bdd5b89f326b6b1c8ed17aec084b89c5e1ac -r 3a3561fa8df334b43eff9ce0d648b38932bb27e6 lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -596,6 +596,15 @@
history_exp_tool.execute( trans, incoming=params, set_output_hid=True )
+class ImportsHistoryMixin:
+
+ def queue_history_import( self, trans, archive_type, archive_source ):
+ # Run job to do import.
+ history_imp_tool = trans.app.toolbox.get_tool( '__IMPORT_HISTORY__' )
+ incoming = { '__ARCHIVE_SOURCE__' : archive_source, '__ARCHIVE_TYPE__' : archive_type }
+ history_imp_tool.execute( trans, incoming=incoming )
+
+
class UsesHistoryDatasetAssociationMixin:
"""
Mixin for controllers that use HistoryDatasetAssociation objects.
diff -r e0a4bdd5b89f326b6b1c8ed17aec084b89c5e1ac -r 3a3561fa8df334b43eff9ce0d648b38932bb27e6 lib/galaxy/webapps/galaxy/controllers/history.py
--- a/lib/galaxy/webapps/galaxy/controllers/history.py
+++ b/lib/galaxy/webapps/galaxy/controllers/history.py
@@ -14,6 +14,7 @@
from galaxy.web import error, url_for
from galaxy.web.base.controller import BaseUIController, SharableMixin, UsesHistoryDatasetAssociationMixin, UsesHistoryMixin
from galaxy.web.base.controller import ExportsHistoryMixin
+from galaxy.web.base.controller import ImportsHistoryMixin
from galaxy.web.base.controller import ERROR, INFO, SUCCESS, WARNING
from galaxy.web.framework.helpers import grids, iff, time_ago
@@ -188,7 +189,8 @@
return query.filter( self.model_class.published == True ).filter( self.model_class.slug != None ).filter( self.model_class.deleted == False )
class HistoryController( BaseUIController, SharableMixin, UsesAnnotations, UsesItemRatings,
- UsesHistoryMixin, UsesHistoryDatasetAssociationMixin, ExportsHistoryMixin ):
+ UsesHistoryMixin, UsesHistoryDatasetAssociationMixin, ExportsHistoryMixin,
+ ImportsHistoryMixin ):
@web.expose
def index( self, trans ):
return ""
@@ -669,10 +671,7 @@
# TODO: add support for importing via a file.
#.add_input( "file", "Archived History File", "archive_file", value=None, error=None )
)
- # Run job to do import.
- history_imp_tool = trans.app.toolbox.get_tool( '__IMPORT_HISTORY__' )
- incoming = { '__ARCHIVE_SOURCE__' : archive_source, '__ARCHIVE_TYPE__' : archive_type }
- history_imp_tool.execute( trans, incoming=incoming )
+ self.queue_history_import( trans, archive_type=archive_type, archive_source=archive_source )
return trans.show_message( "Importing history from '%s'. \
This history will be visible when the import is complete" % archive_source )
https://bitbucket.org/galaxy/galaxy-central/commits/17f1d214f8a4/
Changeset: 17f1d214f8a4
User: jmchilton
Date: 2014-01-29 15:42:42
Summary: Add new API decorator for raw downloads.
Affected #: 2 files
diff -r 3a3561fa8df334b43eff9ce0d648b38932bb27e6 -r 17f1d214f8a44995f082e0fd32d3801edef02303 lib/galaxy/web/__init__.py
--- a/lib/galaxy/web/__init__.py
+++ b/lib/galaxy/web/__init__.py
@@ -19,3 +19,4 @@
# TODO: Drop and make these the default.
from framework import _future_expose_api
from framework import _future_expose_api_anonymous
+from framework import _future_expose_api_raw
diff -r 3a3561fa8df334b43eff9ce0d648b38932bb27e6 -r 17f1d214f8a44995f082e0fd32d3801edef02303 lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py
+++ b/lib/galaxy/web/framework/__init__.py
@@ -274,6 +274,10 @@
return _future_expose_api( func, to_json=to_json, user_required=False )
+def _future_expose_api_raw( func ):
+ return _future_expose_api( func, to_json=False, user_required=True )
+
+
# TODO: rename as expose_api and make default.
def _future_expose_api( func, to_json=True, user_required=True ):
"""
https://bitbucket.org/galaxy/galaxy-central/commits/ea320e650645/
Changeset: ea320e650645
User: jmchilton
Date: 2014-01-29 15:42:42
Summary: Implement history exporting via API.
Two new routes: one that can be PUT to poll for a download_url, and one to fetch a completed download via GET. Includes a simple test case. This doesn't cleanly map to REST to my mind, so I have cobbled together this approach; happy for further input.
Good reading:
https://restful-api-design.readthedocs.org/en/latest/methods.html#asynchron…
http://stackoverflow.com/questions/4099869/is-it-wrong-to-return-202-accept…
Affected #: 5 files
diff -r 17f1d214f8a44995f082e0fd32d3801edef02303 -r ea320e650645a2512b1ac9ab7c35a8cec4ef2bda lib/galaxy/tools/actions/history_imp_exp.py
--- a/lib/galaxy/tools/actions/history_imp_exp.py
+++ b/lib/galaxy/tools/actions/history_imp_exp.py
@@ -14,7 +14,8 @@
# Create job.
#
job = trans.app.model.Job()
- job.session_id = trans.get_galaxy_session().id
+ session = trans.get_galaxy_session()
+ job.session_id = session and session.id
job.history_id = trans.history.id
job.tool_id = tool.id
job.user_id = trans.user.id
@@ -76,8 +77,13 @@
# Create the job and output dataset objects
#
job = trans.app.model.Job()
- job.session_id = trans.get_galaxy_session().id
- job.history_id = trans.history.id
+ session = trans.get_galaxy_session()
+ job.session_id = session and session.id
+ if history:
+ history_id = history.id
+ else:
+ history_id = trans.history.id
+ job.history_id = history_id
job.tool_id = tool.id
if trans.user:
# If this is an actual user, run the job as that individual. Otherwise we're running as guest.
diff -r 17f1d214f8a44995f082e0fd32d3801edef02303 -r ea320e650645a2512b1ac9ab7c35a8cec4ef2bda lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -593,7 +593,7 @@
'include_deleted': include_deleted
}
- history_exp_tool.execute( trans, incoming=params, set_output_hid=True )
+ history_exp_tool.execute( trans, incoming=params, history=history, set_output_hid=True )
class ImportsHistoryMixin:
diff -r 17f1d214f8a44995f082e0fd32d3801edef02303 -r ea320e650645a2512b1ac9ab7c35a8cec4ef2bda lib/galaxy/webapps/galaxy/api/histories.py
--- a/lib/galaxy/webapps/galaxy/api/histories.py
+++ b/lib/galaxy/webapps/galaxy/api/histories.py
@@ -8,21 +8,25 @@
pkg_resources.require( "Paste" )
from paste.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPException
+from galaxy import exceptions
from galaxy import web
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
+from galaxy.web import _future_expose_api_raw as expose_api_raw
from galaxy.util import string_as_bool
from galaxy.util import restore_text
from galaxy.web.base.controller import BaseAPIController
from galaxy.web.base.controller import UsesHistoryMixin
from galaxy.web.base.controller import UsesTagsMixin
+from galaxy.web.base.controller import ExportsHistoryMixin
from galaxy.web import url_for
import logging
log = logging.getLogger( __name__ )
-class HistoriesController( BaseAPIController, UsesHistoryMixin, UsesTagsMixin ):
+class HistoriesController( BaseAPIController, UsesHistoryMixin, UsesTagsMixin,
+ ExportsHistoryMixin ):
@expose_api_anonymous
def index( self, trans, deleted='False', **kwd ):
@@ -329,6 +333,67 @@
return changed
+ @expose_api
+ def archive_export( self, trans, id, **kwds ):
+ """
+ export_archive( self, trans, id, payload )
+ * PUT /api/histories/{id}/exports:
+ start job (if needed) to create history export for corresponding
+ history.
+
+ :type id: str
+ :param id: the encoded id of the history to undelete
+
+ :rtype: dict
+ :returns: object containing url to fetch export from.
+ """
+ # PUT instead of POST because multiple requests should just result
+ # in one object being created.
+ history_id = id
+ history = self.get_history( trans, history_id, check_ownership=False, check_accessible=True )
+ jeha = history.latest_export
+ up_to_date = jeha and jeha.up_to_date
+ if not up_to_date:
+ # Need to create new JEHA + job.
+ gzip = kwds.get( "gzip", True )
+ include_hidden = kwds.get( "include_hidden", False )
+ include_deleted = kwds.get( "include_deleted", False )
+ self.queue_history_export( trans, history, gzip=gzip, include_hidden=include_hidden, include_deleted=include_deleted )
+
+ if up_to_date and jeha.ready:
+ jeha_id = trans.security.encode_id( jeha.id )
+ return dict( download_url=url_for( "history_archive_download", id=history_id, jeha_id=jeha_id ) )
+ else:
+ # Valid request, just resource is not ready yet.
+ trans.response.status = "202 Accepted"
+ return ''
+
+ @expose_api_raw
+ def archive_download( self, trans, id, jeha_id, **kwds ):
+ """
+ export_download( self, trans, id, jeha_id )
+ * GET /api/histories/{id}/exports/{jeha_id}:
+ If ready and available, return raw contents of exported history.
+ Use/poll "PUT /api/histories/{id}/exports" to initiate the creation
+ of such an export - when ready that route will return 200 status
+ code (instead of 202) with a JSON dictionary containing a
+ `download_url`.
+ """
+        # Seems silly to put jeha_id in here, but we want GET to be immutable,
+        # and this is being accomplished this way.
+ history = self.get_history( trans, id, check_ownership=False, check_accessible=True )
+ matching_exports = filter( lambda e: trans.security.encode_id( e.id ) == jeha_id, history.exports )
+ if not matching_exports:
+ raise exceptions.ObjectNotFound()
+
+ jeha = matching_exports[ 0 ]
+ if not jeha.ready:
+            # User should not have been given this URL; PUT export should have
+            # returned a 202.
+ raise exceptions.MessageException( "Export not available or not yet ready." )
+
+ return self.serve_ready_history_export( trans, jeha )
+
def _validate_and_parse_update_payload( self, payload ):
"""
Validate and parse incoming data payload for a history.
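For context, a minimal client sketch of the intended PUT-then-GET flow, using the requests library; the base_url, api_key, and history_id values below are hypothetical placeholders:

    import time
    import requests

    base_url = "https://galaxy.example.org"    # hypothetical Galaxy instance
    api_key = "0123456789abcdef"               # hypothetical API key
    history_id = "f597429621d6eb2b"            # hypothetical encoded history id
    export_url = "%s/api/histories/%s/exports" % ( base_url, history_id )
    params = dict( key=api_key )

    # PUT is idempotent here: repeated requests reuse a single export job.
    response = requests.put( export_url, params=params )
    while response.status_code == 202:
        # 202 Accepted means the export is still being prepared.
        time.sleep( 1 )
        response = requests.put( export_url, params=params )

    # A 200 response carries a JSON dictionary with the download URL.
    download_url = response.json()[ "download_url" ]
    archive = requests.get( base_url + download_url, params=params )
    archive.raise_for_status()   # archive.content now holds the raw tar(.gz)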
diff -r 17f1d214f8a44995f082e0fd32d3801edef02303 -r ea320e650645a2512b1ac9ab7c35a8cec4ef2bda lib/galaxy/webapps/galaxy/buildapp.py
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -154,6 +154,11 @@
webapp.mapper.connect( "set_as_current", "/api/histories/{id}/set_as_current",
controller="histories", action="set_as_current", conditions=dict( method=["POST"] ) )
+ webapp.mapper.connect( "history_archive_export", "/api/histories/{id}/exports",
+ controller="histories", action="archive_export", conditions=dict( method=[ "PUT" ] ) )
+ webapp.mapper.connect( "history_archive_download", "/api/histories/{id}/exports/{jeha_id}",
+ controller="histories", action="archive_download", conditions=dict( method=[ "GET" ] ) )
+
webapp.mapper.connect( "create_api_key", "/api/users/:user_id/api_key",
controller="users", action="api_key", user_id=None, conditions=dict( method=["POST"] ) )
diff -r 17f1d214f8a44995f082e0fd32d3801edef02303 -r ea320e650645a2512b1ac9ab7c35a8cec4ef2bda test/functional/api/test_histories.py
--- a/test/functional/api/test_histories.py
+++ b/test/functional/api/test_histories.py
@@ -1,9 +1,14 @@
+import time
from base import api
-# requests.post or something like it if unavailable
+# requests.{post,put,get} or something like it if unavailable
from base.interactor import post_request
+from base.interactor import put_request
+from base.interactor import get_request
+from .helpers import TestsDatasets
-class HistoriesApiTestCase( api.ApiTestCase ):
+
+class HistoriesApiTestCase( api.ApiTestCase, TestsDatasets ):
def test_create_history( self ):
# Create a history.
@@ -24,3 +29,24 @@
histories_url = self._api_url( "histories" )
create_response = post_request( url=histories_url, data=post_data )
self._assert_status_code_is( create_response, 403 )
+
+ def test_export( self ):
+ history_id = self._new_history()
+ self._new_dataset( history_id, content="1 2 3" )
+ self._wait_for_history( history_id, assert_ok=True )
+ export_url = self._api_url( "histories/%s/exports" % history_id , use_key=True )
+ put_response = put_request( export_url )
+ self._assert_status_code_is( put_response, 202 )
+ while True:
+ put_response = put_request( export_url )
+ if put_response.status_code == 202:
+ time.sleep( .1 )
+ else:
+ break
+ self._assert_status_code_is( put_response, 200 )
+ response = put_response.json()
+ self._assert_has_keys( response, "download_url" )
+ download_path = response[ "download_url" ]
+ full_download_url = "%s%s?key=%s" % ( self.url, download_path, self.galaxy_interactor.api_key )
+ download_response = get_request( full_download_url )
+ self._assert_status_code_is( download_response, 200 )
https://bitbucket.org/galaxy/galaxy-central/commits/f456b2eaf3dc/
Changeset: f456b2eaf3dc
User: jmchilton
Date: 2014-01-29 15:42:42
Summary: Implement history import via API.
Affected #: 3 files
diff -r ea320e650645a2512b1ac9ab7c35a8cec4ef2bda -r f456b2eaf3dc61df52d710f710590d37b31386ca lib/galaxy/tools/actions/history_imp_exp.py
--- a/lib/galaxy/tools/actions/history_imp_exp.py
+++ b/lib/galaxy/tools/actions/history_imp_exp.py
@@ -16,7 +16,13 @@
job = trans.app.model.Job()
session = trans.get_galaxy_session()
job.session_id = session and session.id
- job.history_id = trans.history.id
+ if history:
+ history_id = history.id
+ elif trans.history:
+ history_id = trans.history.id
+ else:
+ history_id = None
+ job.history_id = history_id
job.tool_id = tool.id
job.user_id = trans.user.id
start_job_state = job.state #should be job.states.NEW
diff -r ea320e650645a2512b1ac9ab7c35a8cec4ef2bda -r f456b2eaf3dc61df52d710f710590d37b31386ca lib/galaxy/webapps/galaxy/api/histories.py
--- a/lib/galaxy/webapps/galaxy/api/histories.py
+++ b/lib/galaxy/webapps/galaxy/api/histories.py
@@ -19,6 +19,7 @@
from galaxy.web.base.controller import UsesHistoryMixin
from galaxy.web.base.controller import UsesTagsMixin
from galaxy.web.base.controller import ExportsHistoryMixin
+from galaxy.web.base.controller import ImportsHistoryMixin
from galaxy.web import url_for
import logging
@@ -26,7 +27,7 @@
class HistoriesController( BaseAPIController, UsesHistoryMixin, UsesTagsMixin,
- ExportsHistoryMixin ):
+ ExportsHistoryMixin, ImportsHistoryMixin ):
@expose_api_anonymous
def index( self, trans, deleted='False', **kwd ):
@@ -176,6 +177,9 @@
:rtype: dict
:returns: element view of new history
"""
+ if self.__create_via_import( payload ):
+ return self.__import_archive( trans, payload )
+
hist_name = None
if payload.get( 'name', None ):
hist_name = restore_text( payload['name'] )
@@ -394,6 +398,14 @@
return self.serve_ready_history_export( trans, jeha )
+ def __create_via_import( self, payload ):
+ return "archive_source" in payload
+
+ def __import_archive( self, trans, payload ):
+ archive_type = payload.get( "archive_type", "url" )
+ archive_source = payload[ "archive_source" ]
+ self.queue_history_import( trans, archive_type=archive_type, archive_source=archive_source )
+
def _validate_and_parse_update_payload( self, payload ):
"""
Validate and parse incoming data payload for a history.
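A minimal sketch of how a client might trigger such an import, again with hypothetical base_url and api_key values and assuming the export archive is reachable by URL:

    import requests

    base_url = "https://galaxy.example.org"    # hypothetical Galaxy instance
    api_key = "0123456789abcdef"               # hypothetical API key
    payload = dict(
        archive_source="https://example.org/Galaxy-History-for_export.tar.gz",  # hypothetical archive URL
        archive_type="url",
    )
    # POSTing to /api/histories with an archive_source queues the import job.
    response = requests.post( "%s/api/histories" % base_url,
                              params=dict( key=api_key ), data=payload )
    response.raise_for_status()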
diff -r ea320e650645a2512b1ac9ab7c35a8cec4ef2bda -r f456b2eaf3dc61df52d710f710590d37b31386ca test/functional/api/test_histories.py
--- a/test/functional/api/test_histories.py
+++ b/test/functional/api/test_histories.py
@@ -7,7 +7,6 @@
from .helpers import TestsDatasets
-
class HistoriesApiTestCase( api.ApiTestCase, TestsDatasets ):
def test_create_history( self ):
@@ -31,12 +30,13 @@
self._assert_status_code_is( create_response, 403 )
def test_export( self ):
- history_id = self._new_history()
+ history_id = self._new_history( name="for_export" )
self._new_dataset( history_id, content="1 2 3" )
self._wait_for_history( history_id, assert_ok=True )
export_url = self._api_url( "histories/%s/exports" % history_id , use_key=True )
put_response = put_request( export_url )
self._assert_status_code_is( put_response, 202 )
+ # TODO: Break after some period of time.
while True:
put_response = put_request( export_url )
if put_response.status_code == 202:
@@ -50,3 +50,21 @@
full_download_url = "%s%s?key=%s" % ( self.url, download_path, self.galaxy_interactor.api_key )
download_response = get_request( full_download_url )
self._assert_status_code_is( download_response, 200 )
+
+ def history_names():
+ history_index = self._get( "histories" )
+ return map( lambda h: h[ "name" ], history_index.json() )
+
+ import_name = "imported from archive: for_export"
+ assert import_name not in history_names()
+
+ import_data = dict( archive_source=full_download_url, archive_type="url" )
+ import_response = self._post( "histories", data=import_data )
+
+ self._assert_status_code_is( import_response, 200 )
+ found = False
+ while not found:
+ time.sleep( .1 )
+ if import_name in history_names():
+ found = True
+ assert found, "%s not in history names %s" % ( import_name, history_names() )
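The test above still polls without a bound, per the TODO; a minimal sketch of a bounded polling helper (the name wait_for_export is hypothetical) that would fail the test after a timeout instead of hanging:

    import time

    def wait_for_export( put_request, export_url, timeout=60, delay=0.1 ):
        # Poll the export endpoint until it stops answering 202 Accepted,
        # giving up after `timeout` seconds rather than looping forever.
        end_time = time.time() + timeout
        while time.time() < end_time:
            response = put_request( export_url )
            if response.status_code != 202:
                return response
            time.sleep( delay )
        raise AssertionError( "Export not ready after %d seconds" % timeout )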
https://bitbucket.org/galaxy/galaxy-central/commits/6788cffb792b/
Changeset: 6788cffb792b
User: jmchilton
Date: 2014-02-02 22:23:46
Summary: Introduce method ready_name_for_url.
Pulled code out of create_item_slug and placed it in util for reuse in other operations, such as creating a history export; updated history export to use this. Slugs mapped names to a significantly more restricted character set, so history export file names will now, for instance, never contain parens, commas, periods, or underscores.
A previous incarnation of this idea (not committed) also lower-cased the name - this behavior is not desired for history exports the way it is for slugs - hence .lower() was moved back to create_item_slug and the method was renamed ready_name_for_url rather than ready_name_for_slug.
Idea from Jeremy Goecks (https://bitbucket.org/galaxy/galaxy-central/pull-request/314/history-import…)
Affected #: 3 files
diff -r f456b2eaf3dc61df52d710f710590d37b31386ca -r 6788cffb792bc88bb6cfb3a15aff1d29a8c4a700 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -27,6 +27,7 @@
from galaxy.model.item_attrs import Dictifiable, UsesAnnotations
from galaxy.security import get_permitted_actions
from galaxy.util import is_multi_byte, nice_size, Params, restore_text, send_mail
+from galaxy.util import ready_name_for_url
from galaxy.util.bunch import Bunch
from galaxy.util.hash_util import new_secure_hash
from galaxy.util.directory_hash import directory_hash_id
@@ -680,9 +681,7 @@
@property
def export_name( self ):
# Stream archive.
- valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
- hname = self.history.name
- hname = ''.join(c in valid_chars and c or '_' for c in hname)[0:150]
+ hname = ready_name_for_url( self.history.name )
hname = "Galaxy-History-%s.tar" % ( hname )
if self.compressed:
hname += ".gz"
diff -r f456b2eaf3dc61df52d710f710590d37b31386ca -r 6788cffb792bc88bb6cfb3a15aff1d29a8c4a700 lib/galaxy/util/__init__.py
--- a/lib/galaxy/util/__init__.py
+++ b/lib/galaxy/util/__init__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
"""
Utility functions used systemwide.
@@ -391,6 +392,28 @@
return out
+def ready_name_for_url( raw_name ):
+ """ General method to convert a string (i.e. object name) to a URL-ready
+ slug.
+
+ >>> ready_name_for_url( "My Cool Object" )
+ 'My-Cool-Object'
+ >>> ready_name_for_url( "!My Cool Object!" )
+ 'My-Cool-Object'
+ >>> ready_name_for_url( "Hello₩◎ґʟⅾ" )
+ 'Hello'
+ """
+
+ # Replace whitespace with '-'
+ slug_base = re.sub( "\s+", "-", raw_name )
+ # Remove all non-alphanumeric characters.
+ slug_base = re.sub( "[^a-zA-Z0-9\-]", "", slug_base )
+ # Remove trailing '-'.
+ if slug_base.endswith('-'):
+ slug_base = slug_base[:-1]
+ return slug_base
+
+
def in_directory( file, directory ):
"""
Return true, if the common prefix of both is equal to directory
diff -r f456b2eaf3dc61df52d710f710590d37b31386ca -r 6788cffb792bc88bb6cfb3a15aff1d29a8c4a700 lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -2643,13 +2643,7 @@
item_name = item.name
elif hasattr( item, 'title' ):
item_name = item.title
- # Replace whitespace with '-'
- slug_base = re.sub( "\s+", "-", item_name.lower() )
- # Remove all non-alphanumeric characters.
- slug_base = re.sub( "[^a-zA-Z0-9\-]", "", slug_base )
- # Remove trailing '-'.
- if slug_base.endswith('-'):
- slug_base = slug_base[:-1]
+ slug_base = util.ready_name_for_url( item_name.lower() )
else:
slug_base = cur_slug
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jmchilton: Refactor duplicated get_display_name code out of galaxy.model.
by commits-noreply@bitbucket.org 02 Feb '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/6b627e15f18b/
Changeset: 6b627e15f18b
User: jmchilton
Date: 2014-02-02 19:47:15
Summary: Refactor duplicated get_display_name code out of galaxy.model.
Also added test cases. Need to use this downstream for dataset collections.
Affected #: 2 files
diff -r 89d7cf67241f36e5f2c4de0aa31328fb7823e7d5 -r 6b627e15f18b13a489ca4e4343703931d9333021 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -71,6 +71,19 @@
datatypes_registry = d_registry
+class HasName:
+
+ def get_display_name( self ):
+ """
+ These objects have a name attribute that can be either a string or a unicode
+ object. If it is a string, convert it to unicode assuming 'utf-8' encoding.
+ """
+ name = self.name
+ if isinstance(name, str):
+ name = unicode(name, 'utf-8')
+ return name
+
+
class User( object, Dictifiable ):
use_pbkdf2 = True
"""
@@ -716,7 +729,7 @@
self.user = user
self.group = group
-class History( object, Dictifiable, UsesAnnotations ):
+class History( object, Dictifiable, UsesAnnotations, HasName ):
dict_collection_visible_keys = ( 'id', 'name', 'published', 'deleted' )
dict_element_visible_keys = ( 'id', 'name', 'published', 'deleted', 'genome_build', 'purged' )
@@ -823,16 +836,6 @@
# This needs to be a list
return [ hda for hda in self.datasets if not hda.dataset.deleted ]
- def get_display_name( self ):
- """
- History name can be either a string or a unicode object.
- If string, convert to unicode object assuming 'utf-8' format.
- """
- history_name = self.name
- if isinstance(history_name, str):
- history_name = unicode(history_name, 'utf-8')
- return history_name
-
def to_dict( self, view='collection', value_mapper = None ):
# Get basic value.
@@ -1604,7 +1607,7 @@
return msg
-class HistoryDatasetAssociation( DatasetInstance, Dictifiable, UsesAnnotations ):
+class HistoryDatasetAssociation( DatasetInstance, Dictifiable, UsesAnnotations, HasName ):
"""
Resource class that creates a relation between a dataset and a user history.
"""
@@ -1734,17 +1737,6 @@
for assoc in self.implicitly_converted_parent_datasets:
assoc.clear( purge = purge, delete_dataset = False )
- def get_display_name( self ):
- """
- Return the name of this HDA in either ascii or utf-8 encoding.
- """
- # Name can be either a string or a unicode object.
- # If string, convert to unicode object assuming 'utf-8' format.
- hda_name = self.name
- if isinstance(hda_name, str):
- hda_name = unicode(hda_name, 'utf-8')
- return hda_name
-
def get_access_roles( self, trans ):
"""
Return The access roles associated with this HDA's dataset.
@@ -1878,7 +1870,7 @@
self.subset = subset
self.location = location
-class Library( object, Dictifiable ):
+class Library( object, Dictifiable, HasName ):
permitted_actions = get_permitted_actions( filter='LIBRARY' )
dict_collection_visible_keys = ( 'id', 'name' )
dict_element_visible_keys = ( 'id', 'deleted', 'name', 'description', 'synopsis', 'root_folder_id' )
@@ -1939,15 +1931,8 @@
if lp.action == trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action:
roles.append( lp.role )
return roles
- def get_display_name( self ):
- # Library name can be either a string or a unicode object. If string,
- # convert to unicode object assuming 'utf-8' format.
- name = self.name
- if isinstance( name, str ):
- name = unicode( name, 'utf-8' )
- return name
-class LibraryFolder( object, Dictifiable ):
+class LibraryFolder( object, Dictifiable, HasName ):
dict_element_visible_keys = ( 'id', 'parent_id', 'name', 'description', 'item_count', 'genome_build', 'update_time' )
def __init__( self, name=None, description=None, item_count=0, order_id=None ):
self.name = name or "Unnamed folder"
@@ -2012,13 +1997,7 @@
def activatable_library_datasets( self ):
# This needs to be a list
return [ ld for ld in self.datasets if ld.library_dataset_dataset_association and not ld.library_dataset_dataset_association.dataset.deleted ]
- def get_display_name( self ):
- # Library folder name can be either a string or a unicode object. If string,
- # convert to unicode object assuming 'utf-8' format.
- name = self.name
- if isinstance( name, str ):
- name = unicode( name, 'utf-8' )
- return name
+
def to_dict( self, view='collection' ):
rval = super( LibraryFolder, self ).to_dict( view=view )
info_association, inherited = self.get_info_association()
@@ -2129,7 +2108,7 @@
rval['metadata_' + name] = val
return rval
-class LibraryDatasetDatasetAssociation( DatasetInstance ):
+class LibraryDatasetDatasetAssociation( DatasetInstance, HasName ):
def __init__( self,
copied_from_history_dataset_association=None,
copied_from_library_dataset_dataset_association=None,
@@ -2304,15 +2283,6 @@
def templates_json( self, use_name=False ):
return json.dumps( self.templates_dict( use_name=use_name ) )
- def get_display_name( self ):
- """
- LibraryDatasetDatasetAssociation name can be either a string or a unicode object.
- If string, convert to unicode object assuming 'utf-8' format.
- """
- ldda_name = self.name
- if isinstance( ldda_name, str ):
- ldda_name = unicode( ldda_name, 'utf-8' )
- return ldda_name
class ExtendedMetadata( object ):
def __init__(self, data):
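A minimal standalone illustration (Python 2, with a hypothetical Example class) of the guarantee the HasName mixin provides: get_display_name() always returns unicode, whether the stored name is a utf-8 byte string or already unicode:

    # -*- coding: utf-8 -*-
    class HasName:
        def get_display_name( self ):
            # Convert a utf-8 byte string name to unicode; pass unicode through.
            name = self.name
            if isinstance( name, str ):
                name = unicode( name, 'utf-8' )
            return name

    class Example( HasName ):
        def __init__( self, name ):
            self.name = name

    assert isinstance( Example( 'byte string' ).get_display_name(), unicode )
    assert isinstance( Example( u'Hello' ).get_display_name(), unicode )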
diff -r 89d7cf67241f36e5f2c4de0aa31328fb7823e7d5 -r 6b627e15f18b13a489ca4e4343703931d9333021 test/unit/test_galaxy_mapping.py
--- a/test/unit/test_galaxy_mapping.py
+++ b/test/unit/test_galaxy_mapping.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
import unittest
import galaxy.model.mapping as mapping
@@ -95,6 +96,35 @@
self.persist( visualization )
persist_and_check_rating( model.VisualizationRatingAssociation, visualization=visualization )
+ def test_display_name( self ):
+
+ def assert_display_name_converts_to_unicode( item, name ):
+ assert not isinstance( item.name, unicode )
+ assert isinstance( item.get_display_name(), unicode )
+ assert item.get_display_name() == name
+
+ ldda = self.model.LibraryDatasetDatasetAssociation( name='ldda_name' )
+ assert_display_name_converts_to_unicode( ldda, 'ldda_name' )
+
+ hda = self.model.HistoryDatasetAssociation( name='hda_name' )
+ assert_display_name_converts_to_unicode( hda, 'hda_name' )
+
+ history = self.model.History( name='history_name' )
+ assert_display_name_converts_to_unicode( history, 'history_name' )
+
+ library = self.model.Library( name='library_name' )
+ assert_display_name_converts_to_unicode( library, 'library_name' )
+
+ library_folder = self.model.LibraryFolder( name='library_folder' )
+ assert_display_name_converts_to_unicode( library_folder, 'library_folder' )
+
+ history = self.model.History(
+ name=u'Hello₩◎ґʟⅾ'
+ )
+ assert isinstance( history.name, unicode )
+ assert isinstance( history.get_display_name(), unicode )
+ assert history.get_display_name() == u'Hello₩◎ґʟⅾ'
+
def test_tags( self ):
model = self.model
Repository URL: https://bitbucket.org/galaxy/galaxy-central/