galaxy-commits
galaxy-dist commit 98009db17cb2: Refactor sam_bitwise_flag_filter tool, simplifying it and making it faster, especially when there are multiple flag criteria
by commits-noreply@bitbucket.org 20 Nov '10
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Kanwei Li <kanwei(a)gmail.com>
# Date 1289677046 18000
# Node ID 98009db17cb2b21db626fc9c185a8fd22109a4cd
# Parent cadf13f67c6593d99f8134ba10baa211a026439b
Refactor sam_bitwise_flag_filter tool, simplifying it and making it faster, especially when there are multiple flag criteria
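For context, a minimal standalone sketch of the technique this commit switches to: parse the FLAG column once per line, then compare each requested bit (0x0001 shifted left by its index) against a precomputed list of (index, expected) pairs, instead of rebuilding all eleven state lists per line. The names below are illustrative, not the tool's actual command-line interface.

    def filter_sam_lines(lines, criteria, flag_col=1):
        """Yield SAM lines whose FLAG matches every (bit_index, wanted) pair."""
        for line in lines:
            line = line.rstrip('\r\n')
            if not line or line.startswith(('@', '#')):
                continue
            flags = int(line.split('\t')[flag_col])
            if all(bool(flags & (0x0001 << index)) == wanted
                   for index, wanted in criteria):
                yield line

    # Keep reads that are paired (bit 0 set) and mapped (bit 2, 0x0004, unset).
    reads = ['r1\t1\tchr1', 'r2\t5\tchr1']
    print(list(filter_sam_lines(reads, [(0, True), (2, False)])))  # ['r1\t1\tchr1']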
--- a/tools/samtools/sam_bitwise_flag_filter.py
+++ b/tools/samtools/sam_bitwise_flag_filter.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# Refactored on 11/13/2010 by Kanwei Li
import sys
import optparse
@@ -104,74 +105,45 @@ options (listed below) default to 'None'
default = '2',
help='Column containing SAM bitwise flag. 1-based')
- parser.add_option(
- '-d','--debug',
- dest='debug',
- action='store_true',
- default = False,
- help='Print debugging info')
-
options, args = parser.parse_args()
if options.input_sam:
infile = open ( options.input_sam, 'r')
else:
infile = sys.stdin
-
- option_values = { '0': False, '1': True, None: None }
-
- states = [];
- states.append( option_values[ options.is_paired ] )
- states.append( option_values[ options.is_proper_pair ] )
- states.append( option_values[ options.is_unmapped ] )
- states.append( option_values[ options.mate_is_unmapped ] )
- states.append( option_values[ options.query_strand ] )
- states.append( option_values[ options.mate_strand ] )
- states.append( option_values[ options.is_first ] )
- states.append( option_values[ options.is_second ] )
- states.append( option_values[ options.is_not_primary ] )
- states.append( option_values[ options.is_bad_quality ] )
- states.append( option_values[ options.is_duplicate ] )
-
+
+ opt_ary = [
+ options.is_paired,
+ options.is_proper_pair,
+ options.is_unmapped,
+ options.mate_is_unmapped,
+ options.query_strand,
+ options.mate_strand,
+ options.is_first,
+ options.is_second,
+ options.is_not_primary,
+ options.is_bad_quality,
+ options.is_duplicate
+ ]
+
+ opt_map = { '0': False, '1': True }
+ used_indices = [(index, opt_map[opt]) for index, opt in enumerate(opt_ary) if opt is not None]
+ flag_col = int( options.flag_col ) - 1
+
for line in infile:
line = line.rstrip( '\r\n' )
if line and not line.startswith( '#' ) and not line.startswith( '@' ) :
fields = line.split( '\t' )
- sam_states = []
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0001 ) )
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0002 ) )
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0004 ) )
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0008 ) )
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0010 ) )
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0020 ) )
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0040 ) )
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0080 ) )
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0100 ) )
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0200 ) )
- sam_states.append( bool( int( fields[ int( options.flag_col ) - 1 ] ) & 0x0400 ) )
-
- joined_states = zip(states,sam_states)
- searchable_fields = []
-
- for i in range( len( joined_states ) ):
- if joined_states[i][0] != None:
- searchable_fields.append( joined_states[ i ] )
+ flags = int( fields[flag_col] )
valid_line = True
-
- for i in range( len( searchable_fields ) ):
- if searchable_fields[i][0] != searchable_fields[i][1]:
+ for index, opt_bool in used_indices:
+ if bool(flags & 0x0001 << index) != opt_bool:
valid_line = False
+ break
if valid_line:
print line
- if options.debug:
- for i in range( len( joined_states ) ):
- print i, joined_states[i][0], joined_states[i][1]
-
-# if skipped_lines > 0:
-# print 'Skipped %d invalid lines' % skipped_lines
-
if __name__ == "__main__": main()
galaxy-dist commit b124f54952de: Small fixes and refactoring for workflows: remove the unused ensure_popup_helper(), and fix a trailing comma that was causing an error in IE.
by commits-noreply@bitbucket.org 20 Nov '10
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Kanwei Li <kanwei(a)gmail.com>
# Date 1289591135 18000
# Node ID b124f54952dee106f4ca87ea89a7f828c41562c3
# Parent b7f712cecaa9f527f1161f923ebe798eba526cb6
Small fixes and refactoring for workflows: remove the unused ensure_popup_helper(), and fix a trailing comma that was causing an error in IE.
--- a/templates/workflow/editor.mako
+++ b/templates/workflow/editor.mako
@@ -13,7 +13,6 @@
ensure_dd_helper();
make_left_panel( $("#left"), $("#center"), $("#left-border" ) );
make_right_panel( $("#right"), $("#center"), $("#right-border" ) );
- ensure_popup_helper();
## handle_minwidth_hint = rp.handle_minwidth_hint;
</script></%def>
@@ -82,8 +81,7 @@
"${initial_text}": function() {
// Show/hide menu and update vars, user preferences.
var menu = $('#tool-search');
- if (menu.is(":visible"))
- {
+ if (menu.is(":visible")) {
// Hide menu.
pref_value = "False";
menu_option_text = "Search Tools";
@@ -91,9 +89,7 @@
// Reset search.
reset_tool_search(true);
- }
- else
- {
+ } else {
// Show menu.
pref_value = "True";
menu_option_text = "Hide Search";
@@ -158,18 +154,17 @@
var next = this_label.next();
var no_visible_tools = true;
// Look through tools following label and, if none are visible, hide label.
- while (next.length != 0 && next.hasClass("toolTitle"))
- {
- if (next.is(":visible"))
- {
+ while (next.length !== 0 && next.hasClass("toolTitle")) {
+ if (next.is(":visible")) {
no_visible_tools = false;
break;
+ } else {
+ next = next.next();
}
- else
- next = next.next();
}
- if (no_visible_tools)
+ if (no_visible_tools) {
this_label.hide();
+ }
});
} else {
$("#search-no-results").show();
@@ -211,9 +206,9 @@
scroll_to_nodes();
canvas_manager.draw_overview();
// Determine if any parameters were 'upgraded' and provide message
- upgrade_message = ""
- $.each( data['upgrade_messages'], function( k, v ) {
- upgrade_message += ( "<li>Step " + ( parseInt(k) + 1 ) + ": " + workflow.nodes[k].name + "<ul>");
+ upgrade_message = "";
+ $.each( data.upgrade_messages, function( k, v ) {
+ upgrade_message += ( "<li>Step " + ( parseInt(k, 10) + 1 ) + ": " + workflow.nodes[k].name + "<ul>");
$.each( v, function( i, vv ) {
upgrade_message += "<li>" + vv +"</li>";
});
@@ -256,7 +251,7 @@
"Layout": layout_editor,
"Save" : save_current_workflow,
##"Load a Workflow" : load_workflow,
- "Close": close_editor,
+ "Close": close_editor
});
function edit_workflow_outputs(){
@@ -297,21 +292,21 @@
workflow.has_changes = true;
});
$('#workflow-output-area').show();
- };
+ }
function layout_editor() {
workflow.layout();
workflow.fit_canvas_to_nodes();
scroll_to_nodes();
canvas_manager.draw_overview();
- };
+ }
function edit_workflow_attributes() {
workflow.clear_active_node();
$('.right-content').hide();
$('#edit-attributes').show();
- };
+ }
$.jStore.engineReady(function() {
// On load, set the size to the pref stored in local storage if it exists
@@ -354,7 +349,11 @@
// Lets the overview be toggled visible and invisible, adjusting the arrows accordingly
$("#close-viewport").click( function() {
- $("#overview-border").css("right") == "0px" ? hide_overview() : show_overview();
+ if ( $("#overview-border").css("right") === "0px" ) {
+ hide_overview();
+ } else {
+ show_overview();
+ }
});
// Unload handler
@@ -405,7 +404,7 @@
function scroll_to_nodes() {
var cv = $("#canvas-viewport");
- var cc = $("#canvas-container")
+ var cc = $("#canvas-container");
var top, left;
if ( cc.width() < cv.width() ) {
left = ( cv.width() - cc.width() ) / 2;
@@ -436,14 +435,14 @@
node.init_field_data( data );
},
error: function( x, e ) {
- var m = "error loading field data"
- if ( x.status == 0 ) {
- m += ", server unavailable"
+ var m = "error loading field data";
+ if ( x.status === 0 ) {
+ m += ", server unavailable";
}
node.error( m );
}
});
- };
+ }
function add_node_for_module( type, title ) {
node = prebuild_node( type, title );
@@ -466,7 +465,7 @@
node.error( m );
}
});
- };
+ }
<%
from galaxy.jobs.actions.post import ActionBox
@@ -500,12 +499,12 @@
}
function new_pja(action_type, target, node){
- if (node.post_job_actions == undefined){
+ if (node.post_job_actions === undefined){
//New tool node, set up dict.
node.post_job_actions = {};
}
- if (node.post_job_actions[action_type+target] == undefined){
- var new_pja = new Object();
+ if (node.post_job_actions[action_type+target] === undefined){
+ var new_pja = {};
new_pja.action_type = action_type;
new_pja.output_name = target;
node.post_job_actions[action_type+target] = null;
@@ -513,7 +512,7 @@
display_pja(new_pja, node);
workflow.active_form_has_changes = true;
return true;
- }else{
+ } else {
return false;
}
}
@@ -576,7 +575,7 @@
var value = $(this).attr( 'value' );
options[ $(this).text() ] = function() {
$(form).append( "<input type='hidden' name='"+name+"' value='"+value+"' />" ).submit();
- }
+ };
});
b.insertAfter( this );
$(this).remove();
@@ -613,9 +612,9 @@
"Don't Save": do_close
} );
} else {
- window.document.location = "${next_url}"
+ window.document.location = "${next_url}";
}
- }
+ };
var save_current_workflow = function ( eventObj, success_callback ) {
show_modal( "Saving workflow", "progress" );
@@ -634,14 +633,14 @@
type: "POST",
data: {
id: "${trans.security.encode_id( stored.id )}",
- workflow_data: function() { return JSON.stringify( workflow.to_simple() ) },
+ workflow_data: function() { return JSON.stringify( workflow.to_simple() ); },
"_": "true"
},
dataType: 'json',
success: function( data ) {
var body = $("<div></div>").text( data.message );
if ( data.errors ) {
- body.addClass( "warningmark" )
+ body.addClass( "warningmark" );
var errlist = $( "<ul/>" );
$.each( data.errors, function( i, v ) {
$("<li></li>").text( v ).appendTo( errlist );
@@ -663,7 +662,7 @@
}
}
});
- }
+ };
// We bind to ajaxStop because of auto-saving, since the form submission ajax
// call needs to be completed so that the new data is saved
@@ -677,7 +676,7 @@
} else {
savefn(success_callback);
}
- }
+ };
</script></%def>
galaxy-dist commit dc4af741ffc4: Fixed a galaxy_job_id reference that was missed when substituting get_id_tag in the drmaa runner.
by commits-noreply@bitbucket.org 20 Nov '10
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Dannon Baker <dannonbaker(a)me.com>
# Date 1289573664 18000
# Node ID dc4af741ffc455c4737aff67c7d7de48c8628392
# Parent dbe6ab1eaa7ca7f9754c22bb2f6edd3fc26e3432
Fixed a galaxy_job_id reference that was missed when substituting get_id_tag in the drmaa runner.
--- a/lib/galaxy/jobs/runners/drmaa.py
+++ b/lib/galaxy/jobs/runners/drmaa.py
@@ -175,7 +175,7 @@ class DRMAAJobRunner( BaseJobRunner ):
log.debug("(%s) command is: %s" % ( galaxy_id_tag, command_line ) )
# runJob will raise if there's a submit problem
job_id = self.ds.runJob(jt)
- log.info("(%s) queued as %s" % ( galaxy_job_id, job_id ) )
+ log.info("(%s) queued as %s" % ( galaxy_id_tag, job_id ) )
# store runner information for tracking if Galaxy restarts
job_wrapper.set_runner( runner_url, job_id )
galaxy-dist commit b7f712cecaa9: Prevent Rename Dataset Action from allowing a blank input.
by commits-noreply@bitbucket.org 20 Nov '10
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Dannon Baker <dannonbaker(a)me.com>
# Date 1289590343 18000
# Node ID b7f712cecaa9f527f1161f923ebe798eba526cb6
# Parent fc1f67023d86a69ea7229501b093d61748630f4c
Prevent Rename Dataset Action from allowing a blank input.
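As a standalone illustration of the guard this commit adds, using simplified stand-ins for Galaxy's job and action objects (not the actual classes):

    def rename_outputs(action_arguments, output_datasets, output_name=''):
        """Rename matching outputs only when a non-empty 'newname' is given."""
        # Prevent renaming a dataset to the empty string.
        if not action_arguments or not action_arguments.get('newname'):
            return
        for dataset in output_datasets:
            if output_name == '' or dataset['name'] == output_name:
                dataset['name'] = action_arguments['newname']

    datasets = [{'name': 'out1'}]
    rename_outputs({'newname': ''}, datasets)           # blank input: no change
    rename_outputs({'newname': 'final.bed'}, datasets)  # renames 'out1'
    print(datasets)  # [{'name': 'final.bed'}]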
--- a/lib/galaxy/jobs/actions/post.py
+++ b/lib/galaxy/jobs/actions/post.py
@@ -60,14 +60,12 @@ class DefaultJobAction(object):
class EmailAction(DefaultJobAction):
name = "EmailAction"
verbose_name = "Email Notification"
-
@classmethod
def execute(cls, app, sa_session, action, job):
smtp_server = app.config.smtp_server
- if action.action_arguments:
- if action.action_arguments.has_key('host'):
- host = action.action_arguments['host']
+ if action.action_arguments and action.action_arguments.has_key('host'):
+ host = action.action_arguments['host']
else:
host = 'usegalaxy.org'
if smtp_server is None:
@@ -107,9 +105,8 @@ class EmailAction(DefaultJobAction):
@classmethod
def get_short_str(cls, pja):
- if pja.action_arguments:
- if pja.action_arguments.has_key('host'):
- return "Email the current user from server %s when this job is complete." % pja.action_arguments['host']
+ if pja.action_arguments and pja.action_arguments.has_key('host'):
+ return "Email the current user from server %s when this job is complete." % pja.action_arguments['host']
else:
return "Email the current user when this job is complete."
@@ -153,9 +150,11 @@ class RenameDatasetAction(DefaultJobActi
@classmethod
def execute(cls, app, sa_session, action, job):
- for dataset_assoc in job.output_datasets:
- if action.output_name == '' or dataset_assoc.name == action.output_name:
- dataset_assoc.dataset.name = action.action_arguments['newname']
+ # Prevent renaming a dataset to the empty string.
+ if action.action_arguments and action.action_arguments.has_key('newname') and action.action_arguments['newname'] != '':
+ for dataset_assoc in job.output_datasets:
+ if action.output_name == '' or dataset_assoc.name == action.output_name:
+ dataset_assoc.dataset.name = action.action_arguments['newname']
@classmethod
def get_config_form(cls, trans):
@@ -173,7 +172,11 @@ class RenameDatasetAction(DefaultJobActi
@classmethod
def get_short_str(cls, pja):
- return "Rename output '%s' to '%s'." % (pja.output_name, pja.action_arguments['newname'])
+ # Prevent renaming a dataset to the empty string.
+ if pja.action_arguments and pja.action_arguments.has_key('newname') and pja.action_arguments['newname'] != '':
+ return "Rename output '%s' to '%s'." % (pja.output_name, pja.action_arguments['newname'])
+ else:
+ return "Rename action used without a new name specified. Output name will be unchanged."
class HideDatasetAction(DefaultJobAction):
galaxy-dist commit b5b6b92a71e9: Refactored tracks/data function into multiple helper functions that can be reused as needed.
by commits-noreply@bitbucket.org 20 Nov '10
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User jeremy goecks <jeremy.goecks(a)emory.edu>
# Date 1289568743 18000
# Node ID b5b6b92a71e960691bb514af32ba2e21a731bd4e
# Parent a6cd0223a53b4f9498a0dde69dcef53387188e5f
Refactored tracks/data function into multiple helper functions that can be reused as needed.
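The shape of the refactor, sketched with simplified stand-ins (plain dicts rather than the controller's real model objects): each helper returns either a result or a status message, and the caller bails out on the first message.

    def _check_dataset_state(dataset):
        """Return a status message if the dataset is not usable yet, else None."""
        if dataset is None:
            return 'NO_DATA'
        if dataset['state'] == 'error':
            return 'ERROR'
        if dataset['state'] != 'ok':
            return 'PENDING'
        return None

    def data(dataset, chrom):
        if not chrom:                        # parameter check
            return 'NO_DATA'
        msg = _check_dataset_state(dataset)  # dataset check
        if msg:
            return msg
        return {'chrom': chrom, 'values': dataset['values']}

    print(data({'state': 'ok', 'values': [1, 2]}, 'chr1'))  # {'chrom': 'chr1', ...}
    print(data({'state': 'queued', 'values': []}, 'chr1'))  # PENDING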
--- a/lib/galaxy/web/controllers/tracks.py
+++ b/lib/galaxy/web/controllers/tracks.py
@@ -7,7 +7,7 @@ pkg_resources.require( "bx-python" )
from bx.seq.twobit import TwoBitFile
from galaxy import model
-from galaxy.util.json import to_json_string, from_json_string
+from galaxy.util.json import from_json_string
from galaxy.web.base.controller import *
from galaxy.web.framework import simplejson
from galaxy.web.framework.helpers import grids
@@ -249,29 +249,21 @@ class TracksController( BaseController,
"""
Called by the browser to request a block of data
"""
+
+ # Parameter check.
+ if not chrom:
+ return messages.NO_DATA
+
+ # Dataset check.
dataset = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dataset_id )
- if not dataset or not chrom:
- return messages.NO_DATA
- if dataset.state == trans.app.model.Job.states.ERROR:
- return messages.ERROR
- if dataset.state != trans.app.model.Job.states.OK:
- return messages.PENDING
-
- track_type, data_sources = dataset.datatype.get_track_type()
- for source_type, data_source in data_sources.iteritems():
- try:
- converted_dataset = dataset.get_converted_dataset(trans, data_source)
- except ValueError:
- return messages.NO_CONVERTER
-
- # Need to check states again for the converted version
- if converted_dataset and converted_dataset.state == model.Dataset.states.ERROR:
- job_id = trans.sa_session.query( trans.app.model.JobToOutputDatasetAssociation ).filter_by( dataset_id=converted_dataset.id ).first().job_id
- job = trans.sa_session.query( trans.app.model.Job ).get( job_id )
- return { 'kind': messages.ERROR, 'message': job.stderr }
-
- if not converted_dataset or converted_dataset.state != model.Dataset.states.OK:
- return messages.PENDING
+ msg = self._check_dataset_state( trans, dataset )
+ if msg:
+ return msg
+
+ # Get converted datasets.
+ data_sources, message = self._get_converted_datasets( trans, dataset )
+ if not data_sources:
+ return message
extra_info = None
if 'index' in data_sources:
@@ -365,4 +357,43 @@ class TracksController( BaseController,
@web.expose
def list_tracks( self, trans, **kwargs ):
return self.tracks_grid( trans, **kwargs )
-
+
+
+ #
+ # Helper methods.
+ #
+
+ def _check_dataset_state( self, trans, dataset ):
+ """
+ Returns a message if dataset is not ready to be used in visualization.
+ """
+ if not dataset:
+ return messages.NO_DATA
+ if dataset.state == trans.app.model.Job.states.ERROR:
+ return messages.ERROR
+ if dataset.state != trans.app.model.Job.states.OK:
+ return messages.PENDING
+ return None
+
+ def _get_converted_datasets( self, trans, dataset ):
+ """
+ Returns (a) converted datasets for a dataset and (b) dictionary of
+ any messages based on or derived from the conversion.
+ """
+ track_type, data_sources = dataset.datatype.get_track_type()
+ for source_type, data_source in data_sources.iteritems():
+ try:
+ converted_dataset = dataset.get_converted_dataset(trans, data_source)
+ except ValueError:
+ return None, messages.NO_CONVERTER
+
+ # Need to check states again for the converted version
+ if converted_dataset and converted_dataset.state == model.Dataset.states.ERROR:
+ job_id = trans.sa_session.query( trans.app.model.JobToOutputDatasetAssociation ).filter_by( dataset_id=converted_dataset.id ).first().job_id
+ job = trans.sa_session.query( trans.app.model.Job ).get( job_id )
+ return None, { 'kind': messages.ERROR, 'message': job.stderr }
+
+ if not converted_dataset or converted_dataset.state != model.Dataset.states.OK:
+ return None, messages.PENDING
+
+ return data_sources, None
galaxy-dist commit a6cd0223a53b: Add -parse_deflines switch to BLAST+ wrappers
by commits-noreply@bitbucket.org 20 Nov '10
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User peterjc <p.j.a.cock(a)googlemail.com>
# Date 1287758911 -3600
# Node ID a6cd0223a53b4f9498a0dde69dcef53387188e5f
# Parent 082399b4681b878d20a727150832ef30679b7e1c
Add -parse_deflines switch to BLAST+ wrappers
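For readers unfamiliar with Galaxy tool XML: a boolean <param> substitutes its truevalue into the command line when checked and its falsevalue (here the empty string) when not. A hedged sketch of that substitution; the helper below is illustrative, not Galaxy's API:

    def render_boolean_param(checked, truevalue='-parse_deflines', falsevalue=''):
        """Substitute truevalue when the checkbox is on, falsevalue otherwise."""
        return truevalue if checked else falsevalue

    print(('blastn -query in.fasta ' + render_boolean_param(True)).strip())
    # blastn -query in.fasta -parse_deflines
    print(('blastn -query in.fasta ' + render_boolean_param(False)).strip())
    # blastn -query in.fasta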
--- a/tools/ncbi_blast_plus/ncbi_blastn_wrapper.xml
+++ b/tools/ncbi_blast_plus/ncbi_blastn_wrapper.xml
@@ -27,6 +27,7 @@ blastn
-word_size $adv_opts.word_size
#end if
$adv_opts.ungapped
+$adv_opts.parse_deflines
## End of advanced options:
#end if
</command>
@@ -99,6 +100,7 @@ blastn
<validator type="in_range" min="0" /></param><param name="ungapped" type="boolean" label="Perform ungapped alignment only?" truevalue="-ungapped" falsevalue="" checked="false" />
+ <param name="parse_deflines" type="boolean" label="Should the query and subject defline(s) be parsed?" truevalue="-parse_deflines" falsevalue="" checked="false" help="This affects the formatting of the query/subject ID strings"/></when></conditional></inputs>
--- a/tools/ncbi_blast_plus/ncbi_blastp_wrapper.xml
+++ b/tools/ncbi_blast_plus/ncbi_blastp_wrapper.xml
@@ -28,6 +28,7 @@ blastp
#end if
##Ungapped disabled for now - see comments below
##$adv_opts.ungapped
+$adv_opts.parse_deflines
## End of advanced options:
#end if
</command>
@@ -104,6 +105,7 @@ blastp
Tried using '-ungapped -comp_based_stats F' and blastp crashed with 'Attempt to access NULL pointer.'
<param name="ungapped" type="boolean" label="Perform ungapped alignment only?" truevalue="-ungapped -comp_based_stats F" falsevalue="" checked="false" />
-->
+ <param name="parse_deflines" type="boolean" label="Should the query and subject defline(s) be parsed?" truevalue="-parse_deflines" falsevalue="" checked="false" help="This affects the formatting of the query/subject ID strings"/></when></conditional></inputs>
--- a/tools/ncbi_blast_plus/ncbi_tblastx_wrapper.xml
+++ b/tools/ncbi_blast_plus/ncbi_tblastx_wrapper.xml
@@ -26,6 +26,7 @@ tblastx
#if (str($adv_opts.word_size) and int(str($adv_opts.word_size)) > 0):
-word_size $adv_opts.word_size
#end if
+$adv_opts.parse_deflines
## End of advanced options:
#end if
</command>
@@ -97,6 +98,7 @@ tblastx
<param name="word_size" type="integer" value="0" label="Word size for wordfinder algorithm" help="Use zero for default, otherwise minimum 2."><validator type="in_range" min="0" /></param>
+ <param name="parse_deflines" type="boolean" label="Should the query and subject defline(s) be parsed?" truevalue="-parse_deflines" falsevalue="" checked="false" help="This affects the formatting of the query/subject ID strings"/></when></conditional></inputs>
--- a/tools/ncbi_blast_plus/ncbi_blastx_wrapper.xml
+++ b/tools/ncbi_blast_plus/ncbi_blastx_wrapper.xml
@@ -27,6 +27,7 @@ blastx
-word_size $adv_opts.word_size
#end if
$adv_opts.ungapped
+$adv_opts.parse_deflines
## End of advanced options:
#end if
</command>
@@ -99,6 +100,7 @@ blastx
<validator type="in_range" min="0" /></param><param name="ungapped" type="boolean" label="Perform ungapped alignment only?" truevalue="-ungapped" falsevalue="" checked="false" />
+ <param name="parse_deflines" type="boolean" label="Should the query and subject defline(s) be parsed?" truevalue="-parse_deflines" falsevalue="" checked="false" help="This affects the formatting of the query/subject ID strings"/></when></conditional></inputs>
--- a/tools/ncbi_blast_plus/ncbi_tblastn_wrapper.xml
+++ b/tools/ncbi_blast_plus/ncbi_tblastn_wrapper.xml
@@ -27,6 +27,7 @@ tblastn
#end if
##Ungapped disabled for now - see comments below
##$adv_opts.ungapped
+$adv_opts.parse_deflines
## End of advanced options:
#end if
</command>
@@ -99,6 +100,7 @@ tblastn
Tried using '-ungapped -comp_based_stats F' and tblastn crashed with 'Attempt to access NULL pointer.'
<param name="ungapped" type="boolean" label="Perform ungapped alignment only?" truevalue="-ungapped -comp_based_stats F" falsevalue="" checked="false" />
-->
+ <param name="parse_deflines" type="boolean" label="Should the query and subject defline(s) be parsed?" truevalue="-parse_deflines" falsevalue="" checked="false" help="This affects the formatting of the query/subject ID strings"/></when></conditional></inputs>
galaxy-dist commit 2389c323a1e6: Fix for compatibility with the refactored base job runner build_command_line.
by commits-noreply@bitbucket.org 20 Nov '10
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Dannon Baker <dannonbaker(a)me.com>
# Date 1289568484 18000
# Node ID 2389c323a1e6a50de6d48d5431d16d830ccef021
# Parent ca23ea683d26df004e666ede82950c712e3ac637
Fix for compatibility with the refactored base job runner build_command_line.
DRMAA runner now uses get_id_tag() on the *Wrapper classes instead of job_id directly when creating the .sh, .o, and .e files, as well as in debug logging.
This fixes the issue where many tasks for the same job are submitted at the same time and their files overwrite one another or collide in the submission process.
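A minimal sketch of the id-tag scheme described above (simplified stand-ins, not Galaxy's actual classes): jobs keep using their plain id, while tasks append a task id so the per-task .sh/.o/.e file names cannot collide.

    class JobWrapper(object):
        def __init__(self, job_id):
            self.job_id = job_id
        def get_id_tag(self):
            # Plain jobs keep using the job id, as before.
            return str(self.job_id)

    class TaskWrapper(JobWrapper):
        def __init__(self, job_id, task_id):
            JobWrapper.__init__(self, job_id)
            self.task_id = task_id
        def get_id_tag(self):
            # Tasks append their own id so per-task files stay distinct.
            return "%s_%s" % (self.job_id, self.task_id)

    for wrapper in (JobWrapper(42), TaskWrapper(42, 7)):
        print("galaxy_%s.sh" % wrapper.get_id_tag())
    # galaxy_42.sh, then galaxy_42_7.sh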
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -295,6 +295,10 @@ class JobWrapper( object ):
def get_job( self ):
return self.sa_session.query( model.Job ).get( self.job_id )
+ def get_id_tag(self):
+ # For compatability with drmaa, which uses job_id right now, and TaskWrapper
+ return str(self.job_id)
+
def get_param_dict( self ):
"""
Restore the dictionary of parameters from the database.
@@ -788,6 +792,10 @@ class TaskWrapper(JobWrapper):
def get_task( self ):
return self.sa_session.query(model.Task).get(self.task_id)
+ def get_id_tag(self):
+ # For compatibility with drmaa job runner and TaskWrapper, instead of using job_id directly
+ return "%s_%s" % (self.job_id, self.task_id)
+
def get_param_dict( self ):
"""
Restore the dictionary of parameters from the database.
@@ -964,7 +972,7 @@ class TaskWrapper(JobWrapper):
def setup_external_metadata( self, exec_dir = None, tmp_dir = None, dataset_files_path = None, config_root = None, datatypes_config = None, set_extension = True, **kwds ):
# There is no metadata setting for tasks. This is handled after the merge, at the job level.
- pass
+ return ""
@property
def user( self ):
--- a/lib/galaxy/jobs/runners/tasks.py
+++ b/lib/galaxy/jobs/runners/tasks.py
@@ -117,8 +117,12 @@ class TaskedJobRunner( object ):
basic.merge(working_directory, output_filename)
log.debug('execution finished: %s' % command_line)
for tw in task_wrappers:
- stdout += tw.get_task().stdout
- stderr += tw.get_task().stderr
+ # Prevent repetitive output, e.g. "Sequence File Aligned"x20
+ # Eventually do a reduce for jobs that output "N reads mapped", combining all N for tasks.
+ if stdout.strip() != tw.get_task().stdout.strip():
+ stdout += tw.get_task().stdout
+ if stderr.strip() != tw.get_task().stderr.strip():
+ stderr += tw.get_task().stderr
except Exception:
job_wrapper.fail( "failure running job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
--- a/lib/galaxy/jobs/runners/drmaa.py
+++ b/lib/galaxy/jobs/runners/drmaa.py
@@ -125,7 +125,7 @@ class DRMAAJobRunner( BaseJobRunner ):
command_line = self.build_command_line( job_wrapper, include_metadata=True )
except:
job_wrapper.fail( "failure preparing job", exception=True )
- log.exception("failure running job %d" % job_wrapper.job_id)
+ log.exception("failure running job %d" % job_wrapper.get_id_tag())
return
runner_url = job_wrapper.tool.job_runner
@@ -137,7 +137,7 @@ class DRMAAJobRunner( BaseJobRunner ):
# Check for deletion before we change state
if job_wrapper.get_state() == model.Job.states.DELETED:
- log.debug( "Job %s deleted by user before it entered the queue" % job_wrapper.job_id )
+ log.debug( "Job %s deleted by user before it entered the queue" % job_wrapper.get_id_tag() )
job_wrapper.cleanup()
return
@@ -145,10 +145,10 @@ class DRMAAJobRunner( BaseJobRunner ):
job_wrapper.change_state( model.Job.states.QUEUED )
# define job attributes
- ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job_wrapper.job_id)
- efile = "%s/database/pbs/%s.e" % (os.getcwd(), job_wrapper.job_id)
+ ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job_wrapper.get_id_tag())
+ efile = "%s/database/pbs/%s.e" % (os.getcwd(), job_wrapper.get_id_tag())
jt = self.ds.createJobTemplate()
- jt.remoteCommand = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job_wrapper.job_id)
+ jt.remoteCommand = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job_wrapper.get_id_tag())
jt.outputPath = ":%s" % ofile
jt.errorPath = ":%s" % efile
native_spec = self.get_native_spec( runner_url )
@@ -163,14 +163,16 @@ class DRMAAJobRunner( BaseJobRunner ):
# job was deleted while we were preparing it
if job_wrapper.get_state() == model.Job.states.DELETED:
- log.debug( "Job %s deleted by user before it entered the queue" % job_wrapper.job_id )
+ log.debug( "Job %s deleted by user before it entered the queue" % job_wrapper.get_id_tag() )
self.cleanup( ( ofile, efile, jt.remoteCommand ) )
job_wrapper.cleanup()
return
- galaxy_job_id = job_wrapper.job_id
- log.debug("(%s) submitting file %s" % ( galaxy_job_id, jt.remoteCommand ) )
- log.debug("(%s) command is: %s" % ( galaxy_job_id, command_line ) )
+ # wrapper.get_id_tag() instead of job_id for compatibility with TaskWrappers.
+ galaxy_id_tag = job_wrapper.get_id_tag()
+
+ log.debug("(%s) submitting file %s" % ( galaxy_id_tag, jt.remoteCommand ) )
+ log.debug("(%s) command is: %s" % ( galaxy_id_tag, command_line ) )
# runJob will raise if there's a submit problem
job_id = self.ds.runJob(jt)
log.info("(%s) queued as %s" % ( galaxy_job_id, job_id ) )
galaxy-dist commit ecc1cefbccba: Enhance GFFReader to read and return complete features (genes/transcripts), which are composed of multiple intervals/blocks. This required hacking around a couple of limitations of bx-python, but these can be easily fixed when bx-python is updated. Use the enhanced functionality to correctly create converted datasets for visualizing GFF files.
by commits-noreply@bitbucket.org 20 Nov '10
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User jeremy goecks <jeremy.goecks(a)emory.edu>
# Date 1289591537 18000
# Node ID ecc1cefbccba6135f824f9573f06c130fd6d08b4
# Parent b124f54952dee106f4ca87ea89a7f828c41562c3
Enhance GFFReader to read and return complete features (genes/transcripts), which are composed of multiple intervals/blocks. This required hacking around a couple of limitations of bx-python, but these can be easily fixed when bx-python is updated. Use the enhanced functionality to correctly create converted datasets for visualizing GFF files.
Also updated GOPS tools to use a simpler GFF reader wrapper to read in intervals and convert to BED coordinates.
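The coordinate conversion the patch leans on, as a hedged standalone sketch: GFF intervals are 1-based and closed, BED intervals 0-based and half-open, so converting between them only shifts the start.

    def convert_gff_coords_to_bed(start, end):
        """(1-based, closed) -> (0-based, half-open)."""
        return start - 1, end

    def convert_bed_coords_to_gff(start, end):
        """(0-based, half-open) -> (1-based, closed)."""
        return start + 1, end

    # A GFF feature spanning bases 100..200 covers BED interval [99, 200).
    print(convert_gff_coords_to_bed(100, 200))  # (99, 200)
    print(convert_bed_coords_to_gff(99, 200))   # (100, 200)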
--- a/tools/new_operations/gops_subtract.py
+++ b/tools/new_operations/gops_subtract.py
@@ -44,11 +44,11 @@ def main():
# Set readers to handle either GFF or default format.
if in1_gff_format:
- in1_reader_wrapper = GFFReaderWrapper
+ in1_reader_wrapper = GFFIntervalToBEDReaderWrapper
else:
in1_reader_wrapper = NiceReaderWrapper
if in2_gff_format:
- in2_reader_wrapper = GFFReaderWrapper
+ in2_reader_wrapper = GFFIntervalToBEDReaderWrapper
else:
in2_reader_wrapper = NiceReaderWrapper
--- a/tools/new_operations/gops_intersect.py
+++ b/tools/new_operations/gops_intersect.py
@@ -44,11 +44,11 @@ def main():
# Set readers to handle either GFF or default format.
if in1_gff_format:
- in1_reader_wrapper = GFFReaderWrapper
+ in1_reader_wrapper = GFFIntervalToBEDReaderWrapper
else:
in1_reader_wrapper = NiceReaderWrapper
if in2_gff_format:
- in2_reader_wrapper = GFFReaderWrapper
+ in2_reader_wrapper = GFFIntervalToBEDReaderWrapper
else:
in2_reader_wrapper = NiceReaderWrapper
@@ -66,10 +66,10 @@ def main():
fix_strand=True )
out_file = open( out_fname, "w" )
-
+
try:
for line in intersect( [g1,g2], pieces=pieces, mincols=mincols ):
- if type( line ) == GenomicInterval:
+ if isinstance( line, GenomicInterval ):
if in1_gff_format:
line = convert_bed_coords_to_gff( line )
out_file.write( "%s\n" % "\t".join( line.fields ) )
--- /dev/null
+++ b/lib/galaxy/datatypes/converters/gff_to_interval_index_converter.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+
+"""
+Convert from GFF file to interval index file.
+
+usage:
+ python gff_to_interval_index_converter.py [input] [output]
+"""
+
+from __future__ import division
+
+import sys, fileinput
+from galaxy import eggs
+import pkg_resources; pkg_resources.require( "bx-python" )
+from galaxy.tools.util.gff_util import *
+from bx.interval_index_file import Indexes
+
+def main():
+ # Arguments
+ input_fname, out_fname = sys.argv[1:]
+
+ # Do conversion.
+ chr_col, start_col, end_col, strand_col = ( 0, 3, 4, 6 )
+ index = Indexes()
+ offset = 0
+ reader_wrapper = GFFReaderWrapper( fileinput.FileInput( input_fname ),
+ chrom_col=chr_col,
+ start_col=start_col,
+ end_col=end_col,
+ strand_col=strand_col,
+ fix_strand=True )
+ for feature in list( reader_wrapper ):
+ # TODO: need to address comments:
+ # if comment:
+ # increment_offset.
+
+ # Add feature; index expects BED coordinates.
+ convert_gff_coords_to_bed( feature )
+ index.add( feature.chrom, feature.start, feature.end, offset )
+
+ # Increment offset by feature length; feature length is all
+ # intervals/lines that comprise feature.
+ feature_len = 0
+ for interval in feature.intervals:
+ # HACK: +1 for EOL char. Need bx-python to provide raw_line itself
+ # b/c TableReader strips EOL characters, thus changing the line
+ # length.
+ feature_len += len( interval.raw_line ) + 1
+ offset += feature_len
+
+ index.write( open(out_fname, "w") )
+
+if __name__ == "__main__":
+ main()
+
--- a/lib/galaxy/datatypes/converters/interval_to_summary_tree_converter.py
+++ b/lib/galaxy/datatypes/converters/interval_to_summary_tree_converter.py
@@ -14,7 +14,7 @@ import pkg_resources; pkg_resources.requ
from galaxy.visualization.tracks.summary import *
from bx.intervals.io import *
from bx.cookbook import doc_optparse
-from galaxy.tools.util.gff_util import GFFReaderWrapper
+from galaxy.tools.util.gff_util import *
def main():
# Read options, args.
@@ -40,9 +40,12 @@ def main():
strand_col=strand_col,
fix_strand=True )
st = SummaryTree(block_size=25, levels=6, draw_cutoff=150, detail_cutoff=30)
- for line in list( reader_wrapper ):
- if type( line ) is GenomicInterval:
- st.insert_range( line.chrom, long( line.start ), long( line.end ) )
+ for feature in list( reader_wrapper ):
+ if isinstance( feature, GenomicInterval ):
+ # Tree expects BED coordinates.
+ if type( feature ) is GFFFeature:
+ convert_gff_coords_to_bed( feature )
+ st.insert_range( feature.chrom, long( feature.start ), long( feature.end ) )
st.write(out_fname)
--- a/lib/galaxy/datatypes/converters/gff_to_interval_index_converter.xml
+++ b/lib/galaxy/datatypes/converters/gff_to_interval_index_converter.xml
@@ -1,6 +1,6 @@
-<tool id="CONVERTER_gff_to_interval_index_0" name="Convert BED to Interval Index" version="1.0.0" hidden="true">
+<tool id="CONVERTER_gff_to_interval_index_0" name="Convert GFF to Interval Index" version="1.0.0" hidden="true"><!-- <description>__NOT_USED_CURRENTLY_FOR_CONVERTERS__</description> -->
- <command interpreter="python">interval_to_interval_index_converter.py $input1 $output1 --gff</command>
+ <command interpreter="python">gff_to_interval_index_converter.py $input1 $output1</command><inputs><page><param format="gff" name="input1" type="data" label="Choose GFF file"/>
--- a/tools/new_operations/flanking_features.py
+++ b/tools/new_operations/flanking_features.py
@@ -153,11 +153,11 @@ def main():
# Set readers to handle either GFF or default format.
if in1_gff_format:
- in1_reader_wrapper = GFFReaderWrapper
+ in1_reader_wrapper = GFFIntervalToBEDReaderWrapper
else:
in1_reader_wrapper = NiceReaderWrapper
if in2_gff_format:
- in2_reader_wrapper = GFFReaderWrapper
+ in2_reader_wrapper = GFFIntervalToBEDReaderWrapper
else:
in2_reader_wrapper = NiceReaderWrapper
--- a/lib/galaxy/tools/util/gff_util.py
+++ b/lib/galaxy/tools/util/gff_util.py
@@ -2,25 +2,173 @@
Provides utilities for working with GFF files.
"""
-from bx.intervals.io import NiceReaderWrapper, GenomicInterval
+from bx.intervals.io import *
+
+class GFFInterval( GenomicInterval ):
+ """
+ A GFF interval, including attributes. If file is strictly a GFF file,
+ only attribute is 'group.'
+ """
+ def __init__( self, reader, fields, chrom_col, start_col, end_col, strand_col, default_strand, \
+ fix_strand=False, raw_line='' ):
+ GenomicInterval.__init__( self, reader, fields, chrom_col, start_col, end_col, strand_col, \
+ default_strand, fix_strand=fix_strand )
+ self.raw_line = raw_line
+ self.attributes = parse_gff_attributes( fields[8] )
+
+class GFFFeature( GenomicInterval ):
+ """
+ A GFF feature, which can include multiple intervals.
+ """
+ def __init__( self, reader, chrom_col, start_col, end_col, strand_col, default_strand, \
+ fix_strand=False, intervals=[] ):
+ GenomicInterval.__init__( self, reader, intervals[0].fields, chrom_col, start_col, end_col, \
+ strand_col, default_strand, fix_strand=fix_strand )
+ self.intervals = intervals
+ # Use intervals to set feature attributes.
+ for interval in self.intervals:
+ # Error checking.
+ if interval.chrom != self.chrom:
+ raise ValueError( "interval chrom does not match self chrom: %i != %i" % \
+ ( interval.chrom, self.chrom ) )
+ if interval.strand != self.strand:
+ raise ValueError( "interval strand does not match self strand: %s != %s" % \
+ ( interval.strand, self.strand ) )
+ # Set start, end of interval.
+ if interval.start < self.start:
+ self.start = interval.start
+ if interval.end > self.end:
+ self.end = interval.end
+
+class GFFIntervalToBEDReaderWrapper( NiceReaderWrapper ):
+ """
+ Reader wrapper that reads GFF intervals/lines and automatically converts
+ them to BED format.
+ """
+
+ def parse_row( self, line ):
+ # HACK: this should return a GFF interval, but bx-python operations
+ # require GenomicInterval objects and subclasses will not work.
+ interval = GenomicInterval( self, line.split( "\t" ), self.chrom_col, self.start_col, \
+ self.end_col, self.strand_col, self.default_strand, \
+ fix_strand=self.fix_strand )
+ interval = convert_gff_coords_to_bed( interval )
+ return interval
class GFFReaderWrapper( NiceReaderWrapper ):
"""
- Reader wrapper converts GFF format--starting and ending coordinates are 1-based, closed--to the
- 'traditional'/BED interval format--0 based, half-open. This is useful when using GFF files as inputs
- to tools that expect traditional interval format.
+ Reader wrapper for GFF files.
+
+ Wrapper has two major functions:
+ (1) group entries for GFF file (via group column), GFF3 (via id attribute ),
+ or GTF (via gene_id/transcript id);
+ (2) convert coordinates from GFF format--starting and ending coordinates
+ are 1-based, closed--to the 'traditional'/BED interval format--0 based,
+ half-open. This is useful when using GFF files as inputs to tools that
+ expect traditional interval format.
"""
+
+ def __init__( self, reader, **kwargs ):
+ """
+ Create wrapper. Defaults are group_entries=False and
+ convert_coords_to_bed=True to support backward compatibility.
+ """
+ NiceReaderWrapper.__init__( self, reader, **kwargs )
+ self.group_entries = kwargs.get( 'group_entries', False )
+ self.convert_coords_to_bed = kwargs.get( 'convert_coords_to_bed', True )
+ self.last_line = None
+ self.cur_offset = 0
+ self.seed_interval = None
+
def parse_row( self, line ):
- interval = GenomicInterval( self, line.split( "\t" ), self.chrom_col, self.start_col, self.end_col, \
- self.strand_col, self.default_strand, fix_strand=self.fix_strand )
- interval = convert_gff_coords_to_bed( interval )
+ interval = GFFInterval( self, line.split( "\t" ), self.chrom_col, self.start_col, \
+ self.end_col, self.strand_col, self.default_strand, \
+ fix_strand=self.fix_strand, raw_line=line )
+ if self.convert_coords_to_bed:
+ interval = convert_gff_coords_to_bed( interval )
return interval
+ def next( self ):
+ """ Returns next GFFFeature. """
+
+ #
+ # Helper function.
+ #
+
+ def handle_parse_error( parse_error ):
+ """ Actions to take when ParseError found. """
+ if self.outstream:
+ if self.print_delegate and hasattr(self.print_delegate,"__call__"):
+ self.print_delegate( self.outstream, e, self )
+ self.skipped += 1
+ # no reason to stuff an entire bad file into memmory
+ if self.skipped < 10:
+ self.skipped_lines.append( ( self.linenum, self.current_line, str( e ) ) )
+
+ #
+ # Get next GFFFeature
+ #
+
+ # If there is no seed interval, set one. Also, if there are no more
+ # intervals to read, this is where iterator dies.
+ if not self.seed_interval:
+ while not self.seed_interval:
+ try:
+ self.seed_interval = GenomicIntervalReader.next( self )
+ except ParseError, e:
+ handle_parse_error( e )
+
+ # Initialize feature name from seed.
+ feature_group = self.seed_interval.attributes.get( 'group', None ) # For GFF
+ feature_id = self.seed_interval.attributes.get( 'id', None ) # For GFF3
+ feature_gene_id = self.seed_interval.attributes.get( 'gene_id', None ) # For GTF
+ feature_transcript_id = self.seed_interval.attributes.get( 'transcript_id', None ) # For GTF
+
+ # Read all intervals associated with seed.
+ feature_intervals = []
+ feature_intervals.append( self.seed_interval )
+ while True:
+ try:
+ interval = GenomicIntervalReader.next( self )
+ except StopIteration, e:
+ # No more intervals to read, but last feature needs to be
+ # returned.
+ interval = None
+ break
+ except ParseError, e:
+ handle_parse_error( e )
+
+ # If interval not associated with feature, break.
+ group = interval.attributes.get( 'group', None )
+ if group and feature_group != group:
+ break
+ id = interval.attributes.get( 'id', None )
+ if id and feature_id != id:
+ break
+ gene_id = interval.attributes.get( 'gene_id', None )
+ transcript_id = interval.attributes.get( 'transcript_id', None )
+ if transcript_id and transcript_id != feature_transcript_id and gene_id and \
+ gene_id != feature_gene_id:
+ break
+
+ # Interval associated with feature.
+ feature_intervals.append( interval )
+
+ # Last interval read is the seed for the next interval.
+ self.seed_interval = interval
+
+ # Return GFF feature with all intervals.
+ return GFFFeature( self, self.chrom_col, self.start_col, self.end_col, self.strand_col, \
+ self.default_strand, fix_strand=self.fix_strand, \
+ intervals=feature_intervals )
+
+
def convert_bed_coords_to_gff( interval ):
"""
- Converts an interval object's coordinates from BED format to GFF format. Accepted object types include
- GenomicInterval and list (where the first element in the list is the interval's start, and the second
- element is the interval's end).
+ Converts an interval object's coordinates from BED format to GFF format.
+ Accepted object types include GenomicInterval and list (where the first
+ element in the list is the interval's start, and the second element is
+ the interval's end).
"""
if type( interval ) is GenomicInterval:
interval.start += 1
@@ -30,9 +178,10 @@ def convert_bed_coords_to_gff( interval
def convert_gff_coords_to_bed( interval ):
"""
- Converts an interval object's coordinates from GFF format to BED format. Accepted object types include
- GenomicInterval and list (where the first element in the list is the interval's start, and the second
- element is the interval's end).
+ Converts an interval object's coordinates from GFF format to BED format.
+ Accepted object types include GenomicInterval and list (where the first
+ element in the list is the interval's start, and the second element is
+ the interval's end).
"""
if type( interval ) is GenomicInterval:
interval.start -= 1
@@ -42,10 +191,15 @@ def convert_gff_coords_to_bed( interval
def parse_gff_attributes( attr_str ):
"""
- Parses a GFF/GTF attribute string and returns a dictionary of name-value pairs.
- The general format for a GFF3 attributes string is name1=value1;name2=value2
- The general format for a GTF attribute string is name1 "value1" ; name2 "value2"
- """
+ Parses a GFF/GTF attribute string and returns a dictionary of name-value
+ pairs. The general format for a GFF3 attributes string is
+ name1=value1;name2=value2
+ The general format for a GTF attribute string is
+ name1 "value1" ; name2 "value2"
+ The general format for a GFF attribute string is a single string that
+ denotes the interval's group; in this case, method returns a dictionary
+ with a single key-value pair, and key name is 'group'
+ """
attributes_list = attr_str.split(";")
attributes = {}
for name_value_pair in attributes_list:
@@ -53,6 +207,9 @@ def parse_gff_attributes( attr_str ):
pair = name_value_pair.strip().split(" ")
if len( pair ) == 1:
pair = name_value_pair.strip().split("=")
+ if len( pair ) == 1:
+ # Could not split for some reason -- raise exception?
+ continue
if pair == '':
continue
name = pair[0].strip()
@@ -61,4 +218,9 @@ def parse_gff_attributes( attr_str ):
# Need to strip double quote from values
value = pair[1].strip(" \"")
attributes[ name ] = value
+
+ if len( attributes ) == 0:
+ # Could not split attributes string, so entire string must be
+ # 'group' attribute. This is the case for strictly GFF files.
+ attributes['group'] = attr_str
return attributes
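The three attribute styles the patch distinguishes can be exercised with a condensed standalone version of parse_gff_attributes (simplified from the diff above; multi-word quoted GTF values are not handled here):

    def parse_gff_attributes(attr_str):
        attributes = {}
        for name_value_pair in attr_str.split(';'):
            pair = name_value_pair.strip().split(' ')      # GTF style: name "value"
            if len(pair) == 1:
                pair = name_value_pair.strip().split('=')  # GFF3 style: name=value
            if len(pair) == 1:
                continue
            attributes[pair[0].strip()] = pair[1].strip(' "')
        if not attributes:
            # Plain GFF: the entire column is the group name.
            attributes['group'] = attr_str
        return attributes

    print(parse_gff_attributes('gene_id "g1"; transcript_id "t1"'))  # GTF
    print(parse_gff_attributes('ID=gene1;Name=abc'))                 # GFF3
    print(parse_gff_attributes('mygroup'))                           # {'group': 'mygroup'}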
galaxy-dist commit cadf13f67c65: More changes to sample tracking functional tests
by commits-noreply@bitbucket.org 20 Nov '10
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User rc
# Date 1289597856 18000
# Node ID cadf13f67c6593d99f8134ba10baa211a026439b
# Parent ecc1cefbccba6135f824f9573f06c130fd6d08b4
More changes to sample tracking functional tests
- assigning a target data library, editing samples, and assigning sample barcodes
--- a/test/functional/test_sample_tracking.py
+++ b/test/functional/test_sample_tracking.py
@@ -4,7 +4,9 @@ from base.twilltestcase import *
from base.test_db_util import *
sample_states = [ ( 'New', 'Sample entered into the system' ),
- ( 'Received', 'Sample tube received' ),
+ ( 'Received', 'Sample tube received' ),
+ ( 'Library Started', 'Sample library preparation' ),
+ ( 'Run Started', 'Sequence run in progress' ),
( 'Done', 'Sequence run complete' ) ]
address_dict = dict( short_desc="Office",
name="James Bond",
@@ -93,6 +95,49 @@ class TestFormsAndRequests( TwillTestCas
global role2
role2 = get_role_by_name( name )
assert role2 is not None, 'Problem retrieving role named "Role2" from the database'
+ def test_006_create_library_and_folder( self ):
+ """Testing creating the target data library and folder"""
+ # Logged in as admin_user
+ for index in range( 0, 2 ):
+ name = 'library%s' % str( index + 1 )
+ description = '%s description' % name
+ synopsis = '%s synopsis' % name
+ self.create_library( name=name, description=description, synopsis=synopsis )
+ # Get the libraries for later use
+ global library1
+ library1 = get_library( 'library1', 'library1 description', 'library1 synopsis' )
+ assert library1 is not None, 'Problem retrieving library (library1) from the database'
+ global library2
+ library2 = get_library( 'library2', 'library2 description', 'library2 synopsis' )
+ assert library2 is not None, 'Problem retrieving library (library2) from the database'
+ # setup add_library_item permission to regular_user1
+ # Set permissions on the library, sort for later testing.
+ permissions_in = [ 'LIBRARY_ACCESS' ]
+ permissions_out = []
+ # Role1 members are: admin_user, regular_user1, regular_user3.
+ # Each of these users will be permitted for LIBRARY_ACCESS, LIBRARY_ADD on
+ # library1 and library2.
+ for library in [ library1, library2 ]:
+ self.library_permissions( self.security.encode_id( library.id ),
+ library.name,
+ str( role1.id ),
+ permissions_in,
+ permissions_out )
+ # adding a folder
+ for library in [ library1, library2 ]:
+ name = "%s_folder1" % library.name
+ description = "%s description" % name
+ self.add_folder( 'library_admin',
+ self.security.encode_id( library.id ),
+ self.security.encode_id( library.root_folder.id ),
+ name=name,
+ description=description )
+ global library1_folder1
+ library1_folder1 = get_folder( library1.root_folder.id, 'library1_folder1', 'library1_folder1 description' )
+ assert library1_folder1 is not None, 'Problem retrieving library folder named "library1_folder1" from the database'
+ global library2_folder1
+ library2_folder1 = get_folder( library2.root_folder.id, 'library2_folder1', 'library2_folder1 description' )
+ assert library2_folder1 is not None, 'Problem retrieving library folder named "library2_folder1" from the database'
#
# ====== Form definition test methods ================================================
#
@@ -283,25 +328,25 @@ class TestFormsAndRequests( TwillTestCas
self.view_request( cntrller='requests',
request_id=self.security.encode_id( request1.id ),
strings_displayed=[ 'Sequencing request "%s"' % request1.name,
- 'There are no samples.',
- sample_form_layout_grid_name ],
+ 'There are no samples.' ],
strings_not_displayed=[ request1.states.SUBMITTED,
request1.states.COMPLETE,
- request1.states.REJECTED ] )
+ request1.states.REJECTED,
+ 'Submit request' ] ) # this button should NOT show up as there are no samples yet
# check if the request is showing in the 'new' filter
self.check_request_grid( cntrller='requests',
state=request1.states.NEW,
strings_displayed=[ request1.name ] )
self.view_request_history( cntrller='requests',
request_id=self.security.encode_id( request1.id ),
- strings_displayed=[ 'History of Sequencing Request "%s"' % request1.name,
+ strings_displayed=[ 'History of sequencing request "%s"' % request1.name,
request1.states.NEW,
'Request created' ],
strings_not_displayed=[ request1.states.SUBMITTED,
request1.states.COMPLETE,
request1.states.REJECTED ] )
def test_030_edit_basic_request_info( self ):
- """Testing editing the basic information of a sequencing request"""
+ """Testing editing the basic information and email settings of a sequencing request"""
# logged in as regular_user1
fields = [ 'option2', str( user_address1.id ), 'field3 value (edited)' ]
new_name=request1.name + ' (Renamed)'
@@ -315,43 +360,186 @@ class TestFormsAndRequests( TwillTestCas
strings_displayed=[ 'Edit sequencing request "%s"' % request1.name ],
strings_displayed_after_submit=[ new_name, new_desc ] )
refresh( request1 )
+ # now check email notification settings
+ check_sample_states = [ ( request1.type.states[0].name, request1.type.states[0].id, True ),
+ ( request1.type.states[2].name, request1.type.states[2].id, True ),
+ ( request1.type.states[4].name, request1.type.states[4].id, True ) ]#[ ( state.id, True ) for state in request1.type.states ]
+ strings_displayed = [ 'Edit sequencing request "%s"' % request1.name,
+ 'Email notification settings' ]
+ additional_emails = [ 'test@.bx.psu.edu', 'test2@.bx.psu.edu' ]
+ strings_displayed_after_submit = [ "The changes made to the email notification settings have been saved",
+ '\r\n'.join( additional_emails ) ]
+ self.edit_request_email_settings( cntrller='requests',
+ request_id=self.security.encode_id( request1.id ),
+ check_request_owner=True,
+ additional_emails='\r\n'.join( additional_emails ),
+ check_sample_states=check_sample_states,
+ strings_displayed=strings_displayed,
+ strings_displayed_after_submit=strings_displayed_after_submit )
+ # lastly check the details in the request page
+ strings_displayed = [ 'Sequencing request "%s"' % new_name,
+ new_desc ]
+ for field in fields:
+ strings_displayed.append( field )
+ for state_name, id, is_checked in check_sample_states:
+ strings_displayed.append( state_name )
+ for email in additional_emails:
+ strings_displayed.append( email )
+ self.view_request( cntrller='requests',
+ request_id=self.security.encode_id( request1.id ),
+ strings_displayed=strings_displayed,
+ strings_not_displayed=[] )
def test_035_add_samples_to_request( self ):
"""Testing adding samples to request"""
# logged in as regular_user1
# Sample fields - the tuple represents a sample name and a list of sample form field values
- sample_value_tuples = [ ( 'Sample1', [ 'option1', 'sample1 field2 value', 'sample1 field3 value' ] ),
- ( 'Sample2', [ 'option2', 'sample2 field2 value', 'sample2 field3 value' ] ),
- ( 'Sample3', [ 'option1', 'sample3 field2 value', 'sample3 field3 value' ] ) ]
+ target_library_info = dict(library=self.security.encode_id(library2.id),
+ folder=self.security.encode_id(library2_folder1.id) )
+ sample_value_tuples = \
+ [ ( 'Sample1', target_library_info, [ 'option1', 'sample1 field2 value', 'sample1 field3 value' ] ),
+ ( 'Sample2', target_library_info, [ 'option2', 'sample2 field2 value', 'sample2 field3 value' ] ),
+ ( 'Sample3', target_library_info, [ 'option1', 'sample3 field2 value', 'sample3 field3 value' ] ) ]
strings_displayed_after_submit = [ 'Unsubmitted' ]
- for sample_name, field_values in sample_value_tuples:
+ for sample_name, lib_info, field_values in sample_value_tuples:
strings_displayed_after_submit.append( sample_name )
+ # add the sample values too
+ for values in field_values:
+ strings_displayed_after_submit.append( values )
# Add samples to the request
self.add_samples( cntrller='requests',
request_id=self.security.encode_id( request1.id ),
- request_name=request1.name,
sample_value_tuples=sample_value_tuples,
strings_displayed=[ 'Add Samples to Request "%s"' % request1.name,
'<input type="text" name="sample_0_name" value="Sample_1" size="10"/>' ], # sample name input field
strings_displayed_after_submit=strings_displayed_after_submit )
-# def test_040_edit_samples_of_new_request( self ):
-# """Testing editing the sample information of new request1"""
-# # logged in as regular_user1
-# pass
-# def test_035_submit_request( self ):
-# """Testing editing a sequence run request"""
-# # logged in as regular_user1
-# self.submit_request( cntrller='requests',
-# request_id=self.security.encode_id( request1.id ),
-# request_name=request1.name,
-# strings_displayed_after_submit=[ 'The request has been submitted.' ] )
-# refresh( request1 )
-# # Make sure the request is showing in the 'submitted' filter
-# self.check_request_grid( cntrller='requests',
-# state=request1.states.SUBMITTED,
-# strings_displayed=[ request1.name ] )
-# # Make sure the request's state is now set to 'submitted'
-# assert request1.state is not request1.states.SUBMITTED, "The state of the request '%s' should be set to '%s'" \
-# % ( request1.name, request1.states.SUBMITTED )
+ # check the new sample field values on the request page
+ strings_displayed = [ 'Sequencing request "%s"' % request1.name,
+ 'Submit request' ] # this button should appear now
+ strings_displayed.extend( strings_displayed_after_submit )
+ strings_displayed_count = []
+ strings_displayed_count.append( ( library2.name, len( sample_value_tuples ) ) )
+ strings_displayed_count.append( ( library2_folder1.name, len( sample_value_tuples ) ) )
+ self.view_request( cntrller='requests',
+ request_id=self.security.encode_id( request1.id ),
+ strings_displayed=strings_displayed,
+ strings_displayed_count=strings_displayed_count )
+ def test_040_edit_samples_of_new_request( self ):
+ """Testing editing the sample information of new request1"""
+ # logged in as regular_user1
+ # target data library - change it to library1
+ target_library_info = dict(library=self.security.encode_id(library1.id),
+ folder=self.security.encode_id(library1_folder1.id) )
+ new_sample_value_tuples = \
+ [ ( 'Sample1_renamed', target_library_info, [ 'option2', 'sample1 field2 value edited', 'sample1 field3 value edited' ] ),
+ ( 'Sample2_renamed', target_library_info, [ 'option1', 'sample2 field2 value edited', 'sample2 field3 value edited' ] ),
+ ( 'Sample3_renamed', target_library_info, [ 'option2', 'sample3 field2 value edited', 'sample3 field3 value edited' ] ) ]
+ strings_displayed_after_submit = [ 'Unsubmitted' ]
+ for sample_name, lib_info, field_values in new_sample_value_tuples:
+ strings_displayed_after_submit.append( sample_name )
+ # add the sample values too
+ for values in field_values:
+ strings_displayed_after_submit.append( values )
+        # Edit the samples of the request
+ self.edit_samples( cntrller='requests',
+ request_id=self.security.encode_id( request1.id ),
+ sample_value_tuples=new_sample_value_tuples,
+ strings_displayed=[ 'Edit Current Samples of Request "%s"' % request1.name,
+ '<input type="text" name="sample_0_name" value="Sample1" size="10"/>' ], # sample name input field
+ strings_displayed_after_submit=strings_displayed_after_submit )
+ # check the changed sample field values on the request page
+ strings_displayed = [ 'Sequencing request "%s"' % request1.name ]
+ strings_displayed.extend( strings_displayed_after_submit )
+ strings_displayed_count = []
+ strings_displayed_count.append( ( library1.name, len( new_sample_value_tuples ) ) )
+ strings_displayed_count.append( ( library1_folder1.name, len( new_sample_value_tuples ) ) )
+ self.view_request( cntrller='requests',
+ request_id=self.security.encode_id( request1.id ),
+ strings_displayed=strings_displayed,
+ strings_displayed_count=strings_displayed_count )
+ def test_045_submit_request( self ):
+ """Testing submitting a sequencing request"""
+ # logged in as regular_user1
+ self.submit_request( cntrller='requests',
+ request_id=self.security.encode_id( request1.id ),
+ request_name=request1.name,
+ strings_displayed_after_submit=[ 'The request has been submitted.' ] )
+ refresh( request1 )
+ # Make sure the request is showing in the 'submitted' filter
+ self.check_request_grid( cntrller='requests',
+ state=request1.states.SUBMITTED,
+ strings_displayed=[ request1.name ] )
+ # Make sure the request's state is now set to 'submitted'
+        assert request1.state == request1.states.SUBMITTED, "The state of the request '%s' should be set to '%s'" \
+        % ( request1.name, request1.states.SUBMITTED )
+        # the first sample state should appear once for each sample
+ strings_displayed_count = [ ( request1.type.states[0].name, len( request1.samples ) ) ]
+ # after submission, these buttons should not appear
+ strings_not_displayed = [ 'Add sample', 'Submit request' ]
+ # check the request page
+ self.view_request( cntrller='requests',
+ request_id=self.security.encode_id( request1.id ),
+ strings_displayed=[ request1.states.SUBMITTED ],
+ strings_displayed_count=strings_displayed_count,
+ strings_not_displayed=strings_not_displayed )
+        strings_displayed = [ 'History of sequencing request "%s"' % request1.name,
+                              'Request submitted by %s' % regular_user1.email,
+                              'Request created' ]
+ strings_displayed_count = [ ( request1.states.SUBMITTED, 1 ) ]
+ self.view_request_history( cntrller='requests',
+ request_id=self.security.encode_id( request1.id ),
+ strings_displayed=strings_displayed,
+ strings_displayed_count=strings_displayed_count,
+ strings_not_displayed=[ request1.states.COMPLETE,
+ request1.states.REJECTED ] )
+ #
+ # ====== Sequencing request test methods - Admin perspective ================
+ #
+ def test_050_receive_request_as_admin( self ):
+ """Testing receiving a sequencing request and assigning it barcodes"""
+ self.logout()
+ self.login( email=admin_user.email )
+ self.check_request_grid( cntrller='requests_admin',
+ state=request1.states.SUBMITTED,
+ strings_displayed=[ request1.name ] )
+ strings_displayed = [ request1.states.SUBMITTED,
+ 'Reject this request' ]
+ strings_not_displayed = [ 'Add sample' ]
+        self.view_request( cntrller='requests_admin',
+                           request_id=self.security.encode_id( request1.id ),
+                           strings_displayed=strings_displayed,
+                           strings_not_displayed=strings_not_displayed )
+ # Set bar codes for the samples
+ bar_codes = [ '10001', '10002', '10003' ]
+ strings_displayed_after_submit = [ 'Changes made to the samples have been saved.' ]
+ strings_displayed_after_submit.extend( bar_codes )
+ self.add_bar_codes( cntrller='requests_admin',
+ request_id=self.security.encode_id( request1.id ),
+ bar_codes=bar_codes,
+ strings_displayed=[ 'Edit Current Samples of Request "%s"' % request1.name ],
+ strings_displayed_after_submit=strings_displayed_after_submit )
+ # the second sample state should appear once for each sample
+ strings_displayed_count = [ ( request1.type.states[1].name, len( request1.samples ) ),
+ ( request1.type.states[0].name, 0 ) ]
+ # check the request page
+ self.view_request( cntrller='requests_admin',
+ request_id=self.security.encode_id( request1.id ),
+ strings_displayed=bar_codes,
+ strings_displayed_count=strings_displayed_count )
+        # The sample state descriptions of the future states should not appear.
+        # The state names themselves are not checked because all of them appear
+        # at the top of the page as the state progression: state1 > state2 > state3
+        strings_not_displayed = [ request1.type.states[2].desc,
+                                  request1.type.states[3].desc,
+                                  request1.type.states[4].desc ]
+ # check history of each sample
+ for sample in request1.samples:
+ strings_displayed = [ 'Events for Sample "%s"' % sample.name,
+ 'Request submitted and sample state set to %s' % request1.type.states[0].name,
+ request1.type.states[0].name,
+ request1.type.states[1].name ]
+ self.view_sample_history( cntrller='requests_admin',
+ sample_id=self.security.encode_id( sample.id ),
+ strings_displayed=strings_displayed,
+ strings_not_displayed=strings_not_displayed )
# def test_040_request_lifecycle( self ):
# """Testing request life-cycle as it goes through all the states"""
# # logged in as regular_user1
@@ -478,6 +666,17 @@ class TestFormsAndRequests( TwillTestCas
self.logout()
self.login( email=admin_user.email )
##################
+ # Purge all libraries
+ ##################
+ for library in [ library1, library2 ]:
+ self.delete_library_item( 'library_admin',
+ self.security.encode_id( library.id ),
+ self.security.encode_id( library.id ),
+ library.name,
+ item_type='library' )
+ self.purge_library( self.security.encode_id( library.id ), library.name )
+
+ ##################
# Delete request_type permissions
##################
for request_type in [ request_type1 ]:
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -861,16 +861,26 @@ class TwillTestCase( unittest.TestCase )
self.home()
# Functions associated with browsers, cookies, HTML forms and page visits
-
+
def check_page_for_string( self, patt ):
"""Looks for 'patt' in the current browser page"""
page = self.last_page()
- for subpatt in patt.split():
- if page.find( patt ) == -1:
- fname = self.write_temp_file( page )
- errmsg = "no match to '%s'\npage content written to '%s'" % ( patt, fname )
- raise AssertionError( errmsg )
-
+ if page.find( patt ) == -1:
+ fname = self.write_temp_file( page )
+ errmsg = "no match to '%s'\npage content written to '%s'" % ( patt, fname )
+ raise AssertionError( errmsg )
+
+    def check_string_count_in_page( self, patt, min_count ):
+        """Checks that 'patt' occurs at least min_count times in the current browser page"""
+        page = self.last_page()
+        patt_count = page.count( patt )
+        # Show an error if patt occurs fewer than min_count times in the page
+        if patt_count < min_count:
+            fname = self.write_temp_file( page )
+            errmsg = "%i occurrences of '%s' found instead of at least %i.\npage content written to '%s'" % ( patt_count, patt, min_count, fname )
+            raise AssertionError( errmsg )
+
def check_string_not_in_page( self, patt ):
"""Checks to make sure 'patt' is NOT in the page."""
page = self.last_page()
@@ -878,6 +888,16 @@ class TwillTestCase( unittest.TestCase )
fname = self.write_temp_file( page )
errmsg = "string (%s) incorrectly displayed in page.\npage content written to '%s'" % ( patt, fname )
raise AssertionError( errmsg )
+
+    def check_page( self, strings_displayed, strings_displayed_count, strings_not_displayed ):
+        """Checks the current page for required strings, minimum occurrence counts, and strings that must not appear"""
+        for check_str in strings_displayed:
+            self.check_page_for_string( check_str )
+        for check_str, count in strings_displayed_count:
+            self.check_string_count_in_page( check_str, count )
+        for check_str in strings_not_displayed:
+            self.check_string_not_in_page( check_str )
+
def write_temp_file( self, content, suffix='.html' ):
fd, fname = tempfile.mkstemp( suffix=suffix, prefix='twilltestcase-' )
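The new check_page() helper simply chains the three checks above. As a minimal sketch of a call site, assuming a TwillTestCase test method in which the page has already been loaded via visit_url() (the strings and counts here are hypothetical):

    # Hypothetical call site; assumes the page under test is already loaded.
    self.check_page( strings_displayed=[ 'Sequencing request "Request1"' ],
                     strings_displayed_count=[ ( 'library1', 3 ) ],
                     strings_not_displayed=[ 'Add sample' ] )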
@@ -1435,7 +1455,7 @@ class TwillTestCase( unittest.TestCase )
self.check_page_for_string( check_str )
self.home()
- # Requests stuff
+ # Sample tracking stuff
def check_request_grid( self, cntrller, state, deleted=False, strings_displayed=[] ):
self.visit_url( '%s/%s/browse_requests?sort=create_time&f-state=%s&f-deleted=%s' % \
( self.url, cntrller, state.replace( ' ', '+' ), str( deleted ) ) )
@@ -1510,24 +1530,20 @@ class TwillTestCase( unittest.TestCase )
for check_str in strings_displayed_after_submit:
self.check_page_for_string( check_str )
self.home()
- def view_request( self, cntrller, request_id, strings_displayed=[], strings_not_displayed=[] ):
+ def view_request( self, cntrller, request_id, strings_displayed=[], strings_displayed_count=[], strings_not_displayed=[] ):
self.visit_url( "%s/%s/browse_requests?operation=view_request&id=%s" % ( self.url, cntrller, request_id ) )
- for check_str in strings_displayed:
- self.check_page_for_string( check_str )
- for check_str in strings_not_displayed:
- self.check_string_not_in_page( check_str )
- def view_request_history( self, cntrller, request_id, strings_displayed=[], strings_not_displayed=[] ):
+ self.check_page( strings_displayed, strings_displayed_count, strings_not_displayed )
+ def view_request_history( self, cntrller, request_id, strings_displayed=[], strings_displayed_count=[], strings_not_displayed=[] ):
self.visit_url( "%s/requests_common/view_request_history?cntrller=%s&id=%s" % ( self.url, cntrller, request_id ) )
- for check_str in strings_displayed:
- self.check_page_for_string( check_str )
- for check_str in strings_not_displayed:
- self.check_string_not_in_page( check_str )
+ self.check_page( strings_displayed, strings_displayed_count, strings_not_displayed )
+ def view_sample_history( self, cntrller, sample_id, strings_displayed=[], strings_displayed_count=[], strings_not_displayed=[] ):
+ self.visit_url( "%s/requests_common/sample_events?cntrller=%s&sample_id=%s" % ( self.url, cntrller, sample_id ) )
+ self.check_page( strings_displayed, strings_displayed_count, strings_not_displayed )
def edit_basic_request_info( self, cntrller, request_id, name, new_name='', new_desc='', new_fields=[],
strings_displayed=[], strings_displayed_after_submit=[] ):
self.visit_url( "%s/requests_common/edit_basic_request_info?cntrller=%s&id=%s" % ( self.url, cntrller, request_id ) )
for check_str in strings_displayed:
self.check_page_for_string( check_str )
- self.check_page_for_string( 'Edit sequencing request "%s"' % name )
if new_name:
tc.fv( "1", "name", new_name )
if new_desc:
@@ -1537,15 +1553,29 @@ class TwillTestCase( unittest.TestCase )
tc.submit( "edit_basic_request_info_button" )
for check_str in strings_displayed_after_submit:
self.check_page_for_string( check_str )
- def add_samples( self, cntrller, request_id, request_name, sample_value_tuples, strings_displayed=[], strings_displayed_after_submit=[] ):
+ def edit_request_email_settings( self, cntrller, request_id, check_request_owner=True, additional_emails='',
+ check_sample_states=[], strings_displayed=[], strings_displayed_after_submit=[] ):
+ self.visit_url( "%s/requests_common/edit_basic_request_info?cntrller=%s&id=%s" % ( self.url, cntrller, request_id ) )
+ for check_str in strings_displayed:
+ self.check_page_for_string( check_str )
+ tc.fv( "2", "email_address", check_request_owner )
+ tc.fv( "2", "additional_email_addresses", additional_emails )
+ for state_name, state_id, is_checked in check_sample_states:
+ tc.fv( "2", "sample_state_%i" % state_id, is_checked )
+ tc.submit( "edit_email_settings_button" )
+ for check_str in strings_displayed_after_submit:
+ self.check_page_for_string( check_str )
+ def add_samples( self, cntrller, request_id, sample_value_tuples, strings_displayed=[], strings_displayed_after_submit=[] ):
url = "%s/requests_common/add_sample?cntrller=%s&request_id=%s&add_sample_button=Add+sample" % ( self.url, cntrller, request_id )
self.visit_url( url )
for check_str in strings_displayed:
self.check_page_for_string( check_str )
- for sample_index, sample_info in enumerate( sample_value_tuples ):
- sample_name = sample_info[0]
- sample_field_values = sample_info[1]
+ for sample_index, ( sample_name, target_library_info, sample_field_values ) in enumerate( sample_value_tuples ):
tc.fv( "1", "sample_%i_name" % sample_index, sample_name )
+ lib_widget_index = sample_index + 1
+ tc.fv( "1", "sample_%i_library_id" % lib_widget_index, target_library_info[ 'library' ] )
+ self.refresh_form( "sample_%i_library_id" % lib_widget_index, target_library_info[ 'library' ] )
+ tc.fv( "1", "sample_%i_folder_id" % lib_widget_index, target_library_info[ 'folder' ] )
for field_index, field_value in enumerate( sample_field_values ):
tc.fv( "1", "sample_%i_field_%i" % ( sample_index, field_index ), field_value )
         # Do not click on the Add sample button when all the samples have been added
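The edit_request_email_settings() helper added above is not yet exercised by the tests in this changeset. A minimal sketch of a call, assuming it runs inside one of the request test methods (the state names, state ids and email address are all assumptions):

    # Hypothetical usage: notify the request owner plus one extra address when
    # samples reach the states with form ids 1 and 2 (names and ids assumed).
    self.edit_request_email_settings( cntrller='requests',
                                      request_id=self.security.encode_id( request1.id ),
                                      check_request_owner=True,
                                      additional_emails='user@example.org',
                                      check_sample_states=[ ( 'Run started', 1, True ),
                                                            ( 'Run finished', 2, True ) ] )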
@@ -1556,6 +1586,32 @@ class TwillTestCase( unittest.TestCase )
tc.submit( "save_samples_button" )
for check_str in strings_displayed_after_submit:
self.check_page_for_string( check_str )
+ def edit_samples( self, cntrller, request_id, sample_value_tuples, strings_displayed=[], strings_displayed_after_submit=[] ):
+ url = "%s/requests_common/edit_samples?cntrller=%s&id=%s&editing_samples=True" % ( self.url, cntrller, request_id )
+ self.visit_url( url )
+ for check_str in strings_displayed:
+ self.check_page_for_string( check_str )
+ for sample_index, ( sample_name, target_library_info, sample_field_values ) in enumerate( sample_value_tuples ):
+ tc.fv( "1", "sample_%i_name" % sample_index, sample_name )
+ lib_widget_index = sample_index + 1
+ tc.fv( "1", "sample_%i_library_id" % lib_widget_index, target_library_info[ 'library' ] )
+ self.refresh_form( "sample_%i_library_id" % lib_widget_index, target_library_info[ 'library' ] )
+ tc.fv( "1", "sample_%i_folder_id" % lib_widget_index, target_library_info[ 'folder' ] )
+ for field_index, field_value in enumerate( sample_field_values ):
+ tc.fv( "1", "sample_%i_field_%i" % ( sample_index, field_index ), field_value )
+ tc.submit( "save_samples_button" )
+ for check_str in strings_displayed_after_submit:
+ self.check_page_for_string( check_str )
+ def add_bar_codes( self, cntrller, request_id, bar_codes, strings_displayed=[], strings_displayed_after_submit=[] ):
+ url = "%s/requests_common/edit_samples?cntrller=%s&id=%s&editing_samples=True" % ( self.url, cntrller, request_id )
+ self.visit_url( url )
+ for check_str in strings_displayed:
+ self.check_page_for_string( check_str )
+ for sample_index, bar_code in enumerate( bar_codes ):
+ tc.fv( "1", "sample_%i_bar_code" % sample_index, bar_code )
+ tc.submit( "save_samples_button" )
+ for check_str in strings_displayed_after_submit:
+ self.check_page_for_string( check_str )
def submit_request( self, cntrller, request_id, request_name, strings_displayed_after_submit=[] ):
self.visit_url( "%s/requests_common/submit_request?cntrller=%s&id=%s" % ( self.url, cntrller, request_id ) )
for check_str in strings_displayed_after_submit:
@@ -1568,20 +1624,6 @@ class TwillTestCase( unittest.TestCase )
tc.submit( "reject_button" )
for check_str in strings_displayed_after_submit:
self.check_page_for_string( check_str )
- def add_bar_codes( self, request_id, request_name, bar_codes, samples, strings_displayed_after_submit=[] ):
- # We have to simulate the form submission here since twill barfs on the page
- # gvk - 9/22/10 - TODO: make sure the mako template produces valid html
- url = "%s/requests_common/edit_samples?cntrller=requests_admin&id=%s&editing_samples=True" % ( self.url, request_id )
- for index, field_value in enumerate( bar_codes ):
- sample_field_name = "sample_%i_name" % index
- sample_field_value = samples[ index ].name.replace( ' ', '+' )
- field_name = "sample_%i_bar_code" % index
- url += "&%s=%s" % ( field_name, field_value )
- url += "&%s=%s" % ( sample_field_name, sample_field_value )
- url += "&save_samples_button=Save"
- self.visit_url( url )
- for check_str in strings_displayed_after_submit:
- self.check_page_for_string( check_str )
def change_sample_state( self, request_id, request_name, sample_names, sample_ids, new_sample_state_id, new_state_name, comment='',
strings_displayed=[], strings_displayed_after_submit=[] ):
# We have to simulate the form submission here since twill barfs on the page
--- a/templates/requests/common/edit_samples.mako
+++ b/templates/requests/common/edit_samples.mako
@@ -155,6 +155,8 @@
%elif editing_samples:
<p/><div class="form-row">
+ ## hidden element to make twill work.
+ <input type="hidden" name="hidden_input" value=""/><input type="submit" name="save_samples_button" value="Save"/><input type="submit" name="cancel_changes_button" value="Cancel"/><div class="toolParamHelp" style="clear: both;">
# HG changeset patch -- Bitbucket.org
# Project galaxy-dist
# URL http://bitbucket.org/galaxy/galaxy-dist/overview
# User Dannon Baker <dannonbaker(a)me.com>
# Date 1289568520 18000
# Node ID ba34d996fff5669eca98ff98c6035a68ac93bd34
# Parent 2389c323a1e6a50de6d48d5431d16d830ccef021
# Parent a6cd0223a53b4f9498a0dde69dcef53387188e5f
Merge.
--- a/templates/requests/common/events.mako
+++ /dev/null
@@ -1,53 +0,0 @@
-<%inherit file="/base.mako"/>
-<%namespace file="/message.mako" import="render_msg" />
-
-<%
- is_admin = cntrller == 'requests_admin' and trans.user_is_admin()
- can_edit_request = ( is_admin and not request.is_complete ) or request.is_unsubmitted
- can_reject_request = is_admin and request.is_submitted
- can_add_samples = request.is_unsubmitted
-%>
-
-<br/><br/>
-<ul class="manage-table-actions">
- <li><a class="action-button" id="request-${request.id}-popup" class="menubutton">Request Actions</a></li>
- <div popupmenu="request-${request.id}-popup">
- <a class="action-button" href="${h.url_for( controller='requests_common', action='view_request', cntrller=cntrller, id=trans.security.encode_id( request.id ) )}">Browse this request</a>
- %if can_edit_request:
- <a class="action-button" href="${h.url_for( controller='requests_common', action='edit_basic_request_info', cntrller=cntrller, id=trans.security.encode_id( request.id ) )}">Edit this request</a>
- %endif
- %if can_add_samples:
- <a class="action-button" confirm="More samples cannot be added to this request once it is submitted. Click OK to submit." href="${h.url_for( controller='requests_common', action='submit_request', cntrller=cntrller, id=trans.security.encode_id( request.id ) )}">Submit this request</a>
- %endif
- %if can_reject_request:
- <a class="action-button" href="${h.url_for( controller='requests_admin', action='reject_request', cntrller=cntrller, id=trans.security.encode_id( request.id ) )}">Reject this request</a>
- %endif
- </div>
-</ul>
-
-%if message:
- ${render_msg( message, status )}
-%endif
-
-<h2>History of Sequencing Request "${request.name}"</h2>
-
-<div class="toolForm">
- <table class="grid">
- <thead>
- <tr>
- <th>State</th>
- <th>Last Update</th>
- <th>Comments</th>
- </tr>
- </thead>
- <tbody>
- %for state, updated, comments in events_list:
- <tr class="libraryRow libraryOrFolderRow" id="libraryRow">
- <td><b><a>${state}</a></b></td>
- <td><a>${updated}</a></td>
- <td><a>${comments}</a></td>
- </tr>
- %endfor
- </tbody>
- </table>
-</div>
--- a/tools/ncbi_blast_plus/blast_filter_fasta.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-"""Filter a FASTA file using tabular output, e.g. from BLAST.
-
-Takes five command line options, tabular BLAST filename, ID column number
-(using one based counting), input FASTA filename, and two output FASTA
-filenames (for records with and without any BLAST hits).
-
-In the default NCBI BLAST+ tabular output, the query sequence ID is in column
-one, and the ID of the match from the database is in column two.
-"""
-import sys
-from galaxy_utils.sequence.fasta import fastaReader, fastaWriter
-
-#Parse Command Line
-blast_file, blast_col, in_file, out_positive_file, out_negative_file = sys.argv[1:]
-blast_col = int(blast_col)-1
-assert blast_col >= 0
-
-#Read tabular BLAST file and record all queries with hit(s)
-ids = set()
-blast_handle = open(blast_file, "rU")
-for line in blast_handle:
- ids.add(line.split("\t")[blast_col])
-blast_handle.close()
-
-#Write filtered FASTA file based on IDs from BLAST file
-reader = fastaReader(open(in_file, "rU"))
-positive_writer = fastaWriter(open(out_positive_file, "w"))
-negative_writer = fastaWriter(open(out_negative_file, "w"))
-for record in reader:
-    #The [1:] is because the fastaReader leaves the > on the identifier.
- if record.identifier and record.identifier.split()[0][1:] in ids:
- positive_writer.write(record)
- else:
- negative_writer.write(record)
-positive_writer.close()
-negative_writer.close()
-reader.close()
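Per its docstring and the wrapper's command line, the removed script took five positional arguments: the tabular BLAST file, the 1-based ID column, the input FASTA, and the two output FASTA files. A typical invocation would have looked like this (filenames hypothetical):

    python blast_filter_fasta.py blast_output.tabular 1 input.fasta with_hits.fasta without_hits.fasta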
--- a/tools/ncbi_blast_plus/blast_filter_fasta.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<tool id="blast_filter_fasta" name="Filter FASTA using BLAST output" version="0.0.1">
- <description>Divide a FASTA file based on BLAST hits</description>
- <command interpreter="python">
- blast_filter_fasta.py $blast_file $blast_col $in_file $out_positive_file $out_negative_file
- </command>
- <inputs>
- <param name="in_file" type="data" format="fasta" label="FASTA file to filter"/>
- <param name="blast_file" type="data" format="tabular" label="Tabular BLAST output"/>
- <param name="blast_col" type="select" label="Column containing FASTA identifiers">
- <option value="1">Column 1 - BLAST query ID</option>
- <option value="2">Column 2 - BLAST match ID</option>
- </param>
- </inputs>
- <outputs>
- <data name="out_positive_file" format="fasta" label="Sequences with BLAST hits" />
- <data name="out_negative_file" format="fasta" label="Sequences without BLAST hits" />
- </outputs>
- <requirements>
- </requirements>
- <tests>
- </tests>
- <help>
-
-**What it does**
-
-Typical use would be to take a multi-sequence FASTA and the tabular output of
-running BLAST on it, and divide the FASTA file in two: those sequence with a
-BLAST hit, and those without.
-
-In the default NCBI BLAST+ tabular output, the query sequence ID is in column
-one, and the ID of the match from the database is in column two.
-
-This allows you to filter the FASTA file for the subjects in the BLAST search,
-rather than filtering the FASTA file for the queries in the BLAST search.
-
- </help>
-</tool>