galaxy-commits
Threads by month
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
November 2013
- 1 participant
- 208 discussions
commit/galaxy-central: jmchilton: Allow API driven tests via install_and_test_tool_shed_repositories framework.
by commits-noreply@bitbucket.org 22 Nov '13
by commits-noreply@bitbucket.org 22 Nov '13
22 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/b2feff0cd2e7/
Changeset: b2feff0cd2e7
User: jmchilton
Date: 2013-11-22 07:50:37
Summary: Allow API driven tests via install_and_test_tool_shed_repositories framework.
Greg/Dave - I don't have the infrastructure/knowledge to test this changeset, so sorry if it breaks something, but this is what I believe is necessary if you want to set GALAXY_TEST_DEFAULT_INTERACTOR=api or if anyone posts a tool to the tool shed that forces the issue by marking a test case with 'interactor="api"'.
Affected #: 1 file
diff -r d896ca45280fbc7764fa2fe82fe10d7537d858dc -r b2feff0cd2e7dbc3d9ba4b5bed55bd744406c6b1 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -91,6 +91,7 @@
default_galaxy_test_port_min = 10000
default_galaxy_test_port_max = 10999
default_galaxy_test_host = '127.0.0.1'
+default_galaxy_master_api_key = "123456"
# should this serve static resources (scripts, images, styles, etc.)
STATIC_ENABLED = True
@@ -792,6 +793,7 @@
# Write the embedded web application's specific configuration to a temporary file. This is necessary in order for
# the external metadata script to find the right datasets.
kwargs = dict( admin_users = 'test(a)bx.psu.edu',
+ master_api_key = default_galaxy_master_api_key,
allow_user_creation = True,
allow_user_deletion = True,
allow_library_path_paste = True,
@@ -1181,7 +1183,7 @@
test_toolbox.toolbox = app.toolbox
# Generate the test methods for this installed repository. We need to pass in True here, or it will look
# in $GALAXY_HOME/test-data for test data, which may result in missing or invalid test files.
- test_toolbox.build_tests( testing_shed_tools=True )
+ test_toolbox.build_tests( testing_shed_tools=True, master_api_key=default_galaxy_master_api_key )
# Set up nose to run the generated functional tests.
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: jmchilton: Fixes related to implicit defaults of param values.
by commits-noreply@bitbucket.org 21 Nov '13
by commits-noreply@bitbucket.org 21 Nov '13
21 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d896ca45280f/
Changeset: d896ca45280f
User: jmchilton
Date: 2013-11-22 06:37:22
Summary: Fixes related to implicit defaults of param values.
Logic errors related to them being contained in repeat blocks and to picking the top value in a select by default when no other value is marked as default.
Other small adjustments - add another sample tool demonstrating the problem and clean up error messages.
Affected #: 3 files
diff -r 1c306a40b1c3f6f3abc51f44d2bc77a5bb1d6695 -r d896ca45280fbc7764fa2fe82fe10d7537d858dc lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -78,13 +78,15 @@
if selected:
default_option = name
else:
- default_option = test_param.static_options[0]
+ first_option = test_param.static_options[0]
+ first_option_value = first_option[1]
+ default_option = first_option_value
matches_declared_value = lambda case_value: case_value == default_option
else:
# No explicit value for this param and cannot determine a
# default - give up. Previously this would just result in a key
# error exception.
- msg = "Failed to find test parameter specification required for conditional %s" % cond
+ msg = "Failed to find test parameter value specification required for conditional %s" % cond.name
raise Exception( msg )
# Check the tool's defined cases against predicate to determine
@@ -93,7 +95,9 @@
if matches_declared_value( case.value ):
return case
else:
- log.info("Failed to find case matching test parameter specification for cond %s. Remainder of test behavior is unspecified." % cond)
+ msg_template = "%s - Failed to find case matching value (%s) for test parameter specification for conditional %s. Remainder of test behavior is unspecified."
+ msg = msg_template % ( self.tool.id, declared_value, cond.name )
+ log.info( msg )
def __split_if_str( self, value ):
split = isinstance(value, str)
@@ -143,10 +147,20 @@
case_value = raw_input[ 1 ] if raw_input else None
case = self.__matching_case_for_value( value, case_value )
if case:
- expanded_value = self.__split_if_str( case.value )
- expanded_inputs[ case_context.for_state() ] = expanded_value
for input_name, input_value in case.inputs.items():
- expanded_inputs.update( self.__process_raw_inputs( { input_name: input_value }, raw_inputs, parent_context=cond_context ) )
+ case_inputs = self.__process_raw_inputs( { input_name: input_value }, raw_inputs, parent_context=cond_context )
+ expanded_inputs.update( case_inputs )
+ expanded_case_value = self.__split_if_str( case.value )
+ if case_value is not None:
+ # A bit tricky here - we are growing inputs with value
+ # that may be implicit (i.e. not defined by user just
+ # a default defined in tool). So we do not want to grow
+ # expanded_inputs and risk repeat block viewing this
+ # as a new instance with value defined and hence enter
+ # an infinite loop - hence the "case_value is not None"
+ # check.
+ expanded_inputs[ case_context.for_state() ] = expanded_case_value
+
elif isinstance( value, grouping.Repeat ):
repeat_index = 0
while True:
diff -r 1c306a40b1c3f6f3abc51f44d2bc77a5bb1d6695 -r d896ca45280fbc7764fa2fe82fe10d7537d858dc test/functional/tools/implicit_default_conds.xml
--- /dev/null
+++ b/test/functional/tools/implicit_default_conds.xml
@@ -0,0 +1,49 @@
+<tool id="implicit_default_conds" name="implicit_default_conds">
+ <command>
+ echo "$param_group[0].p1.val" >> $out_file1;
+ echo "$param_group[0].p2.val" >> $out_file1;
+ </command>
+ <inputs>
+ <repeat name="param_group" title="Param Group" min="1">
+ <conditional name="p1">
+ <param name="type" type="select">
+ <option value="default">THE DEFAULT</option>
+ <option value="different">A different value</option>
+ </param>
+ <when value="default">
+ <param name="val" value="p1default" type="text" />
+ </when>
+ <when value="different">
+ <param name="val" value="p1different" type="text" />
+ </when>
+ </conditional>
+ <conditional name="p2">
+ <param name="type" type="select">
+ <option value="default">THE DEFAULT</option>
+ <option value="different" selected="true">A different value</option>
+ </param>
+ <when value="default">
+ <param name="val" value="p2default" type="text" />
+ </when>
+ <when value="different">
+ <param name="val" value="p2different" type="text" />
+ </when>
+ </conditional>
+ <param name="int_param" type="integer" value="8" />
+ </repeat>
+ </inputs>
+ <outputs>
+ <data name="out_file1" format="txt" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="int_param" value="7" /><!-- Specify at least one value in repeat to force one instance. -->
+ <output name="out_file1">
+ <assert_contents>
+ <has_line line="p1default" />
+ <has_line line="p2different" />
+ </assert_contents>
+ </output>
+ </test>
+ </tests>
+</tool>
diff -r 1c306a40b1c3f6f3abc51f44d2bc77a5bb1d6695 -r d896ca45280fbc7764fa2fe82fe10d7537d858dc test/functional/tools/samples_tool_conf.xml
--- a/test/functional/tools/samples_tool_conf.xml
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -13,4 +13,5 @@
<tool file="output_order.xml" /><tool file="disambiguate_repeats.xml" /><tool file="min_repeat.xml" />
+ <tool file="implicit_default_conds.xml" /></toolbox>
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: jmchilton: Clean up tool framework test tools.
by commits-noreply@bitbucket.org 21 Nov '13
by commits-noreply@bitbucket.org 21 Nov '13
21 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/1c306a40b1c3/
Changeset: 1c306a40b1c3
User: jmchilton
Date: 2013-11-22 04:17:04
Summary: Clean up tool framework test tools.
Add example of nested conditional disambiguation.
Affected #: 3 files
diff -r 4615f7424dcb4d2b61be5f388c761f45a1302a03 -r 1c306a40b1c3f6f3abc51f44d2bc77a5bb1d6695 test/functional/tools/composite.xml
--- a/test/functional/tools/composite.xml
+++ b/test/functional/tools/composite.xml
@@ -1,5 +1,4 @@
-<tool id="velvet_inspired" name="velvet_inspired" version="1.0.0">
- <description>Velvet sequence assembler for very short reads</description>
+<tool id="composite" version="1.0.0"><command>cat '$input.extra_files_path/Sequences' > $output</command><inputs><param name="input" type="data" format="velvet" label="Velvet Dataset" help="Prepared by velveth."/>
diff -r 4615f7424dcb4d2b61be5f388c761f45a1302a03 -r 1c306a40b1c3f6f3abc51f44d2bc77a5bb1d6695 test/functional/tools/disambiguate_cond.xml
--- a/test/functional/tools/disambiguate_cond.xml
+++ b/test/functional/tools/disambiguate_cond.xml
@@ -1,6 +1,4 @@
-<tool id="handle_cond" name="handle_cond">
- <description>tail-to-head</description>
- <!-- -->
+<tool id="disambiguate_cond" name="disambiguate_cond"><command>
echo "$p1.p1v $p2.p2v $p3.p3v" > $out_file1; cat "$files.p4.file" >> $out_file1;
</command>
@@ -61,5 +59,29 @@
</assert_contents></output></test>
+ <test>
+ <conditional name="p1">
+ <param name="use" value="False"/>
+ </conditional>
+ <conditional name="p2">
+ <param name="use" value="True"/>
+ </conditional>
+ <conditional name="p3">
+ <param name="use" value="False"/>
+ </conditional>
+ <conditional name="files">
+ <param name="attach_files" value="True" />
+ <conditional name="p4">
+ <param name="use" value="True"/>
+ <param name="file" value="simple_line_alternative.txt" />
+ </conditional>
+ </conditional>
+ <output name="out_file1">
+ <assert_contents>
+ <has_line line="7 4 7" />
+ <has_line line="This is a different line of text." />
+ </assert_contents>
+ </output>
+ </test></tests></tool>
diff -r 4615f7424dcb4d2b61be5f388c761f45a1302a03 -r 1c306a40b1c3f6f3abc51f44d2bc77a5bb1d6695 test/functional/tools/multi_repeats.xml
--- a/test/functional/tools/multi_repeats.xml
+++ b/test/functional/tools/multi_repeats.xml
@@ -1,5 +1,4 @@
-<tool id="multirepeat" name="multirepeat">
- <description>tail-to-head</description>
+<tool id="multi_repeats" name="multi_repeats"><command>
cat $input1 #for $q in $queries# ${q.input2} #end for# #for $q in $more_queries# ${q.more_queries_input} #end for# > $out_file1
</command>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: jmchilton: Fix API handling of tools containing repeat statement with min set.
by commits-noreply@bitbucket.org 21 Nov '13
by commits-noreply@bitbucket.org 21 Nov '13
21 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/4615f7424dcb/
Changeset: 4615f7424dcb
User: jmchilton
Date: 2013-11-22 03:18:47
Summary: Fix API handling of tools containing repeat statement with min set.
Affected #: 3 files
diff -r f302dae15be9185139d8cc17cd0208a448e7e9b0 -r 4615f7424dcb4d2b61be5f388c761f45a1302a03 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1982,6 +1982,7 @@
group_errors = [ ]
any_group_errors = False
rep_index = 0
+ del group_state[:] # Clear prepopulated defaults if repeat.min set.
while True:
rep_name = "%s_%d" % ( key, rep_index )
if not any( [ incoming_key.startswith(rep_name) for incoming_key in incoming.keys() ] ):
diff -r f302dae15be9185139d8cc17cd0208a448e7e9b0 -r 4615f7424dcb4d2b61be5f388c761f45a1302a03 test/functional/tools/min_repeat.xml
--- /dev/null
+++ b/test/functional/tools/min_repeat.xml
@@ -0,0 +1,27 @@
+<tool id="min_repeat" name="min_repeat">
+ <command>
+ cat #for $q in $queries# ${q.input} #end for# > $out_file1 ;
+ cat #for $q in $queries2# ${q.input2} #end for# > $out_file2
+ </command>
+ <inputs>
+ <repeat name="queries" title="Dataset" min="1">
+ <param name="input" type="data" label="Select" />
+ </repeat>
+ <repeat name="queries2" title="Dataset" min="1">
+ <param name="input2" type="data" label="Select" />
+ </repeat>
+ </inputs>
+ <outputs>
+ <data name="out_file1" format="txt" />
+ <data name="out_file2" format="txt" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="input" value="simple_line.txt"/>
+ <param name="input" value="simple_line.txt"/>
+ <param name="input2" value="simple_line_alternative.txt"/>
+ <output name="out_file1" file="simple_line_x2.txt"/>
+ <output name="out_file2" file="simple_line_alternative.txt"/>
+ </test>
+ </tests>
+</tool>
diff -r f302dae15be9185139d8cc17cd0208a448e7e9b0 -r 4615f7424dcb4d2b61be5f388c761f45a1302a03 test/functional/tools/samples_tool_conf.xml
--- a/test/functional/tools/samples_tool_conf.xml
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -12,4 +12,5 @@
<tool file="metadata.xml" /><tool file="output_order.xml" /><tool file="disambiguate_repeats.xml" />
+ <tool file="min_repeat.xml" /></toolbox>
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
44 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/fd2ae1a670a6/
Changeset: fd2ae1a670a6
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Refactor twill tool test driver to encode hda id earlier.
And pass an encoded id to display action in root controller.
Would definitely like to simplify or better yet remove root.display if not used anywhere else, it should be coming in through dataset controller.
Affected #: 2 files
diff -r 5471fa863712ba3f89ad9adceb194daf0baeafa2 -r fd2ae1a670a6caf79ad3e2baa3d3d4242bef27d6 lib/galaxy/webapps/galaxy/controllers/root.py
--- a/lib/galaxy/webapps/galaxy/controllers/root.py
+++ b/lib/galaxy/webapps/galaxy/controllers/root.py
@@ -172,12 +172,16 @@
## ---- Dataset display / editing ----------------------------------------
@web.expose
- def display( self, trans, id=None, hid=None, tofile=None, toext=".txt", **kwd ):
+ def display( self, trans, id=None, hid=None, tofile=None, toext=".txt", encoded_id=None, **kwd ):
"""Returns data directly into the browser.
Sets the mime-type according to the extension.
+
+ Used by the twill tool test driver - used anywhere else? Would like to drop hid
+ argument and path if unneeded now. Likewise, would like to drop encoded_id=XXX
+ and use assume id is encoded (likely id wouldn't be coming in encoded if this
+ is used anywhere else though.)
"""
- #TODO: unused?
#TODO: unencoded id
if hid is not None:
try:
@@ -192,6 +196,8 @@
else:
raise Exception( "No dataset with hid '%d'" % hid )
else:
+ if encoded_id and not id:
+ id = trans.security.decode_id( encoded_id )
try:
data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
except:
diff -r 5471fa863712ba3f89ad9adceb194daf0baeafa2 -r fd2ae1a670a6caf79ad3e2baa3d3d4242bef27d6 test/base/twilltestcase.py
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -826,8 +826,10 @@
if ext != test_ext:
raise AssertionError( errmsg )
else:
+ hda_id = self.security.encode_id( elem.get( 'id' ) )
self.home()
- self.visit_page( "display?hid=" + hid )
+ # See note in controllers/root.py about encoded_id.
+ self.visit_page( "display?encoded_id=%s" % hda_id )
data = self.last_page()
if attributes is not None and attributes.get( "assert_list", None ) is not None:
try:
@@ -918,7 +920,7 @@
if base_name is None:
base_name = os.path.split(file_name)[-1]
temp_name = self.makeTfname(fname=base_name)
- self.visit_url( "%s/datasets/%s/display/%s" % ( self.url, self.security.encode_id( hda_id ), base_name ) )
+ self.visit_url( "%s/datasets/%s/display/%s" % ( self.url, hda_id, base_name ) )
data = self.last_page()
file( temp_name, 'wb' ).write( data )
if self.keepOutdir > '':
https://bitbucket.org/galaxy/galaxy-central/commits/5c5eaf0c19da/
Changeset: 5c5eaf0c19da
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Refactor shared code in functional_tests.py into a method.
Affected #: 1 file
diff -r fd2ae1a670a6caf79ad3e2baa3d3d4242bef27d6 -r 5c5eaf0c19da5b347d05a1f664e1fc394a680735 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -415,6 +415,16 @@
os.environ[ 'GALAXY_TEST_SAVE' ] = galaxy_test_save
# Pass in through script setenv, will leave a copy of ALL test validate files
os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host
+
+ def _run_functional_test( testing_shed_tools=None ):
+ functional.test_toolbox.toolbox = app.toolbox
+ functional.test_toolbox.build_tests( testing_shed_tools=testing_shed_tools )
+ test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() )
+ test_config.configure( sys.argv )
+ result = run_tests( test_config )
+ success = result.wasSuccessful()
+ return success
+
if testing_migrated_tools or testing_installed_tools:
shed_tools_dict = {}
if testing_migrated_tools:
@@ -438,12 +448,7 @@
for installed_tool_panel_config in installed_tool_panel_configs:
tool_configs.append( installed_tool_panel_config )
app.toolbox = tools.ToolBox( tool_configs, app.config.tool_path, app )
- functional.test_toolbox.toolbox = app.toolbox
- functional.test_toolbox.build_tests( testing_shed_tools=True )
- test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() )
- test_config.configure( sys.argv )
- result = run_tests( test_config )
- success = result.wasSuccessful()
+ success = _run_functional_test( testing_shed_tools=True )
try:
os.unlink( tmp_tool_panel_conf )
except:
@@ -453,14 +458,9 @@
except:
log.info( "Unable to remove file: %s" % galaxy_tool_shed_test_file )
else:
- functional.test_toolbox.toolbox = app.toolbox
- functional.test_toolbox.build_tests()
if galaxy_test_file_dir:
os.environ[ 'GALAXY_TEST_FILE_DIR' ] = galaxy_test_file_dir
- test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() )
- test_config.configure( sys.argv )
- result = run_tests( test_config )
- success = result.wasSuccessful()
+ success = _run_functional_test( )
except:
log.exception( "Failure running tests" )
https://bitbucket.org/galaxy/galaxy-central/commits/faf49c10ea03/
Changeset: faf49c10ea03
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Small simplification to test/functional/test_toolbox.py.
Affected #: 1 file
diff -r 5c5eaf0c19da5b347d05a1f664e1fc394a680735 -r faf49c10ea033cf7fcde67ebfd426d9e344ea396 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -12,10 +12,12 @@
class ToolTestCase( TwillTestCase ):
"""Abstract test case that runs tests based on a `galaxy.tools.test.ToolTest`"""
- def do_it( self, testdef, shed_tool_id=None ):
+ def do_it( self, testdef ):
"""
Run through a tool test case.
"""
+ shed_tool_id = self.shed_tool_id
+
self.__handle_test_def_errors( testdef )
latest_history = self.__setup_test_history()
@@ -206,22 +208,21 @@
for i, tool_id in enumerate( toolbox.tools_by_id ):
tool = toolbox.get_tool( tool_id )
if tool.tests:
+ shed_tool_id = None if not testing_shed_tools else tool.id
# Create a new subclass of ToolTestCase, dynamically adding methods
# named test_tool_XXX that run each test defined in the tool config.
name = "TestForTool_" + tool.id.replace( ' ', '_' )
baseclasses = ( ToolTestCase, )
namespace = dict()
for j, testdef in enumerate( tool.tests ):
- def make_test_method( td, shed_tool_id=None ):
+ def make_test_method( td ):
def test_tool( self ):
- self.do_it( td, shed_tool_id=shed_tool_id )
+ self.do_it( td )
return test_tool
- if testing_shed_tools:
- test_method = make_test_method( testdef, shed_tool_id=tool.id )
- else:
- test_method = make_test_method( testdef )
+ test_method = make_test_method( testdef )
test_method.__doc__ = "%s ( %s ) > %s" % ( tool.name, tool.id, testdef.name )
namespace[ 'test_tool_%06d' % j ] = test_method
+ namespace[ 'shed_tool_id' ] = shed_tool_id
# The new.classobj function returns a new class object, with name name, derived
# from baseclasses (which should be a tuple of classes) and with namespace dict.
new_class_obj = new.classobj( name, baseclasses, namespace )
https://bitbucket.org/galaxy/galaxy-central/commits/65c2a41d4a33/
Changeset: 65c2a41d4a33
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Refactor test data input setup logic out of TestToolBox into ToolTestBuilder.
Simplifying test_toolbox.py will hopefully make it easier to provide an alternative API driven option.
Affected #: 2 files
diff -r faf49c10ea033cf7fcde67ebfd426d9e344ea396 -r 65c2a41d4a336d2bf5cf40d8b980ecb2e99cb163 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -6,6 +6,9 @@
log = logging.getLogger( __name__ )
+DEFAULT_FTYPE = 'auto'
+DEFAULT_DBKEY = 'hg17'
+
class ToolTestBuilder( object ):
"""
@@ -29,6 +32,31 @@
self.__parse_elem( test_elem, i )
+ def test_data( self ):
+ """
+ Iterator over metadata representing the required files for upload.
+ """
+ for fname, extra in self.required_files:
+ data_dict = dict(
+ fname=fname,
+ metadata=extra.get( 'metadata', [] ),
+ composite_data=extra.get( 'composite_data', [] ),
+ ftype=extra.get( 'ftype', DEFAULT_FTYPE ),
+ dbkey=extra.get( 'dbkey', DEFAULT_DBKEY ),
+ )
+ edit_attributes = extra.get( 'edit_attributes', [] )
+
+ #currently only renaming is supported
+ for edit_att in edit_attributes:
+ if edit_att.get( 'type', None ) == 'name':
+ new_name = edit_att.get( 'value', None )
+ assert new_name, 'You must supply the new dataset name as the value tag of the edit_attributes tag'
+ data_dict['name'] = new_name
+ else:
+ raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) )
+
+ yield data_dict
+
def __parse_elem( self, test_elem, i ):
# Composite datasets need a unique name: each test occurs in a fresh
# history, but we'll keep it unique per set of tests - use i (test #)
diff -r faf49c10ea033cf7fcde67ebfd426d9e344ea396 -r 65c2a41d4a336d2bf5cf40d8b980ecb2e99cb163 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -57,32 +57,20 @@
def __setup_test_data( self, testdef, shed_tool_id ):
# Upload any needed files
- for fname, extra in testdef.required_files:
- metadata = extra.get( 'metadata', [] )
- composite_data = extra.get( 'composite_data', [] )
- self.upload_file( fname,
- ftype=extra.get( 'ftype', 'auto' ),
- dbkey=extra.get( 'dbkey', 'hg17' ),
- metadata=metadata,
- composite_data=composite_data,
+ for test_data in testdef.test_data():
+ self.upload_file( test_data['fname'],
+ ftype=test_data['ftype'],
+ dbkey=test_data['dbkey'],
+ metadata=test_data['metadata'],
+ composite_data=test_data['composite_data'],
shed_tool_id=shed_tool_id )
-
- print "Uploaded file: ", fname, ", ftype: ", extra.get( 'ftype', 'auto' ), ", extra: ", extra
- #Post upload attribute editing
- edit_attributes = extra.get( 'edit_attributes', [] )
-
- #currently only renaming is supported
- for edit_att in edit_attributes:
- if edit_att.get( 'type', None ) == 'name':
- new_name = edit_att.get( 'value', None )
- assert new_name, 'You must supply the new dataset name as the value tag of the edit_attributes tag'
- hda_id = self.get_history_as_data_list()[-1].get( 'id' )
- try:
- self.edit_hda_attribute_info( hda_id=str(hda_id), new_name=new_name )
- except:
- print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, new_name)
- else:
- raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) )
+ name = test_data.get('name', None)
+ if name:
+ hda_id = self.get_history_as_data_list()[-1].get( 'id' )
+ try:
+ self.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name )
+ except:
+ print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name)
def __run_tool( self, testdef ):
# We need to handle the case where we've uploaded a valid compressed file since the upload
https://bitbucket.org/galaxy/galaxy-central/commits/3b4ed0725792/
Changeset: 3b4ed0725792
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Refactor data staging and tool running logic out of test/functional/test_toolbox.py.
In a new twill specific interactor class, idea here is to proceed by implementing a matching API interactor class.
Affected #: 1 file
diff -r 65c2a41d4a336d2bf5cf40d8b980ecb2e99cb163 -r 3b4ed072579212764322cbdd8440ecbaad836ca9 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -20,16 +20,20 @@
self.__handle_test_def_errors( testdef )
- latest_history = self.__setup_test_history()
+ galaxy_interactor = GalaxyInteractorTwill( self )
- self.__setup_test_data( testdef, shed_tool_id )
+ test_history = galaxy_interactor.new_history()
- data_list = self.__run_tool( testdef )
+ # Upload any needed files
+ for test_data in testdef.test_data():
+ galaxy_interactor.stage_data( test_data, shed_tool_id )
+
+ data_list = galaxy_interactor.run_tool( testdef )
self.assertTrue( data_list )
self.__verify_outputs( testdef, shed_tool_id, data_list )
- self.__delete_history( latest_history )
+ galaxy_interactor.delete_history( test_history )
def __handle_test_def_errors(self, testdef):
# If the test generation had an error, raise
@@ -39,81 +43,6 @@
else:
raise Exception( "Test parse failure" )
- def __setup_test_history( self ):
- # Start with a new history
- self.logout()
- self.login( email='test(a)bx.psu.edu' )
- admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email == 'test(a)bx.psu.edu' ).one()
- self.new_history()
- latest_history = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted == False,
- galaxy.model.History.table.c.user_id == admin_user.id ) ) \
- .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
- .first()
- assert latest_history is not None, "Problem retrieving latest_history from database"
- if len( self.get_history_as_data_list() ) > 0:
- raise AssertionError("ToolTestCase.do_it failed")
- return latest_history
-
- def __setup_test_data( self, testdef, shed_tool_id ):
- # Upload any needed files
- for test_data in testdef.test_data():
- self.upload_file( test_data['fname'],
- ftype=test_data['ftype'],
- dbkey=test_data['dbkey'],
- metadata=test_data['metadata'],
- composite_data=test_data['composite_data'],
- shed_tool_id=shed_tool_id )
- name = test_data.get('name', None)
- if name:
- hda_id = self.get_history_as_data_list()[-1].get( 'id' )
- try:
- self.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name )
- except:
- print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name)
-
- def __run_tool( self, testdef ):
- # We need to handle the case where we've uploaded a valid compressed file since the upload
- # tool will have uncompressed it on the fly.
- all_inputs = {}
- for name, value, _ in testdef.inputs:
- all_inputs[ name ] = value
-
- # See if we have a grouping.Repeat element
- repeat_name = None
- for input_name, input_value in testdef.tool.inputs_by_page[0].items():
- if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: # default behavior is to test 1 repeat, for backwards compatibility
- if not input_value.min: # If input_value.min == 1, the element is already on the page don't add new element.
- repeat_name = input_name
- break
-
- #check if we need to verify number of outputs created dynamically by tool
- if testdef.tool.force_history_refresh:
- job_finish_by_output_count = len( self.get_history_as_data_list() )
- else:
- job_finish_by_output_count = False
-
- # Do the first page
- page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[0], all_inputs)
-
- # Run the tool
- self.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs )
- print "page_inputs (0)", page_inputs
- # Do other pages if they exist
- for i in range( 1, testdef.tool.npages ):
- page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[i], all_inputs)
- self.submit_form( **page_inputs )
- print "page_inputs (%i)" % i, page_inputs
-
- # Check the results ( handles single or multiple tool outputs ). Make sure to pass the correct hid.
- # The output datasets from the tool should be in the same order as the testdef.outputs.
- data_list = None
- while data_list is None:
- data_list = self.get_history_as_data_list()
- if job_finish_by_output_count and len( testdef.outputs ) > ( len( data_list ) - job_finish_by_output_count ):
- data_list = None
- return data_list
-
def __verify_outputs( self, testdef, shed_tool_id, data_list ):
maxseconds = testdef.maxseconds
@@ -135,8 +64,87 @@
print >>sys.stderr, self.get_job_stderr( elem.get( 'id' ), format=True )
raise
- def __delete_history( self, latest_history ):
- self.delete_history( id=self.security.encode_id( latest_history.id ) )
+
+class GalaxyInteractorTwill( object ):
+
+ def __init__( self, twill_test_case ):
+ self.twill_test_case = twill_test_case
+
+ def stage_data( self, test_data, shed_tool_id ):
+ self.twill_test_case.upload_file( test_data['fname'],
+ ftype=test_data['ftype'],
+ dbkey=test_data['dbkey'],
+ metadata=test_data['metadata'],
+ composite_data=test_data['composite_data'],
+ shed_tool_id=shed_tool_id )
+ name = test_data.get('name', None)
+ if name:
+ hda_id = self.twill_test_case.get_history_as_data_list()[-1].get( 'id' )
+ try:
+ self.twill_test_case.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name )
+ except:
+ print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name)
+
+ def run_tool( self, testdef ):
+ # We need to handle the case where we've uploaded a valid compressed file since the upload
+ # tool will have uncompressed it on the fly.
+ all_inputs = {}
+ for name, value, _ in testdef.inputs:
+ all_inputs[ name ] = value
+
+ # See if we have a grouping.Repeat element
+ repeat_name = None
+ for input_name, input_value in testdef.tool.inputs_by_page[0].items():
+ if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: # default behavior is to test 1 repeat, for backwards compatibility
+ if not input_value.min: # If input_value.min == 1, the element is already on the page don't add new element.
+ repeat_name = input_name
+ break
+
+ #check if we need to verify number of outputs created dynamically by tool
+ if testdef.tool.force_history_refresh:
+ job_finish_by_output_count = len( self.twill_test_case.get_history_as_data_list() )
+ else:
+ job_finish_by_output_count = False
+
+ # Do the first page
+ page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[0], all_inputs)
+
+ # Run the tool
+ self.twill_test_case.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs )
+ print "page_inputs (0)", page_inputs
+ # Do other pages if they exist
+ for i in range( 1, testdef.tool.npages ):
+ page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[i], all_inputs)
+ self.twill_test_case.submit_form( **page_inputs )
+ print "page_inputs (%i)" % i, page_inputs
+
+ # Check the results ( handles single or multiple tool outputs ). Make sure to pass the correct hid.
+ # The output datasets from the tool should be in the same order as the testdef.outputs.
+ data_list = None
+ while data_list is None:
+ data_list = self.twill_test_case.get_history_as_data_list()
+ if job_finish_by_output_count and len( testdef.outputs ) > ( len( data_list ) - job_finish_by_output_count ):
+ data_list = None
+ return data_list
+
+ def new_history( self ):
+ # Start with a new history
+ self.twill_test_case.logout()
+ self.twill_test_case.login( email='test(a)bx.psu.edu' )
+ admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email == 'test(a)bx.psu.edu' ).one()
+ self.twill_test_case.new_history()
+ latest_history = sa_session.query( galaxy.model.History ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
+ .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
+ .first()
+ assert latest_history is not None, "Problem retrieving latest_history from database"
+ if len( self.twill_test_case.get_history_as_data_list() ) > 0:
+ raise AssertionError("ToolTestCase.do_it failed")
+ return latest_history
+
+ def delete_history( self, latest_history ):
+ self.twill_test_case.delete_history( id=self.twill_test_case.security.encode_id( latest_history.id ) )
def __expand_grouping( self, tool_inputs, declared_inputs, prefix='' ):
expanded_inputs = {}
https://bitbucket.org/galaxy/galaxy-central/commits/c31db7ed3f0a/
Changeset: c31db7ed3f0a
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Upload a tool test's inputs in parallel.
Probably small optimization in most cases since tests generally have few inputs.
Affected #: 2 files
diff -r 3b4ed072579212764322cbdd8440ecbaad836ca9 -r c31db7ed3f0a1970e45fc5fbe085e07d024cc30a test/base/twilltestcase.py
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -197,7 +197,7 @@
filename = os.path.join( *path )
file(filename, 'wt').write(buffer.getvalue())
- def upload_file( self, filename, ftype='auto', dbkey='unspecified (?)', space_to_tab=False, metadata=None, composite_data=None, shed_tool_id=None ):
+ def upload_file( self, filename, ftype='auto', dbkey='unspecified (?)', space_to_tab=False, metadata=None, composite_data=None, name=None, shed_tool_id=None, wait=True ):
"""
Uploads a file. If shed_tool_id has a value, we're testing tools migrated from the distribution to the tool shed,
so the tool-data directory of test data files is contained in the installed tool shed repository.
@@ -218,12 +218,19 @@
filename = self.get_filename( filename, shed_tool_id=shed_tool_id )
tc.formfile( "tool_form", "file_data", filename )
tc.fv( "tool_form", "space_to_tab", space_to_tab )
+ if name:
+ # NAME is a hidden form element, so the following prop must
+ # set to use it.
+ tc.config("readonly_controls_writeable", 1)
+ tc.fv( "tool_form", "NAME", name )
tc.submit( "runtool_btn" )
self.home()
except AssertionError, err:
errmsg = "Uploading file resulted in the following exception. Make sure the file (%s) exists. " % filename
errmsg += str( err )
raise AssertionError( errmsg )
+ if not wait:
+ return
# Make sure every history item has a valid hid
hids = self.get_hids_in_history()
for hid in hids:
diff -r 3b4ed072579212764322cbdd8440ecbaad836ca9 -r c31db7ed3f0a1970e45fc5fbe085e07d024cc30a test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -25,8 +25,11 @@
test_history = galaxy_interactor.new_history()
# Upload any needed files
+ upload_waits = []
for test_data in testdef.test_data():
- galaxy_interactor.stage_data( test_data, shed_tool_id )
+ upload_waits.append( galaxy_interactor.stage_data_async( test_data, shed_tool_id ) )
+ for upload_wait in upload_waits:
+ upload_wait()
data_list = galaxy_interactor.run_tool( testdef )
self.assertTrue( data_list )
@@ -70,20 +73,24 @@
def __init__( self, twill_test_case ):
self.twill_test_case = twill_test_case
- def stage_data( self, test_data, shed_tool_id ):
+ def stage_data_async( self, test_data, shed_tool_id, async=True ):
+ name = test_data.get( 'name', None )
+ if name:
+ async = False
self.twill_test_case.upload_file( test_data['fname'],
ftype=test_data['ftype'],
dbkey=test_data['dbkey'],
metadata=test_data['metadata'],
composite_data=test_data['composite_data'],
- shed_tool_id=shed_tool_id )
- name = test_data.get('name', None)
+ shed_tool_id=shed_tool_id,
+ wait=(not async) )
if name:
hda_id = self.twill_test_case.get_history_as_data_list()[-1].get( 'id' )
try:
self.twill_test_case.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name )
except:
print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name)
+ return lambda: self.twill_test_case.wait()
def run_tool( self, testdef ):
# We need to handle the case where we've uploaded a valid compressed file since the upload
https://bitbucket.org/galaxy/galaxy-central/commits/e2df40d1cc38/
Changeset: e2df40d1cc38
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Inject master API key into functional test Galaxy.
Override or set external key with GALAXY_TEST_MASTER_API_KEY.
Affected #: 2 files
diff -r c31db7ed3f0a1970e45fc5fbe085e07d024cc30a -r e2df40d1cc38c08f6bb229c28f3a044a51013618 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -62,6 +62,7 @@
default_galaxy_test_port_max = 9999
default_galaxy_locales = 'en'
default_galaxy_test_file_dir = "test-data"
+default_galaxy_master_key = "TEST123"
migrated_tool_panel_config = 'migrated_tools_conf.xml'
installed_tool_panel_configs = [ 'shed_tool_conf.xml' ]
@@ -296,6 +297,7 @@
pass
# ---- Build Application --------------------------------------------------
+ master_api_key = os.environ.get( "GALAXY_TEST_MASTER_API_KEY", default_galaxy_master_key )
app = None
if start_server:
kwargs = dict( admin_users='test(a)bx.psu.edu',
@@ -322,7 +324,9 @@
tool_parse_help=False,
update_integrated_tool_panel=False,
use_heartbeat=False,
- user_library_import_dir=user_library_import_dir )
+ user_library_import_dir=user_library_import_dir,
+ master_api_key=master_api_key,
+ )
if psu_production:
kwargs[ 'global_conf' ] = None
if not database_connection.startswith( 'sqlite://' ):
@@ -418,7 +422,10 @@
def _run_functional_test( testing_shed_tools=None ):
functional.test_toolbox.toolbox = app.toolbox
- functional.test_toolbox.build_tests( testing_shed_tools=testing_shed_tools )
+ functional.test_toolbox.build_tests(
+ testing_shed_tools=testing_shed_tools,
+ master_api_key=master_api_key,
+ )
test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
result = run_tests( test_config )
diff -r c31db7ed3f0a1970e45fc5fbe085e07d024cc30a -r e2df40d1cc38c08f6bb229c28f3a044a51013618 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -191,7 +191,7 @@
return expanded_inputs
-def build_tests( testing_shed_tools=False ):
+def build_tests( testing_shed_tools=False, master_api_key=None ):
"""
If the module level variable `toolbox` is set, generate `ToolTestCase`
classes for all of its tests and put them into this modules globals() so
@@ -226,6 +226,7 @@
test_method.__doc__ = "%s ( %s ) > %s" % ( tool.name, tool.id, testdef.name )
namespace[ 'test_tool_%06d' % j ] = test_method
namespace[ 'shed_tool_id' ] = shed_tool_id
+ namespace[ 'master_api_key' ] = master_api_key
# The new.classobj function returns a new class object, with name name, derived
# from baseclasses (which should be a tuple of classes) and with namespace dict.
new_class_obj = new.classobj( name, baseclasses, namespace )
https://bitbucket.org/galaxy/galaxy-central/commits/3f105f22fc57/
Changeset: 3f105f22fc57
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Tool functional tests - refactor reusable function for verifying output
... from verify_dataset_correctness. Introduce simple, extensible abstraction for fetching outputs for verification in twilltestcase.py. Will want to reuse the core verification of the displayed dataset in the API driven case and will not need or be able to use the extra cruft related to checking UI, job state, etc...
Also small twilltestcase code structure updates for Python 2.6+.
Affected #: 1 file
diff -r e2df40d1cc38c08f6bb229c28f3a044a51013618 -r 3f105f22fc5781d89421f5bcb6d9a0bd5825e9b0 test/base/twilltestcase.py
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -833,70 +833,83 @@
if ext != test_ext:
raise AssertionError( errmsg )
else:
+ # See not in controllers/root.py about encoded_id.
hda_id = self.security.encode_id( elem.get( 'id' ) )
- self.home()
- # See not in controllers/root.py about encoded_id.
- self.visit_page( "display?encoded_id=%s" % hda_id )
+ self.verify_hid( filename, hid=hid, hda_id=hda_id, attributes=attributes, shed_tool_id=shed_tool_id)
+
+ def verify_hid( self, filename, hda_id, attributes, shed_tool_id, hid="", dataset_fetcher=None):
+ dataset_fetcher = dataset_fetcher or self.__default_dataset_fetcher()
+ data = dataset_fetcher( hda_id )
+ if attributes is not None and attributes.get( "assert_list", None ) is not None:
+ try:
+ verify_assertions(data, attributes["assert_list"])
+ except AssertionError, err:
+ errmsg = 'History item %s different than expected\n' % (hid)
+ errmsg += str( err )
+ raise AssertionError( errmsg )
+ if filename is not None:
+ local_name = self.get_filename( filename, shed_tool_id=shed_tool_id )
+ temp_name = self.makeTfname(fname=filename)
+ file( temp_name, 'wb' ).write( data )
+
+ # if the server's env has GALAXY_TEST_SAVE, save the output file to that dir
+ if self.keepOutdir:
+ ofn = os.path.join( self.keepOutdir, os.path.basename( local_name ) )
+ log.debug( 'keepoutdir: %s, ofn: %s', self.keepOutdir, ofn )
+ try:
+ shutil.copy( temp_name, ofn )
+ except Exception, exc:
+ error_log_msg = ( 'TwillTestCase could not save output file %s to %s: ' % ( temp_name, ofn ) )
+ error_log_msg += str( exc )
+ log.error( error_log_msg, exc_info=True )
+ else:
+ log.debug('## GALAXY_TEST_SAVE=%s. saved %s' % ( self.keepOutdir, ofn ) )
+ try:
+ if attributes is None:
+ attributes = {}
+ compare = attributes.get( 'compare', 'diff' )
+ if attributes.get( 'ftype', None ) == 'bam':
+ local_fh, temp_name = self._bam_to_sam( local_name, temp_name )
+ local_name = local_fh.name
+ extra_files = attributes.get( 'extra_files', None )
+ if compare == 'diff':
+ self.files_diff( local_name, temp_name, attributes=attributes )
+ elif compare == 're_match':
+ self.files_re_match( local_name, temp_name, attributes=attributes )
+ elif compare == 're_match_multiline':
+ self.files_re_match_multiline( local_name, temp_name, attributes=attributes )
+ elif compare == 'sim_size':
+ delta = attributes.get('delta', '100')
+ s1 = len(data)
+ s2 = os.path.getsize(local_name)
+ if abs(s1 - s2) > int(delta):
+ raise Exception( 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name, s1, local_name, s2, delta) )
+ elif compare == "contains":
+ self.files_contains( local_name, temp_name, attributes=attributes )
+ else:
+ raise Exception( 'Unimplemented Compare type: %s' % compare )
+ if extra_files:
+ self.verify_extra_files_content( extra_files, hda_id, shed_tool_id=shed_tool_id, dataset_fetcher=dataset_fetcher )
+ except AssertionError, err:
+ errmsg = 'History item %s different than expected, difference (using %s):\n' % ( hid, compare )
+ errmsg += "( %s v. %s )\n" % ( local_name, temp_name )
+ errmsg += str( err )
+ raise AssertionError( errmsg )
+ finally:
+ os.remove( temp_name )
+
+ def __default_dataset_fetcher( self ):
+ def fetcher( hda_id, filename=None ):
+ if filename is None:
+ page_url = "display?encoded_id=%s" % hda_id
+ self.home() # I assume this is not needed.
+ else:
+ page_url = "datasets/%s/display/%s" % ( hda_id, filename )
+ self.visit_page( page_url )
data = self.last_page()
- if attributes is not None and attributes.get( "assert_list", None ) is not None:
- try:
- verify_assertions(data, attributes["assert_list"])
- except AssertionError, err:
- errmsg = 'History item %s different than expected\n' % (hid)
- errmsg += str( err )
- raise AssertionError( errmsg )
- if filename is not None:
- local_name = self.get_filename( filename, shed_tool_id=shed_tool_id )
- temp_name = self.makeTfname(fname=filename)
- file( temp_name, 'wb' ).write( data )
+ return data
- # if the server's env has GALAXY_TEST_SAVE, save the output file to that dir
- if self.keepOutdir:
- ofn = os.path.join( self.keepOutdir, os.path.basename( local_name ) )
- log.debug( 'keepoutdir: %s, ofn: %s', self.keepOutdir, ofn )
- try:
- shutil.copy( temp_name, ofn )
- except Exception, exc:
- error_log_msg = ( 'TwillTestCase could not save output file %s to %s: ' % ( temp_name, ofn ) )
- error_log_msg += str( exc )
- log.error( error_log_msg, exc_info=True )
- else:
- log.debug('## GALAXY_TEST_SAVE=%s. saved %s' % ( self.keepOutdir, ofn ) )
- try:
- # have to nest try-except in try-finally to handle 2.4
- try:
- if attributes is None:
- attributes = {}
- compare = attributes.get( 'compare', 'diff' )
- if attributes.get( 'ftype', None ) == 'bam':
- local_fh, temp_name = self._bam_to_sam( local_name, temp_name )
- local_name = local_fh.name
- extra_files = attributes.get( 'extra_files', None )
- if compare == 'diff':
- self.files_diff( local_name, temp_name, attributes=attributes )
- elif compare == 're_match':
- self.files_re_match( local_name, temp_name, attributes=attributes )
- elif compare == 're_match_multiline':
- self.files_re_match_multiline( local_name, temp_name, attributes=attributes )
- elif compare == 'sim_size':
- delta = attributes.get('delta', '100')
- s1 = len(data)
- s2 = os.path.getsize(local_name)
- if abs(s1 - s2) > int(delta):
- raise Exception( 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name, s1, local_name, s2, delta) )
- elif compare == "contains":
- self.files_contains( local_name, temp_name, attributes=attributes )
- else:
- raise Exception( 'Unimplemented Compare type: %s' % compare )
- if extra_files:
- self.verify_extra_files_content( extra_files, elem.get( 'id' ), shed_tool_id=shed_tool_id )
- except AssertionError, err:
- errmsg = 'History item %s different than expected, difference (using %s):\n' % ( hid, compare )
- errmsg += "( %s v. %s )\n" % ( local_name, temp_name )
- errmsg += str( err )
- raise AssertionError( errmsg )
- finally:
- os.remove( temp_name )
+ return fetcher
def _bam_to_sam( self, local_name, temp_name ):
temp_local = tempfile.NamedTemporaryFile( suffix='.sam', prefix='local_bam_converted_to_sam_' )
@@ -909,7 +922,7 @@
os.remove( temp_name )
return temp_local, temp_temp
- def verify_extra_files_content( self, extra_files, hda_id, shed_tool_id=None ):
+ def verify_extra_files_content( self, extra_files, hda_id, dataset_fetcher, shed_tool_id=None ):
files_list = []
for extra_type, extra_value, extra_name, extra_attributes in extra_files:
if extra_type == 'file':
@@ -920,44 +933,42 @@
else:
raise ValueError( 'unknown extra_files type: %s' % extra_type )
for filename, filepath, attributes in files_list:
- self.verify_composite_datatype_file_content( filepath, hda_id, base_name=filename, attributes=attributes, shed_tool_id=shed_tool_id )
+ self.verify_composite_datatype_file_content( filepath, hda_id, base_name=filename, attributes=attributes, dataset_fetcher=dataset_fetcher, shed_tool_id=shed_tool_id )
- def verify_composite_datatype_file_content( self, file_name, hda_id, base_name=None, attributes=None, shed_tool_id=None ):
+ def verify_composite_datatype_file_content( self, file_name, hda_id, base_name=None, attributes=None, dataset_fetcher=None, shed_tool_id=None ):
+ dataset_fetcher = dataset_fetcher or self.__default_dataset_fetcher()
local_name = self.get_filename( file_name, shed_tool_id=shed_tool_id )
if base_name is None:
base_name = os.path.split(file_name)[-1]
temp_name = self.makeTfname(fname=base_name)
- self.visit_url( "%s/datasets/%s/display/%s" % ( self.url, hda_id, base_name ) )
- data = self.last_page()
+ data = dataset_fetcher( hda_id, base_name )
file( temp_name, 'wb' ).write( data )
if self.keepOutdir > '':
ofn = os.path.join(self.keepOutdir, base_name)
shutil.copy(temp_name, ofn)
log.debug('## GALAXY_TEST_SAVE=%s. saved %s' % (self.keepOutdir, ofn))
try:
- # have to nest try-except in try-finally to handle 2.4
- try:
- if attributes is None:
- attributes = {}
- compare = attributes.get( 'compare', 'diff' )
- if compare == 'diff':
- self.files_diff( local_name, temp_name, attributes=attributes )
- elif compare == 're_match':
- self.files_re_match( local_name, temp_name, attributes=attributes )
- elif compare == 're_match_multiline':
- self.files_re_match_multiline( local_name, temp_name, attributes=attributes )
- elif compare == 'sim_size':
- delta = attributes.get('delta', '100')
- s1 = len(data)
- s2 = os.path.getsize(local_name)
- if abs(s1 - s2) > int(delta):
- raise Exception( 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name, s1, local_name, s2, delta) )
- else:
- raise Exception( 'Unimplemented Compare type: %s' % compare )
- except AssertionError, err:
- errmsg = 'Composite file (%s) of History item %s different than expected, difference (using %s):\n' % ( base_name, hda_id, compare )
- errmsg += str( err )
- raise AssertionError( errmsg )
+ if attributes is None:
+ attributes = {}
+ compare = attributes.get( 'compare', 'diff' )
+ if compare == 'diff':
+ self.files_diff( local_name, temp_name, attributes=attributes )
+ elif compare == 're_match':
+ self.files_re_match( local_name, temp_name, attributes=attributes )
+ elif compare == 're_match_multiline':
+ self.files_re_match_multiline( local_name, temp_name, attributes=attributes )
+ elif compare == 'sim_size':
+ delta = attributes.get('delta', '100')
+ s1 = len(data)
+ s2 = os.path.getsize(local_name)
+ if abs(s1 - s2) > int(delta):
+ raise Exception( 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name, s1, local_name, s2, delta) )
+ else:
+ raise Exception( 'Unimplemented Compare type: %s' % compare )
+ except AssertionError, err:
+ errmsg = 'Composite file (%s) of History item %s different than expected, difference (using %s):\n' % ( base_name, hda_id, compare )
+ errmsg += str( err )
+ raise AssertionError( errmsg )
finally:
os.remove( temp_name )
https://bitbucket.org/galaxy/galaxy-central/commits/a35a8198b8fd/
Changeset: a35a8198b8fd
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Outline new API based galaxy interactor for functional tests.
Add mechanism to tool test parser code to determine which Galaxy interactor should be used ('twill' or 'api' are current options). This maybe a stop gap to force usage of the API interactor until that becomes the default or may prove in the long term to be essential if there are certain tools that will always require Twill-specific functionality or if new browser-based JavaScript (e.g. w/selenium) are implemented. An interactor value can be specified at the tests level and/or at the level of individual test elements in the tool XML (using the 'interactor' attribute on either element).
The current default interactor is 'twill'. The default interactor app-wide can be overridden with the GALAXY_TEST_DEFAULT_INTERACTOR environment variable.
Affected #: 3 files
diff -r 3f105f22fc5781d89421f5bcb6d9a0bd5825e9b0 -r a35a8198b8fd1a0fdea1183ca299fa6ceb41d060 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -52,7 +52,7 @@
from galaxy.tools.parameters.output import ToolOutputActionGroup
from galaxy.tools.parameters.validation import LateValidationError
from galaxy.tools.filters import FilterFactory
-from galaxy.tools.test import ToolTestBuilder
+from galaxy.tools.test import parse_tests_elem
from galaxy.util import listify, parse_xml, rst_to_html, string_as_bool, string_to_object, xml_text, xml_to_string
from galaxy.util.bunch import Bunch
from galaxy.util.expressions import ExpressionContext
@@ -1222,7 +1222,7 @@
tests_elem = root.find( "tests" )
if tests_elem:
try:
- self.parse_tests( tests_elem )
+ self.tests = parse_tests_elem( self, tests_elem )
except:
log.exception( "Failed to parse tool tests" )
else:
@@ -1564,16 +1564,6 @@
log.error( "Traceback: %s" % trace_msg )
return return_level
- def parse_tests( self, tests_elem ):
- """
- Parse any "<test>" elements, create a `ToolTestBuilder` for each and
- store in `self.tests`.
- """
- self.tests = []
- for i, test_elem in enumerate( tests_elem.findall( 'test' ) ):
- test = ToolTestBuilder( self, test_elem, i )
- self.tests.append( test )
-
def parse_input_page( self, input_elem, enctypes ):
"""
Parse a page of inputs. This basically just calls 'parse_input_elem',
diff -r 3f105f22fc5781d89421f5bcb6d9a0bd5825e9b0 -r a35a8198b8fd1a0fdea1183ca299fa6ceb41d060 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -1,3 +1,4 @@
+import os
import os.path
from parameters import basic
from parameters import grouping
@@ -8,6 +9,21 @@
DEFAULT_FTYPE = 'auto'
DEFAULT_DBKEY = 'hg17'
+DEFAULT_INTERACTOR = "twill" # Default mechanism test code uses for interacting with Galaxy instance.
+
+
+def parse_tests_elem(tool, tests_elem):
+ """
+ Build ToolTestBuilder objects for each "<test>" elements and
+ return default interactor (if any).
+ """
+ default_interactor = os.environ.get( 'GALAXY_TEST_DEFAULT_INTERACTOR', DEFAULT_INTERACTOR )
+ tests_default_interactor = tests_elem.get( 'interactor', default_interactor )
+ tests = []
+ for i, test_elem in enumerate( tests_elem.findall( 'test' ) ):
+ test = ToolTestBuilder( tool, test_elem, i, default_interactor=tests_default_interactor )
+ tests.append( test )
+ return tests
class ToolTestBuilder( object ):
@@ -17,7 +33,7 @@
doing dynamic tests in this way allows better integration)
"""
- def __init__( self, tool, test_elem, i ):
+ def __init__( self, tool, test_elem, i, default_interactor ):
name = test_elem.get( 'name', 'Test-%d' % (i + 1) )
maxseconds = int( test_elem.get( 'maxseconds', '120' ) )
@@ -30,7 +46,7 @@
self.error = False
self.exception = None
- self.__parse_elem( test_elem, i )
+ self.__parse_elem( test_elem, i, default_interactor )
def test_data( self ):
"""
@@ -57,12 +73,18 @@
yield data_dict
- def __parse_elem( self, test_elem, i ):
+ def __parse_elem( self, test_elem, i, default_interactor ):
# Composite datasets need a unique name: each test occurs in a fresh
# history, but we'll keep it unique per set of tests - use i (test #)
# and composite_data_names_counter (instance per test #)
composite_data_names_counter = 0
try:
+ # Mechanism test code uses for interacting with Galaxy instance,
+ # until 'api' is the default switch this to API to use its new
+ # features. Once 'api' is the default set to 'twill' to use legacy
+ # features or workarounds.
+ self.interactor = test_elem.get( 'interactor', default_interactor )
+
for param_elem in test_elem.findall( "param" ):
attrib = dict( param_elem.attrib )
if 'values' in attrib:
diff -r 3f105f22fc5781d89421f5bcb6d9a0bd5825e9b0 -r a35a8198b8fd1a0fdea1183ca299fa6ceb41d060 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -1,6 +1,8 @@
import sys
import new
+import os
from galaxy.tools.parameters import grouping
+from galaxy.util import string_as_bool
from base.twilltestcase import TwillTestCase
import galaxy.model
from galaxy.model.orm import and_, desc
@@ -20,7 +22,7 @@
self.__handle_test_def_errors( testdef )
- galaxy_interactor = GalaxyInteractorTwill( self )
+ galaxy_interactor = self.__galaxy_interactor( testdef )
test_history = galaxy_interactor.new_history()
@@ -38,6 +40,11 @@
galaxy_interactor.delete_history( test_history )
+ def __galaxy_interactor( self, testdef ):
+ interactor_key = testdef.interactor
+ interactor_class = GALAXY_INTERACTORS[ interactor_key ]
+ return interactor_class( self )
+
def __handle_test_def_errors(self, testdef):
# If the test generation had an error, raise
if testdef.error:
@@ -68,6 +75,25 @@
raise
+class GalaxyInteractorApi( object ):
+
+ def __init__( self, twill_test_case ):
+ self.twill_test_case = twill_test_case
+ self.master_api_key = twill_test_case.master_api_key
+
+ def new_history( self ):
+ return None
+
+ def stage_data_async( self, test_data, shed_tool_id, async=True ):
+ return lambda: True
+
+ def run_tool( self, testdef ):
+ return []
+
+ def delete_history( self, history ):
+ return None
+
+
class GalaxyInteractorTwill( object ):
def __init__( self, twill_test_case ):
@@ -231,3 +257,9 @@
# from baseclasses (which should be a tuple of classes) and with namespace dict.
new_class_obj = new.classobj( name, baseclasses, namespace )
G[ name ] = new_class_obj
+
+
+GALAXY_INTERACTORS = {
+ 'api': GalaxyInteractorApi,
+ 'twill': GalaxyInteractorTwill,
+}
https://bitbucket.org/galaxy/galaxy-central/commits/15c6170f58c8/
Changeset: 15c6170f58c8
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Twill-less tool tests initial implementation.
Implement GalaxyInteractorApi, is working for simple tests.
Lots left to do likely - conditionals, repeats, composite uploads, pages(?).
Introduces dependency on Python requests package - subsequent changeset will provide urllib2 based fallback if requests unavailable.
Affected #: 1 file
diff -r a35a8198b8fd1a0fdea1183ca299fa6ceb41d060 -r 15c6170f58c87ed719efb37b62c9eab1c2956102 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -7,6 +7,8 @@
import galaxy.model
from galaxy.model.orm import and_, desc
from galaxy.model.mapping import context as sa_session
+from simplejson import dumps
+import requests
toolbox = None
@@ -29,14 +31,14 @@
# Upload any needed files
upload_waits = []
for test_data in testdef.test_data():
- upload_waits.append( galaxy_interactor.stage_data_async( test_data, shed_tool_id ) )
+ upload_waits.append( galaxy_interactor.stage_data_async( test_data, test_history, shed_tool_id ) )
for upload_wait in upload_waits:
upload_wait()
- data_list = galaxy_interactor.run_tool( testdef )
+ data_list = galaxy_interactor.run_tool( testdef, test_history )
self.assertTrue( data_list )
- self.__verify_outputs( testdef, shed_tool_id, data_list )
+ self.__verify_outputs( testdef, test_history, shed_tool_id, data_list, galaxy_interactor )
galaxy_interactor.delete_history( test_history )
@@ -53,53 +55,167 @@
else:
raise Exception( "Test parse failure" )
- def __verify_outputs( self, testdef, shed_tool_id, data_list ):
+ def __verify_outputs( self, testdef, history, shed_tool_id, data_list, galaxy_interactor ):
maxseconds = testdef.maxseconds
- elem_index = 0 - len( testdef.outputs )
+ output_index = 0 - len( testdef.outputs )
for output_tuple in testdef.outputs:
# Get the correct hid
- elem = data_list[ elem_index ]
- self.assertTrue( elem is not None )
- self.__verify_output( output_tuple, shed_tool_id, elem, maxseconds=maxseconds )
- elem_index += 1
-
- def __verify_output( self, output_tuple, shed_tool_id, elem, maxseconds ):
+ output_data = data_list[ output_index ]
+ self.assertTrue( output_data is not None )
name, outfile, attributes = output_tuple
- elem_hid = elem.get( 'hid' )
- try:
- self.verify_dataset_correctness( outfile, hid=elem_hid, attributes=attributes, shed_tool_id=shed_tool_id )
- except Exception:
- print >>sys.stderr, self.get_job_stdout( elem.get( 'id' ), format=True )
- print >>sys.stderr, self.get_job_stderr( elem.get( 'id' ), format=True )
- raise
+ galaxy_interactor.verify_output( history, output_data, outfile, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds )
+ output_index += 1
class GalaxyInteractorApi( object ):
def __init__( self, twill_test_case ):
self.twill_test_case = twill_test_case
- self.master_api_key = twill_test_case.master_api_key
+ self.api_url = "%s/api" % twill_test_case.url.rstrip("/")
+ self.api_key = self.__get_user_key( twill_test_case.master_api_key )
+ self.uploads = {}
+
+ def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ):
+ hid = output_data.get( 'id' )
+ try:
+ fetcher = self.__dataset_fetcher( history_id )
+ self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id )
+ except Exception:
+ ## TODO: Print this!
+ # print >>sys.stderr, self.twill_test_case.get_job_stdout( output_data.get( 'id' ), format=True )
+ ## TODO: Print this!
+ # print >>sys.stderr, self.twill_test_case.get_job_stderr( output_data.get( 'id' ), format=True )
+ raise
def new_history( self ):
- return None
+ history_json = self.__post( "histories", {"name": "test_history"} ).json()
+ return history_json[ 'id' ]
- def stage_data_async( self, test_data, shed_tool_id, async=True ):
- return lambda: True
+ def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ):
+ fname = test_data[ 'fname' ]
+ file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id )
+ name = test_data.get( 'name', None )
+ if not name:
+ name = os.path.basename( file_name )
+ tool_input = {
+ "file_type": test_data[ 'ftype' ],
+ "dbkey": test_data[ 'dbkey' ], # TODO: Handle it! Doesn't work if undefined, does seem to in Twill.
+ "files_0|NAME": name,
+ "files_0|type": "upload_dataset",
+ }
+ files = {
+ "files_0|file_data": open( file_name, 'rb')
+ }
+ submit_response = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files ).json()
+ dataset = submit_response["outputs"][0]
+ #raise Exception(str(dataset))
+ hid = dataset['id']
+ self.uploads[ fname ] = {"src": "hda", "id": hid}
+ return self.__wait_for_history( history_id )
- def run_tool( self, testdef ):
- return []
+ def run_tool( self, testdef, history_id ):
+ # We need to handle the case where we've uploaded a valid compressed file since the upload
+ # tool will have uncompressed it on the fly.
+ all_inputs = {}
+ for name, value, _ in testdef.inputs:
+ all_inputs[ name ] = value
+
+ for key, value in all_inputs.iteritems():
+ # TODO: Restrict this to param inputs.
+ if value in self.uploads:
+ all_inputs[key] = self.uploads[ value ]
+
+ # TODO: Handle repeats.
+ # TODO: Handle pages.
+ # TODO: Handle force_history_refresh
+ datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=all_inputs )
+ self.__wait_for_history( history_id )() # TODO: Remove and respect maxseconds!
+ return datasets.json()[ 'outputs' ]
+
+ def output_hid( self, output_data ):
+ return output_data[ 'id' ]
def delete_history( self, history ):
return None
+ def __wait_for_history( self, history_id ):
+ def wait():
+ while True:
+ history_json = self.__get( "histories/%s" % history_id ).json()
+ state = history_json[ 'state' ]
+ if state == 'ok':
+ #raise Exception(str(self.__get( self.__get( "histories/%s/contents" % history_id ).json()[0]['url'] ).json() ) )
+ #raise Exception(str(self.__get( self.__get( "histories/%s/contents" % history_id ).json()[0]['url'] ).json() ) )
+ break
+ elif state == 'error':
+ raise Exception("History in error state.")
+ return wait
+
+ def __submit_tool( self, history_id, tool_id, tool_input, extra_data={}, files=None ):
+ data = dict(
+ history_id=history_id,
+ tool_id=tool_id,
+ inputs=dumps( tool_input ),
+ **extra_data
+ )
+ return self.__post( "tools", files=files, data=data )
+
+ def __get_user_key( self, admin_key ):
+ all_users = self.__get( 'users', key=admin_key ).json()
+ try:
+ test_user = [ user for user in all_users if user["email"] == 'test(a)bx.psu.edu' ][0]
+ except IndexError:
+ data = dict(
+ email='test(a)bx.psu.edu',
+ password='testuser',
+ username='admin-user',
+ )
+ test_user = self.__post( 'users', data, key=admin_key ).json()
+ return self.__post( "users/%s/api_key" % test_user['id'], key=admin_key ).json()
+
+ def __dataset_fetcher( self, history_id ):
+ def fetcher( hda_id, base_name=None ):
+ url = "histories/%s/contents/%s/display" % (history_id, hda_id)
+ if base_name:
+ url += "&filename=%s" % base_name
+ return self.__get( url ).text
+
+ return fetcher
+
+ def __post( self, path, data={}, files=None, key=None):
+ if not key:
+ key = self.api_key
+ data = data.copy()
+ data['key'] = key
+ return requests.post( "%s/%s" % (self.api_url, path), data=data, files=files )
+
+ def __get( self, path, data={}, key=None ):
+ if not key:
+ key = self.api_key
+ data = data.copy()
+ data['key'] = key
+ if path.startswith("/api"):
+ path = path[ len("/api"): ]
+ url = "%s/%s" % (self.api_url, path)
+ return requests.get( url, params=data )
+
class GalaxyInteractorTwill( object ):
def __init__( self, twill_test_case ):
self.twill_test_case = twill_test_case
- def stage_data_async( self, test_data, shed_tool_id, async=True ):
+ def verify_output( self, history, output_data, outfile, attributes, shed_tool_id, maxseconds ):
+ hid = output_data.get( 'hid' )
+ try:
+ self.twill_test_case.verify_dataset_correctness( outfile, hid=hid, attributes=attributes, shed_tool_id=shed_tool_id )
+ except Exception:
+ print >>sys.stderr, self.twill_test_case.get_job_stdout( output_data.get( 'id' ), format=True )
+ print >>sys.stderr, self.twill_test_case.get_job_stderr( output_data.get( 'id' ), format=True )
+ raise
+
+ def stage_data_async( self, test_data, history, shed_tool_id, async=True ):
name = test_data.get( 'name', None )
if name:
async = False
@@ -118,7 +234,7 @@
print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name)
return lambda: self.twill_test_case.wait()
- def run_tool( self, testdef ):
+ def run_tool( self, testdef, test_history ):
# We need to handle the case where we've uploaded a valid compressed file since the upload
# tool will have uncompressed it on the fly.
all_inputs = {}
@@ -179,6 +295,9 @@
def delete_history( self, latest_history ):
self.twill_test_case.delete_history( id=self.twill_test_case.security.encode_id( latest_history.id ) )
+ def output_hid( self, output_data ):
+ return output_data.get( 'hid' )
+
def __expand_grouping( self, tool_inputs, declared_inputs, prefix='' ):
expanded_inputs = {}
for key, value in tool_inputs.items():
https://bitbucket.org/galaxy/galaxy-central/commits/1411df5a5fcc/
Changeset: 1411df5a5fcc
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Twill-less tool tests - allow tests with GALAXY_TEST_EXTERNAL...
... if GALAXY_TEST_EXTERNAL is ever fixed.
Affected #: 2 files
diff -r 15c6170f58c87ed719efb37b62c9eab1c2956102 -r 1411df5a5fcc2686512b5560435279f305fc755a scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -63,6 +63,7 @@
default_galaxy_locales = 'en'
default_galaxy_test_file_dir = "test-data"
default_galaxy_master_key = "TEST123"
+default_galaxy_user_key = None
migrated_tool_panel_config = 'migrated_tools_conf.xml'
installed_tool_panel_configs = [ 'shed_tool_conf.xml' ]
@@ -425,6 +426,7 @@
functional.test_toolbox.build_tests(
testing_shed_tools=testing_shed_tools,
master_api_key=master_api_key,
+ user_api_key=os.environ.get( "GALAXY_TEST_USER_API_KEY", default_galaxy_user_key ),
)
test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
diff -r 15c6170f58c87ed719efb37b62c9eab1c2956102 -r 1411df5a5fcc2686512b5560435279f305fc755a test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -73,7 +73,7 @@
def __init__( self, twill_test_case ):
self.twill_test_case = twill_test_case
self.api_url = "%s/api" % twill_test_case.url.rstrip("/")
- self.api_key = self.__get_user_key( twill_test_case.master_api_key )
+ self.api_key = self.__get_user_key( twill_test_case.user_api_key, twill_test_case.master_api_key )
self.uploads = {}
def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ):
@@ -161,7 +161,9 @@
)
return self.__post( "tools", files=files, data=data )
- def __get_user_key( self, admin_key ):
+ def __get_user_key( self, user_key, admin_key ):
+ if user_key:
+ return user_key
all_users = self.__get( 'users', key=admin_key ).json()
try:
test_user = [ user for user in all_users if user["email"] == 'test(a)bx.psu.edu' ][0]
@@ -336,7 +338,7 @@
return expanded_inputs
-def build_tests( testing_shed_tools=False, master_api_key=None ):
+def build_tests( testing_shed_tools=False, master_api_key=None, user_api_key=None ):
"""
If the module level variable `toolbox` is set, generate `ToolTestCase`
classes for all of its tests and put them into this modules globals() so
@@ -372,6 +374,7 @@
namespace[ 'test_tool_%06d' % j ] = test_method
namespace[ 'shed_tool_id' ] = shed_tool_id
namespace[ 'master_api_key' ] = master_api_key
+ namespace[ 'user_api_key' ] = user_api_key
# The new.classobj function returns a new class object, with name name, derived
# from baseclasses (which should be a tuple of classes) and with namespace dict.
new_class_obj = new.classobj( name, baseclasses, namespace )
https://bitbucket.org/galaxy/galaxy-central/commits/2dcc0a0e3732/
Changeset: 2dcc0a0e3732
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Move test_toolbox's expand_grouping out into tool/test.py.
Will be merged with related tree code already in test.py in a subsequent changeset.
Affected #: 2 files
diff -r 1411df5a5fcc2686512b5560435279f305fc755a -r 2dcc0a0e3732cf32b603279fdc443c6a2a9d9ae8 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -73,6 +73,43 @@
yield data_dict
+ def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ):
+ expanded_inputs = {}
+ for key, value in tool_inputs.items():
+ if isinstance( value, grouping.Conditional ):
+ if prefix:
+ new_prefix = "%s|%s" % ( prefix, value.name )
+ else:
+ new_prefix = value.name
+ for i, case in enumerate( value.cases ):
+ if declared_inputs[ value.test_param.name ] == case.value:
+ if isinstance(case.value, str):
+ expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value.split( "," )
+ else:
+ expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value
+ for input_name, input_value in case.inputs.items():
+ expanded_inputs.update( self.expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) )
+ elif isinstance( value, grouping.Repeat ):
+ for repeat_index in xrange( 0, 1 ): # need to allow for and figure out how many repeats we have
+ for r_name, r_value in value.inputs.iteritems():
+ new_prefix = "%s_%d" % ( value.name, repeat_index )
+ if prefix:
+ new_prefix = "%s|%s" % ( prefix, new_prefix )
+ expanded_inputs.update( self.expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) )
+ elif value.name not in declared_inputs:
+ print "%s not declared in tool test, will not change default value." % value.name
+ elif isinstance(declared_inputs[value.name], str):
+ if prefix:
+ expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name].split(",")
+ else:
+ expanded_inputs[value.name] = declared_inputs[value.name].split(",")
+ else:
+ if prefix:
+ expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name]
+ else:
+ expanded_inputs[value.name] = declared_inputs[value.name]
+ return expanded_inputs
+
def __parse_elem( self, test_elem, i, default_interactor ):
# Composite datasets need a unique name: each test occurs in a fresh
# history, but we'll keep it unique per set of tests - use i (test #)
diff -r 1411df5a5fcc2686512b5560435279f305fc755a -r 2dcc0a0e3732cf32b603279fdc443c6a2a9d9ae8 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -258,14 +258,14 @@
job_finish_by_output_count = False
# Do the first page
- page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[0], all_inputs)
+ page_inputs = testdef.expand_grouping(testdef.tool.inputs_by_page[0], all_inputs)
# Run the tool
self.twill_test_case.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs )
print "page_inputs (0)", page_inputs
# Do other pages if they exist
for i in range( 1, testdef.tool.npages ):
- page_inputs = self.__expand_grouping(testdef.tool.inputs_by_page[i], all_inputs)
+ page_inputs = testdef.expand_grouping(testdef.tool.inputs_by_page[i], all_inputs)
self.twill_test_case.submit_form( **page_inputs )
print "page_inputs (%i)" % i, page_inputs
@@ -300,43 +300,6 @@
def output_hid( self, output_data ):
return output_data.get( 'hid' )
- def __expand_grouping( self, tool_inputs, declared_inputs, prefix='' ):
- expanded_inputs = {}
- for key, value in tool_inputs.items():
- if isinstance( value, grouping.Conditional ):
- if prefix:
- new_prefix = "%s|%s" % ( prefix, value.name )
- else:
- new_prefix = value.name
- for i, case in enumerate( value.cases ):
- if declared_inputs[ value.test_param.name ] == case.value:
- if isinstance(case.value, str):
- expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value.split( "," )
- else:
- expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value
- for input_name, input_value in case.inputs.items():
- expanded_inputs.update( self.__expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) )
- elif isinstance( value, grouping.Repeat ):
- for repeat_index in xrange( 0, 1 ): # need to allow for and figure out how many repeats we have
- for r_name, r_value in value.inputs.iteritems():
- new_prefix = "%s_%d" % ( value.name, repeat_index )
- if prefix:
- new_prefix = "%s|%s" % ( prefix, new_prefix )
- expanded_inputs.update( self.__expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) )
- elif value.name not in declared_inputs:
- print "%s not declared in tool test, will not change default value." % value.name
- elif isinstance(declared_inputs[value.name], str):
- if prefix:
- expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name].split(",")
- else:
- expanded_inputs[value.name] = declared_inputs[value.name].split(",")
- else:
- if prefix:
- expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name]
- else:
- expanded_inputs[value.name] = declared_inputs[value.name]
- return expanded_inputs
-
def build_tests( testing_shed_tools=False, master_api_key=None, user_api_key=None ):
"""
https://bitbucket.org/galaxy/galaxy-central/commits/0435cbff42d1/
Changeset: 0435cbff42d1
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Simplify logic in tool test expand_grouping.
Affected #: 1 file
diff -r 2dcc0a0e3732cf32b603279fdc443c6a2a9d9ae8 -r 0435cbff42d144e90cf750851f3998b3217a4891 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -76,17 +76,13 @@
def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ):
expanded_inputs = {}
for key, value in tool_inputs.items():
+ expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name)
if isinstance( value, grouping.Conditional ):
- if prefix:
- new_prefix = "%s|%s" % ( prefix, value.name )
- else:
- new_prefix = value.name
+ new_prefix = expanded_key
for i, case in enumerate( value.cases ):
if declared_inputs[ value.test_param.name ] == case.value:
- if isinstance(case.value, str):
- expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value.split( "," )
- else:
- expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = case.value
+ expanded_value = self.__split_if_str(case.value)
+ expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value
for input_name, input_value in case.inputs.items():
expanded_inputs.update( self.expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) )
elif isinstance( value, grouping.Repeat ):
@@ -98,18 +94,17 @@
expanded_inputs.update( self.expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) )
elif value.name not in declared_inputs:
print "%s not declared in tool test, will not change default value." % value.name
- elif isinstance(declared_inputs[value.name], str):
- if prefix:
- expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name].split(",")
- else:
- expanded_inputs[value.name] = declared_inputs[value.name].split(",")
else:
- if prefix:
- expanded_inputs["%s|%s" % ( prefix, value.name ) ] = declared_inputs[value.name]
- else:
- expanded_inputs[value.name] = declared_inputs[value.name]
+ value = self.__split_if_str(declared_inputs[value.name])
+ expanded_inputs[expanded_key] = value
return expanded_inputs
+ def __split_if_str( self, value ):
+ split = isinstance(value, str)
+ if split:
+ value = value.split(",")
+ return value
+
def __parse_elem( self, test_elem, i, default_interactor ):
# Composite datasets need a unique name: each test occurs in a fresh
# history, but we'll keep it unique per set of tests - use i (test #)
https://bitbucket.org/galaxy/galaxy-central/commits/8b99c7bc5e11/
Changeset: 8b99c7bc5e11
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Tool functional tests - allow unspecified default conditional values.
Fix should apply to both twill and API variants. For now just grabbing first option in select list, a subsequent changeset adds much richer options including handling of booleans and options labeled as default.
Affected #: 1 file
diff -r 0435cbff42d144e90cf750851f3998b3217a4891 -r 8b99c7bc5e11cd7c452c2ad86f951109b495271c lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -73,18 +73,29 @@
yield data_dict
+ def __matching_case( self, cond, declared_inputs ):
+ param = cond.test_param
+ declared_value = declared_inputs.get( param.name, None )
+ for i, case in enumerate( cond.cases ):
+ if declared_value and (case.value == declared_value):
+ return case
+ if not declared_value:
+ # TODO: Default might not be top value, fix this.
+ return case
+ print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( param.name, declared_value )
+
def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ):
expanded_inputs = {}
for key, value in tool_inputs.items():
expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name)
if isinstance( value, grouping.Conditional ):
new_prefix = expanded_key
- for i, case in enumerate( value.cases ):
- if declared_inputs[ value.test_param.name ] == case.value:
- expanded_value = self.__split_if_str(case.value)
- expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value
- for input_name, input_value in case.inputs.items():
- expanded_inputs.update( self.expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) )
+ case = self.__matching_case( value, declared_inputs )
+ if case:
+ expanded_value = self.__split_if_str(case.value)
+ expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value
+ for input_name, input_value in case.inputs.items():
+ expanded_inputs.update( self.expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) )
elif isinstance( value, grouping.Repeat ):
for repeat_index in xrange( 0, 1 ): # need to allow for and figure out how many repeats we have
for r_name, r_value in value.inputs.iteritems():
https://bitbucket.org/galaxy/galaxy-central/commits/3360087fcac9/
Changeset: 3360087fcac9
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Twill-less tool tests - handle repeats and conditionals.
Expand 'inputs' tree when building tool inputs for API functional tests.
Code is in there for both flat inputs (the way the UI would submit them), and a nested tree structure. The nested tree structure will not work with the Galaxy API currently - but it is the way the API should work :).
Affected #: 2 files
diff -r 8b99c7bc5e11cd7c452c2ad86f951109b495271c -r 3360087fcac9cfecb7848c0b60a26b91c66d1c8b lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -73,6 +73,24 @@
yield data_dict
+ def to_dict( self, tool_inputs, declared_inputs ):
+ expanded_inputs = {}
+ for key, value in tool_inputs.items():
+ if isinstance( value, grouping.Conditional ):
+ for i, case in enumerate( value.cases ):
+ if declared_inputs[ value.test_param.name ] == case.value:
+ pass # TODO
+ elif isinstance( value, grouping.Repeat ):
+ values = []
+ for r_name, r_value in value.inputs.iteritems():
+ values.append( self.to_dict( {r_name: r_value} , declared_inputs ) )
+ expanded_inputs[ value.name ] = values
+ elif value.name not in declared_inputs:
+ print "%s not declared in tool test, will not change default value." % value.name
+ else:
+ expanded_inputs[ value.name ] = declared_inputs[value.name]
+ return expanded_inputs
+
def __matching_case( self, cond, declared_inputs ):
param = cond.test_param
declared_value = declared_inputs.get( param.name, None )
diff -r 8b99c7bc5e11cd7c452c2ad86f951109b495271c -r 3360087fcac9cfecb7848c0b60a26b91c66d1c8b test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -126,10 +126,30 @@
if value in self.uploads:
all_inputs[key] = self.uploads[ value ]
- # TODO: Handle repeats.
- # TODO: Handle pages.
- # TODO: Handle force_history_refresh
- datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=all_inputs )
+ # TODO: Handle pages?
+ # TODO: Handle force_history_refresh?
+ flat_inputs = True
+ if flat_inputs:
+ # Build up tool_input flately (e.g {"a_repeat_0|a_repeat_param" : "value1"})
+ expanded_inputs = {}
+ expanded_inputs.update(testdef.expand_grouping(testdef.tool.inputs_by_page[0], all_inputs))
+ for i in range( 1, testdef.tool.npages ):
+ expanded_inputs.update(testdef.expand_grouping(testdef.tool.inputs_by_page[i], all_inputs))
+
+ # # HACK: Flatten single-value lists. Required when using expand_grouping
+ for key, value in expanded_inputs.iteritems():
+ if isinstance(value, list) and len(value) == 1:
+ expanded_inputs[key] = value[0]
+ tool_input = expanded_inputs
+ else:
+ # Build up tool_input as nested dictionary (e.g. {"a_repeat": [{"a_repeat_param" : "value1"}]})
+ # Doesn't work with the tool API at this time.
+ tool_input = {}
+ tool_input.update(testdef.to_dict(testdef.tool.inputs_by_page[0], all_inputs))
+ for i in range( 1, testdef.tool.npages ):
+ tool_input.update(testdef.to_dict(testdef.tool.inputs_by_page[i], all_inputs))
+
+ datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=tool_input )
self.__wait_for_history( history_id )() # TODO: Remove and respect maxseconds!
return datasets.json()[ 'outputs' ]
https://bitbucket.org/galaxy/galaxy-central/commits/47eeac9693d3/
Changeset: 47eeac9693d3
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Twill-less tool tests fix for nested input filenames.
Affected #: 1 file
diff -r 3360087fcac9cfecb7848c0b60a26b91c66d1c8b -r 47eeac9693d38593a60bf43f1920a6417fca2e6a test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -111,7 +111,7 @@
dataset = submit_response["outputs"][0]
#raise Exception(str(dataset))
hid = dataset['id']
- self.uploads[ fname ] = {"src": "hda", "id": hid}
+ self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = {"src": "hda", "id": hid}
return self.__wait_for_history( history_id )
def run_tool( self, testdef, history_id ):
https://bitbucket.org/galaxy/galaxy-central/commits/b21e913e21e9/
Changeset: b21e913e21e9
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Twill-less tool tests fix for tests with html outputs.
For example gatk_analyze_covariates.
Affected #: 1 file
diff -r 47eeac9693d38593a60bf43f1920a6417fca2e6a -r b21e913e21e951a9d54de4b8e480334dabb70e2d test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -198,7 +198,7 @@
def __dataset_fetcher( self, history_id ):
def fetcher( hda_id, base_name=None ):
- url = "histories/%s/contents/%s/display" % (history_id, hda_id)
+ url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id)
if base_name:
url += "&filename=%s" % base_name
return self.__get( url ).text
https://bitbucket.org/galaxy/galaxy-central/commits/ff31838afb70/
Changeset: ff31838afb70
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Twill-less tool tests fix for binary data in results.
Use requests' response.content instead of response.text for reading results.
Fixes the following error 'UnicodeEncodeError: 'ascii' codec can't encode characters in position 1-3: ordinal not in range(128)' when dealing with ASN.1 binary files.
Affected #: 1 file
diff -r b21e913e21e951a9d54de4b8e480334dabb70e2d -r ff31838afb704f86e10ce4798f8d41e6dfcecb59 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -201,7 +201,7 @@
url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id)
if base_name:
url += "&filename=%s" % base_name
- return self.__get( url ).text
+ return self.__get( url ).content
return fetcher
https://bitbucket.org/galaxy/galaxy-central/commits/deaa67411c92/
Changeset: deaa67411c92
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Twill-less tool tests - enhancements related to errors.
- Report formatted standard error and standard output of tools like Twill interactor.
- Respect testdef.maxseconds to timeout test cases.
- Check history item - like the Twill interactor, do not verify 'error' datasets.
- Properly raise exceptions with reported API error messages when running tools.
Affected #: 3 files
diff -r ff31838afb704f86e10ce4798f8d41e6dfcecb59 -r deaa67411c9212f8c24f3062e8c7710466118f34 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -10,6 +10,7 @@
DEFAULT_FTYPE = 'auto'
DEFAULT_DBKEY = 'hg17'
DEFAULT_INTERACTOR = "twill" # Default mechanism test code uses for interacting with Galaxy instance.
+DEFAULT_MAX_SECS = 120
def parse_tests_elem(tool, tests_elem):
@@ -35,7 +36,7 @@
def __init__( self, tool, test_elem, i, default_interactor ):
name = test_elem.get( 'name', 'Test-%d' % (i + 1) )
- maxseconds = int( test_elem.get( 'maxseconds', '120' ) )
+ maxseconds = int( test_elem.get( 'maxseconds', DEFAULT_MAX_SECS ) )
self.tool = tool
self.name = name
diff -r ff31838afb704f86e10ce4798f8d41e6dfcecb59 -r deaa67411c9212f8c24f3062e8c7710466118f34 test/base/twilltestcase.py
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -616,6 +616,9 @@
self.visit_page( "datasets/%s/%s" % ( self.security.encode_id( hda_id ), stream ) )
output = self.last_page()
+ return self._format_stream( output, stream, format )
+
+ def _format_stream( self, output, stream, format ):
if format:
msg = "---------------------- >> begin tool %s << -----------------------\n" % stream
msg += output + "\n"
@@ -1409,13 +1412,16 @@
return True
return False
- def wait( self, maxseconds=120 ):
+ def wait( self, **kwds ):
"""Waits for the tools to finish"""
+ return self.wait_for(lambda: self.get_running_datasets(), **kwds)
+
+ def wait_for(self, func, maxseconds=120):
sleep_amount = 0.1
slept = 0
- self.home()
while slept <= maxseconds:
- if self.get_running_datasets():
+ result = func()
+ if result:
time.sleep( sleep_amount )
slept += sleep_amount
sleep_amount *= 2
diff -r ff31838afb704f86e10ce4798f8d41e6dfcecb59 -r deaa67411c9212f8c24f3062e8c7710466118f34 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -64,7 +64,14 @@
output_data = data_list[ output_index ]
self.assertTrue( output_data is not None )
name, outfile, attributes = output_tuple
- galaxy_interactor.verify_output( history, output_data, outfile, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds )
+ try:
+ galaxy_interactor.verify_output( history, output_data, outfile, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds )
+ except Exception:
+ for stream in ['stdout', 'stderr']:
+ stream_output = galaxy_interactor.get_job_stream( history, output_data, stream=stream )
+ print >>sys.stderr, self._format_stream( stream_output, stream=stream, format=True )
+ raise
+
output_index += 1
@@ -77,16 +84,16 @@
self.uploads = {}
def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ):
+ self.twill_test_case.wait_for( lambda: not self.__history_ready( history_id ), maxseconds=maxseconds)
hid = output_data.get( 'id' )
- try:
- fetcher = self.__dataset_fetcher( history_id )
- self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id )
- except Exception:
- ## TODO: Print this!
- # print >>sys.stderr, self.twill_test_case.get_job_stdout( output_data.get( 'id' ), format=True )
- ## TODO: Print this!
- # print >>sys.stderr, self.twill_test_case.get_job_stderr( output_data.get( 'id' ), format=True )
- raise
+ fetcher = self.__dataset_fetcher( history_id )
+ ## TODO: Twill version verifys dataset is 'ok' in here.
+ self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id )
+
+ def get_job_stream( self, history_id, output_data, stream ):
+ hid = output_data.get( 'id' )
+ data = self.__get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json()
+ return data.get( stream, '' )
def new_history( self ):
history_json = self.__post( "histories", {"name": "test_history"} ).json()
@@ -150,8 +157,11 @@
tool_input.update(testdef.to_dict(testdef.tool.inputs_by_page[i], all_inputs))
datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=tool_input )
- self.__wait_for_history( history_id )() # TODO: Remove and respect maxseconds!
- return datasets.json()[ 'outputs' ]
+ datasets_object = datasets.json()
+ try:
+ return datasets_object[ 'outputs' ]
+ except KeyError:
+ raise Exception( datasets_object[ 'message' ] )
def output_hid( self, output_data ):
return output_data[ 'id' ]
@@ -161,17 +171,19 @@
def __wait_for_history( self, history_id ):
def wait():
- while True:
- history_json = self.__get( "histories/%s" % history_id ).json()
- state = history_json[ 'state' ]
- if state == 'ok':
- #raise Exception(str(self.__get( self.__get( "histories/%s/contents" % history_id ).json()[0]['url'] ).json() ) )
- #raise Exception(str(self.__get( self.__get( "histories/%s/contents" % history_id ).json()[0]['url'] ).json() ) )
- break
- elif state == 'error':
- raise Exception("History in error state.")
+ while not self.__history_ready( history_id ):
+ pass
return wait
+ def __history_ready( self, history_id ):
+ history_json = self.__get( "histories/%s" % history_id ).json()
+ state = history_json[ 'state' ]
+ if state == 'ok':
+ return True
+ elif state == 'error':
+ raise Exception("History in error state.")
+ return False
+
def __submit_tool( self, history_id, tool_id, tool_input, extra_data={}, files=None ):
data = dict(
history_id=history_id,
@@ -230,12 +242,10 @@
def verify_output( self, history, output_data, outfile, attributes, shed_tool_id, maxseconds ):
hid = output_data.get( 'hid' )
- try:
- self.twill_test_case.verify_dataset_correctness( outfile, hid=hid, attributes=attributes, shed_tool_id=shed_tool_id )
- except Exception:
- print >>sys.stderr, self.twill_test_case.get_job_stdout( output_data.get( 'id' ), format=True )
- print >>sys.stderr, self.twill_test_case.get_job_stderr( output_data.get( 'id' ), format=True )
- raise
+ self.twill_test_case.verify_dataset_correctness( outfile, hid=hid, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds )
+
+ def get_job_stream( self, history_id, output_data, stream ):
+ return self.twill_test_case._get_job_stream_output( output_data.get( 'id' ), stream=stream, format=False )
def stage_data_async( self, test_data, history, shed_tool_id, async=True ):
name = test_data.get( 'name', None )
https://bitbucket.org/galaxy/galaxy-central/commits/f5add174debe/
Changeset: f5add174debe
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Add new option, tools, and data to functional test framework to test/demonstrate tool framework itself.
Add sample repeat tool demonstrating some weaknesses of the current twill-based framework.
Affected #: 12 files
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 run_functional_tests.sh
--- a/run_functional_tests.sh
+++ b/run_functional_tests.sh
@@ -13,6 +13,8 @@
echo "'run_functional_tests.sh -list' for listing all the tool ids"
echo "'run_functional_tests.sh -toolshed' for running all the test scripts in the ./test/tool_shed/functional directory"
echo "'run_functional_tests.sh -toolshed testscriptname' for running one test script named testscriptname in the .test/tool_shed/functional directory"
+ echo "'run_functional_tests.sh -framework' for running through example tool tests testing framework features in test/functional/tools"
+ echo "'run_functional_tests.sh -framework -id toolid' for testing one framework tool (in test/functional/tools/) with id 'toolid'"
elif [ $1 = '-id' ]; then
python ./scripts/functional_tests.py -v functional.test_toolbox:TestForTool_$2 --with-nosehtml --html-report-file run_functional_tests.html
elif [ $1 = '-sid' ]; then
@@ -46,6 +48,14 @@
else
python ./test/tool_shed/functional_tests.py -v --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html $2
fi
+elif [ $1 = '-framework' ]; then
+ if [ ! $2 ]; then
+ python ./scripts/functional_tests.py -v functional.test_toolbox --with-nosehtml --html-report-file run_functional_tests.html -framework
+ elif [ $2 = '-id' ]; then
+ python ./scripts/functional_tests.py -v functional.test_toolbox:TestForTool_$3 --with-nosehtml --html-report-file run_functional_tests.html -framework
+ else
+ echo "Invalid test option selected, if -framework first argument to $0, optional second argument must be -id followed a tool id."
+ fi
else
python ./scripts/functional_tests.py -v --with-nosehtml --html-report-file run_functional_tests.html $1
fi
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -178,6 +178,7 @@
os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales
testing_migrated_tools = '-migrated' in sys.argv
testing_installed_tools = '-installed' in sys.argv
+ datatypes_conf_override = None
if testing_migrated_tools or testing_installed_tools:
sys.argv.pop()
@@ -197,8 +198,21 @@
# Exclude all files except test_toolbox.py.
ignore_files = ( re.compile( r'^test_[adghlmsu]*' ), re.compile( r'^test_ta*' ) )
else:
- tool_config_file = os.environ.get( 'GALAXY_TEST_TOOL_CONF', 'tool_conf.xml' )
- galaxy_test_file_dir = os.environ.get( 'GALAXY_TEST_FILE_DIR', default_galaxy_test_file_dir )
+ framework_test = '-framework' in sys.argv # Run through suite of tests testing framework.
+ if framework_test:
+ sys.argv.pop()
+ framework_tool_dir = os.path.join('test', 'functional', 'tools')
+ tool_conf = os.path.join( framework_tool_dir, 'samples_tool_conf.xml' )
+ datatypes_conf_override = os.path.join( framework_tool_dir, 'sample_datatypes_conf.xml' )
+ test_dir = os.path.join( framework_tool_dir, 'test-data')
+
+ tool_path = framework_tool_dir
+ else:
+ # Use tool_conf.xml toolbox.
+ tool_conf = 'tool_conf.xml'
+ test_dir = default_galaxy_test_file_dir
+ tool_config_file = os.environ.get( 'GALAXY_TEST_TOOL_CONF', tool_conf )
+ galaxy_test_file_dir = os.environ.get( 'GALAXY_TEST_FILE_DIR', test_dir )
if not os.path.isabs( galaxy_test_file_dir ):
galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
library_import_dir = galaxy_test_file_dir
@@ -338,6 +352,8 @@
if use_distributed_object_store:
kwargs[ 'object_store' ] = 'distributed'
kwargs[ 'distributed_object_store_config_file' ] = 'distributed_object_store_conf.xml.sample'
+ if datatypes_conf_override:
+ kwargs[ 'datatypes_config_file' ] = datatypes_conf_override
# If the user has passed in a path for the .ini file, do not overwrite it.
galaxy_config_file = os.environ.get( 'GALAXY_TEST_INI_FILE', None )
if not galaxy_config_file:
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/README.txt
--- /dev/null
+++ b/test/functional/tools/README.txt
@@ -0,0 +1,3 @@
+This directory contains tools only useful for testing the tool test framework
+and demonstrating it features. Run the driver script 'run_functional_tests.sh'
+with '-installed' as first argument to run through these tests.
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/multi_repeats.xml
--- /dev/null
+++ b/test/functional/tools/multi_repeats.xml
@@ -0,0 +1,39 @@
+<tool id="multirepeat" name="multirepeat">
+ <description>tail-to-head</description>
+ <command>
+ cat $input1 #for $q in $queries# ${q.input2} #end for# #for $q in $more_queries# ${q.more_queries_input} #end for# > $out_file1
+ </command>
+ <inputs>
+ <param name="input1" type="data" label="Concatenate Dataset"/>
+ <repeat name="queries" title="Dataset">
+ <param name="input2" type="data" label="Select" />
+ </repeat>
+ <repeat name="more_queries" title="Dataset">
+ <param name="more_queries_input" type="data" label="Select" />
+ </repeat>
+ </inputs>
+ <outputs>
+ <data name="out_file1" format="input" metadata_source="input1"/>
+ </outputs>
+ <tests>
+ <test>
+ <param name="input1" value="simple_line.txt"/>
+ <param name="input2" value="simple_line.txt"/>
+ <output name="out_file1" file="simple_line_x2.txt"/>
+ </test>
+ <test>
+ <param name="input1" value="simple_line.txt"/>
+ <param name="input2" value="simple_line.txt"/>
+ <param name="input2" value="simple_line.txt"/>
+ <output name="out_file1" file="simple_line_x3.txt"/>
+ </test>
+ <test>
+ <param name="input1" value="simple_line.txt"/>
+ <param name="input2" value="simple_line.txt"/>
+ <param name="input2" value="simple_line.txt"/>
+ <param name="more_queries_input" value="simple_line.txt"/>
+ <param name="more_queries_input" value="simple_line.txt"/>
+ <output name="out_file1" file="simple_line_x5.txt"/>
+ </test>
+ </tests>
+</tool>
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/sample_datatypes_conf.xml
--- /dev/null
+++ b/test/functional/tools/sample_datatypes_conf.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0"?>
+<datatypes>
+ <registration converters_path="lib/galaxy/datatypes/converters" display_path="display_applications">
+ <datatype extension="velvet" type="galaxy.datatypes.assembly:Velvet" display_in_upload="true"/>
+ <datatype extension="txt" type="galaxy.datatypes.data:Text" display_in_upload="true"/>
+ <datatype extension="tabular" type="galaxy.datatypes.tabular:Tabular" display_in_upload="true"/>
+ </registration>
+</datatypes>
\ No newline at end of file
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/samples_tool_conf.xml
--- /dev/null
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0"?>
+<toolbox>
+ <tool file="upload.xml"/>
+ <tool file="multi_repeats.xml"/>
+</toolbox>
\ No newline at end of file
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/test-data/simple_line.txt
--- /dev/null
+++ b/test/functional/tools/test-data/simple_line.txt
@@ -0,0 +1,1 @@
+This is a line of text.
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/test-data/simple_line_x2.txt
--- /dev/null
+++ b/test/functional/tools/test-data/simple_line_x2.txt
@@ -0,0 +1,2 @@
+This is a line of text.
+This is a line of text.
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/test-data/simple_line_x3.txt
--- /dev/null
+++ b/test/functional/tools/test-data/simple_line_x3.txt
@@ -0,0 +1,3 @@
+This is a line of text.
+This is a line of text.
+This is a line of text.
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/test-data/simple_line_x5.txt
--- /dev/null
+++ b/test/functional/tools/test-data/simple_line_x5.txt
@@ -0,0 +1,5 @@
+This is a line of text.
+This is a line of text.
+This is a line of text.
+This is a line of text.
+This is a line of text.
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/upload.py
--- /dev/null
+++ b/test/functional/tools/upload.py
@@ -0,0 +1,1 @@
+../../../tools/data_source/upload.py
\ No newline at end of file
diff -r deaa67411c9212f8c24f3062e8c7710466118f34 -r f5add174debe1ca23aa21a42530c650ad6b1ad42 test/functional/tools/upload.xml
--- /dev/null
+++ b/test/functional/tools/upload.xml
@@ -0,0 +1,1 @@
+../../../tools/data_source/upload.xml
\ No newline at end of file
https://bitbucket.org/galaxy/galaxy-central/commits/81b6aca75182/
Changeset: 81b6aca75182
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Twill-less tool tests - allow multiple param instances with same name.
Allows specifying multiple repeat instances.
Affected #: 2 files
diff -r f5add174debe1ca23aa21a42530c650ad6b1ad42 -r 81b6aca751826e85bbf58fadb21f96d370dd9f6e lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -103,6 +103,41 @@
return case
print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( param.name, declared_value )
+ def expand_multi_grouping( self, tool_inputs, declared_inputs, prefix='', index=0 ):
+ """
+ Used by API, slight generalization of expand_grouping used by Twill based interactor. Still
+ not quite the context/tree based specification that should exist!
+ """
+ expanded_inputs = {}
+ for key, value in tool_inputs.items():
+ expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name)
+ if isinstance( value, grouping.Conditional ):
+ new_prefix = expanded_key
+ case = self.__matching_case( value, declared_inputs )
+ if case:
+ expanded_value = self.__split_if_str(case.value)
+ expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value
+ for input_name, input_value in case.inputs.items():
+ expanded_inputs.update( self.expand_multi_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ), index=index )
+ elif isinstance( value, grouping.Repeat ):
+ repeat_index = 0
+ any_children_matched = True
+ while any_children_matched:
+ any_children_matched = False
+ for r_name, r_value in value.inputs.iteritems():
+ new_prefix = "%s_%d" % ( value.name, repeat_index )
+ if prefix:
+ new_prefix = "%s|%s" % ( prefix, new_prefix )
+ expanded_input = self.expand_multi_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix, index=repeat_index )
+ if expanded_input:
+ any_children_matched = True
+ expanded_inputs.update( expanded_input )
+ repeat_index += 1
+ elif value.name in declared_inputs and len(declared_inputs[ value.name ]) > index:
+ value = self.__split_if_str( declared_inputs[ value.name ][ index ] )
+ expanded_inputs[ expanded_key ] = value
+ return expanded_inputs
+
def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ):
expanded_inputs = {}
for key, value in tool_inputs.items():
diff -r f5add174debe1ca23aa21a42530c650ad6b1ad42 -r 81b6aca751826e85bbf58fadb21f96d370dd9f6e test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -126,12 +126,14 @@
# tool will have uncompressed it on the fly.
all_inputs = {}
for name, value, _ in testdef.inputs:
- all_inputs[ name ] = value
-
- for key, value in all_inputs.iteritems():
# TODO: Restrict this to param inputs.
if value in self.uploads:
- all_inputs[key] = self.uploads[ value ]
+ value = self.uploads[ value ]
+
+ if name in all_inputs:
+ all_inputs[name].append( value )
+ else:
+ all_inputs[name] = [ value ]
# TODO: Handle pages?
# TODO: Handle force_history_refresh?
@@ -139,9 +141,9 @@
if flat_inputs:
# Build up tool_input flately (e.g {"a_repeat_0|a_repeat_param" : "value1"})
expanded_inputs = {}
- expanded_inputs.update(testdef.expand_grouping(testdef.tool.inputs_by_page[0], all_inputs))
+ expanded_inputs.update(testdef.expand_multi_grouping(testdef.tool.inputs_by_page[0], all_inputs))
for i in range( 1, testdef.tool.npages ):
- expanded_inputs.update(testdef.expand_grouping(testdef.tool.inputs_by_page[i], all_inputs))
+ expanded_inputs.update(testdef.expand_multi_grouping(testdef.tool.inputs_by_page[i], all_inputs))
# # HACK: Flatten single-value lists. Required when using expand_grouping
for key, value in expanded_inputs.iteritems():
https://bitbucket.org/galaxy/galaxy-central/commits/72c22a117bdd/
Changeset: 72c22a117bdd
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Tool functional tests - allow disambiguation of params in conditionals.
Disambiguate by adding a prefix for the parent (or any number of direct ancestors) separated with a pipe (|). See the included test case for an example.
Should fix twill and API driven functional tests.
Affected #: 3 files
diff -r 81b6aca751826e85bbf58fadb21f96d370dd9f6e -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -3,6 +3,7 @@
from parameters import basic
from parameters import grouping
from galaxy.util import string_as_bool
+from galaxy.util.bunch import Bunch
import logging
log = logging.getLogger( __name__ )
@@ -92,15 +93,20 @@
expanded_inputs[ value.name ] = declared_inputs[value.name]
return expanded_inputs
- def __matching_case( self, cond, declared_inputs ):
+ def __matching_case( self, cond, declared_inputs, prefix, index=None ):
param = cond.test_param
- declared_value = declared_inputs.get( param.name, None )
+ declared_value = self.__declared_match( declared_inputs, param.name, prefix)
+ if index is not None:
+ declared_value = declared_value[index]
for i, case in enumerate( cond.cases ):
- if declared_value and (case.value == declared_value):
+ if declared_value is not None and (case.value == declared_value):
return case
- if not declared_value:
+ if declared_value is None:
# TODO: Default might not be top value, fix this.
+ # TODO: Also may be boolean, got to look at checked.
return case
+ else:
+ return Bunch(value=declared_value, inputs=Bunch(items=lambda: []))
print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( param.name, declared_value )
def expand_multi_grouping( self, tool_inputs, declared_inputs, prefix='', index=0 ):
@@ -113,9 +119,9 @@
expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name)
if isinstance( value, grouping.Conditional ):
new_prefix = expanded_key
- case = self.__matching_case( value, declared_inputs )
+ case = self.__matching_case( value, declared_inputs, new_prefix, index=index )
if case:
- expanded_value = self.__split_if_str(case.value)
+ expanded_value = self.__split_if_str( case.value )
expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value
for input_name, input_value in case.inputs.items():
expanded_inputs.update( self.expand_multi_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ), index=index )
@@ -133,9 +139,11 @@
any_children_matched = True
expanded_inputs.update( expanded_input )
repeat_index += 1
- elif value.name in declared_inputs and len(declared_inputs[ value.name ]) > index:
- value = self.__split_if_str( declared_inputs[ value.name ][ index ] )
- expanded_inputs[ expanded_key ] = value
+ else:
+ declared_value = self.__declared_match( declared_inputs, value.name, prefix )
+ if declared_value and len(declared_value) > index:
+ value = self.__split_if_str( declared_value[ index ] )
+ expanded_inputs[ expanded_key ] = value
return expanded_inputs
def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ):
@@ -144,7 +152,7 @@
expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name)
if isinstance( value, grouping.Conditional ):
new_prefix = expanded_key
- case = self.__matching_case( value, declared_inputs )
+ case = self.__matching_case( value, declared_inputs, new_prefix )
if case:
expanded_value = self.__split_if_str(case.value)
expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value
@@ -157,13 +165,26 @@
if prefix:
new_prefix = "%s|%s" % ( prefix, new_prefix )
expanded_inputs.update( self.expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) )
- elif value.name not in declared_inputs:
- print "%s not declared in tool test, will not change default value." % value.name
else:
- value = self.__split_if_str(declared_inputs[value.name])
- expanded_inputs[expanded_key] = value
+ declared_value = self.__declared_match( declared_inputs, value.name, prefix )
+ if not declared_value:
+ print "%s not declared in tool test, will not change default value." % value.name
+ else:
+ value = self.__split_if_str(declared_value)
+ expanded_inputs[expanded_key] = value
return expanded_inputs
+ def __declared_match( self, declared_inputs, name, prefix ):
+ prefix_suffixes = [ "%s|" % part for part in prefix.split( "|" ) ] if prefix else []
+ prefix_suffixes.append( name )
+ prefix_suffixes.reverse()
+ prefixed_name = ""
+ for prefix_suffix in prefix_suffixes:
+ prefixed_name = "%s%s" % ( prefix_suffix, prefixed_name )
+ if prefixed_name in declared_inputs:
+ return declared_inputs[prefixed_name]
+ return None
+
def __split_if_str( self, value ):
split = isinstance(value, str)
if split:
diff -r 81b6aca751826e85bbf58fadb21f96d370dd9f6e -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 test/functional/tools/disambiguate_cond.xml
--- /dev/null
+++ b/test/functional/tools/disambiguate_cond.xml
@@ -0,0 +1,50 @@
+<tool id="handle_cond" name="handle_cond">
+ <description>tail-to-head</description>
+ <command>
+ echo "$p1.p1v $p2.p2v $p3.p3v" > $out_file1
+ </command>
+ <inputs>
+ <conditional name="p1">
+ <param type="boolean" name="use" />
+ <when value="true">
+ <param name="p1v" value="4" type="integer" />
+ </when>
+ <when value="false">
+ <param name="p1v" value="7" type="integer" />
+ </when>
+ </conditional>
+ <conditional name="p2">
+ <param type="boolean" name="use" />
+ <when value="true">
+ <param name="p2v" value="4" type="integer" />
+ </when>
+ <when value="false">
+ <param name="p2v" value="7" type="integer" />
+ </when>
+ </conditional>
+ <conditional name="p3">
+ <param type="boolean" name="use" />
+ <when value="true">
+ <param name="p3v" value="4" type="integer" />
+ </when>
+ <when value="false">
+ <param name="p3v" value="7" type="integer" />
+ </when>
+ </conditional>
+ </inputs>
+ <outputs>
+ <data name="out_file1" format="txt" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="p1|use" value="True"/>
+ <param name="p2|use" value="False"/>
+ <param name="p3|use" value="True"/>
+ <output name="out_file1">
+ <assert_contents>
+ <has_line line="4 7 4" />
+ </assert_contents>
+ </output>
+ </test>
+ </tests>
+</tool>
diff -r 81b6aca751826e85bbf58fadb21f96d370dd9f6e -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 test/functional/tools/samples_tool_conf.xml
--- a/test/functional/tools/samples_tool_conf.xml
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -1,5 +1,6 @@
<?xml version="1.0"?><toolbox><tool file="upload.xml"/>
+ <tool file="disambiguate_cond.xml" /><tool file="multi_repeats.xml"/></toolbox>
\ No newline at end of file
https://bitbucket.org/galaxy/galaxy-central/commits/61e4e10e03ce/
Changeset: 61e4e10e03ce
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Twill-less tool tests - handle composite inputs.
Add example tool test for composite data (worked with Twill based test framework prior to this commit, works with API based framework as a result of this commit).
Affected #: 4 files
diff -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -101,24 +101,42 @@
def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ):
fname = test_data[ 'fname' ]
- file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id )
- name = test_data.get( 'name', None )
- if not name:
- name = os.path.basename( file_name )
tool_input = {
"file_type": test_data[ 'ftype' ],
- "dbkey": test_data[ 'dbkey' ], # TODO: Handle it! Doesn't work if undefined, does seem to in Twill.
- "files_0|NAME": name,
- "files_0|type": "upload_dataset",
+ "dbkey": test_data[ 'dbkey' ],
}
- files = {
- "files_0|file_data": open( file_name, 'rb')
- }
- submit_response = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files ).json()
+ composite_data = test_data[ 'composite_data' ]
+ if composite_data:
+ files = {}
+ for i, composite_file in enumerate( composite_data ):
+ file_name = self.twill_test_case.get_filename( composite_file.get( 'value' ), shed_tool_id=shed_tool_id )
+ files["files_%s|file_data" % i] = open( file_name, 'rb' )
+ tool_input.update({
+ #"files_%d|NAME" % i: name,
+ "files_%d|type" % i: "upload_dataset",
+ ## TODO:
+ #"files_%d|space_to_tab" % i: composite_file.get( 'space_to_tab', False )
+ })
+ name = test_data[ 'name' ]
+ else:
+ file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id )
+ name = test_data.get( 'name', None )
+ if not name:
+ name = os.path.basename( file_name )
+
+ tool_input.update({
+ "files_0|NAME": name,
+ "files_0|type": "upload_dataset",
+ })
+ files = {
+ "files_0|file_data": open( file_name, 'rb')
+ }
+ submit_response_object = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files )
+ submit_response = submit_response_object.json()
dataset = submit_response["outputs"][0]
#raise Exception(str(dataset))
hid = dataset['id']
- self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = {"src": "hda", "id": hid}
+ self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = self.uploads[ name ] = {"src": "hda", "id": hid}
return self.__wait_for_history( history_id )
def run_tool( self, testdef, history_id ):
diff -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 test/functional/tools/composite.xml
--- /dev/null
+++ b/test/functional/tools/composite.xml
@@ -0,0 +1,21 @@
+<tool id="velvet_inspired" name="velvet_inspired" version="1.0.0">
+ <description>Velvet sequence assembler for very short reads</description>
+ <command>cat '$input.extra_files_path/Sequences' > $output</command>
+ <inputs>
+ <param name="input" type="data" format="velvet" label="Velvet Dataset" help="Prepared by velveth."/>
+ </inputs>
+ <outputs>
+ <data format="txt" name="output" label="${tool.name} on ${on_string}: LastGraph">
+ </data>
+ </outputs>
+ <tests>
+ <test>
+ <param name="input" value="velveth_test1/output.html" ftype="velvet" >
+ <composite_data value='velveth_test1/Sequences' ftype="Sequences"/>
+ <composite_data value='velveth_test1/Roadmaps' ftype="Roadmaps"/>
+ <composite_data value='velveth_test1/Log'/>
+ </param>
+ <output name="unused_reads_fasta" file="velveth_test1/Sequences" compare="diff"/>
+ </test>
+ </tests>
+</tool>
diff -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 test/functional/tools/samples_tool_conf.xml
--- a/test/functional/tools/samples_tool_conf.xml
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -1,6 +1,7 @@
<?xml version="1.0"?><toolbox><tool file="upload.xml"/>
+ <tool file="composite.xml" /><tool file="disambiguate_cond.xml" /><tool file="multi_repeats.xml"/></toolbox>
\ No newline at end of file
diff -r 72c22a117bdd7fe83b05dd616114bbb7b6a2dae8 -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 test/functional/tools/test-data/velveth_test1
--- /dev/null
+++ b/test/functional/tools/test-data/velveth_test1
@@ -0,0 +1,1 @@
+../../../../test-data/velveth_test1
\ No newline at end of file
https://bitbucket.org/galaxy/galaxy-central/commits/df80192bfdcc/
Changeset: df80192bfdcc
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Add more sample tools for tool testing framework.
Adding multi_select.xml, which demonstrates that twill cannot deal with '-' at the beginning of select param values - this test works immediately with the API interactor.
Also adding a multi_output.xml, this is a tool using variable number of outputs (force_refresh=True), both interactors pass this but it is a good test to verify that.
Also adding example to test multi-page tools (multi_page.xml) - both interactors pass this test. Though the API needed to be adjusted to allow its use (in a previous changeset).
Also adding example tool demonstrating extra_files output.
Finally, a tool simple_constructs.xml that just tests the basics, various parameter types, simple conditional, and simple repeat.
Affected #: 6 files
diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/composite_output.xml
--- /dev/null
+++ b/test/functional/tools/composite_output.xml
@@ -0,0 +1,24 @@
+<tool id="composite_output" name="composite_output" version="1.0.0">
+ <command>mkdir $output.extra_files_path; cp $input.extra_files_path/* $output.extra_files_path</command>
+ <inputs>
+ <param name="input" type="data" format="velvet" label="Velvet Dataset" help="Prepared by velveth."/>
+ </inputs>
+ <outputs>
+ <data format="velvet" name="output" label="">
+ </data>
+ </outputs>
+ <tests>
+ <test>
+ <param name="input" value="velveth_test1/output.html" ftype="velvet" >
+ <composite_data value='velveth_test1/Sequences' ftype="Sequences"/>
+ <composite_data value='velveth_test1/Roadmaps' ftype="Roadmaps"/>
+ <composite_data value='velveth_test1/Log'/>
+ </param>
+ <output name="output" file="velveth_test1/output.html">
+ <extra_files type="file" name="Sequences" value="velveth_test1/Sequences" />
+ <extra_files type="file" name="Roadmaps" value="velveth_test1/Roadmaps" />
+ <extra_files type="file" name="Log" value="velveth_test1/Log" />
+ </output>
+ </test>
+ </tests>
+</tool>
diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/multi_output.xml
--- /dev/null
+++ b/test/functional/tools/multi_output.xml
@@ -0,0 +1,22 @@
+<tool id="multi_output" name="Multi_Output" description="multi_output" force_history_refresh="True" version="0.1.0">
+ <command>
+ echo "Hello" > $report;
+ echo "World" > '${__new_file_path__}/primary_${report.id}_moo_visible_?'
+ </command>
+ <inputs>
+ <param name="input" type="integer" value="7" />
+ </inputs>
+ <outputs>
+ <data format="txt" name="report" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="input" value="7" />
+ <output name="report">
+ <assert_contents>
+ <has_line line="Hello" />
+ </assert_contents>
+ </output>
+ </test>
+ </tests>
+</tool>
diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/multi_page.xml
--- /dev/null
+++ b/test/functional/tools/multi_page.xml
@@ -0,0 +1,29 @@
+<tool id="multi_page" name="Multi_Page" description="multi_page" version="0.1.0">
+ <configfiles>
+ <configfile name="config">${p1} ${p2}</configfile>
+ </configfiles>
+ <command>cat $config > $output</command>
+ <inputs>
+ <page>
+ <param name="p1" type="integer" value="1" />
+ </page>
+ <page>
+ <param name="p2" type="integer" value="2" />
+ </page>
+ </inputs>
+ <outputs>
+ <data format="txt" name="output" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="p1" value="3" />
+ <param name="p2" value="4" />
+
+ <output name="output">
+ <assert_contents>
+ <has_line line="3 4" />
+ </assert_contents>
+ </output>
+ </test>
+ </tests>
+</tool>
diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/multi_select.xml
--- /dev/null
+++ b/test/functional/tools/multi_select.xml
@@ -0,0 +1,29 @@
+<tool id="multi_select" name="multi_select" version="1.0.0">
+ <description>multi_select</description>
+ <configfiles>
+ <configfile name="config">${select_ex}</configfile>
+ </configfiles>
+ <command>cat $config > $output</command>
+ <inputs>
+ <param name="select_ex" type="select" display="checkboxes" multiple="true">
+ <option value="--ex1">Ex1</option>
+ <option value="ex2">Ex2</option>
+ <option value="--ex3">Ex3</option>
+ <option value="--ex4">Ex4</option>
+ <option value="ex5">Ex5</option>
+ </param>
+ </inputs>
+ <outputs>
+ <data format="txt" name="output" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="select_ex" value="--ex1,ex2,--ex3" />
+ <output name="output">
+ <assert_contents>
+ <has_line line="--ex1,ex2,--ex3" />
+ </assert_contents>
+ </output>
+ </test>
+ </tests>
+</tool>
diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/samples_tool_conf.xml
--- a/test/functional/tools/samples_tool_conf.xml
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -1,7 +1,12 @@
<?xml version="1.0"?><toolbox><tool file="upload.xml"/>
+ <tool file="simple_constructs.xml" /><tool file="composite.xml" /><tool file="disambiguate_cond.xml" /><tool file="multi_repeats.xml"/>
+ <tool file="multi_page.xml"/>
+ <tool file="multi_select.xml" />
+ <tool file="multi_output.xml" />
+ <tool file="composite_output.xml" /></toolbox>
\ No newline at end of file
diff -r 61e4e10e03ce8de315a1ae81d12529d80dedf594 -r df80192bfdcc3b8ed37b104c757e0dcc21535420 test/functional/tools/simple_constructs.xml
--- /dev/null
+++ b/test/functional/tools/simple_constructs.xml
@@ -0,0 +1,47 @@
+<tool id="simple_constructs" name="simple_constructs">
+ <command>
+ echo "$p1.p1val" >> $out_file1;
+ echo "$booltest" >> $out_file1;
+ echo "$inttest" >> $out_file1;
+ echo "$floattest" >> $out_file1;
+ cat "$files[0].file" >> $out_file1;
+ </command>
+ <inputs>
+ <conditional name="p1">
+ <param type="boolean" name="p1use" />
+ <when value="true">
+ <param name="p1val" value="p1used" type="text" />
+ </when>
+ <when value="false">
+ <param name="p1val" value="p1notused" type="text" />
+ </when>
+ </conditional>
+ <param name="booltest" truevalue="booltrue" falsevalue="boolfalse" checked="false" type="boolean" />
+ <param name="inttest" value="1" type="integer" />
+ <param name="floattest" value="1.0" type="float" />
+ <repeat name="files" title="Files">
+ <param name="file" type="data" format="txt" />
+ </repeat>
+ </inputs>
+ <outputs>
+ <data name="out_file1" format="txt" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="p1use" value="true" />
+ <param name="booltest" value="true" />
+ <param name="inttest" value="12456" />
+ <param name="floattest" value="6.789" />
+ <param name="file" value="simple_line.txt" /><!-- This is a line of text. -->
+ <output name="out_file1">
+ <assert_contents>
+ <has_line line="p1used" />
+ <has_line line="booltrue" />
+ <has_line line="12456" />
+ <has_line line="6.789" />
+ <has_line line="This is a line of text." />
+ </assert_contents>
+ </output>
+ </test>
+ </tests>
+</tool>
https://bitbucket.org/galaxy/galaxy-central/commits/e3731610302a/
Changeset: e3731610302a
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Provide fallback implementation of requests functionality if lib unavailable.
Just limited post/get functionality needed for tests.
Affected #: 1 file
diff -r df80192bfdcc3b8ed37b104c757e0dcc21535420 -r e3731610302a9de11bc15c3b039fac7a17a17558 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -7,8 +7,7 @@
import galaxy.model
from galaxy.model.orm import and_, desc
from galaxy.model.mapping import context as sa_session
-from simplejson import dumps
-import requests
+from simplejson import dumps, loads
toolbox = None
@@ -133,7 +132,10 @@
}
submit_response_object = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files )
submit_response = submit_response_object.json()
- dataset = submit_response["outputs"][0]
+ try:
+ dataset = submit_response["outputs"][0]
+ except KeyError:
+ raise Exception(submit_response)
#raise Exception(str(dataset))
hid = dataset['id']
self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = self.uploads[ name ] = {"src": "hda", "id": hid}
@@ -242,7 +244,7 @@
key = self.api_key
data = data.copy()
data['key'] = key
- return requests.post( "%s/%s" % (self.api_url, path), data=data, files=files )
+ return post_request( "%s/%s" % (self.api_url, path), data=data, files=files )
def __get( self, path, data={}, key=None ):
if not key:
@@ -252,7 +254,7 @@
if path.startswith("/api"):
path = path[ len("/api"): ]
url = "%s/%s" % (self.api_url, path)
- return requests.get( url, params=data )
+ return get_request( url, params=data )
class GalaxyInteractorTwill( object ):
@@ -398,3 +400,66 @@
'api': GalaxyInteractorApi,
'twill': GalaxyInteractorTwill,
}
+
+
+# Lets just try to use requests if it is available, but if not provide fallback
+# on custom implementations of limited requests get/post functionality.
+try:
+ from requests import get as get_request
+ from requests import post as post_request
+except ImportError:
+ import urllib2
+ import httplib
+
+ class RequestsLikeResponse( object ):
+
+ def __init__( self, content ):
+ self.content = content
+
+ def json( self ):
+ return loads( self.content )
+
+ def get_request( url, params={} ):
+ argsep = '&'
+ if '?' not in url:
+ argsep = '?'
+ url = url + argsep + '&'.join( [ '%s=%s' % (k, v) for k, v in params.iteritems() ] )
+ #req = urllib2.Request( url, headers = { 'Content-Type': 'application/json' } )
+ return RequestsLikeResponse(urllib2.urlopen( url ).read() )
+
+ def post_request( url, data, files ):
+ parsed_url = urllib2.urlparse.urlparse( url )
+ return __post_multipart( host=parsed_url.netloc, selector=parsed_url.path, fields=data.iteritems(), files=(files or {}).iteritems() )
+
+ # http://stackoverflow.com/a/681182
+ def __post_multipart(host, selector, fields, files):
+ content_type, body = __encode_multipart_formdata(fields, files)
+ h = httplib.HTTP(host)
+ h.putrequest('POST', selector)
+ h.putheader('content-type', content_type)
+ h.putheader('content-length', str(len(body)))
+ h.endheaders()
+ h.send(body)
+ errcode, errmsg, headers = h.getreply()
+ return RequestsLikeResponse(h.file.read())
+
+ def __encode_multipart_formdata(fields, files):
+ LIMIT = '----------lImIt_of_THE_fIle_eW_$'
+ CRLF = '\r\n'
+ L = []
+ for (key, value) in fields:
+ L.append('--' + LIMIT)
+ L.append('Content-Disposition: form-data; name="%s"' % key)
+ L.append('')
+ L.append(value)
+ for (key, value) in files:
+ L.append('--' + LIMIT)
+ L.append('Content-Disposition: form-data; name="%s"; filename="%s";' % (key, key))
+ L.append('Content-Type: application/octet-stream')
+ L.append('')
+ L.append(value.read())
+ L.append('--' + LIMIT + '--')
+ L.append('')
+ body = CRLF.join(L)
+ content_type = 'multipart/form-data; boundary=%s' % LIMIT
+ return content_type, body
https://bitbucket.org/galaxy/galaxy-central/commits/cf493be30252/
Changeset: cf493be30252
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Twill-less tool tests - handle input metadata tags.
Affected #: 1 file
diff -r e3731610302a9de11bc15c3b039fac7a17a17558 -r cf493be302529fe2271b2a339f8a3fada63301c8 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -104,6 +104,9 @@
"file_type": test_data[ 'ftype' ],
"dbkey": test_data[ 'dbkey' ],
}
+ for elem in test_data.get('metadata', []):
+ tool_input["files_metadata|%s" % elem.get( 'name' )] = elem.get( 'value' )
+
composite_data = test_data[ 'composite_data' ]
if composite_data:
files = {}
https://bitbucket.org/galaxy/galaxy-central/commits/45c1d11cfff5/
Changeset: 45c1d11cfff5
User: jmchilton
Date: 2013-11-22 02:17:12
Summary: Tool functional tests - allow checking extra files without checking a primary file.
For output datatypes where primary file is meaningless.
Affected #: 1 file
diff -r cf493be302529fe2271b2a339f8a3fada63301c8 -r 45c1d11cfff552ce095c0561b5fba2cb5d3610e3 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -252,8 +252,6 @@
assert_list = self.__parse_assert_list( output_elem )
file = attrib.pop( 'file', None )
# File no longer required if an list of assertions was present.
- if not assert_list and file is None:
- raise Exception( "Test output does not have a 'file' to compare with or list of assertions to check")
attributes = {}
# Method of comparison
attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower()
@@ -262,12 +260,15 @@
# Allow a file size to vary if sim_size compare
attributes['delta'] = int( attrib.pop( 'delta', '10000' ) )
attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) )
- attributes['extra_files'] = []
- attributes['assert_list'] = assert_list
+ extra_files = []
if 'ftype' in attrib:
attributes['ftype'] = attrib['ftype']
for extra in output_elem.findall( 'extra_files' ):
- attributes['extra_files'].append( self.__parse_extra_files_elem( extra ) )
+ extra_files.append( self.__parse_extra_files_elem( extra ) )
+ if not (assert_list or file or extra_files):
+ raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)")
+ attributes['assert_list'] = assert_list
+ attributes['extra_files'] = extra_files
self.__add_output( name, file, attributes )
except Exception, e:
self.error = True
https://bitbucket.org/galaxy/galaxy-central/commits/01f05ab8df70/
Changeset: 01f05ab8df70
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Extend tool functional test framework to allow testing output dataset metadata.
Adding file test/functional/tools/metadata.xml demonstrating how to check output metadata - this file also demonstrates setting metadata on uploaded datasets and verifies both of these functionalities. Checking output metadata is only available for new API driven tool testing.
Affected #: 4 files
diff -r 45c1d11cfff552ce095c0561b5fba2cb5d3610e3 -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -265,10 +265,14 @@
attributes['ftype'] = attrib['ftype']
for extra in output_elem.findall( 'extra_files' ):
extra_files.append( self.__parse_extra_files_elem( extra ) )
- if not (assert_list or file or extra_files):
+ metadata = {}
+ for metadata_elem in output_elem.findall( 'metadata' ):
+ metadata[ metadata_elem.get('name') ] = metadata_elem.get( 'value' )
+ if not (assert_list or file or extra_files or metadata):
raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)")
attributes['assert_list'] = assert_list
attributes['extra_files'] = extra_files
+ attributes['metadata'] = metadata
self.__add_output( name, file, attributes )
except Exception, e:
self.error = True
diff -r 45c1d11cfff552ce095c0561b5fba2cb5d3610e3 -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -88,6 +88,21 @@
fetcher = self.__dataset_fetcher( history_id )
## TODO: Twill version verifys dataset is 'ok' in here.
self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id )
+ metadata = attributes.get( 'metadata', {} )
+ if metadata:
+ dataset = self.__get( "histories/%s/contents/%s" % ( history_id, hid ) ).json()
+ for key, value in metadata.iteritems():
+ dataset_key = "metadata_%s" % key
+ try:
+ dataset_value = dataset.get( dataset_key, None )
+ if dataset_value != value:
+ msg = "Dataset metadata verification for [%s] failed, expected [%s] but found [%s]."
+ msg_params = ( key, value, dataset_value )
+ msg = msg % msg_params
+ raise Exception( msg )
+ except KeyError:
+ msg = "Failed to verify dataset metadata, metadata key [%s] was not found." % key
+ raise Exception( msg )
def get_job_stream( self, history_id, output_data, stream ):
hid = output_data.get( 'id' )
diff -r 45c1d11cfff552ce095c0561b5fba2cb5d3610e3 -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb test/functional/tools/metadata.xml
--- /dev/null
+++ b/test/functional/tools/metadata.xml
@@ -0,0 +1,29 @@
+<tool id="metadata" name="metadata" version="1.0.0">
+ <command>mkdir $output_copy_of_input.extra_files_path; cp $input.extra_files_path/* $output_copy_of_input.extra_files_path; echo "$input.metadata.base_name" > $output_of_input_metadata</command>
+ <inputs>
+ <param name="input" type="data" format="velvet" label="Velvet Dataset" help="Prepared by velveth."/>
+ </inputs>
+ <outputs>
+ <data format="txt" name="output_of_input_metadata" />
+ <data format="velvet" name="output_copy_of_input" />
+ </outputs>
+ <tests>
+ <test>
+ <param name="input" value="velveth_test1/output.html" ftype="velvet" >
+ <composite_data value='velveth_test1/Sequences' ftype="Sequences"/>
+ <composite_data value='velveth_test1/Roadmaps' ftype="Roadmaps"/>
+ <composite_data value='velveth_test1/Log'/>
+ <metadata name="base_name" value="Example Metadata" />
+ </param>
+ <!-- This ouptut tests setting input metadata above -->
+ <output name="output_of_input_metadata">
+ <assert_contents>
+ <has_line line="Example Metadata" />
+ </assert_contents>
+ </output>
+ <!-- This output tests an assertion about output metadata -->
+ <output name="output_copy_of_input" file="velveth_test1/output.html">
+ <metadata name="base_name" value="velvet" />
+ </output>
+ </test>
+ </tests></tool>
diff -r 45c1d11cfff552ce095c0561b5fba2cb5d3610e3 -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb test/functional/tools/samples_tool_conf.xml
--- a/test/functional/tools/samples_tool_conf.xml
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -9,4 +9,5 @@
<tool file="multi_select.xml" /><tool file="multi_output.xml" /><tool file="composite_output.xml" />
+ <tool file="metadata.xml" /></toolbox>
\ No newline at end of file
https://bitbucket.org/galaxy/galaxy-central/commits/d05be33ad6b7/
Changeset: d05be33ad6b7
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Allow outputs in tool functional tests to be specified in any order when using API interactor.
Outputs must specify a name and must be specified in the same order with the Twill variant. This second restriction is entirely arbitrary using the API so dropping it here.
Adding a tool demonstrating this functionality (test/functional/tools/output_order.xml) which fails with the twill interactor but works fine with the API interactor.
Affected #: 3 files
diff -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb -r d05be33ad6b772c2a688d875c4cf895d6afac4e3 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -57,12 +57,16 @@
def __verify_outputs( self, testdef, history, shed_tool_id, data_list, galaxy_interactor ):
maxseconds = testdef.maxseconds
- output_index = 0 - len( testdef.outputs )
- for output_tuple in testdef.outputs:
+ for output_index, output_tuple in enumerate(testdef.outputs):
# Get the correct hid
- output_data = data_list[ output_index ]
+ name, outfile, attributes = output_tuple
+ try:
+ output_data = data_list[ name ]
+ except (TypeError, KeyError):
+ # Legacy - fall back on ordered data list access if data_list is
+ # just a list (case with twill variant)
+ output_data = data_list[ len(data_list) - len(testdef.outputs) + output_index ]
self.assertTrue( output_data is not None )
- name, outfile, attributes = output_tuple
try:
galaxy_interactor.verify_output( history, output_data, outfile, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds )
except Exception:
@@ -71,8 +75,6 @@
print >>sys.stderr, self._format_stream( stream_output, stream=stream, format=True )
raise
- output_index += 1
-
class GalaxyInteractorApi( object ):
@@ -199,10 +201,21 @@
datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=tool_input )
datasets_object = datasets.json()
try:
- return datasets_object[ 'outputs' ]
+ return self.__dictify_outputs( datasets_object )
except KeyError:
raise Exception( datasets_object[ 'message' ] )
+ def __dictify_outputs( self, datasets_object ):
+ ## Convert outputs list to a dictionary that can be accessed by
+ ## output_name so can be more flexiable about ordering of outputs
+ ## but also allows fallback to legacy access as list mode.
+ outputs_dict = {}
+ index = 0
+ for output in datasets_object[ 'outputs' ]:
+ outputs_dict[ index ] = outputs_dict[ output.get("output_name") ] = output
+ index += 1
+ return outputs_dict
+
def output_hid( self, output_data ):
return output_data[ 'id' ]
diff -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb -r d05be33ad6b772c2a688d875c4cf895d6afac4e3 test/functional/tools/output_order.xml
--- /dev/null
+++ b/test/functional/tools/output_order.xml
@@ -0,0 +1,25 @@
+<tool id="output_order" name="output_order" version="0.1.0">
+ <command>echo $pa > $output_a; echo $pb > $output_b</command>
+ <inputs>
+ <param name="pa" type="integer" value="1" />
+ <param name="pb" type="integer" value="2" />
+ </inputs>
+ <outputs>
+ <data format="txt" name="output_a" />
+ <data format="txt" name="output_b" />
+ </outputs>
+ <tests>
+ <test>
+ <output name="output_b">
+ <assert_contents>
+ <has_line line="2" />
+ </assert_contents>
+ </output>
+ <output name="output_a">
+ <assert_contents>
+ <has_line line="1" />
+ </assert_contents>
+ </output>
+ </test>
+ </tests>
+</tool>
diff -r 01f05ab8df7092b6e03f35faf7f80b47acafa0eb -r d05be33ad6b772c2a688d875c4cf895d6afac4e3 test/functional/tools/samples_tool_conf.xml
--- a/test/functional/tools/samples_tool_conf.xml
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -10,4 +10,5 @@
<tool file="multi_output.xml" /><tool file="composite_output.xml" /><tool file="metadata.xml" />
+ <tool file="output_order.xml" /></toolbox>
\ No newline at end of file
https://bitbucket.org/galaxy/galaxy-central/commits/1cb070e3da5b/
Changeset: 1cb070e3da5b
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Refactor lib/galaxy/tools/test.py parsing into smaller methods.
Affected #: 1 file
diff -r d05be33ad6b772c2a688d875c4cf895d6afac4e3 -r 1cb070e3da5b77f16eb3b8591a1cacd74389c34c lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -192,10 +192,6 @@
return value
def __parse_elem( self, test_elem, i, default_interactor ):
- # Composite datasets need a unique name: each test occurs in a fresh
- # history, but we'll keep it unique per set of tests - use i (test #)
- # and composite_data_names_counter (instance per test #)
- composite_data_names_counter = 0
try:
# Mechanism test code uses for interacting with Galaxy instance,
# until 'api' is the default switch this to API to use its new
@@ -203,81 +199,92 @@
# features or workarounds.
self.interactor = test_elem.get( 'interactor', default_interactor )
- for param_elem in test_elem.findall( "param" ):
- attrib = dict( param_elem.attrib )
- if 'values' in attrib:
- value = attrib[ 'values' ].split( ',' )
- elif 'value' in attrib:
- value = attrib['value']
- else:
- value = None
- attrib['children'] = list( param_elem.getchildren() )
- if attrib['children']:
- # At this time, we can assume having children only
- # occurs on DataToolParameter test items but this could
- # change and would cause the below parsing to change
- # based upon differences in children items
- attrib['metadata'] = []
- attrib['composite_data'] = []
- attrib['edit_attributes'] = []
- # Composite datasets need to be renamed uniquely
- composite_data_name = None
- for child in attrib['children']:
- if child.tag == 'composite_data':
- attrib['composite_data'].append( child )
- if composite_data_name is None:
- # Generate a unique name; each test uses a
- # fresh history
- composite_data_name = '_COMPOSITE_RENAMED_t%i_d%i' \
- % ( i, composite_data_names_counter )
- composite_data_names_counter += 1
- elif child.tag == 'metadata':
- attrib['metadata'].append( child )
- elif child.tag == 'metadata':
- attrib['metadata'].append( child )
- elif child.tag == 'edit_attributes':
- attrib['edit_attributes'].append( child )
- if composite_data_name:
- # Composite datasets need implicit renaming;
- # inserted at front of list so explicit declarations
- # take precedence
- attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } )
- self.__add_param( attrib.pop( 'name' ), value, attrib )
- for output_elem in test_elem.findall( "output" ):
- attrib = dict( output_elem.attrib )
- name = attrib.pop( 'name', None )
- if name is None:
- raise Exception( "Test output does not have a 'name'" )
+ self.__parse_inputs_elems( test_elem, i )
- assert_list = self.__parse_assert_list( output_elem )
- file = attrib.pop( 'file', None )
- # File no longer required if an list of assertions was present.
- attributes = {}
- # Method of comparison
- attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower()
- # Number of lines to allow to vary in logs (for dates, etc)
- attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) )
- # Allow a file size to vary if sim_size compare
- attributes['delta'] = int( attrib.pop( 'delta', '10000' ) )
- attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) )
- extra_files = []
- if 'ftype' in attrib:
- attributes['ftype'] = attrib['ftype']
- for extra in output_elem.findall( 'extra_files' ):
- extra_files.append( self.__parse_extra_files_elem( extra ) )
- metadata = {}
- for metadata_elem in output_elem.findall( 'metadata' ):
- metadata[ metadata_elem.get('name') ] = metadata_elem.get( 'value' )
- if not (assert_list or file or extra_files or metadata):
- raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)")
- attributes['assert_list'] = assert_list
- attributes['extra_files'] = extra_files
- attributes['metadata'] = metadata
- self.__add_output( name, file, attributes )
+ self.__parse_output_elems( test_elem )
except Exception, e:
self.error = True
self.exception = e
+ def __parse_inputs_elems( self, test_elem, i ):
+ # Composite datasets need a unique name: each test occurs in a fresh
+ # history, but we'll keep it unique per set of tests - use i (test #)
+ # and composite_data_names_counter (instance per test #)
+ composite_data_names_counter = 0
+ for param_elem in test_elem.findall( "param" ):
+ attrib = dict( param_elem.attrib )
+ if 'values' in attrib:
+ value = attrib[ 'values' ].split( ',' )
+ elif 'value' in attrib:
+ value = attrib['value']
+ else:
+ value = None
+ attrib['children'] = list( param_elem.getchildren() )
+ if attrib['children']:
+ # At this time, we can assume having children only
+ # occurs on DataToolParameter test items but this could
+ # change and would cause the below parsing to change
+ # based upon differences in children items
+ attrib['metadata'] = []
+ attrib['composite_data'] = []
+ attrib['edit_attributes'] = []
+ # Composite datasets need to be renamed uniquely
+ composite_data_name = None
+ for child in attrib['children']:
+ if child.tag == 'composite_data':
+ attrib['composite_data'].append( child )
+ if composite_data_name is None:
+ # Generate a unique name; each test uses a
+ # fresh history
+ composite_data_name = '_COMPOSITE_RENAMED_t%i_d%i' \
+ % ( i, composite_data_names_counter )
+ composite_data_names_counter += 1
+ elif child.tag == 'metadata':
+ attrib['metadata'].append( child )
+ elif child.tag == 'metadata':
+ attrib['metadata'].append( child )
+ elif child.tag == 'edit_attributes':
+ attrib['edit_attributes'].append( child )
+ if composite_data_name:
+ # Composite datasets need implicit renaming;
+ # inserted at front of list so explicit declarations
+ # take precedence
+ attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } )
+ self.__add_param( attrib.pop( 'name' ), value, attrib )
+
+ def __parse_output_elems( self, test_elem ):
+ for output_elem in test_elem.findall( "output" ):
+ attrib = dict( output_elem.attrib )
+ name = attrib.pop( 'name', None )
+ if name is None:
+ raise Exception( "Test output does not have a 'name'" )
+
+ assert_list = self.__parse_assert_list( output_elem )
+ file = attrib.pop( 'file', None )
+ # File no longer required if an list of assertions was present.
+ attributes = {}
+ # Method of comparison
+ attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower()
+ # Number of lines to allow to vary in logs (for dates, etc)
+ attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) )
+ # Allow a file size to vary if sim_size compare
+ attributes['delta'] = int( attrib.pop( 'delta', '10000' ) )
+ attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) )
+ extra_files = []
+ if 'ftype' in attrib:
+ attributes['ftype'] = attrib['ftype']
+ for extra in output_elem.findall( 'extra_files' ):
+ extra_files.append( self.__parse_extra_files_elem( extra ) )
+ metadata = {}
+ for metadata_elem in output_elem.findall( 'metadata' ):
+ metadata[ metadata_elem.get('name') ] = metadata_elem.get( 'value' )
+ if not (assert_list or file or extra_files or metadata):
+ raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)")
+ attributes['assert_list'] = assert_list
+ attributes['extra_files'] = extra_files
+ attributes['metadata'] = metadata
+ self.__add_output( name, file, attributes )
+
def __parse_assert_list( self, output_elem ):
assert_elem = output_elem.find("assert_contents")
assert_list = None
https://bitbucket.org/galaxy/galaxy-central/commits/aa6ec1842ba6/
Changeset: aa6ec1842ba6
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Utilize tool test runtime tree analysis earlier to more correctly determine which inputs are data params.
Turns out previous changesets attempting to separately disambiguate and supply implicit defaults for conditionals do not really work with data inputs - because they are not recognized as dataset input params during parsing. Additionally, this change should eliminate any ordering requirements of tool test params - i.e. you shouldn't need to provide the test parameters in an order that makes it clear what branch you are on before supplying a dataset in order for the parser to realize it is a dataset.
Finally, this changeset allows deletion of a lot of code and creates some more robust abstractions.
Affected #: 2 files
diff -r 1cb070e3da5b77f16eb3b8591a1cacd74389c34c -r aa6ec1842ba60b5ba7af69f8f01624c4902bc2be lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -75,29 +75,7 @@
yield data_dict
- def to_dict( self, tool_inputs, declared_inputs ):
- expanded_inputs = {}
- for key, value in tool_inputs.items():
- if isinstance( value, grouping.Conditional ):
- for i, case in enumerate( value.cases ):
- if declared_inputs[ value.test_param.name ] == case.value:
- pass # TODO
- elif isinstance( value, grouping.Repeat ):
- values = []
- for r_name, r_value in value.inputs.iteritems():
- values.append( self.to_dict( {r_name: r_value} , declared_inputs ) )
- expanded_inputs[ value.name ] = values
- elif value.name not in declared_inputs:
- print "%s not declared in tool test, will not change default value." % value.name
- else:
- expanded_inputs[ value.name ] = declared_inputs[value.name]
- return expanded_inputs
-
- def __matching_case( self, cond, declared_inputs, prefix, index=None ):
- param = cond.test_param
- declared_value = self.__declared_match( declared_inputs, param.name, prefix)
- if index is not None:
- declared_value = declared_value[index]
+ def __matching_case_for_value( self, cond, declared_value ):
for i, case in enumerate( cond.cases ):
if declared_value is not None and (case.value == declared_value):
return case
@@ -107,83 +85,7 @@
return case
else:
return Bunch(value=declared_value, inputs=Bunch(items=lambda: []))
- print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( param.name, declared_value )
-
- def expand_multi_grouping( self, tool_inputs, declared_inputs, prefix='', index=0 ):
- """
- Used by API, slight generalization of expand_grouping used by Twill based interactor. Still
- not quite the context/tree based specification that should exist!
- """
- expanded_inputs = {}
- for key, value in tool_inputs.items():
- expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name)
- if isinstance( value, grouping.Conditional ):
- new_prefix = expanded_key
- case = self.__matching_case( value, declared_inputs, new_prefix, index=index )
- if case:
- expanded_value = self.__split_if_str( case.value )
- expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value
- for input_name, input_value in case.inputs.items():
- expanded_inputs.update( self.expand_multi_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ), index=index )
- elif isinstance( value, grouping.Repeat ):
- repeat_index = 0
- any_children_matched = True
- while any_children_matched:
- any_children_matched = False
- for r_name, r_value in value.inputs.iteritems():
- new_prefix = "%s_%d" % ( value.name, repeat_index )
- if prefix:
- new_prefix = "%s|%s" % ( prefix, new_prefix )
- expanded_input = self.expand_multi_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix, index=repeat_index )
- if expanded_input:
- any_children_matched = True
- expanded_inputs.update( expanded_input )
- repeat_index += 1
- else:
- declared_value = self.__declared_match( declared_inputs, value.name, prefix )
- if declared_value and len(declared_value) > index:
- value = self.__split_if_str( declared_value[ index ] )
- expanded_inputs[ expanded_key ] = value
- return expanded_inputs
-
- def expand_grouping( self, tool_inputs, declared_inputs, prefix='' ):
- expanded_inputs = {}
- for key, value in tool_inputs.items():
- expanded_key = value.name if not prefix else "%s|%s" % (prefix, value.name)
- if isinstance( value, grouping.Conditional ):
- new_prefix = expanded_key
- case = self.__matching_case( value, declared_inputs, new_prefix )
- if case:
- expanded_value = self.__split_if_str(case.value)
- expanded_inputs[ "%s|%s" % ( new_prefix, value.test_param.name ) ] = expanded_value
- for input_name, input_value in case.inputs.items():
- expanded_inputs.update( self.expand_grouping( { input_name: input_value }, declared_inputs, prefix=new_prefix ) )
- elif isinstance( value, grouping.Repeat ):
- for repeat_index in xrange( 0, 1 ): # need to allow for and figure out how many repeats we have
- for r_name, r_value in value.inputs.iteritems():
- new_prefix = "%s_%d" % ( value.name, repeat_index )
- if prefix:
- new_prefix = "%s|%s" % ( prefix, new_prefix )
- expanded_inputs.update( self.expand_grouping( { new_prefix : r_value }, declared_inputs, prefix=new_prefix ) )
- else:
- declared_value = self.__declared_match( declared_inputs, value.name, prefix )
- if not declared_value:
- print "%s not declared in tool test, will not change default value." % value.name
- else:
- value = self.__split_if_str(declared_value)
- expanded_inputs[expanded_key] = value
- return expanded_inputs
-
- def __declared_match( self, declared_inputs, name, prefix ):
- prefix_suffixes = [ "%s|" % part for part in prefix.split( "|" ) ] if prefix else []
- prefix_suffixes.append( name )
- prefix_suffixes.reverse()
- prefixed_name = ""
- for prefix_suffix in prefix_suffixes:
- prefixed_name = "%s%s" % ( prefix_suffix, prefixed_name )
- if prefixed_name in declared_inputs:
- return declared_inputs[prefixed_name]
- return None
+ print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( cond.test_param.name, declared_value )
def __split_if_str( self, value ):
split = isinstance(value, str)
@@ -211,6 +113,7 @@
# history, but we'll keep it unique per set of tests - use i (test #)
# and composite_data_names_counter (instance per test #)
composite_data_names_counter = 0
+ raw_inputs = {}
for param_elem in test_elem.findall( "param" ):
attrib = dict( param_elem.attrib )
if 'values' in attrib:
@@ -250,7 +153,56 @@
# inserted at front of list so explicit declarations
# take precedence
attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } )
- self.__add_param( attrib.pop( 'name' ), value, attrib )
+ name = attrib.pop( 'name' )
+ if name not in raw_inputs:
+ raw_inputs[ name ] = []
+ raw_inputs[ name ].append( ( value, attrib ) )
+ self.inputs = self.__process_raw_inputs( self.tool.inputs, raw_inputs )
+
+ def __process_raw_inputs( self, tool_inputs, raw_inputs, parent_context=None ):
+ """
+ Recursively expand flat list of inputs into "tree" form of flat list
+ (| using to nest to new levels) structure and expand dataset
+ information as proceeding to populate self.required_files.
+ """
+ parent_context = parent_context or RootParamContext()
+ expanded_inputs = {}
+ for key, value in tool_inputs.items():
+ if isinstance( value, grouping.Conditional ):
+ cond_context = ParamContext( name=value.name, parent_context=parent_context )
+ case_context = ParamContext( name=value.test_param.name, parent_context=cond_context )
+ raw_input = case_context.value( raw_inputs )
+ case = self.__matching_case_for_value( value, raw_input )
+ if case:
+ expanded_value = self.__split_if_str( case.value )
+ expanded_inputs[ case_context.for_state() ] = expanded_value
+ for input_name, input_value in case.inputs.items():
+ expanded_inputs.update( self.__process_raw_inputs( { input_name: input_value }, raw_inputs, parent_context=cond_context ) )
+ elif isinstance( value, grouping.Repeat ):
+ repeat_index = 0
+ while True:
+ context = ParamContext( name=value.name, index=repeat_index, parent_context=parent_context )
+ updated = False
+ for r_name, r_value in value.inputs.iteritems():
+ expanded_input = self.__process_raw_inputs( { context.for_state() : r_value }, raw_inputs, parent_context=context )
+ if expanded_input:
+ expanded_inputs.update( expanded_input )
+ updated = True
+ if not updated:
+ break
+ repeat_index += 1
+ else:
+ context = ParamContext( name=value.name, parent_context=parent_context )
+ raw_input = context.value( raw_inputs )
+ if raw_input:
+ (param_value, param_extra) = raw_input
+ if isinstance( value, basic.DataToolParameter ):
+ processed_value = [ self.__add_uploaded_dataset( context.for_state(), param_value, param_extra, value ) ]
+ else:
+ param_value = self.__split_if_str( param_value )
+ processed_value = param_value
+ expanded_inputs[ context.for_state() ] = processed_value
+ return expanded_inputs
def __parse_output_elems( self, test_elem ):
for output_elem in test_elem.findall( "output" ):
@@ -324,78 +276,9 @@
extra_attributes['sort'] = string_as_bool( extra.get( 'sort', False ) )
return extra_type, extra_value, extra_name, extra_attributes
- def __add_param( self, name, value, extra ):
- try:
- if name not in self.tool.inputs:
- found_parameter = False
- for input_name, input_value in self.tool.inputs.items():
- if isinstance( input_value, grouping.Group ):
- found_parameter, new_value = self.__expand_grouping_for_data_input(name, value, extra, input_name, input_value)
- if found_parameter:
- value = new_value
- break
- if not found_parameter:
- raise ValueError( "Unable to determine parameter type of test input '%s'. "
- "Ensure that the parameter exists and that any container groups are defined first."
- % name )
- elif isinstance( self.tool.inputs[name], basic.DataToolParameter ):
- value = self.__add_uploaded_dataset( name, value, extra, self.tool.inputs[name] )
- except Exception, e:
- log.debug( "Error for tool %s: could not add test parameter %s. %s" % ( self.tool.id, name, e ) )
- self.inputs.append( ( name, value, extra ) )
-
def __add_output( self, name, file, extra ):
self.outputs.append( ( name, file, extra ) )
- def __expand_grouping_for_data_input( self, name, value, extra, grouping_name, grouping_value ):
- # Currently handles grouping.Conditional and grouping.Repeat
- if isinstance( grouping_value, grouping.Conditional ):
- if name == grouping_value.test_param.name:
- return True, value
- case_test_param_value = None
- for input in self.inputs:
- if input[0] == grouping_value.test_param.name:
- case_test_param_value = input[1]
- break
- if case_test_param_value is None:
- #case for this group has not been set yet
- return False, value
- for case in grouping_value.cases:
- if case.value == case_test_param_value:
- break
- if case.value != case_test_param_value:
- return False, value
- #assert case.value == case_test_param_value, "Current case could not be determined for parameter '%s'. Provided value '%s' could not be found in '%s'." % ( grouping_value.name, value, grouping_value.test_param.name )
- if name in case.inputs:
- if isinstance( case.inputs[name], basic.DataToolParameter ):
- return True, self.__add_uploaded_dataset( name, value, extra, case.inputs[name] )
- else:
- return True, value
- else:
- for input_name, input_parameter in case.inputs.iteritems():
- if isinstance( input_parameter, grouping.Group ):
- found_parameter, new_value = self.__expand_grouping_for_data_input( name, value, extra, input_name, input_parameter )
- if found_parameter:
- return True, new_value
- elif isinstance( grouping_value, grouping.Repeat ):
- # FIXME: grouping.Repeat can only handle 1 repeat param element since the param name
- # is something like "input2" and the expanded page display is something like "queries_0|input2".
- # The problem is that the only param name on the page is "input2", and adding more test input params
- # with the same name ( "input2" ) is not yet supported in our test code ( the last one added is the only
- # one used ).
- if name in grouping_value.inputs:
- if isinstance( grouping_value.inputs[name], basic.DataToolParameter ):
- return True, self.__add_uploaded_dataset( name, value, extra, grouping_value.inputs[name] )
- else:
- return True, value
- else:
- for input_name, input_parameter in grouping_value.inputs.iteritems():
- if isinstance( input_parameter, grouping.Group ):
- found_parameter, new_value = self.__expand_grouping_for_data_input( name, value, extra, input_name, input_parameter )
- if found_parameter:
- return True, new_value
- return False, value
-
def __add_uploaded_dataset( self, name, value, extra, input_parameter ):
if value is None:
assert input_parameter.optional, '%s is not optional. You must provide a valid filename.' % name
@@ -413,3 +296,58 @@
break
value = os.path.basename( value ) # if uploading a file in a path other than root of test-data
return value
+
+
+class ParamContext(object):
+
+ def __init__( self, name, index=None, parent_context=None ):
+ self.parent_context = parent_context
+ self.name = name
+ self.index = None if index is None else int( index )
+
+ def for_state( self ):
+ name = self.name if self.index is None else "%s_%d" % ( self.name, self.index )
+ parent_for_state = self.parent_context.for_state()
+ if parent_for_state:
+ return "%s|%s" % ( parent_for_state, name )
+ else:
+ return name
+
+ def __str__( self ):
+ return "Context[for_state=%s]" % self.for_state()
+
+ def param_names( self ):
+ for parent_context_param in self.parent_context.param_names():
+ yield "%s|%s" % ( parent_context_param, self.name )
+ yield self.name
+
+ def value( self, declared_inputs ):
+ for param_name in self.param_names():
+ if param_name in declared_inputs:
+ index = self.get_index()
+ try:
+ return declared_inputs[ param_name ][ index ]
+ except IndexError:
+ return None
+ return None
+
+ def get_index( self ):
+ if self.index is not None:
+ return self.index
+ else:
+ return self.parent_context.get_index()
+
+
+class RootParamContext(object):
+
+ def __init__( self ):
+ pass
+
+ def for_state( self ):
+ return ""
+
+ def param_names( self ):
+ return []
+
+ def get_index( self ):
+ return 0
diff -r 1cb070e3da5b77f16eb3b8591a1cacd74389c34c -r aa6ec1842ba60b5ba7af69f8f01624c4902bc2be test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -164,41 +164,21 @@
def run_tool( self, testdef, history_id ):
# We need to handle the case where we've uploaded a valid compressed file since the upload
# tool will have uncompressed it on the fly.
- all_inputs = {}
- for name, value, _ in testdef.inputs:
- # TODO: Restrict this to param inputs.
- if value in self.uploads:
- value = self.uploads[ value ]
- if name in all_inputs:
- all_inputs[name].append( value )
- else:
- all_inputs[name] = [ value ]
+ inputs_tree = testdef.inputs.copy()
+ for key, value in inputs_tree.iteritems():
+ values = [value] if not isinstance(value, list) else value
+ for value in values:
+ if value in self.uploads:
+ inputs_tree[ key ] = self.uploads[ value ]
- # TODO: Handle pages?
- # TODO: Handle force_history_refresh?
- flat_inputs = True
- if flat_inputs:
- # Build up tool_input flately (e.g {"a_repeat_0|a_repeat_param" : "value1"})
- expanded_inputs = {}
- expanded_inputs.update(testdef.expand_multi_grouping(testdef.tool.inputs_by_page[0], all_inputs))
- for i in range( 1, testdef.tool.npages ):
- expanded_inputs.update(testdef.expand_multi_grouping(testdef.tool.inputs_by_page[i], all_inputs))
+ # # HACK: Flatten single-value lists. Required when using expand_grouping
+ for key, value in inputs_tree.iteritems():
+ if isinstance(value, list) and len(value) == 1:
+ inputs_tree[key] = value[0]
- # # HACK: Flatten single-value lists. Required when using expand_grouping
- for key, value in expanded_inputs.iteritems():
- if isinstance(value, list) and len(value) == 1:
- expanded_inputs[key] = value[0]
- tool_input = expanded_inputs
- else:
- # Build up tool_input as nested dictionary (e.g. {"a_repeat": [{"a_repeat_param" : "value1"}]})
- # Doesn't work with the tool API at this time.
- tool_input = {}
- tool_input.update(testdef.to_dict(testdef.tool.inputs_by_page[0], all_inputs))
- for i in range( 1, testdef.tool.npages ):
- tool_input.update(testdef.to_dict(testdef.tool.inputs_by_page[i], all_inputs))
-
- datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=tool_input )
+ log.info( "Submiting tool with params %s" % inputs_tree )
+ datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=inputs_tree )
datasets_object = datasets.json()
try:
return self.__dictify_outputs( datasets_object )
@@ -322,9 +302,11 @@
def run_tool( self, testdef, test_history ):
# We need to handle the case where we've uploaded a valid compressed file since the upload
# tool will have uncompressed it on the fly.
+
+ # Lose tons of information to accomodate legacy repeat handling.
all_inputs = {}
- for name, value, _ in testdef.inputs:
- all_inputs[ name ] = value
+ for key, value in testdef.inputs.iteritems():
+ all_inputs[ key.split("|")[-1] ] = value
# See if we have a grouping.Repeat element
repeat_name = None
@@ -340,15 +322,20 @@
else:
job_finish_by_output_count = False
+ # Strip out just a given page of inputs from inputs "tree".
+ def filter_page_inputs( n ):
+ page_input_keys = testdef.tool.inputs_by_page[ n ].keys()
+ return dict( [ (k, v) for k, v in testdef.inputs.iteritems() if k.split("|")[0] in page_input_keys ] )
+
# Do the first page
- page_inputs = testdef.expand_grouping(testdef.tool.inputs_by_page[0], all_inputs)
+ page_inputs = filter_page_inputs( 0 )
# Run the tool
self.twill_test_case.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs )
print "page_inputs (0)", page_inputs
# Do other pages if they exist
for i in range( 1, testdef.tool.npages ):
- page_inputs = testdef.expand_grouping(testdef.tool.inputs_by_page[i], all_inputs)
+ page_inputs = filter_page_inputs( i )
self.twill_test_case.submit_form( **page_inputs )
print "page_inputs (%i)" % i, page_inputs
https://bitbucket.org/galaxy/galaxy-central/commits/76bd49e65887/
Changeset: 76bd49e65887
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Previous attempt to allow implicit defaults for conditional test params had many problems.
This rewrite has fewer.
Affected #: 1 file
diff -r aa6ec1842ba60b5ba7af69f8f01624c4902bc2be -r 76bd49e658876f5cc90ec5b80033f13b058593b8 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -3,7 +3,6 @@
from parameters import basic
from parameters import grouping
from galaxy.util import string_as_bool
-from galaxy.util.bunch import Bunch
import logging
log = logging.getLogger( __name__ )
@@ -76,16 +75,43 @@
yield data_dict
def __matching_case_for_value( self, cond, declared_value ):
+ test_param = cond.test_param
+ if isinstance(test_param, basic.BooleanToolParameter):
+ if declared_value is None:
+ # No explicit value for param in test case, determine from default
+ query_value = test_param.checked
+ else:
+ # Test case supplied value, check cases against this.
+ query_value = string_as_bool( declared_value )
+ matches_declared_value = lambda case_value: string_as_bool(case_value) == query_value
+ elif isinstance(test_param, basic.SelectToolParameter):
+ if declared_value is not None:
+ # Test case supplied explicit value to check against.
+ matches_declared_value = lambda case_value: case_value == declared_value
+ elif test_param.static_options:
+ # No explicit value in test case, not much to do if options are dynamic but
+ # if static options are available can find the one specified as default or
+ # fallback on top most option (like GUI).
+ for (name, value, selected) in test_param.static_options:
+ if selected:
+ default_option = name
+ else:
+ default_option = test_param.static_options[0]
+ matches_declared_value = lambda case_value: case_value == default_option
+ else:
+ # No explicit value for this param and cannot determine a
+ # default - give up. Previously this would just result in a key
+ # error exception.
+ msg = "Failed to find test parameter specification required for conditional %s" % cond
+ raise Exception( msg )
+
+ # Check the tool's defined cases against predicate to determine
+ # selected or default.
for i, case in enumerate( cond.cases ):
- if declared_value is not None and (case.value == declared_value):
- return case
- if declared_value is None:
- # TODO: Default might not be top value, fix this.
- # TODO: Also may be boolean, got to look at checked.
+ if matches_declared_value( case.value ):
return case
else:
- return Bunch(value=declared_value, inputs=Bunch(items=lambda: []))
- print "Not matching case found for %s value %s. Test may fail in unexpected ways." % ( cond.test_param.name, declared_value )
+ log.info("Failed to find case matching test parameter specification for cond %s. Remainder of test behavior is unspecified." % cond)
def __split_if_str( self, value ):
split = isinstance(value, str)
@@ -172,7 +198,8 @@
cond_context = ParamContext( name=value.name, parent_context=parent_context )
case_context = ParamContext( name=value.test_param.name, parent_context=cond_context )
raw_input = case_context.value( raw_inputs )
- case = self.__matching_case_for_value( value, raw_input )
+ case_value = raw_input[ 0 ] if raw_input else None
+ case = self.__matching_case_for_value( value, case_value )
if case:
expanded_value = self.__split_if_str( case.value )
expanded_inputs[ case_context.for_state() ] = expanded_value
https://bitbucket.org/galaxy/galaxy-central/commits/07d48a9f9a09/
Changeset: 07d48a9f9a09
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Make disambiguate_cond more difficult.
Affected #: 2 files
diff -r 76bd49e658876f5cc90ec5b80033f13b058593b8 -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -9,6 +9,9 @@
from galaxy.model.mapping import context as sa_session
from simplejson import dumps, loads
+import logging
+log = logging.getLogger( __name__ )
+
toolbox = None
diff -r 76bd49e658876f5cc90ec5b80033f13b058593b8 -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 test/functional/tools/disambiguate_cond.xml
--- a/test/functional/tools/disambiguate_cond.xml
+++ b/test/functional/tools/disambiguate_cond.xml
@@ -1,7 +1,8 @@
<tool id="handle_cond" name="handle_cond"><description>tail-to-head</description>
+ <!-- --><command>
- echo "$p1.p1v $p2.p2v $p3.p3v" > $out_file1
+ echo "$p1.p1v $p2.p2v $p3.p3v" > $out_file1; cat "$files.p4.file" >> $out_file1;
</command><inputs><conditional name="p1">
@@ -30,7 +31,18 @@
<when value="false"><param name="p3v" value="7" type="integer" /></when>
- </conditional>
+ </conditional>
+ <conditional name="files">
+ <param name="attach_files" type="boolean" checked="true" />
+ <when value="true">
+ <conditional name="p4">
+ <param type="boolean" name="use" />
+ <when value="true">
+ <param type="data" name="file" />
+ </when>
+ </conditional>
+ </when>
+ </conditional></inputs><outputs><data name="out_file1" format="txt" />
@@ -40,9 +52,12 @@
<param name="p1|use" value="True"/><param name="p2|use" value="False"/><param name="p3|use" value="True"/>
+ <param name="p4|use" value="True" />
+ <param name="p4|file" value="simple_line.txt" /><output name="out_file1"><assert_contents><has_line line="4 7 4" />
+ <has_line line="This is a line of text." /></assert_contents></output></test>
https://bitbucket.org/galaxy/galaxy-central/commits/265e67c9dca8/
Changeset: 265e67c9dca8
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Allow robust disambiguation of repeat statements in tool tests.
Imagine a tool with a repeat statement like:
<repeat name="rinst" title="Repeat Parameter"><param name="int_param" type="integer" value="1" /><param name="float_param" type="float" value="2.0" /></repeat>
3 test instances overriding int_param default in all three, but leaving the float_param default in place for the first and last can be specified as follows:
<param name="rinst_0|int_param" value="4" /><param name="rinst_1|int_param" value="5" /><param name="rtnst_2|int_param" value="6" /><param name="rinst_1|float_param" value="4.0" />
This syntax can be mixed and matched with specifying conditionals and nested repeats, etc....
One could imagine an alternative syntax like:
<param name="rinst|int_param" value="4" /><param name="rinst|int_param" value="5" /><param name="rtnst|int_param" value="6" /><param name="rinst|float_param" value="4.0" />
Upon consideration, I have determined that this ambiguous syntax is more difficult to reason about, leaves so much ambiguity in place especially in the context of default and nested elements, and is harder to support. So the plan is to not support it at this time.
The following changeset will add "the right" way to do this anyway:
<repeat name="rinst"><param name="rinst|int_param" value="4" /></repeat><repeat name="rinst"><param name="rinst|int_param" value="5" /><param name="rinst|float_param" value="4.0" /></repeat><repeat name="rinst"><param name="rinst|int_param" value="6" /></repeat>
Affected #: 7 files
diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -139,7 +139,7 @@
# history, but we'll keep it unique per set of tests - use i (test #)
# and composite_data_names_counter (instance per test #)
composite_data_names_counter = 0
- raw_inputs = {}
+ raw_inputs = []
for param_elem in test_elem.findall( "param" ):
attrib = dict( param_elem.attrib )
if 'values' in attrib:
@@ -180,9 +180,7 @@
# take precedence
attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } )
name = attrib.pop( 'name' )
- if name not in raw_inputs:
- raw_inputs[ name ] = []
- raw_inputs[ name ].append( ( value, attrib ) )
+ raw_inputs.append( ( name, value, attrib ) )
self.inputs = self.__process_raw_inputs( self.tool.inputs, raw_inputs )
def __process_raw_inputs( self, tool_inputs, raw_inputs, parent_context=None ):
@@ -197,8 +195,8 @@
if isinstance( value, grouping.Conditional ):
cond_context = ParamContext( name=value.name, parent_context=parent_context )
case_context = ParamContext( name=value.test_param.name, parent_context=cond_context )
- raw_input = case_context.value( raw_inputs )
- case_value = raw_input[ 0 ] if raw_input else None
+ raw_input = case_context.extract_value( raw_inputs )
+ case_value = raw_input[ 1 ] if raw_input else None
case = self.__matching_case_for_value( value, case_value )
if case:
expanded_value = self.__split_if_str( case.value )
@@ -220,9 +218,9 @@
repeat_index += 1
else:
context = ParamContext( name=value.name, parent_context=parent_context )
- raw_input = context.value( raw_inputs )
+ raw_input = context.extract_value( raw_inputs )
if raw_input:
- (param_value, param_extra) = raw_input
+ (name, param_value, param_extra) = raw_input
if isinstance( value, basic.DataToolParameter ):
processed_value = [ self.__add_uploaded_dataset( context.for_state(), param_value, param_extra, value ) ]
else:
@@ -345,24 +343,33 @@
def param_names( self ):
for parent_context_param in self.parent_context.param_names():
- yield "%s|%s" % ( parent_context_param, self.name )
- yield self.name
+ if self.index is not None:
+ yield "%s|%s_%d" % ( parent_context_param, self.name, self.index )
+ else:
+ yield "%s|%s" % ( parent_context_param, self.name )
+ if self.index is not None:
+ yield "%s_%d" % ( self.name, self.index )
+ else:
+ yield self.name
- def value( self, declared_inputs ):
+ def extract_value( self, raw_inputs ):
for param_name in self.param_names():
- if param_name in declared_inputs:
- index = self.get_index()
- try:
- return declared_inputs[ param_name ][ index ]
- except IndexError:
- return None
+ value = self.__raw_param_found( param_name, raw_inputs)
+ if value:
+ return value
return None
- def get_index( self ):
- if self.index is not None:
- return self.index
+ def __raw_param_found( self, param_name, raw_inputs ):
+ index = None
+ for i, raw_input in enumerate( raw_inputs ):
+ if raw_input[ 0 ] == param_name:
+ index = i
+ if index is not None:
+ raw_input = raw_inputs[ index ]
+ del raw_inputs[ index ]
+ return raw_input
else:
- return self.parent_context.get_index()
+ return None
class RootParamContext(object):
diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -325,10 +325,16 @@
else:
job_finish_by_output_count = False
+ inputs_tree = testdef.inputs
+ # # # HACK: Flatten single-value lists. Required when using expand_grouping
+ # #for key, value in inputs_tree.iteritems():
+ # if isinstance(value, list) and len(value) == 1:
+ # inputs_tree[key] = value[0]
+
# Strip out just a given page of inputs from inputs "tree".
def filter_page_inputs( n ):
page_input_keys = testdef.tool.inputs_by_page[ n ].keys()
- return dict( [ (k, v) for k, v in testdef.inputs.iteritems() if k.split("|")[0] in page_input_keys ] )
+ return dict( [ (k, v) for k, v in inputs_tree.iteritems() if k.split("|")[0] or k.split("|")[0].resplit("_", 1)[0] in page_input_keys ] )
# Do the first page
page_inputs = filter_page_inputs( 0 )
diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/tools/disambiguate_repeats.xml
--- /dev/null
+++ b/test/functional/tools/disambiguate_repeats.xml
@@ -0,0 +1,28 @@
+<tool id="disambiguate_repeats" name="disambiguate_repeats">
+ <command>
+ cat #for $q in $queries# ${q.input} #end for# #for $q in $more_queries# ${q.input} #end for# > $out_file1
+ </command>
+ <inputs>
+ <repeat name="queries" title="Dataset">
+ <param name="input" type="data" label="Select" />
+ </repeat>
+ <repeat name="more_queries" title="Dataset">
+ <param name="input" type="data" label="Select" />
+ </repeat>
+ </inputs>
+ <outputs>
+ <data name="out_file1" format="txt" />
+ </outputs>
+ <tests>
+ <!-- Can use prefixes to disambiguate inputs or force order. -->
+ <test>
+ <param name="queries_1|input" value="simple_line_alternative.txt"/>
+ <param name="queries_0|input" value="simple_line.txt"/>
+
+ <param name="more_queries_1|input" value="simple_line_alternative.txt" />
+ <param name="more_queries_0|input" value="simple_line.txt"/>
+
+ <output name="out_file1" file="simple_lines_interleaved.txt"/>
+ </test>
+ </tests>
+</tool>
diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/tools/multi_repeats.xml
--- a/test/functional/tools/multi_repeats.xml
+++ b/test/functional/tools/multi_repeats.xml
@@ -21,6 +21,12 @@
<param name="input2" value="simple_line.txt"/><output name="out_file1" file="simple_line_x2.txt"/></test>
+ <!--
+ Following tests continue to work, but for anything more
+ advanced than this simple case these should be considered
+ something of an anti-pattern - see disambiguate_repeats.xml
+ for superior syntax.
+ --><test><param name="input1" value="simple_line.txt"/><param name="input2" value="simple_line.txt"/>
diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/tools/samples_tool_conf.xml
--- a/test/functional/tools/samples_tool_conf.xml
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -11,4 +11,5 @@
<tool file="composite_output.xml" /><tool file="metadata.xml" /><tool file="output_order.xml" />
+ <tool file="disambiguate_repeats.xml" /></toolbox>
\ No newline at end of file
diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/tools/test-data/simple_line_alternative.txt
--- /dev/null
+++ b/test/functional/tools/test-data/simple_line_alternative.txt
@@ -0,0 +1,1 @@
+This is a different line of text.
\ No newline at end of file
diff -r 07d48a9f9a09edd916bf4f94e95326211cd0d255 -r 265e67c9dca81c496ce42ba6517b10444ad56d59 test/functional/tools/test-data/simple_lines_interleaved.txt
--- /dev/null
+++ b/test/functional/tools/test-data/simple_lines_interleaved.txt
@@ -0,0 +1,4 @@
+This is a line of text.
+This is a different line of text.
+This is a line of text.
+This is a different line of text.
\ No newline at end of file
https://bitbucket.org/galaxy/galaxy-central/commits/e86455cbd0a5/
Changeset: e86455cbd0a5
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Tree-like input specification in tool XML file.
Allow nestable <repeat> and <conditional> statements in test tool definitions.
For instance, imagine a tool with a repeat statement like:
<repeat name="rinst" title="Repeat Parameter"><param name="int_param" type="integer" value="1" /><param name="float_param" type="float" value="2.0" /></repeat>
3 test instances overriding int_param default in all three, but leaving the float_param default in place for the first and last can be specified as follows:
<repeat name="rinst"><param name="rinst|int_param" value="4" /></repeat><repeat name="rinst"><param name="rinst|int_param" value="5" /><param name="rinst|float_param" value="4.0" /></repeat><repeat name="rinst"><param name="rinst|int_param" value="6" /></repeat>
Likewise, <conditional name="cinst"> can be used analogously though this is for grouping and disambiguation - does not allow multiple instances to be specified, obviously.
Affected #: 2 files
diff -r 265e67c9dca81c496ce42ba6517b10444ad56d59 -r e86455cbd0a554613eee96852a1240205f65696b lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -127,6 +127,7 @@
# features or workarounds.
self.interactor = test_elem.get( 'interactor', default_interactor )
+ self.__preprocess_input_elems( test_elem )
self.__parse_inputs_elems( test_elem, i )
self.__parse_output_elems( test_elem )
@@ -134,6 +135,9 @@
self.error = True
self.exception = e
+ def __preprocess_input_elems( self, test_elem ):
+ expand_input_elems( test_elem )
+
def __parse_inputs_elems( self, test_elem, i ):
# Composite datasets need a unique name: each test occurs in a fresh
# history, but we'll keep it unique per set of tests - use i (test #)
@@ -385,3 +389,46 @@
def get_index( self ):
return 0
+
+
+def expand_input_elems( root_elem, prefix="" ):
+ __append_prefix_to_params( root_elem, prefix )
+
+ repeat_elems = root_elem.findall( 'repeat' )
+ indices = {}
+ for repeat_elem in repeat_elems:
+ name = repeat_elem.get( "name" )
+ if name not in indices:
+ indices[ name ] = 0
+ index = 0
+ else:
+ index = indices[ name ] + 1
+ indices[ name ] = index
+
+ new_prefix = __prefix_join( prefix, name, index=index )
+ expand_input_elems( repeat_elem, new_prefix )
+ __pull_up_params( root_elem, repeat_elem )
+ root_elem.remove( repeat_elem )
+
+ cond_elems = root_elem.findall( 'conditional' )
+ for cond_elem in cond_elems:
+ new_prefix = __prefix_join( prefix, cond_elem.get( "name" ) )
+ expand_input_elems( cond_elem, new_prefix )
+ __pull_up_params( root_elem, cond_elem )
+ root_elem.remove( cond_elem )
+
+
+def __append_prefix_to_params( elem, prefix ):
+ for param_elem in elem.findall( 'param' ):
+ param_elem.set( "name", __prefix_join( prefix, param_elem.get( "name" ) ) )
+
+
+def __pull_up_params( parent_elem, child_elem ):
+ for param_elem in child_elem.findall( 'param' ):
+ parent_elem.append( param_elem )
+ child_elem.remove( param_elem )
+
+
+def __prefix_join( prefix, name, index=None ):
+ name = name if index is None else "%s_%d" % ( name, index )
+ return name if not prefix else "%s|%s" % ( prefix, name )
diff -r 265e67c9dca81c496ce42ba6517b10444ad56d59 -r e86455cbd0a554613eee96852a1240205f65696b test/functional/tools/disambiguate_repeats.xml
--- a/test/functional/tools/disambiguate_repeats.xml
+++ b/test/functional/tools/disambiguate_repeats.xml
@@ -24,5 +24,22 @@
<output name="out_file1" file="simple_lines_interleaved.txt"/></test>
+
+ <test>
+ <repeat name="queries">
+ <param name="input" value="simple_line.txt"/>
+ </repeat>
+ <repeat name="queries">
+ <param name="input" value="simple_line_alternative.txt"/>
+ </repeat>
+ <repeat name="more_queries">
+ <param name="input" value="simple_line.txt"/>
+ </repeat>
+ <repeat name="more_queries">
+ <param name="input" value="simple_line_alternative.txt"/>
+ </repeat>
+ <output name="out_file1" file="simple_lines_interleaved.txt"/>
+ </test>
+
</tests></tool>
https://bitbucket.org/galaxy/galaxy-central/commits/d4f7fbbc3017/
Changeset: d4f7fbbc3017
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Lazy load tool test cases.
Running any tool test, will force everything in toolbox to parse out, but at least just starting Galaxy doesn't require parsing all test cases.
Affected #: 1 file
diff -r e86455cbd0a554613eee96852a1240205f65696b -r d4f7fbbc3017529eef45719fa8a6c54a12e6de78 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1219,14 +1219,9 @@
for key, value in uihints_elem.attrib.iteritems():
self.uihints[ key ] = value
# Tests
- tests_elem = root.find( "tests" )
- if tests_elem:
- try:
- self.tests = parse_tests_elem( self, tests_elem )
- except:
- log.exception( "Failed to parse tool tests" )
- else:
- self.tests = None
+ self.__tests_elem = root.find( "tests" )
+ self.__tests_populated = False
+
# Requirements (dependencies)
self.requirements = parse_requirements_from_xml( root )
# Determine if this tool can be used in workflows
@@ -1237,6 +1232,21 @@
self.trackster_conf = TracksterConfig.parse( trackster_conf )
else:
self.trackster_conf = None
+
+ @property
+ def tests( self ):
+ if not self.__tests_populated:
+ tests_elem = self.__tests_elem
+ if tests_elem:
+ try:
+ self.__tests = parse_tests_elem( self, tests_elem )
+ except:
+ log.exception( "Failed to parse tool tests" )
+ else:
+ self.__tests = None
+ self.__tests_populated = True
+ return self.__tests
+
def parse_inputs( self, root ):
"""
Parse the "<inputs>" element and create appropriate `ToolParameter`s.
https://bitbucket.org/galaxy/galaxy-central/commits/0923ef9b1edb/
Changeset: 0923ef9b1edb
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Small clean up of argument processing in functional_tests.py
Affected #: 1 file
diff -r d4f7fbbc3017529eef45719fa8a6c54a12e6de78 -r 0923ef9b1edba9d90de4b9d032f4818942489df1 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -176,12 +176,11 @@
tool_path = os.environ.get( 'GALAXY_TEST_TOOL_PATH', 'tools' )
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales
- testing_migrated_tools = '-migrated' in sys.argv
- testing_installed_tools = '-installed' in sys.argv
+ testing_migrated_tools = __check_arg( '-migrated' )
+ testing_installed_tools = __check_arg( '-installed' )
datatypes_conf_override = None
if testing_migrated_tools or testing_installed_tools:
- sys.argv.pop()
# Store a jsonified dictionary of tool_id : GALAXY_TEST_FILE_DIR pairs.
galaxy_tool_shed_test_file = 'shed_tools_dict'
# We need the upload tool for functional tests, so we'll create a temporary tool panel config that defines it.
@@ -198,9 +197,8 @@
# Exclude all files except test_toolbox.py.
ignore_files = ( re.compile( r'^test_[adghlmsu]*' ), re.compile( r'^test_ta*' ) )
else:
- framework_test = '-framework' in sys.argv # Run through suite of tests testing framework.
+ framework_test = __check_arg( '-framework' ) # Run through suite of tests testing framework.
if framework_test:
- sys.argv.pop()
framework_tool_dir = os.path.join('test', 'functional', 'tools')
tool_conf = os.path.join( framework_tool_dir, 'samples_tool_conf.xml' )
datatypes_conf_override = os.path.join( framework_tool_dir, 'sample_datatypes_conf.xml' )
@@ -522,5 +520,19 @@
else:
return 1
+
+def __check_arg( name, param=False ):
+ try:
+ index = sys.argv.index( name )
+ del sys.argv[ index ]
+ if param:
+ ret_val = sys.argv[ index ]
+ del sys.argv[ index ]
+ else:
+ ret_val = True
+ except ValueError:
+ ret_val = False
+ return ret_val
+
if __name__ == "__main__":
sys.exit( main() )
https://bitbucket.org/galaxy/galaxy-central/commits/d5c0f73b3db4/
Changeset: d5c0f73b3db4
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Refactor interactors out of test_toolbox into base.
For potential reuse in other tests.
Affected #: 2 files
diff -r 0923ef9b1edba9d90de4b9d032f4818942489df1 -r d5c0f73b3db4cdb0b984b8a64da72dfe64ab6f0d test/base/interactor.py
--- /dev/null
+++ b/test/base/interactor.py
@@ -0,0 +1,380 @@
+import os
+from galaxy.tools.parameters import grouping
+import galaxy.model
+from galaxy.model.orm import and_, desc
+from galaxy.model.mapping import context as sa_session
+from simplejson import dumps, loads
+
+
+def build_interactor( test_case, type="api" ):
+ interactor_class = GALAXY_INTERACTORS[ type ]
+ return interactor_class( test_case )
+
+
+class GalaxyInteractorApi( object ):
+
+ def __init__( self, twill_test_case ):
+ self.twill_test_case = twill_test_case
+ self.api_url = "%s/api" % twill_test_case.url.rstrip("/")
+ self.api_key = self.__get_user_key( twill_test_case.user_api_key, twill_test_case.master_api_key )
+ self.uploads = {}
+
+ def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ):
+ self.twill_test_case.wait_for( lambda: not self.__history_ready( history_id ), maxseconds=maxseconds)
+ hid = output_data.get( 'id' )
+ fetcher = self.__dataset_fetcher( history_id )
+ ## TODO: Twill version verifys dataset is 'ok' in here.
+ self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id )
+ metadata = attributes.get( 'metadata', {} )
+ if metadata:
+ dataset = self.__get( "histories/%s/contents/%s" % ( history_id, hid ) ).json()
+ for key, value in metadata.iteritems():
+ dataset_key = "metadata_%s" % key
+ try:
+ dataset_value = dataset.get( dataset_key, None )
+ if dataset_value != value:
+ msg = "Dataset metadata verification for [%s] failed, expected [%s] but found [%s]."
+ msg_params = ( key, value, dataset_value )
+ msg = msg % msg_params
+ raise Exception( msg )
+ except KeyError:
+ msg = "Failed to verify dataset metadata, metadata key [%s] was not found." % key
+ raise Exception( msg )
+
+ def get_job_stream( self, history_id, output_data, stream ):
+ hid = output_data.get( 'id' )
+ data = self.__get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json()
+ return data.get( stream, '' )
+
+ def new_history( self ):
+ history_json = self.__post( "histories", {"name": "test_history"} ).json()
+ return history_json[ 'id' ]
+
+ def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ):
+ fname = test_data[ 'fname' ]
+ tool_input = {
+ "file_type": test_data[ 'ftype' ],
+ "dbkey": test_data[ 'dbkey' ],
+ }
+ for elem in test_data.get('metadata', []):
+ tool_input["files_metadata|%s" % elem.get( 'name' )] = elem.get( 'value' )
+
+ composite_data = test_data[ 'composite_data' ]
+ if composite_data:
+ files = {}
+ for i, composite_file in enumerate( composite_data ):
+ file_name = self.twill_test_case.get_filename( composite_file.get( 'value' ), shed_tool_id=shed_tool_id )
+ files["files_%s|file_data" % i] = open( file_name, 'rb' )
+ tool_input.update({
+ #"files_%d|NAME" % i: name,
+ "files_%d|type" % i: "upload_dataset",
+ ## TODO:
+ #"files_%d|space_to_tab" % i: composite_file.get( 'space_to_tab', False )
+ })
+ name = test_data[ 'name' ]
+ else:
+ file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id )
+ name = test_data.get( 'name', None )
+ if not name:
+ name = os.path.basename( file_name )
+
+ tool_input.update({
+ "files_0|NAME": name,
+ "files_0|type": "upload_dataset",
+ })
+ files = {
+ "files_0|file_data": open( file_name, 'rb')
+ }
+ submit_response_object = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files )
+ submit_response = submit_response_object.json()
+ try:
+ dataset = submit_response["outputs"][0]
+ except KeyError:
+ raise Exception(submit_response)
+ #raise Exception(str(dataset))
+ hid = dataset['id']
+ self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = self.uploads[ name ] = {"src": "hda", "id": hid}
+ return self.__wait_for_history( history_id )
+
+ def run_tool( self, testdef, history_id ):
+ # We need to handle the case where we've uploaded a valid compressed file since the upload
+ # tool will have uncompressed it on the fly.
+
+ inputs_tree = testdef.inputs.copy()
+ for key, value in inputs_tree.iteritems():
+ values = [value] if not isinstance(value, list) else value
+ for value in values:
+ if value in self.uploads:
+ inputs_tree[ key ] = self.uploads[ value ]
+
+ # # HACK: Flatten single-value lists. Required when using expand_grouping
+ for key, value in inputs_tree.iteritems():
+ if isinstance(value, list) and len(value) == 1:
+ inputs_tree[key] = value[0]
+
+ datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=inputs_tree )
+ datasets_object = datasets.json()
+ try:
+ return self.__dictify_outputs( datasets_object )
+ except KeyError:
+ raise Exception( datasets_object[ 'message' ] )
+
+ def __dictify_outputs( self, datasets_object ):
+ ## Convert outputs list to a dictionary that can be accessed by
+ ## output_name so can be more flexiable about ordering of outputs
+ ## but also allows fallback to legacy access as list mode.
+ outputs_dict = {}
+ index = 0
+ for output in datasets_object[ 'outputs' ]:
+ outputs_dict[ index ] = outputs_dict[ output.get("output_name") ] = output
+ index += 1
+ return outputs_dict
+
+ def output_hid( self, output_data ):
+ return output_data[ 'id' ]
+
+ def delete_history( self, history ):
+ return None
+
+ def __wait_for_history( self, history_id ):
+ def wait():
+ while not self.__history_ready( history_id ):
+ pass
+ return wait
+
+ def __history_ready( self, history_id ):
+ history_json = self.__get( "histories/%s" % history_id ).json()
+ state = history_json[ 'state' ]
+ if state == 'ok':
+ return True
+ elif state == 'error':
+ raise Exception("History in error state.")
+ return False
+
+ def __submit_tool( self, history_id, tool_id, tool_input, extra_data={}, files=None ):
+ data = dict(
+ history_id=history_id,
+ tool_id=tool_id,
+ inputs=dumps( tool_input ),
+ **extra_data
+ )
+ return self.__post( "tools", files=files, data=data )
+
+ def __get_user_key( self, user_key, admin_key ):
+ if user_key:
+ return user_key
+ all_users = self.__get( 'users', key=admin_key ).json()
+ try:
+ test_user = [ user for user in all_users if user["email"] == 'test(a)bx.psu.edu' ][0]
+ except IndexError:
+ data = dict(
+ email='test(a)bx.psu.edu',
+ password='testuser',
+ username='admin-user',
+ )
+ test_user = self.__post( 'users', data, key=admin_key ).json()
+ return self.__post( "users/%s/api_key" % test_user['id'], key=admin_key ).json()
+
+ def __dataset_fetcher( self, history_id ):
+ def fetcher( hda_id, base_name=None ):
+ url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id)
+ if base_name:
+ url += "&filename=%s" % base_name
+ return self.__get( url ).content
+
+ return fetcher
+
+ def __post( self, path, data={}, files=None, key=None):
+ if not key:
+ key = self.api_key
+ data = data.copy()
+ data['key'] = key
+ return post_request( "%s/%s" % (self.api_url, path), data=data, files=files )
+
+ def __get( self, path, data={}, key=None ):
+ if not key:
+ key = self.api_key
+ data = data.copy()
+ data['key'] = key
+ if path.startswith("/api"):
+ path = path[ len("/api"): ]
+ url = "%s/%s" % (self.api_url, path)
+ return get_request( url, params=data )
+
+
+class GalaxyInteractorTwill( object ):
+
+ def __init__( self, twill_test_case ):
+ self.twill_test_case = twill_test_case
+
+ def verify_output( self, history, output_data, outfile, attributes, shed_tool_id, maxseconds ):
+ hid = output_data.get( 'hid' )
+ self.twill_test_case.verify_dataset_correctness( outfile, hid=hid, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds )
+
+ def get_job_stream( self, history_id, output_data, stream ):
+ return self.twill_test_case._get_job_stream_output( output_data.get( 'id' ), stream=stream, format=False )
+
+ def stage_data_async( self, test_data, history, shed_tool_id, async=True ):
+ name = test_data.get( 'name', None )
+ if name:
+ async = False
+ self.twill_test_case.upload_file( test_data['fname'],
+ ftype=test_data['ftype'],
+ dbkey=test_data['dbkey'],
+ metadata=test_data['metadata'],
+ composite_data=test_data['composite_data'],
+ shed_tool_id=shed_tool_id,
+ wait=(not async) )
+ if name:
+ hda_id = self.twill_test_case.get_history_as_data_list()[-1].get( 'id' )
+ try:
+ self.twill_test_case.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name )
+ except:
+ print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name)
+ return lambda: self.twill_test_case.wait()
+
+ def run_tool( self, testdef, test_history ):
+ # We need to handle the case where we've uploaded a valid compressed file since the upload
+ # tool will have uncompressed it on the fly.
+
+ # Lose tons of information to accomodate legacy repeat handling.
+ all_inputs = {}
+ for key, value in testdef.inputs.iteritems():
+ all_inputs[ key.split("|")[-1] ] = value
+
+ # See if we have a grouping.Repeat element
+ repeat_name = None
+ for input_name, input_value in testdef.tool.inputs_by_page[0].items():
+ if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: # default behavior is to test 1 repeat, for backwards compatibility
+ if not input_value.min: # If input_value.min == 1, the element is already on the page don't add new element.
+ repeat_name = input_name
+ break
+
+ #check if we need to verify number of outputs created dynamically by tool
+ if testdef.tool.force_history_refresh:
+ job_finish_by_output_count = len( self.twill_test_case.get_history_as_data_list() )
+ else:
+ job_finish_by_output_count = False
+
+ inputs_tree = testdef.inputs
+ # # # HACK: Flatten single-value lists. Required when using expand_grouping
+ # #for key, value in inputs_tree.iteritems():
+ # if isinstance(value, list) and len(value) == 1:
+ # inputs_tree[key] = value[0]
+
+ # Strip out just a given page of inputs from inputs "tree".
+ def filter_page_inputs( n ):
+ page_input_keys = testdef.tool.inputs_by_page[ n ].keys()
+ return dict( [ (k, v) for k, v in inputs_tree.iteritems() if k.split("|")[0] or k.split("|")[0].resplit("_", 1)[0] in page_input_keys ] )
+
+ # Do the first page
+ page_inputs = filter_page_inputs( 0 )
+
+ # Run the tool
+ self.twill_test_case.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs )
+ print "page_inputs (0)", page_inputs
+ # Do other pages if they exist
+ for i in range( 1, testdef.tool.npages ):
+ page_inputs = filter_page_inputs( i )
+ self.twill_test_case.submit_form( **page_inputs )
+ print "page_inputs (%i)" % i, page_inputs
+
+ # Check the results ( handles single or multiple tool outputs ). Make sure to pass the correct hid.
+ # The output datasets from the tool should be in the same order as the testdef.outputs.
+ data_list = None
+ while data_list is None:
+ data_list = self.twill_test_case.get_history_as_data_list()
+ if job_finish_by_output_count and len( testdef.outputs ) > ( len( data_list ) - job_finish_by_output_count ):
+ data_list = None
+ return data_list
+
+ def new_history( self ):
+ # Start with a new history
+ self.twill_test_case.logout()
+ self.twill_test_case.login( email='test(a)bx.psu.edu' )
+ admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email == 'test(a)bx.psu.edu' ).one()
+ self.twill_test_case.new_history()
+ latest_history = sa_session.query( galaxy.model.History ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
+ .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
+ .first()
+ assert latest_history is not None, "Problem retrieving latest_history from database"
+ if len( self.twill_test_case.get_history_as_data_list() ) > 0:
+ raise AssertionError("ToolTestCase.do_it failed")
+ return latest_history
+
+ def delete_history( self, latest_history ):
+ self.twill_test_case.delete_history( id=self.twill_test_case.security.encode_id( latest_history.id ) )
+
+ def output_hid( self, output_data ):
+ return output_data.get( 'hid' )
+
+
+GALAXY_INTERACTORS = {
+ 'api': GalaxyInteractorApi,
+ 'twill': GalaxyInteractorTwill,
+}
+
+
+# Lets just try to use requests if it is available, but if not provide fallback
+# on custom implementations of limited requests get/post functionality.
+try:
+ from requests import get as get_request
+ from requests import post as post_request
+except ImportError:
+ import urllib2
+ import httplib
+
+ class RequestsLikeResponse( object ):
+
+ def __init__( self, content ):
+ self.content = content
+
+ def json( self ):
+ return loads( self.content )
+
+ def get_request( url, params={} ):
+ argsep = '&'
+ if '?' not in url:
+ argsep = '?'
+ url = url + argsep + '&'.join( [ '%s=%s' % (k, v) for k, v in params.iteritems() ] )
+ #req = urllib2.Request( url, headers = { 'Content-Type': 'application/json' } )
+ return RequestsLikeResponse(urllib2.urlopen( url ).read() )
+
+ def post_request( url, data, files ):
+ parsed_url = urllib2.urlparse.urlparse( url )
+ return __post_multipart( host=parsed_url.netloc, selector=parsed_url.path, fields=data.iteritems(), files=(files or {}).iteritems() )
+
+ # http://stackoverflow.com/a/681182
+ def __post_multipart(host, selector, fields, files):
+ content_type, body = __encode_multipart_formdata(fields, files)
+ h = httplib.HTTP(host)
+ h.putrequest('POST', selector)
+ h.putheader('content-type', content_type)
+ h.putheader('content-length', str(len(body)))
+ h.endheaders()
+ h.send(body)
+ errcode, errmsg, headers = h.getreply()
+ return RequestsLikeResponse(h.file.read())
+
+ def __encode_multipart_formdata(fields, files):
+ LIMIT = '----------lImIt_of_THE_fIle_eW_$'
+ CRLF = '\r\n'
+ L = []
+ for (key, value) in fields:
+ L.append('--' + LIMIT)
+ L.append('Content-Disposition: form-data; name="%s"' % key)
+ L.append('')
+ L.append(value)
+ for (key, value) in files:
+ L.append('--' + LIMIT)
+ L.append('Content-Disposition: form-data; name="%s"; filename="%s";' % (key, key))
+ L.append('Content-Type: application/octet-stream')
+ L.append('')
+ L.append(value.read())
+ L.append('--' + LIMIT + '--')
+ L.append('')
+ body = CRLF.join(L)
+ content_type = 'multipart/form-data; boundary=%s' % LIMIT
+ return content_type, body
diff -r 0923ef9b1edba9d90de4b9d032f4818942489df1 -r d5c0f73b3db4cdb0b984b8a64da72dfe64ab6f0d test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -1,14 +1,7 @@
+import new
import sys
-import new
-import os
-from galaxy.tools.parameters import grouping
-from galaxy.util import string_as_bool
from base.twilltestcase import TwillTestCase
-import galaxy.model
-from galaxy.model.orm import and_, desc
-from galaxy.model.mapping import context as sa_session
-from simplejson import dumps, loads
-
+from base.interactor import build_interactor
import logging
log = logging.getLogger( __name__ )
@@ -45,9 +38,7 @@
galaxy_interactor.delete_history( test_history )
def __galaxy_interactor( self, testdef ):
- interactor_key = testdef.interactor
- interactor_class = GALAXY_INTERACTORS[ interactor_key ]
- return interactor_class( self )
+ return build_interactor( self, testdef.interactor )
def __handle_test_def_errors(self, testdef):
# If the test generation had an error, raise
@@ -79,307 +70,6 @@
raise
-class GalaxyInteractorApi( object ):
-
- def __init__( self, twill_test_case ):
- self.twill_test_case = twill_test_case
- self.api_url = "%s/api" % twill_test_case.url.rstrip("/")
- self.api_key = self.__get_user_key( twill_test_case.user_api_key, twill_test_case.master_api_key )
- self.uploads = {}
-
- def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ):
- self.twill_test_case.wait_for( lambda: not self.__history_ready( history_id ), maxseconds=maxseconds)
- hid = output_data.get( 'id' )
- fetcher = self.__dataset_fetcher( history_id )
- ## TODO: Twill version verifys dataset is 'ok' in here.
- self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id )
- metadata = attributes.get( 'metadata', {} )
- if metadata:
- dataset = self.__get( "histories/%s/contents/%s" % ( history_id, hid ) ).json()
- for key, value in metadata.iteritems():
- dataset_key = "metadata_%s" % key
- try:
- dataset_value = dataset.get( dataset_key, None )
- if dataset_value != value:
- msg = "Dataset metadata verification for [%s] failed, expected [%s] but found [%s]."
- msg_params = ( key, value, dataset_value )
- msg = msg % msg_params
- raise Exception( msg )
- except KeyError:
- msg = "Failed to verify dataset metadata, metadata key [%s] was not found." % key
- raise Exception( msg )
-
- def get_job_stream( self, history_id, output_data, stream ):
- hid = output_data.get( 'id' )
- data = self.__get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json()
- return data.get( stream, '' )
-
- def new_history( self ):
- history_json = self.__post( "histories", {"name": "test_history"} ).json()
- return history_json[ 'id' ]
-
- def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ):
- fname = test_data[ 'fname' ]
- tool_input = {
- "file_type": test_data[ 'ftype' ],
- "dbkey": test_data[ 'dbkey' ],
- }
- for elem in test_data.get('metadata', []):
- tool_input["files_metadata|%s" % elem.get( 'name' )] = elem.get( 'value' )
-
- composite_data = test_data[ 'composite_data' ]
- if composite_data:
- files = {}
- for i, composite_file in enumerate( composite_data ):
- file_name = self.twill_test_case.get_filename( composite_file.get( 'value' ), shed_tool_id=shed_tool_id )
- files["files_%s|file_data" % i] = open( file_name, 'rb' )
- tool_input.update({
- #"files_%d|NAME" % i: name,
- "files_%d|type" % i: "upload_dataset",
- ## TODO:
- #"files_%d|space_to_tab" % i: composite_file.get( 'space_to_tab', False )
- })
- name = test_data[ 'name' ]
- else:
- file_name = self.twill_test_case.get_filename( fname, shed_tool_id=shed_tool_id )
- name = test_data.get( 'name', None )
- if not name:
- name = os.path.basename( file_name )
-
- tool_input.update({
- "files_0|NAME": name,
- "files_0|type": "upload_dataset",
- })
- files = {
- "files_0|file_data": open( file_name, 'rb')
- }
- submit_response_object = self.__submit_tool( history_id, "upload1", tool_input, extra_data={"type": "upload_dataset"}, files=files )
- submit_response = submit_response_object.json()
- try:
- dataset = submit_response["outputs"][0]
- except KeyError:
- raise Exception(submit_response)
- #raise Exception(str(dataset))
- hid = dataset['id']
- self.uploads[ os.path.basename(fname) ] = self.uploads[ fname ] = self.uploads[ name ] = {"src": "hda", "id": hid}
- return self.__wait_for_history( history_id )
-
- def run_tool( self, testdef, history_id ):
- # We need to handle the case where we've uploaded a valid compressed file since the upload
- # tool will have uncompressed it on the fly.
-
- inputs_tree = testdef.inputs.copy()
- for key, value in inputs_tree.iteritems():
- values = [value] if not isinstance(value, list) else value
- for value in values:
- if value in self.uploads:
- inputs_tree[ key ] = self.uploads[ value ]
-
- # # HACK: Flatten single-value lists. Required when using expand_grouping
- for key, value in inputs_tree.iteritems():
- if isinstance(value, list) and len(value) == 1:
- inputs_tree[key] = value[0]
-
- log.info( "Submiting tool with params %s" % inputs_tree )
- datasets = self.__submit_tool( history_id, tool_id=testdef.tool.id, tool_input=inputs_tree )
- datasets_object = datasets.json()
- try:
- return self.__dictify_outputs( datasets_object )
- except KeyError:
- raise Exception( datasets_object[ 'message' ] )
-
- def __dictify_outputs( self, datasets_object ):
- ## Convert outputs list to a dictionary that can be accessed by
- ## output_name so can be more flexiable about ordering of outputs
- ## but also allows fallback to legacy access as list mode.
- outputs_dict = {}
- index = 0
- for output in datasets_object[ 'outputs' ]:
- outputs_dict[ index ] = outputs_dict[ output.get("output_name") ] = output
- index += 1
- return outputs_dict
-
- def output_hid( self, output_data ):
- return output_data[ 'id' ]
-
- def delete_history( self, history ):
- return None
-
- def __wait_for_history( self, history_id ):
- def wait():
- while not self.__history_ready( history_id ):
- pass
- return wait
-
- def __history_ready( self, history_id ):
- history_json = self.__get( "histories/%s" % history_id ).json()
- state = history_json[ 'state' ]
- if state == 'ok':
- return True
- elif state == 'error':
- raise Exception("History in error state.")
- return False
-
- def __submit_tool( self, history_id, tool_id, tool_input, extra_data={}, files=None ):
- data = dict(
- history_id=history_id,
- tool_id=tool_id,
- inputs=dumps( tool_input ),
- **extra_data
- )
- return self.__post( "tools", files=files, data=data )
-
- def __get_user_key( self, user_key, admin_key ):
- if user_key:
- return user_key
- all_users = self.__get( 'users', key=admin_key ).json()
- try:
- test_user = [ user for user in all_users if user["email"] == 'test(a)bx.psu.edu' ][0]
- except IndexError:
- data = dict(
- email='test(a)bx.psu.edu',
- password='testuser',
- username='admin-user',
- )
- test_user = self.__post( 'users', data, key=admin_key ).json()
- return self.__post( "users/%s/api_key" % test_user['id'], key=admin_key ).json()
-
- def __dataset_fetcher( self, history_id ):
- def fetcher( hda_id, base_name=None ):
- url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id)
- if base_name:
- url += "&filename=%s" % base_name
- return self.__get( url ).content
-
- return fetcher
-
- def __post( self, path, data={}, files=None, key=None):
- if not key:
- key = self.api_key
- data = data.copy()
- data['key'] = key
- return post_request( "%s/%s" % (self.api_url, path), data=data, files=files )
-
- def __get( self, path, data={}, key=None ):
- if not key:
- key = self.api_key
- data = data.copy()
- data['key'] = key
- if path.startswith("/api"):
- path = path[ len("/api"): ]
- url = "%s/%s" % (self.api_url, path)
- return get_request( url, params=data )
-
-
-class GalaxyInteractorTwill( object ):
-
- def __init__( self, twill_test_case ):
- self.twill_test_case = twill_test_case
-
- def verify_output( self, history, output_data, outfile, attributes, shed_tool_id, maxseconds ):
- hid = output_data.get( 'hid' )
- self.twill_test_case.verify_dataset_correctness( outfile, hid=hid, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds )
-
- def get_job_stream( self, history_id, output_data, stream ):
- return self.twill_test_case._get_job_stream_output( output_data.get( 'id' ), stream=stream, format=False )
-
- def stage_data_async( self, test_data, history, shed_tool_id, async=True ):
- name = test_data.get( 'name', None )
- if name:
- async = False
- self.twill_test_case.upload_file( test_data['fname'],
- ftype=test_data['ftype'],
- dbkey=test_data['dbkey'],
- metadata=test_data['metadata'],
- composite_data=test_data['composite_data'],
- shed_tool_id=shed_tool_id,
- wait=(not async) )
- if name:
- hda_id = self.twill_test_case.get_history_as_data_list()[-1].get( 'id' )
- try:
- self.twill_test_case.edit_hda_attribute_info( hda_id=str(hda_id), new_name=name )
- except:
- print "### call to edit_hda failed for hda_id %s, new_name=%s" % (hda_id, name)
- return lambda: self.twill_test_case.wait()
-
- def run_tool( self, testdef, test_history ):
- # We need to handle the case where we've uploaded a valid compressed file since the upload
- # tool will have uncompressed it on the fly.
-
- # Lose tons of information to accomodate legacy repeat handling.
- all_inputs = {}
- for key, value in testdef.inputs.iteritems():
- all_inputs[ key.split("|")[-1] ] = value
-
- # See if we have a grouping.Repeat element
- repeat_name = None
- for input_name, input_value in testdef.tool.inputs_by_page[0].items():
- if isinstance( input_value, grouping.Repeat ) and all_inputs.get( input_name, 1 ) not in [ 0, "0" ]: # default behavior is to test 1 repeat, for backwards compatibility
- if not input_value.min: # If input_value.min == 1, the element is already on the page don't add new element.
- repeat_name = input_name
- break
-
- #check if we need to verify number of outputs created dynamically by tool
- if testdef.tool.force_history_refresh:
- job_finish_by_output_count = len( self.twill_test_case.get_history_as_data_list() )
- else:
- job_finish_by_output_count = False
-
- inputs_tree = testdef.inputs
- # # # HACK: Flatten single-value lists. Required when using expand_grouping
- # #for key, value in inputs_tree.iteritems():
- # if isinstance(value, list) and len(value) == 1:
- # inputs_tree[key] = value[0]
-
- # Strip out just a given page of inputs from inputs "tree".
- def filter_page_inputs( n ):
- page_input_keys = testdef.tool.inputs_by_page[ n ].keys()
- return dict( [ (k, v) for k, v in inputs_tree.iteritems() if k.split("|")[0] or k.split("|")[0].resplit("_", 1)[0] in page_input_keys ] )
-
- # Do the first page
- page_inputs = filter_page_inputs( 0 )
-
- # Run the tool
- self.twill_test_case.run_tool( testdef.tool.id, repeat_name=repeat_name, **page_inputs )
- print "page_inputs (0)", page_inputs
- # Do other pages if they exist
- for i in range( 1, testdef.tool.npages ):
- page_inputs = filter_page_inputs( i )
- self.twill_test_case.submit_form( **page_inputs )
- print "page_inputs (%i)" % i, page_inputs
-
- # Check the results ( handles single or multiple tool outputs ). Make sure to pass the correct hid.
- # The output datasets from the tool should be in the same order as the testdef.outputs.
- data_list = None
- while data_list is None:
- data_list = self.twill_test_case.get_history_as_data_list()
- if job_finish_by_output_count and len( testdef.outputs ) > ( len( data_list ) - job_finish_by_output_count ):
- data_list = None
- return data_list
-
- def new_history( self ):
- # Start with a new history
- self.twill_test_case.logout()
- self.twill_test_case.login( email='test(a)bx.psu.edu' )
- admin_user = sa_session.query( galaxy.model.User ).filter( galaxy.model.User.table.c.email == 'test(a)bx.psu.edu' ).one()
- self.twill_test_case.new_history()
- latest_history = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted == False,
- galaxy.model.History.table.c.user_id == admin_user.id ) ) \
- .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
- .first()
- assert latest_history is not None, "Problem retrieving latest_history from database"
- if len( self.twill_test_case.get_history_as_data_list() ) > 0:
- raise AssertionError("ToolTestCase.do_it failed")
- return latest_history
-
- def delete_history( self, latest_history ):
- self.twill_test_case.delete_history( id=self.twill_test_case.security.encode_id( latest_history.id ) )
-
- def output_hid( self, output_data ):
- return output_data.get( 'hid' )
-
-
def build_tests( testing_shed_tools=False, master_api_key=None, user_api_key=None ):
"""
If the module level variable `toolbox` is set, generate `ToolTestCase`
@@ -421,72 +111,3 @@
# from baseclasses (which should be a tuple of classes) and with namespace dict.
new_class_obj = new.classobj( name, baseclasses, namespace )
G[ name ] = new_class_obj
-
-
-GALAXY_INTERACTORS = {
- 'api': GalaxyInteractorApi,
- 'twill': GalaxyInteractorTwill,
-}
-
-
-# Lets just try to use requests if it is available, but if not provide fallback
-# on custom implementations of limited requests get/post functionality.
-try:
- from requests import get as get_request
- from requests import post as post_request
-except ImportError:
- import urllib2
- import httplib
-
- class RequestsLikeResponse( object ):
-
- def __init__( self, content ):
- self.content = content
-
- def json( self ):
- return loads( self.content )
-
- def get_request( url, params={} ):
- argsep = '&'
- if '?' not in url:
- argsep = '?'
- url = url + argsep + '&'.join( [ '%s=%s' % (k, v) for k, v in params.iteritems() ] )
- #req = urllib2.Request( url, headers = { 'Content-Type': 'application/json' } )
- return RequestsLikeResponse(urllib2.urlopen( url ).read() )
-
- def post_request( url, data, files ):
- parsed_url = urllib2.urlparse.urlparse( url )
- return __post_multipart( host=parsed_url.netloc, selector=parsed_url.path, fields=data.iteritems(), files=(files or {}).iteritems() )
-
- # http://stackoverflow.com/a/681182
- def __post_multipart(host, selector, fields, files):
- content_type, body = __encode_multipart_formdata(fields, files)
- h = httplib.HTTP(host)
- h.putrequest('POST', selector)
- h.putheader('content-type', content_type)
- h.putheader('content-length', str(len(body)))
- h.endheaders()
- h.send(body)
- errcode, errmsg, headers = h.getreply()
- return RequestsLikeResponse(h.file.read())
-
- def __encode_multipart_formdata(fields, files):
- LIMIT = '----------lImIt_of_THE_fIle_eW_$'
- CRLF = '\r\n'
- L = []
- for (key, value) in fields:
- L.append('--' + LIMIT)
- L.append('Content-Disposition: form-data; name="%s"' % key)
- L.append('')
- L.append(value)
- for (key, value) in files:
- L.append('--' + LIMIT)
- L.append('Content-Disposition: form-data; name="%s"; filename="%s";' % (key, key))
- L.append('Content-Type: application/octet-stream')
- L.append('')
- L.append(value.read())
- L.append('--' + LIMIT + '--')
- L.append('')
- body = CRLF.join(L)
- content_type = 'multipart/form-data; boundary=%s' % LIMIT
- return content_type, body
https://bitbucket.org/galaxy/galaxy-central/commits/f4fd082fdcd2/
Changeset: f4fd082fdcd2
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Simplify test composite history item naming using UUIDs.
It needs to be unique, so lets use uuids instead of a composite name counter to make the code more portable.
Affected #: 1 file
diff -r d5c0f73b3db4cdb0b984b8a64da72dfe64ab6f0d -r f4fd082fdcd25f06b2f0dfcfa35c1c3ed3046732 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -1,5 +1,6 @@
import os
import os.path
+import uuid
from parameters import basic
from parameters import grouping
from galaxy.util import string_as_bool
@@ -139,10 +140,6 @@
expand_input_elems( test_elem )
def __parse_inputs_elems( self, test_elem, i ):
- # Composite datasets need a unique name: each test occurs in a fresh
- # history, but we'll keep it unique per set of tests - use i (test #)
- # and composite_data_names_counter (instance per test #)
- composite_data_names_counter = 0
raw_inputs = []
for param_elem in test_elem.findall( "param" ):
attrib = dict( param_elem.attrib )
@@ -168,10 +165,9 @@
attrib['composite_data'].append( child )
if composite_data_name is None:
# Generate a unique name; each test uses a
- # fresh history
- composite_data_name = '_COMPOSITE_RENAMED_t%i_d%i' \
- % ( i, composite_data_names_counter )
- composite_data_names_counter += 1
+ # fresh history.
+ composite_data_name = '_COMPOSITE_RENAMED_t%d_%s' \
+ % ( i, uuid.uuid1().hex )
elif child.tag == 'metadata':
attrib['metadata'].append( child )
elif child.tag == 'metadata':
https://bitbucket.org/galaxy/galaxy-central/commits/b51a17a1d3ce/
Changeset: b51a17a1d3ce
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Refactor tools test param parsing for reuse (in particular w/workflow tests).
They will be used in a slightly different context, but will need to describe inputs and outputs so lets reuse this code if possible. None of these methods used tool or self anyway, so they are probably more appropriate outside of ToolTestBuilder and ToolTestBuilder is now much more manageable.
Affected #: 1 file
diff -r f4fd082fdcd25f06b2f0dfcfa35c1c3ed3046732 -r b51a17a1d3cec70820ec921b66e477d6d8f6bc60 lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -54,26 +54,7 @@
"""
Iterator over metadata representing the required files for upload.
"""
- for fname, extra in self.required_files:
- data_dict = dict(
- fname=fname,
- metadata=extra.get( 'metadata', [] ),
- composite_data=extra.get( 'composite_data', [] ),
- ftype=extra.get( 'ftype', DEFAULT_FTYPE ),
- dbkey=extra.get( 'dbkey', DEFAULT_DBKEY ),
- )
- edit_attributes = extra.get( 'edit_attributes', [] )
-
- #currently only renaming is supported
- for edit_att in edit_attributes:
- if edit_att.get( 'type', None ) == 'name':
- new_name = edit_att.get( 'value', None )
- assert new_name, 'You must supply the new dataset name as the value tag of the edit_attributes tag'
- data_dict['name'] = new_name
- else:
- raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) )
-
- yield data_dict
+ return test_data_iter( self.required_files )
def __matching_case_for_value( self, cond, declared_value ):
test_param = cond.test_param
@@ -131,7 +112,7 @@
self.__preprocess_input_elems( test_elem )
self.__parse_inputs_elems( test_elem, i )
- self.__parse_output_elems( test_elem )
+ self.outputs = parse_output_elems( test_elem )
except Exception, e:
self.error = True
self.exception = e
@@ -142,44 +123,7 @@
def __parse_inputs_elems( self, test_elem, i ):
raw_inputs = []
for param_elem in test_elem.findall( "param" ):
- attrib = dict( param_elem.attrib )
- if 'values' in attrib:
- value = attrib[ 'values' ].split( ',' )
- elif 'value' in attrib:
- value = attrib['value']
- else:
- value = None
- attrib['children'] = list( param_elem.getchildren() )
- if attrib['children']:
- # At this time, we can assume having children only
- # occurs on DataToolParameter test items but this could
- # change and would cause the below parsing to change
- # based upon differences in children items
- attrib['metadata'] = []
- attrib['composite_data'] = []
- attrib['edit_attributes'] = []
- # Composite datasets need to be renamed uniquely
- composite_data_name = None
- for child in attrib['children']:
- if child.tag == 'composite_data':
- attrib['composite_data'].append( child )
- if composite_data_name is None:
- # Generate a unique name; each test uses a
- # fresh history.
- composite_data_name = '_COMPOSITE_RENAMED_t%d_%s' \
- % ( i, uuid.uuid1().hex )
- elif child.tag == 'metadata':
- attrib['metadata'].append( child )
- elif child.tag == 'metadata':
- attrib['metadata'].append( child )
- elif child.tag == 'edit_attributes':
- attrib['edit_attributes'].append( child )
- if composite_data_name:
- # Composite datasets need implicit renaming;
- # inserted at front of list so explicit declarations
- # take precedence
- attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } )
- name = attrib.pop( 'name' )
+ name, value, attrib = parse_param_elem( param_elem, i )
raw_inputs.append( ( name, value, attrib ) )
self.inputs = self.__process_raw_inputs( self.tool.inputs, raw_inputs )
@@ -229,98 +173,174 @@
expanded_inputs[ context.for_state() ] = processed_value
return expanded_inputs
- def __parse_output_elems( self, test_elem ):
- for output_elem in test_elem.findall( "output" ):
- attrib = dict( output_elem.attrib )
- name = attrib.pop( 'name', None )
- if name is None:
- raise Exception( "Test output does not have a 'name'" )
-
- assert_list = self.__parse_assert_list( output_elem )
- file = attrib.pop( 'file', None )
- # File no longer required if an list of assertions was present.
- attributes = {}
- # Method of comparison
- attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower()
- # Number of lines to allow to vary in logs (for dates, etc)
- attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) )
- # Allow a file size to vary if sim_size compare
- attributes['delta'] = int( attrib.pop( 'delta', '10000' ) )
- attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) )
- extra_files = []
- if 'ftype' in attrib:
- attributes['ftype'] = attrib['ftype']
- for extra in output_elem.findall( 'extra_files' ):
- extra_files.append( self.__parse_extra_files_elem( extra ) )
- metadata = {}
- for metadata_elem in output_elem.findall( 'metadata' ):
- metadata[ metadata_elem.get('name') ] = metadata_elem.get( 'value' )
- if not (assert_list or file or extra_files or metadata):
- raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)")
- attributes['assert_list'] = assert_list
- attributes['extra_files'] = extra_files
- attributes['metadata'] = metadata
- self.__add_output( name, file, attributes )
-
- def __parse_assert_list( self, output_elem ):
- assert_elem = output_elem.find("assert_contents")
- assert_list = None
-
- # Trying to keep testing patch as localized as
- # possible, this function should be relocated
- # somewhere more conventional.
- def convert_elem(elem):
- """ Converts and XML element to a dictionary format, used by assertion checking code. """
- tag = elem.tag
- attributes = dict( elem.attrib )
- child_elems = list( elem.getchildren() )
- converted_children = []
- for child_elem in child_elems:
- converted_children.append( convert_elem(child_elem) )
- return {"tag": tag, "attributes": attributes, "children": converted_children}
- if assert_elem is not None:
- assert_list = []
- for assert_child in list(assert_elem):
- assert_list.append(convert_elem(assert_child))
-
- return assert_list
-
- def __parse_extra_files_elem( self, extra ):
- # File or directory, when directory, compare basename
- # by basename
- extra_type = extra.get( 'type', 'file' )
- extra_name = extra.get( 'name', None )
- assert extra_type == 'directory' or extra_name is not None, \
- 'extra_files type (%s) requires a name attribute' % extra_type
- extra_value = extra.get( 'value', None )
- assert extra_value is not None, 'extra_files requires a value attribute'
- extra_attributes = {}
- extra_attributes['compare'] = extra.get( 'compare', 'diff' ).lower()
- extra_attributes['delta'] = extra.get( 'delta', '0' )
- extra_attributes['lines_diff'] = int( extra.get( 'lines_diff', '0' ) )
- extra_attributes['sort'] = string_as_bool( extra.get( 'sort', False ) )
- return extra_type, extra_value, extra_name, extra_attributes
-
- def __add_output( self, name, file, extra ):
- self.outputs.append( ( name, file, extra ) )
-
def __add_uploaded_dataset( self, name, value, extra, input_parameter ):
if value is None:
assert input_parameter.optional, '%s is not optional. You must provide a valid filename.' % name
return value
- if ( value, extra ) not in self.required_files:
- self.required_files.append( ( value, extra ) ) # these files will be uploaded
- name_change = [ att for att in extra.get( 'edit_attributes', [] ) if att.get( 'type' ) == 'name' ]
- if name_change:
- name_change = name_change[-1].get( 'value' ) # only the last name change really matters
- value = name_change # change value for select to renamed uploaded file for e.g. composite dataset
- else:
- for end in [ '.zip', '.gz' ]:
- if value.endswith( end ):
- value = value[ :-len( end ) ]
- break
- value = os.path.basename( value ) # if uploading a file in a path other than root of test-data
- return value
+ return require_file( name, value, extra, self.required_files )
+
+
+def test_data_iter( required_files ):
+ for fname, extra in required_files:
+ data_dict = dict(
+ fname=fname,
+ metadata=extra.get( 'metadata', [] ),
+ composite_data=extra.get( 'composite_data', [] ),
+ ftype=extra.get( 'ftype', DEFAULT_FTYPE ),
+ dbkey=extra.get( 'dbkey', DEFAULT_DBKEY ),
+ )
+ edit_attributes = extra.get( 'edit_attributes', [] )
+
+ #currently only renaming is supported
+ for edit_att in edit_attributes:
+ if edit_att.get( 'type', None ) == 'name':
+ new_name = edit_att.get( 'value', None )
+ assert new_name, 'You must supply the new dataset name as the value tag of the edit_attributes tag'
+ data_dict['name'] = new_name
+ else:
+ raise Exception( 'edit_attributes type (%s) is unimplemented' % edit_att.get( 'type', None ) )
+
+ yield data_dict
+
+
+def require_file( name, value, extra, required_files ):
+ if ( value, extra ) not in required_files:
+ required_files.append( ( value, extra ) ) # these files will be uploaded
+ name_change = [ att for att in extra.get( 'edit_attributes', [] ) if att.get( 'type' ) == 'name' ]
+ if name_change:
+ name_change = name_change[-1].get( 'value' ) # only the last name change really matters
+ value = name_change # change value for select to renamed uploaded file for e.g. composite dataset
+ else:
+ for end in [ '.zip', '.gz' ]:
+ if value.endswith( end ):
+ value = value[ :-len( end ) ]
+ break
+ value = os.path.basename( value ) # if uploading a file in a path other than root of test-data
+ return value
+
+
+def parse_param_elem( param_elem, i=0 ):
+ attrib = dict( param_elem.attrib )
+ if 'values' in attrib:
+ value = attrib[ 'values' ].split( ',' )
+ elif 'value' in attrib:
+ value = attrib['value']
+ else:
+ value = None
+ attrib['children'] = list( param_elem.getchildren() )
+ if attrib['children']:
+ # At this time, we can assume having children only
+ # occurs on DataToolParameter test items but this could
+ # change and would cause the below parsing to change
+ # based upon differences in children items
+ attrib['metadata'] = []
+ attrib['composite_data'] = []
+ attrib['edit_attributes'] = []
+ # Composite datasets need to be renamed uniquely
+ composite_data_name = None
+ for child in attrib['children']:
+ if child.tag == 'composite_data':
+ attrib['composite_data'].append( child )
+ if composite_data_name is None:
+ # Generate a unique name; each test uses a
+ # fresh history.
+ composite_data_name = '_COMPOSITE_RENAMED_t%d_%s' \
+ % ( i, uuid.uuid1().hex )
+ elif child.tag == 'metadata':
+ attrib['metadata'].append( child )
+ elif child.tag == 'metadata':
+ attrib['metadata'].append( child )
+ elif child.tag == 'edit_attributes':
+ attrib['edit_attributes'].append( child )
+ if composite_data_name:
+ # Composite datasets need implicit renaming;
+ # inserted at front of list so explicit declarations
+ # take precedence
+ attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } )
+ name = attrib.pop( 'name' )
+ return ( name, value, attrib )
+
+
+def parse_output_elems( test_elem ):
+ outputs = []
+ for output_elem in test_elem.findall( "output" ):
+ name, file, attributes = __parse_output_elem( output_elem )
+ outputs.append( ( name, file, attributes ) )
+ return outputs
+
+
+def __parse_output_elem( output_elem ):
+ attrib = dict( output_elem.attrib )
+ name = attrib.pop( 'name', None )
+ if name is None:
+ raise Exception( "Test output does not have a 'name'" )
+
+ assert_list = __parse_assert_list( output_elem )
+ file = attrib.pop( 'file', None )
+ # File no longer required if an list of assertions was present.
+ attributes = {}
+ # Method of comparison
+ attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower()
+ # Number of lines to allow to vary in logs (for dates, etc)
+ attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) )
+ # Allow a file size to vary if sim_size compare
+ attributes['delta'] = int( attrib.pop( 'delta', '10000' ) )
+ attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) )
+ extra_files = []
+ if 'ftype' in attrib:
+ attributes['ftype'] = attrib['ftype']
+ for extra in output_elem.findall( 'extra_files' ):
+ extra_files.append( __parse_extra_files_elem( extra ) )
+ metadata = {}
+ for metadata_elem in output_elem.findall( 'metadata' ):
+ metadata[ metadata_elem.get('name') ] = metadata_elem.get( 'value' )
+ if not (assert_list or file or extra_files or metadata):
+ raise Exception( "Test output defines not checks (e.g. must have a 'file' check against, assertions to check, etc...)")
+ attributes['assert_list'] = assert_list
+ attributes['extra_files'] = extra_files
+ attributes['metadata'] = metadata
+ return name, file, attributes
+
+
+def __parse_assert_list( output_elem ):
+ assert_elem = output_elem.find("assert_contents")
+ assert_list = None
+
+ # Trying to keep testing patch as localized as
+ # possible, this function should be relocated
+ # somewhere more conventional.
+ def convert_elem(elem):
+ """ Converts and XML element to a dictionary format, used by assertion checking code. """
+ tag = elem.tag
+ attributes = dict( elem.attrib )
+ child_elems = list( elem.getchildren() )
+ converted_children = []
+ for child_elem in child_elems:
+ converted_children.append( convert_elem(child_elem) )
+ return {"tag": tag, "attributes": attributes, "children": converted_children}
+ if assert_elem is not None:
+ assert_list = []
+ for assert_child in list(assert_elem):
+ assert_list.append(convert_elem(assert_child))
+
+ return assert_list
+
+
+def __parse_extra_files_elem( extra ):
+ # File or directory, when directory, compare basename
+ # by basename
+ extra_type = extra.get( 'type', 'file' )
+ extra_name = extra.get( 'name', None )
+ assert extra_type == 'directory' or extra_name is not None, \
+ 'extra_files type (%s) requires a name attribute' % extra_type
+ extra_value = extra.get( 'value', None )
+ assert extra_value is not None, 'extra_files requires a value attribute'
+ extra_attributes = {}
+ extra_attributes['compare'] = extra.get( 'compare', 'diff' ).lower()
+ extra_attributes['delta'] = extra.get( 'delta', '0' )
+ extra_attributes['lines_diff'] = int( extra.get( 'lines_diff', '0' ) )
+ extra_attributes['sort'] = string_as_bool( extra.get( 'sort', False ) )
+ return extra_type, extra_value, extra_name, extra_attributes
class ParamContext(object):
https://bitbucket.org/galaxy/galaxy-central/commits/6fadd2ce40e1/
Changeset: 6fadd2ce40e1
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Further refactoring of interactors for reuse.
Affected #: 2 files
diff -r b51a17a1d3cec70820ec921b66e477d6d8f6bc60 -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 test/base/interactor.py
--- a/test/base/interactor.py
+++ b/test/base/interactor.py
@@ -5,12 +5,25 @@
from galaxy.model.mapping import context as sa_session
from simplejson import dumps, loads
+from logging import getLogger
+log = getLogger( __name__ )
+
def build_interactor( test_case, type="api" ):
interactor_class = GALAXY_INTERACTORS[ type ]
return interactor_class( test_case )
+def stage_data_in_history( galaxy_interactor, all_test_data, history, shed_tool_id=None ):
+ # Upload any needed files
+ upload_waits = []
+
+ for test_data in all_test_data:
+ upload_waits.append( galaxy_interactor.stage_data_async( test_data, history, shed_tool_id ) )
+ for upload_wait in upload_waits:
+ upload_wait()
+
+
class GalaxyInteractorApi( object ):
def __init__( self, twill_test_case ):
@@ -20,14 +33,14 @@
self.uploads = {}
def verify_output( self, history_id, output_data, outfile, attributes, shed_tool_id, maxseconds ):
- self.twill_test_case.wait_for( lambda: not self.__history_ready( history_id ), maxseconds=maxseconds)
- hid = output_data.get( 'id' )
+ self.wait_for_history( history_id, maxseconds )
+ hid = self.__output_id( output_data )
fetcher = self.__dataset_fetcher( history_id )
## TODO: Twill version verifys dataset is 'ok' in here.
self.twill_test_case.verify_hid( outfile, hda_id=hid, attributes=attributes, dataset_fetcher=fetcher, shed_tool_id=shed_tool_id )
metadata = attributes.get( 'metadata', {} )
if metadata:
- dataset = self.__get( "histories/%s/contents/%s" % ( history_id, hid ) ).json()
+ dataset = self._get( "histories/%s/contents/%s" % ( history_id, hid ) ).json()
for key, value in metadata.iteritems():
dataset_key = "metadata_%s" % key
try:
@@ -41,15 +54,27 @@
msg = "Failed to verify dataset metadata, metadata key [%s] was not found." % key
raise Exception( msg )
+ def wait_for_history( self, history_id, maxseconds ):
+ self.twill_test_case.wait_for( lambda: not self.__history_ready( history_id ), maxseconds=maxseconds)
+
def get_job_stream( self, history_id, output_data, stream ):
- hid = output_data.get( 'id' )
- data = self.__get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json()
+ hid = self.__output_id( output_data )
+ data = self._get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json()
return data.get( stream, '' )
def new_history( self ):
- history_json = self.__post( "histories", {"name": "test_history"} ).json()
+ history_json = self._post( "histories", {"name": "test_history"} ).json()
return history_json[ 'id' ]
+ def __output_id( self, output_data ):
+ # Allow data structure coming out of tools API - {id: <id>, output_name: <name>, etc...}
+ # or simple id as comes out of workflow API.
+ try:
+ output_id = output_data.get( 'id' )
+ except AttributeError:
+ output_id = output_data
+ return output_id
+
def stage_data_async( self, test_data, history_id, shed_tool_id, async=True ):
fname = test_data[ 'fname' ]
tool_input = {
@@ -143,12 +168,15 @@
return wait
def __history_ready( self, history_id ):
- history_json = self.__get( "histories/%s" % history_id ).json()
+ history_json = self._get( "histories/%s" % history_id ).json()
state = history_json[ 'state' ]
- if state == 'ok':
+ return self._state_ready( state, error_msg="History in error state." )
+
+ def _state_ready( self, state_str, error_msg ):
+ if state_str == 'ok':
return True
- elif state == 'error':
- raise Exception("History in error state.")
+ elif state_str == 'error':
+ raise Exception( error_msg )
return False
def __submit_tool( self, history_id, tool_id, tool_input, extra_data={}, files=None ):
@@ -158,12 +186,12 @@
inputs=dumps( tool_input ),
**extra_data
)
- return self.__post( "tools", files=files, data=data )
+ return self._post( "tools", files=files, data=data )
def __get_user_key( self, user_key, admin_key ):
if user_key:
return user_key
- all_users = self.__get( 'users', key=admin_key ).json()
+ all_users = self._get( 'users', key=admin_key ).json()
try:
test_user = [ user for user in all_users if user["email"] == 'test(a)bx.psu.edu' ][0]
except IndexError:
@@ -172,26 +200,26 @@
password='testuser',
username='admin-user',
)
- test_user = self.__post( 'users', data, key=admin_key ).json()
- return self.__post( "users/%s/api_key" % test_user['id'], key=admin_key ).json()
+ test_user = self._post( 'users', data, key=admin_key ).json()
+ return self._post( "users/%s/api_key" % test_user['id'], key=admin_key ).json()
def __dataset_fetcher( self, history_id ):
def fetcher( hda_id, base_name=None ):
url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id)
if base_name:
url += "&filename=%s" % base_name
- return self.__get( url ).content
+ return self._get( url ).content
return fetcher
- def __post( self, path, data={}, files=None, key=None):
+ def _post( self, path, data={}, files=None, key=None):
if not key:
key = self.api_key
data = data.copy()
data['key'] = key
return post_request( "%s/%s" % (self.api_url, path), data=data, files=files )
- def __get( self, path, data={}, key=None ):
+ def _get( self, path, data={}, key=None ):
if not key:
key = self.api_key
data = data.copy()
diff -r b51a17a1d3cec70820ec921b66e477d6d8f6bc60 -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -1,7 +1,7 @@
import new
import sys
from base.twilltestcase import TwillTestCase
-from base.interactor import build_interactor
+from base.interactor import build_interactor, stage_data_in_history
import logging
log = logging.getLogger( __name__ )
@@ -23,12 +23,7 @@
test_history = galaxy_interactor.new_history()
- # Upload any needed files
- upload_waits = []
- for test_data in testdef.test_data():
- upload_waits.append( galaxy_interactor.stage_data_async( test_data, test_history, shed_tool_id ) )
- for upload_wait in upload_waits:
- upload_wait()
+ stage_data_in_history( galaxy_interactor, testdef.test_data(), test_history, shed_tool_id )
data_list = galaxy_interactor.run_tool( testdef, test_history )
self.assertTrue( data_list )
https://bitbucket.org/galaxy/galaxy-central/commits/7c5305e54c03/
Changeset: 7c5305e54c03
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Functional testing of workflows (for Galaxy).
... tool shed integration will be more challenging, but this can be used for directly configured Galaxy tools and outlines a syntax that could be shared with a tool shed driven approach.
The one tricky part is how to match workflow outputs to things to check. Right now it is based on the index of the output across the workflow. This is both difficult to determine and very brittle to workflow modifications - how to proceed - require an annotation string to be set up? Modify the workflow data model to allow assigning names to outputs the way inputs have names? At any rate this current syntax should be considered beta and may change - if it does, Galaxy will not continue to support this syntax.
To run the sample test execute the following test command:
sh run_functional_tests.sh -workflow test-data/workflows/1.xml
Affected #: 8 files
diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f run_functional_tests.sh
--- a/run_functional_tests.sh
+++ b/run_functional_tests.sh
@@ -13,6 +13,7 @@
echo "'run_functional_tests.sh -list' for listing all the tool ids"
echo "'run_functional_tests.sh -toolshed' for running all the test scripts in the ./test/tool_shed/functional directory"
echo "'run_functional_tests.sh -toolshed testscriptname' for running one test script named testscriptname in the .test/tool_shed/functional directory"
+ echo "'run_functional_tests.sh -workflow test.xml' for running a workflow test case as defined by supplied workflow xml test file"
echo "'run_functional_tests.sh -framework' for running through example tool tests testing framework features in test/functional/tools"
echo "'run_functional_tests.sh -framework -id toolid' for testing one framework tool (in test/functional/tools/) with id 'toolid'"
elif [ $1 = '-id' ]; then
@@ -48,6 +49,8 @@
else
python ./test/tool_shed/functional_tests.py -v --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html $2
fi
+elif [ $1 = '-workflow' ]; then
+ python ./scripts/functional_tests.py -v functional.test_workflow:WorkflowTestCase --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html -workflow $2
elif [ $1 = '-framework' ]; then
if [ ! $2 ]; then
python ./scripts/functional_tests.py -v functional.test_toolbox --with-nosehtml --html-report-file run_functional_tests.html -framework
diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -420,7 +420,7 @@
new_path = [ os.path.join( cwd, "test" ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path
- import functional.test_toolbox
+
# ---- Find tests ---------------------------------------------------------
if galaxy_test_proxy_port:
log.info( "Functional tests will be run against %s:%s" % ( galaxy_test_host, galaxy_test_proxy_port ) )
@@ -436,6 +436,13 @@
os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host
def _run_functional_test( testing_shed_tools=None ):
+ workflow_test = __check_arg( '-workflow', param=True )
+ if workflow_test:
+ import functional.test_workflow
+ functional.test_workflow.WorkflowTestCase.workflow_test_file = workflow_test
+ functional.test_workflow.WorkflowTestCase.master_api_key = master_api_key
+ functional.test_workflow.WorkflowTestCase.user_api_key = os.environ.get( "GALAXY_TEST_USER_API_KEY", default_galaxy_user_key )
+ import functional.test_toolbox
functional.test_toolbox.toolbox = app.toolbox
functional.test_toolbox.build_tests(
testing_shed_tools=testing_shed_tools,
diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test-data/workflows/1.ga
--- /dev/null
+++ b/test-data/workflows/1.ga
@@ -0,0 +1,145 @@
+{
+ "a_galaxy_workflow": "true",
+ "annotation": "",
+ "format-version": "0.1",
+ "name": "TestWorkflowAlpha",
+ "steps": {
+ "0": {
+ "annotation": "",
+ "id": 0,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "",
+ "name": "Input Dataset1"
+ }
+ ],
+ "name": "Input dataset",
+ "outputs": [],
+ "position": {
+ "left": 206,
+ "top": 207
+ },
+ "tool_errors": null,
+ "tool_id": null,
+ "tool_state": "{\"name\": \"Input Dataset1\"}",
+ "tool_version": null,
+ "type": "data_input",
+ "user_outputs": []
+ },
+ "1": {
+ "annotation": "",
+ "id": 1,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "",
+ "name": "Input Dataset2"
+ }
+ ],
+ "name": "Input dataset",
+ "outputs": [],
+ "position": {
+ "left": 200,
+ "top": 320
+ },
+ "tool_errors": null,
+ "tool_id": null,
+ "tool_state": "{\"name\": \"Input Dataset2\"}",
+ "tool_version": null,
+ "type": "data_input",
+ "user_outputs": []
+ },
+ "2": {
+ "annotation": "",
+ "id": 2,
+ "input_connections": {
+ "input1": {
+ "id": 0,
+ "output_name": "output"
+ },
+ "queries_0|input2": {
+ "id": 1,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "name": "Concatenate datasets",
+ "outputs": [
+ {
+ "name": "out_file1",
+ "type": "input"
+ }
+ ],
+ "position": {
+ "left": 420,
+ "top": 200
+ },
+ "post_job_actions": {},
+ "tool_errors": null,
+ "tool_id": "cat1",
+ "tool_state": "{\"__page__\": 0, \"__rerun_remap_job_id__\": null, \"input1\": \"null\", \"chromInfo\": \"\\\"/home/john/workspace/galaxy-central/tool-data/shared/ucsc/chrom/?.len\\\"\", \"queries\": \"[{\\\"input2\\\": null, \\\"__index__\\\": 0}]\"}",
+ "tool_version": "1.0.0",
+ "type": "tool",
+ "user_outputs": []
+ },
+ "3": {
+ "annotation": "",
+ "id": 3,
+ "input_connections": {
+ "input": {
+ "id": 2,
+ "output_name": "out_file1"
+ }
+ },
+ "inputs": [],
+ "name": "Convert",
+ "outputs": [
+ {
+ "name": "out_file1",
+ "type": "tabular"
+ }
+ ],
+ "position": {
+ "left": 640,
+ "top": 200
+ },
+ "post_job_actions": {},
+ "tool_errors": null,
+ "tool_id": "Convert characters1",
+ "tool_state": "{\"__page__\": 0, \"convert_from\": \"\\\"s\\\"\", \"__rerun_remap_job_id__\": null, \"chromInfo\": \"\\\"/home/john/workspace/galaxy-central/tool-data/shared/ucsc/chrom/?.len\\\"\", \"input\": \"null\"}",
+ "tool_version": "1.0.0",
+ "type": "tool",
+ "user_outputs": []
+ },
+ "4": {
+ "annotation": "",
+ "id": 4,
+ "input_connections": {
+ "input": {
+ "id": 3,
+ "output_name": "out_file1"
+ }
+ },
+ "inputs": [],
+ "name": "Add column",
+ "outputs": [
+ {
+ "name": "out_file1",
+ "type": "input"
+ }
+ ],
+ "position": {
+ "left": 860,
+ "top": 200
+ },
+ "post_job_actions": {},
+ "tool_errors": null,
+ "tool_id": "addValue",
+ "tool_state": "{\"__page__\": 0, \"__rerun_remap_job_id__\": null, \"exp\": \"\\\"1\\\"\", \"iterate\": \"\\\"yes\\\"\", \"input\": \"null\", \"chromInfo\": \"\\\"/home/john/workspace/galaxy-central/tool-data/shared/ucsc/chrom/?.len\\\"\"}",
+ "tool_version": "1.0.0",
+ "type": "tool",
+ "user_outputs": []
+ }
+ }
+}
\ No newline at end of file
diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test-data/workflows/1.xml
--- /dev/null
+++ b/test-data/workflows/1.xml
@@ -0,0 +1,5 @@
+<test file="1.ga">
+ <input name="Input Dataset1" value="workflows/i1.txt" />
+ <input name="Input Dataset2" value="workflows/i2.txt" />
+ <output name="2" file="workflows/o1.txt" /><!-- index of n'th output, less than ideal syntax -->
+</test>
diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test-data/workflows/i1.txt
--- /dev/null
+++ b/test-data/workflows/i1.txt
@@ -0,0 +1,1 @@
+1 2 3
diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test-data/workflows/i2.txt
--- /dev/null
+++ b/test-data/workflows/i2.txt
@@ -0,0 +1,1 @@
+4 5 6
diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test-data/workflows/o1.txt
--- /dev/null
+++ b/test-data/workflows/o1.txt
@@ -0,0 +1,2 @@
+1 2 3 1
+4 5 6 2
diff -r 6fadd2ce40e107fdc9f754cb0b45f1a1263d5921 -r 7c5305e54c03373b9922d60e2c75e90607c61c6f test/functional/test_workflow.py
--- /dev/null
+++ b/test/functional/test_workflow.py
@@ -0,0 +1,185 @@
+import os
+import sys
+from base.twilltestcase import TwillTestCase
+from base.interactor import GalaxyInteractorApi, stage_data_in_history
+
+from galaxy.util import parse_xml
+from galaxy.tools.test import parse_param_elem, require_file, test_data_iter, parse_output_elems
+from simplejson import load, dumps
+
+from logging import getLogger
+log = getLogger( __name__ )
+
+
+class WorkflowTestCase( TwillTestCase ):
+ """
+ Kind of a shell of a test case for running workflow tests. Probably
+ needs to look more like test_toolbox.
+ """
+ workflow_test_file = None
+ user_api_key = None
+ master_api_key = None
+
+ def test_workflow( self, workflow_test_file=None ):
+ maxseconds = 120
+ workflow_test_file = workflow_test_file or WorkflowTestCase.workflow_test_file
+ assert workflow_test_file
+ workflow_test = parse_test_file( workflow_test_file )
+ galaxy_interactor = GalaxyWorkflowInteractor( self )
+
+ # Calling workflow https://github.com/jmchilton/blend4j/blob/master/src/test/java/com/github/j…
+
+ # Import workflow
+ workflow_id, step_id_map, output_defs = self.__import_workflow( galaxy_interactor, workflow_test.workflow )
+
+ # Stage data and history for workflow
+ test_history = galaxy_interactor.new_history()
+ stage_data_in_history( galaxy_interactor, workflow_test.test_data(), test_history )
+
+ # Build workflow parameters
+ uploads = galaxy_interactor.uploads
+ ds_map = {}
+ for step_index, input_dataset_label in workflow_test.input_datasets():
+ # Upload is {"src": "hda", "id": hid}
+ try:
+ upload = uploads[ workflow_test.upload_name( input_dataset_label ) ]
+ except KeyError:
+ raise AssertionError( "Failed to find upload with label %s in uploaded datasets %s" % ( input_dataset_label, uploads ) )
+
+ ds_map[ step_id_map[ step_index ] ] = upload
+
+ payload = {
+ "history": "hist_id=%s" % test_history,
+ "ds_map": dumps( ds_map ),
+ "workflow_id": workflow_id,
+ }
+ run_response = galaxy_interactor.run_workflow( payload ).json()
+
+ outputs = run_response[ 'outputs' ]
+ if not len( outputs ) == len( output_defs ):
+ msg_template = "Number of outputs [%d] created by workflow execution does not equal expected number from input file [%d]."
+ msg = msg_template % ( len( outputs ), len( output_defs ) )
+ raise AssertionError( msg )
+
+ galaxy_interactor.wait_for_ids( test_history, outputs )
+
+ for expected_output_def in workflow_test.outputs:
+ # Get the correct hid
+ name, outfile, attributes = expected_output_def
+
+ output_data = outputs[ int( name ) ]
+ try:
+ galaxy_interactor.verify_output( test_history, output_data, outfile, attributes=attributes, shed_tool_id=None, maxseconds=maxseconds )
+ except Exception:
+ for stream in ['stdout', 'stderr']:
+ stream_output = galaxy_interactor.get_job_stream( test_history, output_data, stream=stream )
+ print >>sys.stderr, self._format_stream( stream_output, stream=stream, format=True )
+ raise
+
+ def __import_workflow( self, galaxy_interactor, workflow ):
+ """
+ Import workflow into Galaxy and return id and mapping of step ids.
+ """
+ workflow_info = galaxy_interactor.import_workflow( workflow ).json()
+ try:
+ workflow_id = workflow_info[ 'id' ]
+ except KeyError:
+ raise AssertionError( "Failed to find id for workflow import response %s" % workflow_info )
+
+ # Well ideally the local copy of the workflow would have the same step ids
+ # as the one imported through the API, but API workflow imports are 1-indexed
+ # and GUI exports 0-indexed as of mid-november 2013.
+
+ imported_workflow = galaxy_interactor.read_workflow( workflow_id )
+ #log.info("local %s\nimported%s" % (workflow, imported_workflow))
+ step_id_map = {}
+ local_steps_ids = sorted( [ int( step_id ) for step_id in workflow[ 'steps' ].keys() ] )
+ imported_steps_ids = sorted( [ int( step_id ) for step_id in imported_workflow[ 'steps' ].keys() ] )
+ for local_step_id, imported_step_id in zip( local_steps_ids, imported_steps_ids ):
+ step_id_map[ local_step_id ] = imported_step_id
+
+ output_defs = []
+ for local_step_id in local_steps_ids:
+ step_def = workflow['steps'][ str( local_step_id ) ]
+ output_defs.extend( step_def.get( "outputs", [] ) )
+
+ return workflow_id, step_id_map, output_defs
+
+
+def parse_test_file( workflow_test_file ):
+ tree = parse_xml( workflow_test_file )
+ root = tree.getroot()
+ input_elems = root.findall( "input" )
+ required_files = []
+ dataset_dict = {}
+ for input_elem in input_elems:
+ name, value, attrib = parse_param_elem( input_elem )
+ require_file( name, value, attrib, required_files )
+ dataset_dict[ name ] = value
+
+ outputs = parse_output_elems( root )
+
+ workflow_file_rel_path = root.get( 'file' )
+ if not workflow_file_rel_path:
+ raise Exception( "Workflow test XML must declare file attribute pointing to workflow under test." )
+
+ # TODO: Normalize this path, prevent it from accessing arbitrary files on system.
+ worfklow_file_abs_path = os.path.join( os.path.dirname( workflow_test_file ), workflow_file_rel_path )
+
+ return WorkflowTest(
+ dataset_dict,
+ required_files,
+ worfklow_file_abs_path,
+ outputs=outputs,
+ )
+
+
+class WorkflowTest( object ):
+
+ def __init__( self, dataset_dict, required_files, workflow_file, outputs ):
+ self.dataset_dict = dataset_dict
+ self.required_files = required_files
+ self.workflow = load( open( workflow_file, "r" ) )
+ self.outputs = outputs
+
+ def test_data( self ):
+ return test_data_iter( self.required_files )
+
+ def upload_name( self, input_dataset_label ):
+ return self.dataset_dict[ input_dataset_label ]
+
+ def input_datasets( self ):
+ steps = self.workflow[ "steps" ]
+ log.info("in input_datasets with steps %s" % steps)
+ for step_index, step_dict in steps.iteritems():
+ if step_dict.get( "name", None ) == "Input dataset":
+ yield int( step_index ), step_dict[ "inputs" ][0][ "name" ]
+
+
+class GalaxyWorkflowInteractor(GalaxyInteractorApi):
+
+ def __init__( self, twill_test_case ):
+ super(GalaxyWorkflowInteractor, self).__init__( twill_test_case )
+
+ def import_workflow( self, workflow_rep ):
+ payload = { "workflow": dumps( workflow_rep ) }
+ return self._post( "workflows/upload", data=payload )
+
+ def run_workflow( self, data ):
+ return self._post( "workflows", data=data )
+
+ def read_workflow( self, id ):
+ return self._get( "workflows/%s" % id ).json()
+
+ def wait_for_ids( self, history_id, ids ):
+ self.twill_test_case.wait_for( lambda: not all( [ self.__dataset_ready( history_id, id ) for id in ids ] ), maxseconds=120 )
+
+ def __dataset_ready( self, history_id, id ):
+ contents = self._get( 'histories/%s/contents' % history_id ).json()
+ for content in contents:
+
+ if content["id"] == id:
+ state = content[ 'state' ]
+ state_ready = self._state_ready( state, error_msg="Dataset creation failed for dataset with name %s." % content[ 'name' ] )
+ return state_ready
+ return False
https://bitbucket.org/galaxy/galaxy-central/commits/87f3e11ced5d/
Changeset: 87f3e11ced5d
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: Fix for using API driver with tool outputs that change their name.
E.g. composite data types. API should probably return the original output name since that is the purpose of the parameter but I am not sure how much work that would be.
Affected #: 2 files
diff -r 7c5305e54c03373b9922d60e2c75e90607c61c6f -r 87f3e11ced5dbab70f50a2299fcda2859643ff62 test/base/interactor.py
--- a/test/base/interactor.py
+++ b/test/base/interactor.py
@@ -1,5 +1,6 @@
import os
from galaxy.tools.parameters import grouping
+from galaxy.util.odict import odict
import galaxy.model
from galaxy.model.orm import and_, desc
from galaxy.model.mapping import context as sa_session
@@ -148,7 +149,7 @@
## Convert outputs list to a dictionary that can be accessed by
## output_name so can be more flexiable about ordering of outputs
## but also allows fallback to legacy access as list mode.
- outputs_dict = {}
+ outputs_dict = odict()
index = 0
for output in datasets_object[ 'outputs' ]:
outputs_dict[ index ] = outputs_dict[ output.get("output_name") ] = output
diff -r 7c5305e54c03373b9922d60e2c75e90607c61c6f -r 87f3e11ced5dbab70f50a2299fcda2859643ff62 test/functional/test_toolbox.py
--- a/test/functional/test_toolbox.py
+++ b/test/functional/test_toolbox.py
@@ -53,8 +53,12 @@
output_data = data_list[ name ]
except (TypeError, KeyError):
# Legacy - fall back on ordered data list access if data_list is
- # just a list (case with twill variant)
- output_data = data_list[ len(data_list) - len(testdef.outputs) + output_index ]
+ # just a list (case with twill variant or if output changes its
+ # name).
+ if hasattr(data_list, "values"):
+ output_data = data_list.values()[ output_index ]
+ else:
+ output_data = data_list[ len(data_list) - len(testdef.outputs) + output_index ]
self.assertTrue( output_data is not None )
try:
galaxy_interactor.verify_output( history, output_data, outfile, attributes=attributes, shed_tool_id=shed_tool_id, maxseconds=maxseconds )
https://bitbucket.org/galaxy/galaxy-central/commits/f302dae15be9/
Changeset: f302dae15be9
User: jmchilton
Date: 2013-11-22 02:17:13
Summary: API Driven Tests: Option for very detailed per dataset error messages for when histories error out.
Affected #: 1 file
diff -r 87f3e11ced5dbab70f50a2299fcda2859643ff62 -r f302dae15be9185139d8cc17cd0208a448e7e9b0 test/base/interactor.py
--- a/test/base/interactor.py
+++ b/test/base/interactor.py
@@ -1,4 +1,5 @@
import os
+from StringIO import StringIO
from galaxy.tools.parameters import grouping
from galaxy.util.odict import odict
import galaxy.model
@@ -9,6 +10,9 @@
from logging import getLogger
log = getLogger( __name__ )
+VERBOSE_ERRORS = True
+ERROR_MESSAGE_DATASET_SEP = "--------------------------------------"
+
def build_interactor( test_case, type="api" ):
interactor_class = GALAXY_INTERACTORS[ type ]
@@ -60,7 +64,7 @@
def get_job_stream( self, history_id, output_data, stream ):
hid = self.__output_id( output_data )
- data = self._get( "histories/%s/contents/%s/provenance" % (history_id, hid) ).json()
+ data = self._dataset_provenance( history_id, hid )
return data.get( stream, '' )
def new_history( self ):
@@ -171,7 +175,61 @@
def __history_ready( self, history_id ):
history_json = self._get( "histories/%s" % history_id ).json()
state = history_json[ 'state' ]
- return self._state_ready( state, error_msg="History in error state." )
+ try:
+ return self._state_ready( state, error_msg="History in error state." )
+ except Exception:
+ if VERBOSE_ERRORS:
+ self._summarize_history_errors( history_id )
+ raise
+
+ def _summarize_history_errors( self, history_id ):
+ print "History with id %s in error - summary of datasets in error below." % history_id
+ try:
+ history_contents = self.__contents( history_id )
+ except Exception:
+ print "*TEST FRAMEWORK FAILED TO FETCH HISTORY DETAILS*"
+
+ for dataset in history_contents:
+ if dataset[ 'state' ] != 'error':
+ continue
+
+ print ERROR_MESSAGE_DATASET_SEP
+ dataset_id = dataset.get( 'id', None )
+ print "| %d - %s (HID - NAME) " % ( int( dataset['hid'] ), dataset['name'] )
+ try:
+ dataset_info = self._dataset_info( history_id, dataset_id )
+ print "| Dataset Blurb:"
+ print self.format_for_error( dataset_info.get( "misc_blurb", "" ), "Dataset blurb was empty." )
+ print "| Dataset Info:"
+ print self.format_for_error( dataset_info.get( "misc_info", "" ), "Dataset info is empty." )
+ except Exception:
+ print "| *TEST FRAMEWORK ERROR FETCHING DATASET DETAILS*"
+ try:
+ provenance_info = self._dataset_provenance( history_id, dataset_id )
+ print "| Dataset Job Standard Output:"
+ print self.format_for_error( provenance_info.get( "stdout", "" ), "Standard output was empty." )
+ print "| Dataset Job Standard Error:"
+ print self.format_for_error( provenance_info.get( "stderr", "" ), "Standard error was empty." )
+ except Exception:
+ print "| *TEST FRAMEWORK ERROR FETCHING JOB DETAILS*"
+ print "|"
+ print ERROR_MESSAGE_DATASET_SEP
+
+ def format_for_error( self, blob, empty_message, prefix="| " ):
+ contents = "\n".join([ "%s%s" % (prefix, line.strip()) for line in StringIO(blob).readlines() if line.rstrip("\n\r") ] )
+ return contents or "%s*%s*" % ( prefix, empty_message )
+
+ def _dataset_provenance( self, history_id, id ):
+ provenance = self._get( "histories/%s/contents/%s/provenance" % ( history_id, id ) ).json()
+ return provenance
+
+ def _dataset_info( self, history_id, id ):
+ dataset_json = self._get( "histories/%s/contents/%s" % ( history_id, id ) ).json()
+ return dataset_json
+
+ def __contents( self, history_id ):
+ history_contents_json = self._get( "histories/%s/contents" % history_id ).json()
+ return history_contents_json
def _state_ready( self, state_str, error_msg ):
if state_str == 'ok':
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: carlfeberhard: JS MVC: add SessionStorageModel to base-mvc
by commits-noreply@bitbucket.org 21 Nov '13
by commits-noreply@bitbucket.org 21 Nov '13
21 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/5471fa863712/
Changeset: 5471fa863712
User: carlfeberhard
Date: 2013-11-21 22:43:43
Summary: JS MVC: add SessionStorageModel to base-mvc
Affected #: 2 files
diff -r 32ecdc2fdc00a93652b27774123cde91bd3c512c -r 5471fa863712ba3f89ad9adceb194daf0baeafa2 static/scripts/mvc/base-mvc.js
--- a/static/scripts/mvc/base-mvc.js
+++ b/static/scripts/mvc/base-mvc.js
@@ -89,6 +89,59 @@
//==============================================================================
+/** Backbone model that syncs to the browser's sessionStorage API.
+ */
+var SessionStorageModel = Backbone.Model.extend({
+ initialize : function( hash, x, y, z ){
+ if( !hash || !hash.hasOwnProperty( 'id' ) ){
+ throw new Error( 'SessionStorageModel needs an id on init' );
+ }
+ // immed. save the passed in model and save on any change to it
+ this.save();
+ this.on( 'change', function(){
+ this.save();
+ });
+ },
+ sync : function( method, model, options ){
+ model.trigger('request', model, {}, options);
+ var returned;
+ switch( method ){
+ case 'create' : returned = this._create( model ); break;
+ case 'read' : returned = this._read( model ); break;
+ case 'update' : returned = this._update( model ); break;
+ case 'delete' : returned = this._delete( model ); break;
+ }
+ if( returned !== undefined || returned !== null ){
+ if( options.success ){ options.success(); }
+ } else {
+ if( options.error ){ options.error(); }
+ }
+ return returned;
+ },
+ _create : function( model ){
+ var json = model.toJSON(),
+ set = sessionStorage.setItem( model.id, JSON.stringify( json ) );
+ return ( set === null )?( set ):( json );
+ },
+ _read : function( model ){
+ return JSON.parse( sessionStorage.getItem( model.id, JSON.stringify( model.toJSON() ) ) );
+ },
+ _update : function( model ){
+ return model._create( model );
+ },
+ _delete : function( model ){
+ return sessionStorage.removeItem( model.id );
+ },
+ isNew : function(){
+ return !sessionStorage.hasOwnProperty( this.id );
+ }
+});
+(function(){
+ SessionStorageModel.prototype = _.omit( SessionStorageModel.prototype, 'url', 'urlRoot' );
+}());
+
+
+//==============================================================================
/**
* @class persistent storage adapter.
* Provides an easy interface to object based storage using method chaining.
diff -r 32ecdc2fdc00a93652b27774123cde91bd3c512c -r 5471fa863712ba3f89ad9adceb194daf0baeafa2 static/scripts/packed/mvc/base-mvc.js
--- a/static/scripts/packed/mvc/base-mvc.js
+++ b/static/scripts/packed/mvc/base-mvc.js
@@ -1,1 +1,1 @@
-var BaseModel=Backbone.RelationalModel.extend({defaults:{name:null,hidden:false},show:function(){this.set("hidden",false)},hide:function(){this.set("hidden",true)},is_visible:function(){return !this.attributes.hidden}});var BaseView=Backbone.View.extend({initialize:function(){this.model.on("change:hidden",this.update_visible,this);this.update_visible()},update_visible:function(){if(this.model.attributes.hidden){this.$el.hide()}else{this.$el.show()}}});var LoggableMixin={logger:null,log:function(){if(this.logger){var a=this.logger.log;if(typeof this.logger.log==="object"){a=Function.prototype.bind.call(this.logger.log,this.logger)}return a.apply(this.logger,arguments)}return undefined}};var PersistentStorage=function(k,g){if(!k){throw ("PersistentStorage needs storageKey argument")}g=g||{};var i=sessionStorage,c=function j(m){var n=this.getItem(m);return(n!==null)?(JSON.parse(this.getItem(m))):(null)},b=function e(m,n){return this.setItem(m,JSON.stringify(n))},d=function f(m){return this.removeItem(m)};function a(n,m){n=n||{};m=m||null;return{get:function(o){if(o===undefined){return n}else{if(n.hasOwnProperty(o)){return(jQuery.type(n[o])==="object")?(new a(n[o],this)):(n[o])}}return undefined},set:function(o,p){n[o]=p;this._save();return this},deleteKey:function(o){delete n[o];this._save();return this},_save:function(){return m._save()},toString:function(){return("StorageRecursionHelper("+n+")")}}}var l={},h=c.call(i,k);if(h===null||h===undefined){h=jQuery.extend(true,{},g);b.call(i,k,h)}l=new a(h);jQuery.extend(l,{_save:function(m){return b.call(i,k,l.get())},destroy:function(){return d.call(i,k)},toString:function(){return"PersistentStorage("+k+")"}});return l};var HiddenUntilActivatedViewMixin={hiddenUntilActivated:function(a,c){c=c||{};this.HUAVOptions={$elementShown:this.$el,showFn:jQuery.prototype.toggle,showSpeed:"fast"};_.extend(this.HUAVOptions,c||{});this.HUAVOptions.hasBeenShown=this.HUAVOptions.$elementShown.is(":visible");if(a){var 
b=this;a.on("click",function(d){b.toggle(b.HUAVOptions.showSpeed)})}},toggle:function(){if(this.HUAVOptions.$elementShown.is(":hidden")){if(!this.HUAVOptions.hasBeenShown){if(_.isFunction(this.HUAVOptions.onshowFirstTime)){this.HUAVOptions.hasBeenShown=true;this.HUAVOptions.onshowFirstTime.call(this)}}else{if(_.isFunction(this.HUAVOptions.onshow)){this.HUAVOptions.onshow.call(this)}}}return this.HUAVOptions.showFn.apply(this.HUAVOptions.$elementShown,arguments)}};
\ No newline at end of file
+var BaseModel=Backbone.RelationalModel.extend({defaults:{name:null,hidden:false},show:function(){this.set("hidden",false)},hide:function(){this.set("hidden",true)},is_visible:function(){return !this.attributes.hidden}});var BaseView=Backbone.View.extend({initialize:function(){this.model.on("change:hidden",this.update_visible,this);this.update_visible()},update_visible:function(){if(this.model.attributes.hidden){this.$el.hide()}else{this.$el.show()}}});var LoggableMixin={logger:null,log:function(){if(this.logger){var a=this.logger.log;if(typeof this.logger.log==="object"){a=Function.prototype.bind.call(this.logger.log,this.logger)}return a.apply(this.logger,arguments)}return undefined}};var SessionStorageModel=Backbone.Model.extend({initialize:function(b,a,d,c){if(!b||!b.hasOwnProperty("id")){throw new Error("SessionStorageModel needs an id on init")}this.save();this.on("change",function(){this.save()})},sync:function(d,b,a){b.trigger("request",b,{},a);var c;switch(d){case"create":c=this._create(b);break;case"read":c=this._read(b);break;case"update":c=this._update(b);break;case"delete":c=this._delete(b);break}if(c!==undefined||c!==null){if(a.success){a.success()}}else{if(a.error){a.error()}}return c},_create:function(a){var b=a.toJSON(),c=sessionStorage.setItem(a.id,JSON.stringify(b));return(c===null)?(c):(b)},_read:function(a){return JSON.parse(sessionStorage.getItem(a.id,JSON.stringify(a.toJSON())))},_update:function(a){return a._create(a)},_delete:function(a){return sessionStorage.removeItem(a.id)},isNew:function(){return !sessionStorage.hasOwnProperty(this.id)}});(function(){SessionStorageModel.prototype=_.omit(SessionStorageModel.prototype,"url","urlRoot")}());var PersistentStorage=function(k,g){if(!k){throw ("PersistentStorage needs storageKey argument")}g=g||{};var i=sessionStorage,c=function j(m){var n=this.getItem(m);return(n!==null)?(JSON.parse(this.getItem(m))):(null)},b=function e(m,n){return this.setItem(m,JSON.stringify(n))},d=function f(m){return 
this.removeItem(m)};function a(n,m){n=n||{};m=m||null;return{get:function(o){if(o===undefined){return n}else{if(n.hasOwnProperty(o)){return(jQuery.type(n[o])==="object")?(new a(n[o],this)):(n[o])}}return undefined},set:function(o,p){n[o]=p;this._save();return this},deleteKey:function(o){delete n[o];this._save();return this},_save:function(){return m._save()},toString:function(){return("StorageRecursionHelper("+n+")")}}}var l={},h=c.call(i,k);if(h===null||h===undefined){h=jQuery.extend(true,{},g);b.call(i,k,h)}l=new a(h);jQuery.extend(l,{_save:function(m){return b.call(i,k,l.get())},destroy:function(){return d.call(i,k)},toString:function(){return"PersistentStorage("+k+")"}});return l};var HiddenUntilActivatedViewMixin={hiddenUntilActivated:function(a,c){c=c||{};this.HUAVOptions={$elementShown:this.$el,showFn:jQuery.prototype.toggle,showSpeed:"fast"};_.extend(this.HUAVOptions,c||{});this.HUAVOptions.hasBeenShown=this.HUAVOptions.$elementShown.is(":visible");if(a){var b=this;a.on("click",function(d){b.toggle(b.HUAVOptions.showSpeed)})}},toggle:function(){if(this.HUAVOptions.$elementShown.is(":hidden")){if(!this.HUAVOptions.hasBeenShown){if(_.isFunction(this.HUAVOptions.onshowFirstTime)){this.HUAVOptions.hasBeenShown=true;this.HUAVOptions.onshowFirstTime.call(this)}}else{if(_.isFunction(this.HUAVOptions.onshow)){this.HUAVOptions.onshow.call(this)}}}return this.HUAVOptions.showFn.apply(this.HUAVOptions.$elementShown,arguments)}};
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: carlfeberhard: UI: move loading indicator into ui.js, add cover option
by commits-noreply@bitbucket.org 21 Nov '13
by commits-noreply@bitbucket.org 21 Nov '13
21 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/32ecdc2fdc00/
Changeset: 32ecdc2fdc00
User: carlfeberhard
Date: 2013-11-21 22:22:58
Summary: UI: move loading indicator into ui.js, add cover option
Affected #: 4 files
diff -r a39f452a7f657f3b43dce87d08e143ed6f02867e -r 32ecdc2fdc00a93652b27774123cde91bd3c512c static/scripts/mvc/base-mvc.js
--- a/static/scripts/mvc/base-mvc.js
+++ b/static/scripts/mvc/base-mvc.js
@@ -260,54 +260,3 @@
return this.HUAVOptions.showFn.apply( this.HUAVOptions.$elementShown, arguments );
}
};
-
-
-
-//==============================================================================
-function LoadingIndicator( $where, options ){
- options = options || {};
- var self = this;
-
- function render(){
- var html = [
- '<div class="loading-indicator">',
- '<span class="fa fa-spinner fa-spin fa-lg" style="color: grey"></span>',
- '<span style="margin-left: 8px; color: grey"><i>loading...</i></span>',
- '</div>'
- ].join( '\n' );
-
- return $( html ).css( options.css || {
- 'position' : 'fixed',
- 'margin' : '12px 0px 0px 10px',
- 'opacity' : '0.85'
- }).hide();
- }
-
- self.show = function( msg, speed, callback ){
- msg = msg || 'loading...';
- speed = speed || 'fast';
- // since position is fixed - we insert as sibling
- self.$indicator = render().insertBefore( $where );
- self.message( msg );
- self.$indicator.fadeIn( speed, callback );
- return self;
- };
-
- self.message = function( msg ){
- self.$indicator.find( 'i' ).text( msg );
- };
-
- self.hide = function( speed, callback ){
- speed = speed || 'fast';
- if( self.$indicator && self.$indicator.size() ){
- self.$indicator.fadeOut( speed, function(){
- self.$indicator.remove();
- if( callback ){ callback(); }
- });
- } else {
- if( callback ){ callback(); }
- }
- return self;
- };
- return self;
-}
diff -r a39f452a7f657f3b43dce87d08e143ed6f02867e -r 32ecdc2fdc00a93652b27774123cde91bd3c512c static/scripts/mvc/ui.js
--- a/static/scripts/mvc/ui.js
+++ b/static/scripts/mvc/ui.js
@@ -465,8 +465,10 @@
return popupMenusCreated;
};
+
+//==============================================================================
var faIconButton = function( options ){
- //TODO: move out of global
+//TODO: move out of global
options = options || {};
options.tooltipConfig = options.tooltipConfig || { placement: 'bottom' };
@@ -491,6 +493,8 @@
return $button;
};
+
+//==============================================================================
var searchInput = function( options ){
//TODO: move out of global
//TODO: consolidate with tool menu functionality, use there
@@ -589,3 +593,88 @@
}
return $searchInput.append([ $input(), $clearBtn() ]);
};
+
+
+//==============================================================================
+function LoadingIndicator( $where, options ){
+//TODO: move out of global
+//TODO: too specific to history panel
+
+ var self = this;
+ // defaults
+ options = jQuery.extend({
+ cover : false
+ }, options || {} );
+
+ function render(){
+ var html = [
+ '<div class="loading-indicator">',
+ '<div class="loading-indicator-text">',
+ '<span class="fa fa-spinner fa-spin fa-lg"></span>',
+ '<span class="loading-indicator-message">loading...</span>',
+ '</div>',
+ '</div>'
+ ].join( '\n' );
+
+ var $indicator = $( html ).hide().css( options.css || {
+ position : 'fixed'
+ }),
+ $text = $indicator.children( '.loading-indicator-text' );
+
+ if( options.cover ){
+ $indicator.css({
+ 'z-index' : 2,
+ top : $where.css( 'top' ),
+ bottom : $where.css( 'bottom' ),
+ left : $where.css( 'left' ),
+ right : $where.css( 'right' ),
+ opacity : 0.5,
+ 'background-color': 'white',
+ 'text-align': 'center'
+ });
+ $text = $indicator.children( '.loading-indicator-text' ).css({
+ 'margin-top' : '20px'
+ });
+
+ } else {
+ $text = $indicator.children( '.loading-indicator-text' ).css({
+ margin : '12px 0px 0px 10px',
+ opacity : '0.85',
+ color : 'grey'
+ });
+ $text.children( '.loading-indicator-message' ).css({
+ margin : '0px 8px 0px 0px',
+ 'font-style' : 'italic'
+ });
+ }
+ return $indicator;
+ }
+
+ self.show = function( msg, speed, callback ){
+ msg = msg || 'loading...';
+ speed = speed || 'fast';
+ // since position is fixed - we insert as sibling
+ self.$indicator = render().insertBefore( $where );
+ self.message( msg );
+ self.$indicator.fadeIn( speed, callback );
+ return self;
+ };
+
+ self.message = function( msg ){
+ self.$indicator.find( 'i' ).text( msg );
+ };
+
+ self.hide = function( speed, callback ){
+ speed = speed || 'fast';
+ if( self.$indicator && self.$indicator.size() ){
+ self.$indicator.fadeOut( speed, function(){
+ self.$indicator.remove();
+ if( callback ){ callback(); }
+ });
+ } else {
+ if( callback ){ callback(); }
+ }
+ return self;
+ };
+ return self;
+}
diff -r a39f452a7f657f3b43dce87d08e143ed6f02867e -r 32ecdc2fdc00a93652b27774123cde91bd3c512c static/scripts/packed/mvc/base-mvc.js
--- a/static/scripts/packed/mvc/base-mvc.js
+++ b/static/scripts/packed/mvc/base-mvc.js
@@ -1,1 +1,1 @@
-var BaseModel=Backbone.RelationalModel.extend({defaults:{name:null,hidden:false},show:function(){this.set("hidden",false)},hide:function(){this.set("hidden",true)},is_visible:function(){return !this.attributes.hidden}});var BaseView=Backbone.View.extend({initialize:function(){this.model.on("change:hidden",this.update_visible,this);this.update_visible()},update_visible:function(){if(this.model.attributes.hidden){this.$el.hide()}else{this.$el.show()}}});var LoggableMixin={logger:null,log:function(){if(this.logger){var a=this.logger.log;if(typeof this.logger.log==="object"){a=Function.prototype.bind.call(this.logger.log,this.logger)}return a.apply(this.logger,arguments)}return undefined}};var PersistentStorage=function(k,g){if(!k){throw ("PersistentStorage needs storageKey argument")}g=g||{};var i=sessionStorage,c=function j(m){var n=this.getItem(m);return(n!==null)?(JSON.parse(this.getItem(m))):(null)},b=function e(m,n){return this.setItem(m,JSON.stringify(n))},d=function f(m){return this.removeItem(m)};function a(n,m){n=n||{};m=m||null;return{get:function(o){if(o===undefined){return n}else{if(n.hasOwnProperty(o)){return(jQuery.type(n[o])==="object")?(new a(n[o],this)):(n[o])}}return undefined},set:function(o,p){n[o]=p;this._save();return this},deleteKey:function(o){delete n[o];this._save();return this},_save:function(){return m._save()},toString:function(){return("StorageRecursionHelper("+n+")")}}}var l={},h=c.call(i,k);if(h===null||h===undefined){h=jQuery.extend(true,{},g);b.call(i,k,h)}l=new a(h);jQuery.extend(l,{_save:function(m){return b.call(i,k,l.get())},destroy:function(){return d.call(i,k)},toString:function(){return"PersistentStorage("+k+")"}});return l};var HiddenUntilActivatedViewMixin={hiddenUntilActivated:function(a,c){c=c||{};this.HUAVOptions={$elementShown:this.$el,showFn:jQuery.prototype.toggle,showSpeed:"fast"};_.extend(this.HUAVOptions,c||{});this.HUAVOptions.hasBeenShown=this.HUAVOptions.$elementShown.is(":visible");if(a){var 
b=this;a.on("click",function(d){b.toggle(b.HUAVOptions.showSpeed)})}},toggle:function(){if(this.HUAVOptions.$elementShown.is(":hidden")){if(!this.HUAVOptions.hasBeenShown){if(_.isFunction(this.HUAVOptions.onshowFirstTime)){this.HUAVOptions.hasBeenShown=true;this.HUAVOptions.onshowFirstTime.call(this)}}else{if(_.isFunction(this.HUAVOptions.onshow)){this.HUAVOptions.onshow.call(this)}}}return this.HUAVOptions.showFn.apply(this.HUAVOptions.$elementShown,arguments)}};function LoadingIndicator(a,c){c=c||{};var b=this;function d(){var e=['<div class="loading-indicator">','<span class="fa fa-spinner fa-spin fa-lg" style="color: grey"></span>','<span style="margin-left: 8px; color: grey"><i>loading...</i></span>',"</div>"].join("\n");return $(e).css(c.css||{position:"fixed",margin:"12px 0px 0px 10px",opacity:"0.85"}).hide()}b.show=function(f,e,g){f=f||"loading...";e=e||"fast";b.$indicator=d().insertBefore(a);b.message(f);b.$indicator.fadeIn(e,g);return b};b.message=function(e){b.$indicator.find("i").text(e)};b.hide=function(e,f){e=e||"fast";if(b.$indicator&&b.$indicator.size()){b.$indicator.fadeOut(e,function(){b.$indicator.remove();if(f){f()}})}else{if(f){f()}}return b};return b};
\ No newline at end of file
+var BaseModel=Backbone.RelationalModel.extend({defaults:{name:null,hidden:false},show:function(){this.set("hidden",false)},hide:function(){this.set("hidden",true)},is_visible:function(){return !this.attributes.hidden}});var BaseView=Backbone.View.extend({initialize:function(){this.model.on("change:hidden",this.update_visible,this);this.update_visible()},update_visible:function(){if(this.model.attributes.hidden){this.$el.hide()}else{this.$el.show()}}});var LoggableMixin={logger:null,log:function(){if(this.logger){var a=this.logger.log;if(typeof this.logger.log==="object"){a=Function.prototype.bind.call(this.logger.log,this.logger)}return a.apply(this.logger,arguments)}return undefined}};var PersistentStorage=function(k,g){if(!k){throw ("PersistentStorage needs storageKey argument")}g=g||{};var i=sessionStorage,c=function j(m){var n=this.getItem(m);return(n!==null)?(JSON.parse(this.getItem(m))):(null)},b=function e(m,n){return this.setItem(m,JSON.stringify(n))},d=function f(m){return this.removeItem(m)};function a(n,m){n=n||{};m=m||null;return{get:function(o){if(o===undefined){return n}else{if(n.hasOwnProperty(o)){return(jQuery.type(n[o])==="object")?(new a(n[o],this)):(n[o])}}return undefined},set:function(o,p){n[o]=p;this._save();return this},deleteKey:function(o){delete n[o];this._save();return this},_save:function(){return m._save()},toString:function(){return("StorageRecursionHelper("+n+")")}}}var l={},h=c.call(i,k);if(h===null||h===undefined){h=jQuery.extend(true,{},g);b.call(i,k,h)}l=new a(h);jQuery.extend(l,{_save:function(m){return b.call(i,k,l.get())},destroy:function(){return d.call(i,k)},toString:function(){return"PersistentStorage("+k+")"}});return l};var HiddenUntilActivatedViewMixin={hiddenUntilActivated:function(a,c){c=c||{};this.HUAVOptions={$elementShown:this.$el,showFn:jQuery.prototype.toggle,showSpeed:"fast"};_.extend(this.HUAVOptions,c||{});this.HUAVOptions.hasBeenShown=this.HUAVOptions.$elementShown.is(":visible");if(a){var 
b=this;a.on("click",function(d){b.toggle(b.HUAVOptions.showSpeed)})}},toggle:function(){if(this.HUAVOptions.$elementShown.is(":hidden")){if(!this.HUAVOptions.hasBeenShown){if(_.isFunction(this.HUAVOptions.onshowFirstTime)){this.HUAVOptions.hasBeenShown=true;this.HUAVOptions.onshowFirstTime.call(this)}}else{if(_.isFunction(this.HUAVOptions.onshow)){this.HUAVOptions.onshow.call(this)}}}return this.HUAVOptions.showFn.apply(this.HUAVOptions.$elementShown,arguments)}};
\ No newline at end of file
diff -r a39f452a7f657f3b43dce87d08e143ed6f02867e -r 32ecdc2fdc00a93652b27774123cde91bd3c512c static/scripts/packed/mvc/ui.js
--- a/static/scripts/packed/mvc/ui.js
+++ b/static/scripts/packed/mvc/ui.js
@@ -1,1 +1,1 @@
-var IconButton=Backbone.Model.extend({defaults:{title:"",icon_class:"",on_click:null,menu_options:null,is_menu_button:true,id:null,href:null,target:null,enabled:true,visible:true,tooltip_config:{}}});var IconButtonView=Backbone.View.extend({initialize:function(){this.model.attributes.tooltip_config={placement:"bottom"};this.model.bind("change",this.render,this)},render:function(){this.$el.tooltip("hide");var a=this.template(this.model.toJSON());a.tooltip(this.model.get("tooltip_config"));this.$el.replaceWith(a);this.setElement(a);return this},events:{click:"click"},click:function(a){if(_.isFunction(this.model.get("on_click"))){this.model.get("on_click")(a);return false}return true},template:function(b){var a='title="'+b.title+'" class="icon-button';if(b.is_menu_button){a+=" menu-button"}a+=" "+b.icon_class;if(!b.enabled){a+="_disabled"}a+='"';if(b.id){a+=' id="'+b.id+'"'}a+=' href="'+b.href+'"';if(b.target){a+=' target="'+b.target+'"'}if(!b.visible){a+=' style="display: none;"'}if(b.enabled){a="<a "+a+"/>"}else{a="<span "+a+"/>"}return $(a)}});var IconButtonCollection=Backbone.Collection.extend({model:IconButton});var IconButtonMenuView=Backbone.View.extend({tagName:"div",initialize:function(){this.render()},render:function(){var a=this;this.collection.each(function(d){var b=$("<a/>").attr("href","javascript:void(0)").attr("title",d.attributes.title).addClass("icon-button menu-button").addClass(d.attributes.icon_class).appendTo(a.$el).click(d.attributes.on_click);if(d.attributes.tooltip_config){b.tooltip(d.attributes.tooltip_config)}var c=d.get("options");if(c){make_popupmenu(b,c)}});return this}});var create_icon_buttons_menu=function(b,a){if(!a){a={}}var c=new IconButtonCollection(_.map(b,function(d){return new IconButton(_.extend(d,a))}));return new IconButtonMenuView({collection:c})};var Grid=Backbone.Collection.extend({});var GridView=Backbone.View.extend({});var 
PopupMenu=Backbone.View.extend({initialize:function(b,a){this.$button=b||$("<div/>");this.options=a||[];var c=this;this.$button.click(function(d){c._renderAndShow(d);return false})},_renderAndShow:function(a){this.render();this.$el.appendTo("body");this.$el.css(this._getShownPosition(a));this._setUpCloseBehavior();this.$el.show()},render:function(){this.$el.addClass("popmenu-wrapper").hide().css({position:"absolute"}).html(this.template(this.$button.attr("id"),this.options));if(this.options.length){var a=this;this.$el.find("li").each(function(c,b){var d=a.options[c];if(d.func){$(this).children("a.popupmenu-option").click(function(e){d.func.call(a,e,d)})}})}return this},template:function(b,a){return['<ul id="',b,'-menu" class="dropdown-menu">',this._templateOptions(a),"</ul>"].join("")},_templateOptions:function(a){if(!a.length){return"<li>(no options)</li>"}return _.map(a,function(d){if(d.divider){return'<li class="divider"></li>'}else{if(d.header){return['<li class="head"><a href="javascript:void(0);">',d.html,"</a></li>"].join("")}}var c=d.href||"javascript:void(0);",e=(d.target)?(' target="'+d.target+'"'):(""),b=(d.checked)?('<span class="fa fa-check"></span>'):("");return['<li><a class="popupmenu-option" href="',c,'"',e,">",b,d.html,"</a></li>"].join("")}).join("")},_getShownPosition:function(b){var c=this.$el.width();var a=b.pageX-c/2;a=Math.min(a,$(document).scrollLeft()+$(window).width()-c-5);a=Math.max(a,$(document).scrollLeft()+5);return{top:b.pageY,left:a}},_setUpCloseBehavior:function(){var b=this;var a=function(c){c.one("click.close_popup",function(){b.remove()})};a($(window.document));a($(window.top.document));_.each(window.top.frames,function(c){a($(c.document))})},addItem:function(b,a){a=(a>=0)?a:this.options.length;this.options.splice(a,0,b);return this},removeItem:function(a){if(a>=0){this.options.splice(a,1)}return this},findIndexByHtml:function(b){for(var 
a=0;a<this.options.length;a++){if(_.has(this.options[a],"html")&&(this.options[a].html===b)){return a}}return null},findItemByHtml:function(a){return this.options[(this.findIndexByHtml(a))]},toString:function(){return"PopupMenu"}});PopupMenu.make_popupmenu=function(b,c){var a=[];_.each(c,function(f,d){var e={html:d};if(f===null){e.header=true}else{if(jQuery.type(f)==="function"){e.func=f}}a.push(e)});return new PopupMenu($(b),a)};PopupMenu.convertLinksToOptions=function(c,a){c=$(c);a=a||"a";var b=[];c.find(a).each(function(g,e){var f={},d=$(g);f.html=d.text();if(d.attr("href")){var j=d.attr("href"),k=d.attr("target"),h=d.attr("confirm");f.func=function(){if((h)&&(!confirm(h))){return}switch(k){case"_parent":window.parent.location=j;break;case"_top":window.top.location=j;break;default:window.location=j}}}b.push(f)});return b};PopupMenu.fromExistingDom=function(d,c,a){d=$(d);c=$(c);var b=PopupMenu.convertLinksToOptions(c,a);c.remove();return new PopupMenu(d,b)};PopupMenu.make_popup_menus=function(c,b,d){c=c||document;b=b||"div[popupmenu]";d=d||function(e,f){return"#"+e.attr("popupmenu")};var a=[];$(c).find(b).each(function(){var e=$(this),f=$(c).find(d(e,c));a.push(PopupMenu.fromDom(f,e));f.addClass("popup")});return a};var faIconButton=function(a){a=a||{};a.tooltipConfig=a.tooltipConfig||{placement:"bottom"};a.classes=["icon-btn"].concat(a.classes||[]);if(a.disabled){a.classes.push("disabled")}var b=['<a class="',a.classes.join(" "),'"',((a.title)?(' title="'+a.title+'"'):("")),((a.target)?(' target="'+a.target+'"'):("")),' href="',((a.href)?(a.href):("javascript:void(0);")),'">','<span class="fa ',a.faIcon,'"></span>',"</a>"].join("");var c=$(b).tooltip(a.tooltipConfig);if(_.isFunction(a.onclick)){c.click(a.onclick)}return c};var searchInput=function(k){var 
a=27,h=13,i=$("<div/>"),b={initialVal:"",name:"search",placeholder:"search",classes:"",onclear:function(){},onsearch:function(l){},minSearchLen:0,escWillClear:true,oninit:function(){}};if(jQuery.type(k)==="object"){k=jQuery.extend(true,b,k)}function d(l){var m=$(this).parent().children("input");m.val("");m.trigger("clear:searchInput");k.onclear()}function j(m,l){$(this).trigger("search:searchInput",l);k.onsearch(l)}function c(){return['<input type="text" name="',k.name,'" placeholder="',k.placeholder,'" ','class="search-query ',k.classes,'" ',"/>"].join("")}function g(){return $(c()).css({width:"100%","padding-right":"24px"}).focus(function(l){$(this).select()}).keyup(function(m){if(m.which===a&&k.escWillClear){d.call(this,m)}else{var l=$(this).val();if((m.which===h)||(k.minSearchLen&&l.length>=k.minSearchLen)){j.call(this,m,l)}else{if(!l.length){d.call(this,m)}}}}).val(k.initialVal)}function f(){return'<span class="search-clear fa fa-times-circle"></span>'}function e(){return $(f()).css({position:"absolute",right:"15px","font-size":"1.4em","line-height":"23px",color:"grey"}).click(function(l){d.call(this,l)})}return i.append([g(),e()])};
\ No newline at end of file
+var IconButton=Backbone.Model.extend({defaults:{title:"",icon_class:"",on_click:null,menu_options:null,is_menu_button:true,id:null,href:null,target:null,enabled:true,visible:true,tooltip_config:{}}});var IconButtonView=Backbone.View.extend({initialize:function(){this.model.attributes.tooltip_config={placement:"bottom"};this.model.bind("change",this.render,this)},render:function(){this.$el.tooltip("hide");var a=this.template(this.model.toJSON());a.tooltip(this.model.get("tooltip_config"));this.$el.replaceWith(a);this.setElement(a);return this},events:{click:"click"},click:function(a){if(_.isFunction(this.model.get("on_click"))){this.model.get("on_click")(a);return false}return true},template:function(b){var a='title="'+b.title+'" class="icon-button';if(b.is_menu_button){a+=" menu-button"}a+=" "+b.icon_class;if(!b.enabled){a+="_disabled"}a+='"';if(b.id){a+=' id="'+b.id+'"'}a+=' href="'+b.href+'"';if(b.target){a+=' target="'+b.target+'"'}if(!b.visible){a+=' style="display: none;"'}if(b.enabled){a="<a "+a+"/>"}else{a="<span "+a+"/>"}return $(a)}});var IconButtonCollection=Backbone.Collection.extend({model:IconButton});var IconButtonMenuView=Backbone.View.extend({tagName:"div",initialize:function(){this.render()},render:function(){var a=this;this.collection.each(function(d){var b=$("<a/>").attr("href","javascript:void(0)").attr("title",d.attributes.title).addClass("icon-button menu-button").addClass(d.attributes.icon_class).appendTo(a.$el).click(d.attributes.on_click);if(d.attributes.tooltip_config){b.tooltip(d.attributes.tooltip_config)}var c=d.get("options");if(c){make_popupmenu(b,c)}});return this}});var create_icon_buttons_menu=function(b,a){if(!a){a={}}var c=new IconButtonCollection(_.map(b,function(d){return new IconButton(_.extend(d,a))}));return new IconButtonMenuView({collection:c})};var Grid=Backbone.Collection.extend({});var GridView=Backbone.View.extend({});var 
PopupMenu=Backbone.View.extend({initialize:function(b,a){this.$button=b||$("<div/>");this.options=a||[];var c=this;this.$button.click(function(d){c._renderAndShow(d);return false})},_renderAndShow:function(a){this.render();this.$el.appendTo("body");this.$el.css(this._getShownPosition(a));this._setUpCloseBehavior();this.$el.show()},render:function(){this.$el.addClass("popmenu-wrapper").hide().css({position:"absolute"}).html(this.template(this.$button.attr("id"),this.options));if(this.options.length){var a=this;this.$el.find("li").each(function(c,b){var d=a.options[c];if(d.func){$(this).children("a.popupmenu-option").click(function(e){d.func.call(a,e,d)})}})}return this},template:function(b,a){return['<ul id="',b,'-menu" class="dropdown-menu">',this._templateOptions(a),"</ul>"].join("")},_templateOptions:function(a){if(!a.length){return"<li>(no options)</li>"}return _.map(a,function(d){if(d.divider){return'<li class="divider"></li>'}else{if(d.header){return['<li class="head"><a href="javascript:void(0);">',d.html,"</a></li>"].join("")}}var c=d.href||"javascript:void(0);",e=(d.target)?(' target="'+d.target+'"'):(""),b=(d.checked)?('<span class="fa fa-check"></span>'):("");return['<li><a class="popupmenu-option" href="',c,'"',e,">",b,d.html,"</a></li>"].join("")}).join("")},_getShownPosition:function(b){var c=this.$el.width();var a=b.pageX-c/2;a=Math.min(a,$(document).scrollLeft()+$(window).width()-c-5);a=Math.max(a,$(document).scrollLeft()+5);return{top:b.pageY,left:a}},_setUpCloseBehavior:function(){var b=this;var a=function(c){c.one("click.close_popup",function(){b.remove()})};a($(window.document));a($(window.top.document));_.each(window.top.frames,function(c){a($(c.document))})},addItem:function(b,a){a=(a>=0)?a:this.options.length;this.options.splice(a,0,b);return this},removeItem:function(a){if(a>=0){this.options.splice(a,1)}return this},findIndexByHtml:function(b){for(var 
a=0;a<this.options.length;a++){if(_.has(this.options[a],"html")&&(this.options[a].html===b)){return a}}return null},findItemByHtml:function(a){return this.options[(this.findIndexByHtml(a))]},toString:function(){return"PopupMenu"}});PopupMenu.make_popupmenu=function(b,c){var a=[];_.each(c,function(f,d){var e={html:d};if(f===null){e.header=true}else{if(jQuery.type(f)==="function"){e.func=f}}a.push(e)});return new PopupMenu($(b),a)};PopupMenu.convertLinksToOptions=function(c,a){c=$(c);a=a||"a";var b=[];c.find(a).each(function(g,e){var f={},d=$(g);f.html=d.text();if(d.attr("href")){var j=d.attr("href"),k=d.attr("target"),h=d.attr("confirm");f.func=function(){if((h)&&(!confirm(h))){return}switch(k){case"_parent":window.parent.location=j;break;case"_top":window.top.location=j;break;default:window.location=j}}}b.push(f)});return b};PopupMenu.fromExistingDom=function(d,c,a){d=$(d);c=$(c);var b=PopupMenu.convertLinksToOptions(c,a);c.remove();return new PopupMenu(d,b)};PopupMenu.make_popup_menus=function(c,b,d){c=c||document;b=b||"div[popupmenu]";d=d||function(e,f){return"#"+e.attr("popupmenu")};var a=[];$(c).find(b).each(function(){var e=$(this),f=$(c).find(d(e,c));a.push(PopupMenu.fromDom(f,e));f.addClass("popup")});return a};var faIconButton=function(a){a=a||{};a.tooltipConfig=a.tooltipConfig||{placement:"bottom"};a.classes=["icon-btn"].concat(a.classes||[]);if(a.disabled){a.classes.push("disabled")}var b=['<a class="',a.classes.join(" "),'"',((a.title)?(' title="'+a.title+'"'):("")),((a.target)?(' target="'+a.target+'"'):("")),' href="',((a.href)?(a.href):("javascript:void(0);")),'">','<span class="fa ',a.faIcon,'"></span>',"</a>"].join("");var c=$(b).tooltip(a.tooltipConfig);if(_.isFunction(a.onclick)){c.click(a.onclick)}return c};var searchInput=function(k){var 
a=27,h=13,i=$("<div/>"),b={initialVal:"",name:"search",placeholder:"search",classes:"",onclear:function(){},onsearch:function(l){},minSearchLen:0,escWillClear:true,oninit:function(){}};if(jQuery.type(k)==="object"){k=jQuery.extend(true,b,k)}function d(l){var m=$(this).parent().children("input");m.val("");m.trigger("clear:searchInput");k.onclear()}function j(m,l){$(this).trigger("search:searchInput",l);k.onsearch(l)}function c(){return['<input type="text" name="',k.name,'" placeholder="',k.placeholder,'" ','class="search-query ',k.classes,'" ',"/>"].join("")}function g(){return $(c()).css({width:"100%","padding-right":"24px"}).focus(function(l){$(this).select()}).keyup(function(m){if(m.which===a&&k.escWillClear){d.call(this,m)}else{var l=$(this).val();if((m.which===h)||(k.minSearchLen&&l.length>=k.minSearchLen)){j.call(this,m,l)}else{if(!l.length){d.call(this,m)}}}}).val(k.initialVal)}function f(){return'<span class="search-clear fa fa-times-circle"></span>'}function e(){return $(f()).css({position:"absolute",right:"15px","font-size":"1.4em","line-height":"23px",color:"grey"}).click(function(l){d.call(this,l)})}return i.append([g(),e()])};function LoadingIndicator(a,c){var b=this;c=jQuery.extend({cover:false},c||{});function d(){var e=['<div class="loading-indicator">','<div class="loading-indicator-text">','<span class="fa fa-spinner fa-spin fa-lg"></span>','<span class="loading-indicator-message">loading...</span>',"</div>","</div>"].join("\n");var g=$(e).hide().css(c.css||{position:"fixed"}),f=g.children(".loading-indicator-text");if(c.cover){g.css({"z-index":2,top:a.css("top"),bottom:a.css("bottom"),left:a.css("left"),right:a.css("right"),opacity:0.5,"background-color":"white","text-align":"center"});f=g.children(".loading-indicator-text").css({"margin-top":"20px"})}else{f=g.children(".loading-indicator-text").css({margin:"12px 0px 0px 10px",opacity:"0.85",color:"grey"});f.children(".loading-indicator-message").css({margin:"0px 8px 0px 
0px","font-style":"italic"})}return g}b.show=function(f,e,g){f=f||"loading...";e=e||"fast";b.$indicator=d().insertBefore(a);b.message(f);b.$indicator.fadeIn(e,g);return b};b.message=function(e){b.$indicator.find("i").text(e)};b.hide=function(e,f){e=e||"fast";if(b.$indicator&&b.$indicator.size()){b.$indicator.fadeOut(e,function(){b.$indicator.remove();if(f){f()}})}else{if(f){f()}}return b};return b};
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: carlfeberhard: UI, History panel: connect search to HDAs
by commits-noreply@bitbucket.org 21 Nov '13
by commits-noreply@bitbucket.org 21 Nov '13
21 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/a39f452a7f65/
Changeset: a39f452a7f65
User: carlfeberhard
Date: 2013-11-21 20:16:09
Summary: UI, History panel: connect search to HDAs
Affected #: 8 files
diff -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 -r a39f452a7f657f3b43dce87d08e143ed6f02867e static/scripts/mvc/dataset/hda-model.js
--- a/static/scripts/mvc/dataset/hda-model.js
+++ b/static/scripts/mvc/dataset/hda-model.js
@@ -218,30 +218,97 @@
},
// ........................................................................ sorting/filtering
- /** what attributes of an HDA will be used in a search */
- searchKeys : [
+
+ // ........................................................................ search
+ /** what attributes of an HDA will be used in a text search */
+ searchAttributes : [
'name', 'file_ext', 'genome_build', 'misc_blurb', 'misc_info', 'annotation', 'tags'
],
- /** search this HDA for the string searchFor
- * @param {String} searchFor look for this string in all attributes listed in searchKeys (above) using indexOf
- * @returns {Array} an array of attribute keys where searchFor was found
+ /** our attr keys don't often match the labels we display to the user - so, when using
+ * attribute specifiers ('name="bler"') in a term, allow passing in aliases for the
+ * following attr keys.
+ */
+ searchAliases : {
+ title : 'name',
+ format : 'file_ext',
+ database : 'genome_build',
+ blurb : 'misc_blurb',
+ description : 'misc_blurb',
+ info : 'misc_info',
+ tag : 'tags'
+ },
+
+ /** search the attribute with key attrKey for the string searchFor; T/F if found */
+ searchAttribute : function( attrKey, searchFor ){
+ var attrVal = this.get( attrKey );
+ //console.debug( 'searchAttribute', attrKey, attrVal, searchFor );
+ // bail if empty searchFor or unsearchable values
+ if( !searchFor
+ || ( attrVal === undefined || attrVal === null ) ){
+ return false;
+ }
+ // pass to sep. fn for deep search of array attributes
+ if( _.isArray( attrVal ) ){ return this._searchArrayAttribute( attrVal, searchFor ); }
+ return ( attrVal.toString().toLowerCase().indexOf( searchFor.toLowerCase() ) !== -1 );
+ },
+
+ /** deep(er) search for array attributes; T/F if found */
+ _searchArrayAttribute : function( array, searchFor ){
+ //console.debug( '_searchArrayAttribute', array, searchFor );
+ searchFor = searchFor.toLowerCase();
+ //precondition: searchFor has already been validated as non-empty string
+ //precondition: assumes only 1 level array
+ //TODO: could possibly break up searchFor more (CSV...)
+ return _.any( array, function( elem ){
+ return ( elem.toString().toLowerCase().indexOf( searchFor.toLowerCase() ) !== -1 );
+ });
+ },
+
+ /** search all searchAttributes for the string searchFor,
+ * returning a list of keys of attributes that contain searchFor
*/
search : function( searchFor ){
var model = this;
- searchFor = searchFor.toLowerCase();
- return _.filter( this.searchKeys, function( key ){
- var attr = model.get( key );
- return ( _.isString( attr ) && attr.toLowerCase().indexOf( searchFor ) !== -1 );
+ return _.filter( this.searchAttributes, function( key ){
+ return model.searchAttribute( key, searchFor );
});
},
- /** alias of search, but returns a boolean
- * @param {String} matchesWhat look for this string in all attributes listed in searchKeys (above) using indexOf
- * @returns {Boolean} was matchesWhat found in any attributes
+ /** alias of search, but returns a boolean; accepts attribute specifiers where
+ * the attributes searched can be narrowed to a single attribute using
+ * the form: matches( 'genome_build=hg19' )
+ * (the attribute keys allowed can also be aliases to the true attribute key;
+ * see searchAliases above)
+ * @param {String} term plain text or ATTR_SPECIFIER sep. key=val pair
+ * @returns {Boolean} was term found in (any) attribute(s)
*/
- matches : function( matchesWhat ){
- return !!this.search( matchesWhat ).length;
+ matches : function( term ){
+ var ATTR_SPECIFIER = '=',
+ split = term.split( ATTR_SPECIFIER );
+ // attribute is specified - search only that
+ if( split.length >= 2 ){
+ var attrKey = split[0];
+ attrKey = this.searchAliases[ attrKey ] || attrKey;
+ return this.searchAttribute( attrKey, split[1] );
+ }
+ // no attribute is specified - search all attributes in searchAttributes
+ return !!this.search( term ).length;
+ },
+
+ /** an implicit AND search for all terms; IOW, an hda must match all terms given
+ * where terms is a whitespace separated value string.
+ * e.g. given terms of: 'blah bler database=hg19'
+ * an HDA would have to have attributes containing blah AND bler AND a genome_build == hg19
+ * To include whitespace in terms: wrap the term in double quotations.
+ */
+ matchesAll : function( terms ){
+ var model = this;
+ // break the terms up by whitespace and filter out the empty strings
+ terms = terms.match( /(".*"|\w*=".*"|\S*)/g ).filter( function( s ){ return !!s; });
+ return _.all( terms, function( term ){
+ return model.matches( term );
+ });
},
// ........................................................................ misc
@@ -389,8 +456,28 @@
* @returns array of hda models
* @see HistoryDatasetAssociation#isVisible
*/
- getVisible : function( show_deleted, show_hidden ){
- return this.filter( function( item ){ return item.isVisible( show_deleted, show_hidden ); });
+ getVisible : function( show_deleted, show_hidden, filters ){
+ filters = filters || [];
+ //console.debug( 'filters:', filters );
+ //TODO:?? why doesn't this return a collection?
+ // always filter by show deleted/hidden first
+ var filteredHdas = new HDACollection( this.filter( function( item ){
+ return item.isVisible( show_deleted, show_hidden );
+ }));
+
+ _.each( filters, function( filter_fn ){
+ if( !_.isFunction( filter_fn ) ){ return; }
+ filteredHdas = new HDACollection( filteredHdas.filter( filter_fn ) );
+ });
+ //if( filteredHdas.length ){
+ // console.debug( 'filteredHdas:' );
+ // filteredHdas.each( function( hda ){
+ // console.debug( '\t', hda );
+ // });
+ //} else {
+ // console.warn( 'no visible hdas' );
+ //}
+ return filteredHdas;
},
// ........................................................................ ajax
@@ -433,7 +520,7 @@
/** String representation. */
toString : function(){
- return ( 'HDACollection()' );
+ return ([ 'HDACollection(', [ this.historyId, this.length ].join(), ')' ].join( '' ));
}
});
diff -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 -r a39f452a7f657f3b43dce87d08e143ed6f02867e static/scripts/mvc/history/history-panel.js
--- a/static/scripts/mvc/history/history-panel.js
+++ b/static/scripts/mvc/history/history-panel.js
@@ -78,6 +78,9 @@
/** loading indicator */
this.indicator = new LoadingIndicator( this.$el );
+ /** filters for displaying hdas */
+ this.filters = [];
+
// ---- handle models passed on init
if( this.model ){
this._setUpWebStorage( attributes.initiallyExpanded, attributes.show_deleted, attributes.show_hidden );
@@ -579,21 +582,25 @@
* @fires: rendered when the panel is attached and fully visible
* @see Backbone.View#render
*/
- render : function( callback ){
+ render : function( speed, callback ){
+ // send a speed of 0 to have no fade in/out performed
+ speed = ( speed === undefined )?( this.fxSpeed ):( speed );
var panel = this,
$newRender;
+ // handle the possibility of no model (can occur if fetching the model returns an error)
if( this.model ){
$newRender = this.renderModel();
} else {
$newRender = this.renderWithoutModel();
}
+
// fade out existing, swap with the new, fade in, set up behaviours
$( panel ).queue( 'fx', [
function( next ){
//panel.$el.fadeTo( panel.fxSpeed, 0.0001, next );
- if( panel.$el.is( ':visible' ) ){
- panel.$el.fadeOut( panel.fxSpeed, next );
+ if( speed && panel.$el.is( ':visible' ) ){
+ panel.$el.fadeOut( speed, next );
} else {
next();
}
@@ -604,12 +611,20 @@
if( $newRender ){
panel.$el.append( $newRender.children() );
}
- panel.$el.fadeIn( panel.fxSpeed, next );
+ next();
+ },
+ function( next ){
+ if( speed && !panel.$el.is( ':visible' ) ){
+ panel.$el.fadeIn( speed, next );
+ } else {
+ next();
+ }
},
function( next ){
//TODO: ideally, these would be set up before the fade in (can't because of async save text)
if( callback ){ callback.call( this ); }
panel.trigger( 'rendered', this );
+ next();
}
]);
return this;
@@ -640,12 +655,7 @@
this._setUpBehaviours( $newRender );
// render hda views (if any and any shown (show_deleted/hidden)
- //TODO: this seems too elaborate
- if( !this.model.hdas.length
- || !this.renderHdas( $newRender.find( this.datasetsSelector ) ) ){
- // if history is empty or no hdas would be rendered, show the empty message
- $newRender.find( this.emptyMsgSelector ).show();
- }
+ this.renderHdas( $newRender );
return $newRender;
},
@@ -702,20 +712,32 @@
* @returns the number of visible hda views
*/
renderHdas : function( $whereTo ){
+ $whereTo = $whereTo || this.$el;
this.hdaViews = {};
var historyView = this,
+ $datasetsList = $whereTo.find( this.datasetsSelector ),
// only render the shown hdas
//TODO: switch to more general filtered pattern
visibleHdas = this.model.hdas.getVisible(
this.storage.get( 'show_deleted' ),
- this.storage.get( 'show_hidden' )
+ this.storage.get( 'show_hidden' ),
+ this.filters
);
+ //console.debug( 'renderHdas, visibleHdas:', visibleHdas, $whereTo );
+ $datasetsList.empty();
- _.each( visibleHdas, function( hda ){
- // render it (NOTE: reverse order, newest on top (prepend))
- $whereTo.prepend( historyView.createHdaView( hda ).$el );
- });
- return visibleHdas.length;
+ if( visibleHdas.length ){
+ visibleHdas.each( function( hda ){
+ // render it (NOTE: reverse order, newest on top (prepend))
+ $datasetsList.prepend( historyView.createHdaView( hda ).$el );
+ });
+ $whereTo.find( this.emptyMsgSelector ).hide();
+
+ } else {
+ //console.debug( 'emptyMsg:', $whereTo.find( this.emptyMsgSelector ) )
+ $whereTo.find( this.emptyMsgSelector ).show();
+ }
+ return this.hdaViews;
},
// ------------------------------------------------------------------------ panel events
@@ -763,6 +785,55 @@
return this.storage.get( 'show_hidden' );
},
+ // ........................................................................ filters
+ /** render a search input for filtering datasets shown
+ * (see the search section in the HDA model for implementation of the actual searching)
+ * return will start the search
+ * esc will clear the search
+ * clicking the clear button will clear the search
+ * uses searchInput in ui.js
+ */
+ renderSearchControls : function(){
+ var panel = this;
+ //TODO: needs proper async
+ panel.model.hdas.fetchAllDetails({ silent: true });
+
+ function onSearch( searchFor ){
+ //console.debug( 'onSearch', searchFor, panel );
+ panel.searchFor = searchFor;
+ panel.filters = [ function( hda ){ return hda.matchesAll( panel.searchFor ); } ];
+ panel.trigger( 'search:searching', searchFor, panel );
+ panel.renderHdas();
+ }
+ function onSearchClear(){
+ //console.debug( 'onSearchClear', panel );
+ panel.searchFor = '';
+ panel.filters = [];
+ panel.trigger( 'search:clear', panel );
+ panel.renderHdas();
+ }
+ return searchInput({
+ initialVal : panel.searchFor,
+ name : 'history-search',
+ placeholder : 'search datasets',
+ classes : 'history-search',
+ onsearch : onSearch,
+ onclear : onSearchClear
+ })
+ .addClass( 'history-search-controls' )
+ .css( 'padding', '0px 0px 8px 0px' );
+ },
+
+ /** toggle showing/hiding the search controls (rendering first on the initial show) */
+ toggleSearchControls : function(){
+ var $searchInput = this.$el.find( '.history-search-controls' );
+ if( !$searchInput.size() ){
+ $searchInput = this.renderSearchControls().hide();
+ this.$el.find( '.history-title' ).before( $searchInput );
+ }
+ $searchInput.slideToggle( this.fxSpeed );
+ },
+
// ........................................................................ multi-select of hdas
showSelect : function( speed ){
_.each( this.hdaViews, function( view ){
diff -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 -r a39f452a7f657f3b43dce87d08e143ed6f02867e static/scripts/mvc/ui.js
--- a/static/scripts/mvc/ui.js
+++ b/static/scripts/mvc/ui.js
@@ -466,7 +466,7 @@
};
var faIconButton = function( options ){
- //FFS
+ //TODO: move out of global
options = options || {};
options.tooltipConfig = options.tooltipConfig || { placement: 'bottom' };
@@ -491,4 +491,101 @@
return $button;
};
-//var hideUntilActivated;
+var searchInput = function( options ){
+//TODO: move out of global
+//TODO: consolidate with tool menu functionality, use there
+//TODO: this could and should be merged/oop'd with editableText (the behaviors are mostly the same - but not style)
+ var KEYCODE_ESC = 27,
+ KEYCODE_RETURN = 13,
+ $searchInput = $( '<div/>' ),
+ defaults = {
+ initialVal : '',
+ name : 'search',
+ placeholder : 'search',
+ classes : '',
+ onclear : function(){},
+ onsearch : function( inputVal ){},
+ minSearchLen : 0,
+ escWillClear : true,
+ oninit : function(){}
+ };
+
+ if( jQuery.type( options ) === 'object' ){
+ options = jQuery.extend( true, defaults, options );
+ }
+ //console.debug( options );
+
+ function clearSearchInput( event ){
+ //console.debug( this, 'clear' );
+ var $input = $( this ).parent().children( 'input' );
+ //console.debug( 'input', $input );
+ $input.val( '' );
+ $input.trigger( 'clear:searchInput' );
+ options.onclear();
+ }
+ function search( event, searchTerms ){
+ //console.debug( this, 'searching', searchTerms );
+ $( this ).trigger( 'search:searchInput', searchTerms );
+ options.onsearch( searchTerms );
+ //var $input = $( this ).parent().children( 'input' );
+ //console.debug( 'input', $input );
+ }
+
+ function inputTemplate(){
+ // class search-query is bootstrap 2.3 style that now lives in base.less
+ return [ '<input type="text" name="', options.name, '" placeholder="', options.placeholder, '" ',
+ 'class="search-query ', options.classes, '" ', '/>'
+ ].join( '' );
+ }
+ // the search input that responds to keyboard events and displays the search value
+ function $input(){
+ return $( inputTemplate() ).css({
+ 'width' : '100%',
+ // make space for the clear button
+ 'padding-right' : '24px'
+ })
+ // select all text on a focus
+ .focus( function( event ){
+ $( this ).select();
+ })
+ // attach behaviors to esc, return if desired, search on some min len string
+ .keyup( function( event ){
+ //console.debug( event.which, $( this ).val() )
+ // esc key will clear if
+ if( event.which === KEYCODE_ESC && options.escWillClear ){
+ clearSearchInput.call( this, event );
+
+ } else {
+ var searchTerms = $( this ).val();
+ // return key or the search string len > minSearchLen (if not 0) triggers search
+ if( ( event.which === KEYCODE_RETURN )
+ || ( options.minSearchLen && searchTerms.length >= options.minSearchLen ) ){
+ search.call( this, event, searchTerms );
+ } else if( !searchTerms.length ){
+ clearSearchInput.call( this, event );
+ }
+ }
+ })
+ .val( options.initialVal );
+ }
+
+ // a button for clearing the search bar, placed on the right hand side
+ function clearBtnTemplate(){
+ return '<span class="search-clear fa fa-times-circle"></span>';
+ }
+ function $clearBtn(){
+//TODO: to base.less
+//TODO: hover effects
+ return $( clearBtnTemplate() ).css({
+ position : 'absolute',
+ right : '15px',
+ 'font-size' : '1.4em',
+ 'line-height': '23px',
+ color : 'grey'
+ })
+ .click( function( event ){
+ clearSearchInput.call( this, event );
+ });
+ }
+ return $searchInput.append([ $input(), $clearBtn() ]);
+};
diff -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 -r a39f452a7f657f3b43dce87d08e143ed6f02867e static/scripts/packed/mvc/dataset/hda-model.js
--- a/static/scripts/packed/mvc/dataset/hda-model.js
+++ b/static/scripts/packed/mvc/dataset/hda-model.js
@@ -1,1 +1,1 @@
-define([],function(){var d=Backbone.Model.extend(LoggableMixin).extend({defaults:{history_id:null,model_class:"HistoryDatasetAssociation",hid:0,id:null,name:"(unnamed dataset)",state:"new",deleted:false,visible:true,accessible:true,purged:false,data_type:null,file_size:0,file_ext:"",meta_files:[],misc_blurb:"",misc_info:"",tags:null},urlRoot:galaxy_config.root+"api/histories/",url:function(){return this.urlRoot+this.get("history_id")+"/contents/"+this.get("id")},urls:function(){var i=this.get("id");if(!i){return{}}var h={purge:galaxy_config.root+"datasets/"+i+"/purge_async",display:galaxy_config.root+"datasets/"+i+"/display/?preview=True",edit:galaxy_config.root+"datasets/"+i+"/edit",download:galaxy_config.root+"datasets/"+i+"/display?to_ext="+this.get("file_ext"),report_error:galaxy_config.root+"dataset/errors?id="+i,rerun:galaxy_config.root+"tool_runner/rerun?id="+i,show_params:galaxy_config.root+"datasets/"+i+"/show_params",visualization:galaxy_config.root+"visualization",annotation:{get:galaxy_config.root+"dataset/get_annotation_async?id="+i,set:galaxy_config.root+"dataset/annotate_async?id="+i},meta_download:galaxy_config.root+"dataset/get_metadata_file?hda_id="+i+"&metadata_name="};return h},initialize:function(h){this.log(this+".initialize",this.attributes);this.log("\tparent history_id: "+this.get("history_id"));if(!this.get("accessible")){this.set("state",d.STATES.NOT_VIEWABLE)}this._setUpListeners()},_setUpListeners:function(){this.on("change:state",function(i,h){this.log(this+" has changed state:",i,h);if(this.inReadyState()){this.trigger("state:ready",i,h,this.previous("state"))}})},toJSON:function(){var h=Backbone.Model.prototype.toJSON.call(this);h.misc_info=jQuery.trim(h.misc_info);return h},isDeletedOrPurged:function(){return(this.get("deleted")||this.get("purged"))},isVisible:function(i,j){var h=true;if((!i)&&(this.get("deleted")||this.get("purged"))){h=false}if((!j)&&(!this.get("visible"))){h=false}return h},hidden:function(){return 
!this.get("visible")},inReadyState:function(){var h=_.contains(d.READY_STATES,this.get("state"));return(this.isDeletedOrPurged()||h)},hasDetails:function(){return _.has(this.attributes,"genome_build")},hasData:function(){return(this.get("file_size")>0)},"delete":function c(h){return this.save({deleted:true},h)},undelete:function a(h){return this.save({deleted:false},h)},hide:function b(h){return this.save({visible:false},h)},unhide:function g(h){return this.save({visible:true},h)},purge:function f(h){h=h||{};h.url=galaxy_config.root+"datasets/"+this.get("id")+"/purge_async";var i=this,j=jQuery.ajax(h);j.done(function(m,k,l){i.set("purged",true)});j.fail(function(o,k,n){var l=_l("Unable to purge this dataset");var m=("Removal of datasets by users is not allowed in this Galaxy instance");if(o.responseJSON&&o.responseJSON.error){l=o.responseJSON.error}else{if(o.responseText.indexOf(m)!==-1){l=m}}o.responseText=l;i.trigger("error",i,o,h,_l(l),{error:l})})},searchKeys:["name","file_ext","genome_build","misc_blurb","misc_info","annotation","tags"],search:function(h){var i=this;h=h.toLowerCase();return _.filter(this.searchKeys,function(k){var j=i.get(k);return(_.isString(j)&&j.toLowerCase().indexOf(h)!==-1)})},matches:function(h){return !!this.search(h).length},toString:function(){var h=this.get("id")||"";if(this.get("name")){h=this.get("hid")+' :"'+this.get("name")+'",'+h}return"HDA("+h+")"}});d.STATES={UPLOAD:"upload",QUEUED:"queued",RUNNING:"running",SETTING_METADATA:"setting_metadata",NEW:"new",EMPTY:"empty",OK:"ok",PAUSED:"paused",FAILED_METADATA:"failed_metadata",NOT_VIEWABLE:"noPermission",DISCARDED:"discarded",ERROR:"error"};d.READY_STATES=[d.STATES.NEW,d.STATES.OK,d.STATES.EMPTY,d.STATES.PAUSED,d.STATES.FAILED_METADATA,d.STATES.NOT_VIEWABLE,d.STATES.DISCARDED,d.STATES.ERROR];d.NOT_READY_STATES=[d.STATES.UPLOAD,d.STATES.QUEUED,d.STATES.RUNNING,d.STATES.SETTING_METADATA];var 
e=Backbone.Collection.extend(LoggableMixin).extend({model:d,urlRoot:galaxy_config.root+"api/histories",url:function(){return this.urlRoot+"/"+this.historyId+"/contents"},initialize:function(i,h){h=h||{};this.historyId=h.historyId},ids:function(){return this.map(function(h){return h.id})},notReady:function(){return this.filter(function(h){return !h.inReadyState()})},running:function(){var h=[];this.each(function(i){if(!i.inReadyState()){h.push(i.get("id"))}});return h},getByHid:function(h){return _.first(this.filter(function(i){return i.get("hid")===h}))},getVisible:function(h,i){return this.filter(function(j){return j.isVisible(h,i)})},fetchAllDetails:function(i){i=i||{};var h={details:"all"};i.data=(i.data)?(_.extend(i.data,h)):(h);return this.fetch(i)},matches:function(h){return this.filter(function(i){return i.matches(h)})},set:function(j,h){var i=this;j=_.map(j,function(l){var m=i.get(l.id);if(!m){return l}var k=m.toJSON();_.extend(k,l);return k});Backbone.Collection.prototype.set.call(this,j,h)},toString:function(){return("HDACollection()")}});return{HistoryDatasetAssociation:d,HDACollection:e}});
\ No newline at end of file
+define([],function(){var d=Backbone.Model.extend(LoggableMixin).extend({defaults:{history_id:null,model_class:"HistoryDatasetAssociation",hid:0,id:null,name:"(unnamed dataset)",state:"new",deleted:false,visible:true,accessible:true,purged:false,data_type:null,file_size:0,file_ext:"",meta_files:[],misc_blurb:"",misc_info:"",tags:null},urlRoot:galaxy_config.root+"api/histories/",url:function(){return this.urlRoot+this.get("history_id")+"/contents/"+this.get("id")},urls:function(){var i=this.get("id");if(!i){return{}}var h={purge:galaxy_config.root+"datasets/"+i+"/purge_async",display:galaxy_config.root+"datasets/"+i+"/display/?preview=True",edit:galaxy_config.root+"datasets/"+i+"/edit",download:galaxy_config.root+"datasets/"+i+"/display?to_ext="+this.get("file_ext"),report_error:galaxy_config.root+"dataset/errors?id="+i,rerun:galaxy_config.root+"tool_runner/rerun?id="+i,show_params:galaxy_config.root+"datasets/"+i+"/show_params",visualization:galaxy_config.root+"visualization",annotation:{get:galaxy_config.root+"dataset/get_annotation_async?id="+i,set:galaxy_config.root+"dataset/annotate_async?id="+i},meta_download:galaxy_config.root+"dataset/get_metadata_file?hda_id="+i+"&metadata_name="};return h},initialize:function(h){this.log(this+".initialize",this.attributes);this.log("\tparent history_id: "+this.get("history_id"));if(!this.get("accessible")){this.set("state",d.STATES.NOT_VIEWABLE)}this._setUpListeners()},_setUpListeners:function(){this.on("change:state",function(i,h){this.log(this+" has changed state:",i,h);if(this.inReadyState()){this.trigger("state:ready",i,h,this.previous("state"))}})},toJSON:function(){var h=Backbone.Model.prototype.toJSON.call(this);h.misc_info=jQuery.trim(h.misc_info);return h},isDeletedOrPurged:function(){return(this.get("deleted")||this.get("purged"))},isVisible:function(i,j){var h=true;if((!i)&&(this.get("deleted")||this.get("purged"))){h=false}if((!j)&&(!this.get("visible"))){h=false}return h},hidden:function(){return 
!this.get("visible")},inReadyState:function(){var h=_.contains(d.READY_STATES,this.get("state"));return(this.isDeletedOrPurged()||h)},hasDetails:function(){return _.has(this.attributes,"genome_build")},hasData:function(){return(this.get("file_size")>0)},"delete":function c(h){return this.save({deleted:true},h)},undelete:function a(h){return this.save({deleted:false},h)},hide:function b(h){return this.save({visible:false},h)},unhide:function g(h){return this.save({visible:true},h)},purge:function f(h){h=h||{};h.url=galaxy_config.root+"datasets/"+this.get("id")+"/purge_async";var i=this,j=jQuery.ajax(h);j.done(function(m,k,l){i.set("purged",true)});j.fail(function(o,k,n){var l=_l("Unable to purge this dataset");var m=("Removal of datasets by users is not allowed in this Galaxy instance");if(o.responseJSON&&o.responseJSON.error){l=o.responseJSON.error}else{if(o.responseText.indexOf(m)!==-1){l=m}}o.responseText=l;i.trigger("error",i,o,h,_l(l),{error:l})})},searchAttributes:["name","file_ext","genome_build","misc_blurb","misc_info","annotation","tags"],searchAliases:{title:"name",format:"file_ext",database:"genome_build",blurb:"misc_blurb",description:"misc_blurb",info:"misc_info",tag:"tags"},searchAttribute:function(j,h){var i=this.get(j);if(!h||(i===undefined||i===null)){return false}if(_.isArray(i)){return this._searchArrayAttribute(i,h)}return(i.toString().toLowerCase().indexOf(h.toLowerCase())!==-1)},_searchArrayAttribute:function(i,h){h=h.toLowerCase();return _.any(i,function(j){return(j.toString().toLowerCase().indexOf(h.toLowerCase())!==-1)})},search:function(h){var i=this;return _.filter(this.searchAttributes,function(j){return i.searchAttribute(j,h)})},matches:function(i){var k="=",h=i.split(k);if(h.length>=2){var j=h[0];j=this.searchAliases[j]||j;return this.searchAttribute(j,h[1])}return !!this.search(i).length},matchesAll:function(i){var h=this;i=i.match(/(".*"|\w*=".*"|\S*)/g).filter(function(j){return !!j});return _.all(i,function(j){return 
h.matches(j)})},toString:function(){var h=this.get("id")||"";if(this.get("name")){h=this.get("hid")+' :"'+this.get("name")+'",'+h}return"HDA("+h+")"}});d.STATES={UPLOAD:"upload",QUEUED:"queued",RUNNING:"running",SETTING_METADATA:"setting_metadata",NEW:"new",EMPTY:"empty",OK:"ok",PAUSED:"paused",FAILED_METADATA:"failed_metadata",NOT_VIEWABLE:"noPermission",DISCARDED:"discarded",ERROR:"error"};d.READY_STATES=[d.STATES.NEW,d.STATES.OK,d.STATES.EMPTY,d.STATES.PAUSED,d.STATES.FAILED_METADATA,d.STATES.NOT_VIEWABLE,d.STATES.DISCARDED,d.STATES.ERROR];d.NOT_READY_STATES=[d.STATES.UPLOAD,d.STATES.QUEUED,d.STATES.RUNNING,d.STATES.SETTING_METADATA];var e=Backbone.Collection.extend(LoggableMixin).extend({model:d,urlRoot:galaxy_config.root+"api/histories",url:function(){return this.urlRoot+"/"+this.historyId+"/contents"},initialize:function(i,h){h=h||{};this.historyId=h.historyId},ids:function(){return this.map(function(h){return h.id})},notReady:function(){return this.filter(function(h){return !h.inReadyState()})},running:function(){var h=[];this.each(function(i){if(!i.inReadyState()){h.push(i.get("id"))}});return h},getByHid:function(h){return _.first(this.filter(function(i){return i.get("hid")===h}))},getVisible:function(h,k,j){j=j||[];var i=new e(this.filter(function(l){return l.isVisible(h,k)}));_.each(j,function(l){if(!_.isFunction(l)){return}i=new e(i.filter(l))});return i},fetchAllDetails:function(i){i=i||{};var h={details:"all"};i.data=(i.data)?(_.extend(i.data,h)):(h);return this.fetch(i)},matches:function(h){return this.filter(function(i){return i.matches(h)})},set:function(j,h){var i=this;j=_.map(j,function(l){var m=i.get(l.id);if(!m){return l}var k=m.toJSON();_.extend(k,l);return k});Backbone.Collection.prototype.set.call(this,j,h)},toString:function(){return(["HDACollection(",[this.historyId,this.length].join(),")"].join(""))}});return{HistoryDatasetAssociation:d,HDACollection:e}});
\ No newline at end of file
diff -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 -r a39f452a7f657f3b43dce87d08e143ed6f02867e static/scripts/packed/mvc/history/history-panel.js
--- a/static/scripts/packed/mvc/history/history-panel.js
+++ b/static/scripts/packed/mvc/history/history-panel.js
@@ -1,1 +1,1 @@
-define(["mvc/history/history-model","mvc/dataset/hda-base","mvc/dataset/hda-edit"],function(d,b,a){var c=Backbone.View.extend(LoggableMixin).extend({HDAView:a.HDAEditView,tagName:"div",className:"history-panel",fxSpeed:"fast",datasetsSelector:".datasets-list",emptyMsgSelector:".empty-history-message",msgsSelector:".message-container",initialize:function(e){e=e||{};if(e.logger){this.logger=e.logger}this.log(this+".initialize:",e);this._setUpListeners();this.hdaViews={};this.indicator=new LoadingIndicator(this.$el);if(this.model){this._setUpWebStorage(e.initiallyExpanded,e.show_deleted,e.show_hidden);this._setUpModelEventHandlers()}if(e.onready){e.onready.call(this)}},_setUpListeners:function(){this.on("error",function(f,i,e,h,g){this.errorHandler(f,i,e,h,g)});this.on("loading-history",function(){this.showLoadingIndicator("loading history...")});this.on("loading-done",function(){this.hideLoadingIndicator()});this.once("rendered",function(){this.trigger("rendered:initial",this);return false});this.on("switched-history current-history new-history",function(){if(_.isEmpty(this.hdaViews)){this.trigger("empty-history",this)}});if(this.logger){this.on("all",function(e){this.log(this+"",arguments)},this)}},errorHandler:function(g,j,f,i,h){var e=this._parseErrorMessage(g,j,f,i,h);if(j&&j.status===0&&j.readyState===0){}else{if(j&&j.status===502){}else{if(!this.$el.find(this.msgsSelector).is(":visible")){this.once("rendered",function(){this.displayMessage("error",e.message,e.details)})}else{this.displayMessage("error",e.message,e.details)}}}},_parseErrorMessage:function(h,l,g,k,j){var f=Galaxy.currUser,e={message:this._bePolite(k),details:{user:(f instanceof User)?(f.toJSON()):(f+""),source:(h instanceof Backbone.Model)?(h.toJSON()):(h+""),xhr:l,options:(l)?(_.omit(g,"xhr")):(g)}};_.extend(e.details,j||{});if(l&&_.isFunction(l.getAllResponseHeaders)){var i=l.getAllResponseHeaders();i=_.compact(i.split("\n"));i=_.map(i,function(m){return m.split(": 
")});e.details.xhr.responseHeaders=_.object(i)}return e},_bePolite:function(e){e=e||_l("An error occurred while getting updates from the server");return e+". "+_l("Please contact a Galaxy administrator if the problem persists.")},loadCurrentHistory:function(f){var e=this;return this.loadHistoryWithHDADetails("current",f).then(function(h,g){e.trigger("current-history",e)})},switchToHistory:function(h,g){var e=this,f=function(){return jQuery.post(galaxy_config.root+"api/histories/"+h+"/set_as_current")};return this.loadHistoryWithHDADetails(h,g,f).then(function(j,i){e.trigger("switched-history",e)})},createNewHistory:function(g){if(!Galaxy||!Galaxy.currUser||Galaxy.currUser.isAnonymous()){this.displayMessage("error",_l("You must be logged in to create histories"));return $.when()}var e=this,f=function(){return jQuery.post(galaxy_config.root+"api/histories",{current:true})};return this.loadHistory(undefined,g,f).then(function(i,h){e.trigger("new-history",e)})},loadHistoryWithHDADetails:function(h,g,f,j){var e=this,i=function(k){return e.getExpandedHdaIds(k.id)};return this.loadHistory(h,g,f,j,i)},loadHistory:function(h,g,f,k,i){this.trigger("loading-history",this);g=g||{};var e=this;var j=d.History.getHistoryData(h,{historyFn:f,hdaFn:k,hdaDetailIds:g.initiallyExpanded||i});return this._loadHistoryFromXHR(j,g).fail(function(n,l,m){e.trigger("error",e,n,g,_l("An error was encountered while "+l),{historyId:h,history:m||{}})}).always(function(){e.trigger("loading-done",e)})},_loadHistoryFromXHR:function(g,f){var e=this;g.then(function(h,i){e.setModel(h,i,f)});g.fail(function(i,h){e.render()});return g},setModel:function(g,e,f){f=f||{};if(this.model){this.model.clearUpdateTimeout();this.stopListening(this.model);this.stopListening(this.model.hdas)}this.hdaViews={};if(Galaxy&&Galaxy.currUser){g.user=Galaxy.currUser.toJSON()}this.model=new 
d.History(g,e,f);this._setUpWebStorage(f.initiallyExpanded,f.show_deleted,f.show_hidden);this._setUpModelEventHandlers();this.trigger("new-model",this);this.render();return this},refreshHdas:function(f,e){if(this.model){return this.model.refresh(f,e)}return $.when()},_setUpWebStorage:function(f,e,g){this.storage=new PersistentStorage(this._getStorageKey(this.model.get("id")),{expandedHdas:{},show_deleted:false,show_hidden:false});this.log(this+" (prev) storage:",JSON.stringify(this.storage.get(),null,2));if(f){this.storage.set("exandedHdas",f)}if((e===true)||(e===false)){this.storage.set("show_deleted",e)}if((g===true)||(g===false)){this.storage.set("show_hidden",g)}this.show_deleted=this.storage.get("show_deleted");this.show_hidden=this.storage.get("show_hidden");this.trigger("new-storage",this.storage,this);this.log(this+" (init'd) storage:",this.storage.get())},_getStorageKey:function(e){if(!e){throw new Error("_getStorageKey needs valid id: "+e)}return("history:"+e)},clearWebStorage:function(){for(var e in sessionStorage){if(e.indexOf("history:")===0){sessionStorage.removeItem(e)}}},getStoredOptions:function(f){if(!f||f==="current"){return(this.storage)?(this.storage.get()):({})}var e=sessionStorage.getItem(this._getStorageKey(f));return(e===null)?({}):(JSON.parse(e))},getExpandedHdaIds:function(e){var f=this.getStoredOptions(e).expandedHdas;return((_.isEmpty(f))?([]):(_.keys(f)))},_setUpModelEventHandlers:function(){this.model.on("error 
error:hdas",function(f,h,e,g){this.errorHandler(f,h,e,g)},this);this.model.on("change:nice_size",this.updateHistoryDiskSize,this);if(Galaxy&&Galaxy.quotaMeter){this.listenTo(this.model,"change:nice_size",function(){Galaxy.quotaMeter.update()})}this.model.hdas.on("add",this.addHdaView,this);this.model.hdas.on("change:deleted",this.handleHdaDeletionChange,this);this.model.hdas.on("change:visible",this.handleHdaVisibleChange,this);this.model.hdas.on("change:purged",function(e){this.model.fetch()},this);this.model.hdas.on("state:ready",function(f,g,e){if((!f.get("visible"))&&(!this.storage.get("show_hidden"))){this.removeHdaView(this.hdaViews[f.id])}},this)},addHdaView:function(h){this.log("add."+this,h);var f=this;if(!h.isVisible(this.storage.get("show_deleted"),this.storage.get("show_hidden"))){return}$({}).queue([function g(j){var i=f.$el.find(f.emptyMsgSelector);if(i.is(":visible")){i.fadeOut(f.fxSpeed,j)}else{j()}},function e(j){f.scrollToTop();var i=f.$el.find(f.datasetsSelector);f.createHdaView(h).$el.hide().prependTo(i).slideDown(f.fxSpeed)}])},createHdaView:function(g){var f=g.get("id"),e=this.storage.get("expandedHdas").get(f),h=new this.HDAView({model:g,expanded:e,hasUser:this.model.hasUser(),logger:this.logger});this._setUpHdaListeners(h);this.hdaViews[f]=h;return h.render()},_setUpHdaListeners:function(f){var e=this;f.on("body-expanded",function(g){e.storage.get("expandedHdas").set(g,true)});f.on("body-collapsed",function(g){e.storage.get("expandedHdas").deleteKey(g)});f.on("error",function(h,j,g,i){e.errorHandler(h,j,g,i)})},handleHdaDeletionChange:function(e){if(e.get("deleted")&&!this.storage.get("show_deleted")){this.removeHdaView(this.hdaViews[e.id])}},handleHdaVisibleChange:function(e){if(e.hidden()&&!this.storage.get("show_hidden")){this.removeHdaView(this.hdaViews[e.id])}},removeHdaView:function(f){if(!f){return}var e=this;f.$el.fadeOut(e.fxSpeed,function(){f.off();f.remove();delete 
e.hdaViews[f.model.id];if(_.isEmpty(e.hdaViews)){e.$el.find(e.emptyMsgSelector).fadeIn(e.fxSpeed,function(){e.trigger("empty-history",e)})}})},render:function(g){var e=this,f;if(this.model){f=this.renderModel()}else{f=this.renderWithoutModel()}$(e).queue("fx",[function(h){if(e.$el.is(":visible")){e.$el.fadeOut(e.fxSpeed,h)}else{h()}},function(h){e.$el.empty();if(f){e.$el.append(f.children())}e.$el.fadeIn(e.fxSpeed,h)},function(h){if(g){g.call(this)}e.trigger("rendered",this)}]);return this},renderWithoutModel:function(){var e=$("<div/>"),f=$("<div/>").addClass("message-container").css({"margin-left":"4px","margin-right":"4px"});return e.append(f)},renderModel:function(){var e=$("<div/>");if(!Galaxy||!Galaxy.currUser||Galaxy.currUser.isAnonymous()){e.append(c.templates.anonHistoryPanel(this.model.toJSON()))}else{e.append(c.templates.historyPanel(this.model.toJSON()));this._renderTags(e);this._renderAnnotation(e)}this._setUpBehaviours(e);if(!this.model.hdas.length||!this.renderHdas(e.find(this.datasetsSelector))){e.find(this.emptyMsgSelector).show()}return e},_renderTags:function(e){this.tagsEditor=new TagsEditor({model:this.model,el:e.find(".history-controls .tags-display"),onshowFirstTime:function(){this.render()},$activator:faIconButton({title:_l("Edit history tags"),classes:"history-tag-btn",faIcon:"fa-tags"}).appendTo(e.find(".history-secondary-actions"))})},_renderAnnotation:function(e){this.annotationEditor=new AnnotationEditor({model:this.model,el:e.find(".history-controls .annotation-display"),onshowFirstTime:function(){this.render()},$activator:faIconButton({title:_l("Edit history tags"),classes:"history-annotate-btn",faIcon:"fa-comment"}).appendTo(e.find(".history-secondary-actions"))})},_setUpBehaviours:function(e){e=e||this.$el;e.find("[title]").tooltip({placement:"bottom"});if(!this.model||!Galaxy.currUser||Galaxy.currUser.isAnonymous()){return}var 
f=this;e.find(".history-name").make_text_editable({on_finish:function(g){e.find(".history-name").text(g);f.model.save({name:g}).fail(function(){e.find(".history-name").text(f.model.previous("name"))})}})},renderHdas:function(f){this.hdaViews={};var e=this,g=this.model.hdas.getVisible(this.storage.get("show_deleted"),this.storage.get("show_hidden"));_.each(g,function(h){f.prepend(e.createHdaView(h).$el)});return g.length},events:{"click .message-container":"clearMessages"},updateHistoryDiskSize:function(){this.$el.find(".history-size").text(this.model.get("nice_size"))},collapseAllHdaBodies:function(){_.each(this.hdaViews,function(e){e.toggleBodyVisibility(null,false)});this.storage.set("expandedHdas",{})},toggleShowDeleted:function(){this.storage.set("show_deleted",!this.storage.get("show_deleted"));this.render();return this.storage.get("show_deleted")},toggleShowHidden:function(){this.storage.set("show_hidden",!this.storage.get("show_hidden"));this.render();return this.storage.get("show_hidden")},showSelect:function(e){_.each(this.hdaViews,function(f){f.showSelect(e)})},hideSelect:function(e){_.each(this.hdaViews,function(f){f.hideSelect(e)})},getSelectedHdaViews:function(){return _.filter(this.hdaViews,function(e){return e.selected})},showLoadingIndicator:function(f,e,g){e=(e!==undefined)?(e):(this.fxSpeed);if(!this.indicator){this.indicator=new LoadingIndicator(this.$el,this.$el.parent())}if(!this.$el.is(":visible")){this.indicator.show(0,g)}else{this.$el.fadeOut(e);this.indicator.show(f,e,g)}},hideLoadingIndicator:function(e,f){e=(e!==undefined)?(e):(this.fxSpeed);if(this.indicator){this.indicator.hide(e,f)}},displayMessage:function(j,k,i){var g=this;this.scrollToTop();var h=this.$el.find(this.msgsSelector),e=$("<div/>").addClass(j+"message").html(k);if(!_.isEmpty(i)){var f=$('<a href="javascript:void(0)">Details</a>').click(function(){Galaxy.modal.show(g.messageToModalOptions(j,k,i));return false});e.append(" ",f)}return 
h.html(e)},messageToModalOptions:function(i,k,h){var e=this,j=$("<div/>"),g={title:"Details"};function f(l){l=_.omit(l,_.functions(l));return["<table>",_.map(l,function(n,m){n=(_.isObject(n))?(f(n)):(n);return'<tr><td style="vertical-align: top; color: grey">'+m+'</td><td style="padding-left: 8px">'+n+"</td></tr>"}).join(""),"</table>"].join("")}if(_.isObject(h)){g.body=j.append(f(h))}else{g.body=j.html(h)}g.buttons={Ok:function(){Galaxy.modal.hide();e.clearMessages()}};return g},clearMessages:function(){var e=this.$el.find(this.msgsSelector);e.empty()},scrollPosition:function(){return this.$el.parent().scrollTop()},scrollTo:function(e){this.$el.parent().scrollTop(e)},scrollToTop:function(){this.$el.parent().scrollTop(0);return this},scrollIntoView:function(f,g){if(!g){this.$el.parent().parent().scrollTop(f);return this}var e=window,h=this.$el.parent().parent(),j=$(e).innerHeight(),i=(j/2)-(g/2);$(h).scrollTop(f-i);return this},scrollToId:function(f){if((!f)||(!this.hdaViews[f])){return this}var e=this.hdaViews[f].$el;this.scrollIntoView(e.offset().top,e.outerHeight());return this},scrollToHid:function(e){var f=this.model.hdas.getByHid(e);if(!f){return this}return this.scrollToId(f.id)},connectToQuotaMeter:function(e){if(!e){return this}this.listenTo(e,"quota:over",this.showQuotaMessage);this.listenTo(e,"quota:under",this.hideQuotaMessage);this.on("rendered rendered:initial",function(){if(e&&e.isOverQuota()){this.showQuotaMessage()}});return this},showQuotaMessage:function(){var e=this.$el.find(".quota-message");if(e.is(":hidden")){e.slideDown(this.fxSpeed)}},hideQuotaMessage:function(){var e=this.$el.find(".quota-message");if(!e.is(":hidden")){e.slideUp(this.fxSpeed)}},connectToOptionsMenu:function(e){if(!e){return this}this.on("new-storage",function(g,f){if(e&&g){e.findItemByHtml(_l("Include Deleted Datasets")).checked=g.get("show_deleted");e.findItemByHtml(_l("Include Hidden Datasets")).checked=g.get("show_hidden")}});return 
this},toString:function(){return"HistoryPanel("+((this.model)?(this.model.get("name")):(""))+")"}});c.templates={historyPanel:Handlebars.templates["template-history-historyPanel"],anonHistoryPanel:Handlebars.templates["template-history-historyPanel-anon"]};return{HistoryPanel:c}});
\ No newline at end of file
+define(["mvc/history/history-model","mvc/dataset/hda-base","mvc/dataset/hda-edit"],function(d,b,a){var c=Backbone.View.extend(LoggableMixin).extend({HDAView:a.HDAEditView,tagName:"div",className:"history-panel",fxSpeed:"fast",datasetsSelector:".datasets-list",emptyMsgSelector:".empty-history-message",msgsSelector:".message-container",initialize:function(e){e=e||{};if(e.logger){this.logger=e.logger}this.log(this+".initialize:",e);this._setUpListeners();this.hdaViews={};this.indicator=new LoadingIndicator(this.$el);this.filters=[];if(this.model){this._setUpWebStorage(e.initiallyExpanded,e.show_deleted,e.show_hidden);this._setUpModelEventHandlers()}if(e.onready){e.onready.call(this)}},_setUpListeners:function(){this.on("error",function(f,i,e,h,g){this.errorHandler(f,i,e,h,g)});this.on("loading-history",function(){this.showLoadingIndicator("loading history...")});this.on("loading-done",function(){this.hideLoadingIndicator()});this.once("rendered",function(){this.trigger("rendered:initial",this);return false});this.on("switched-history current-history new-history",function(){if(_.isEmpty(this.hdaViews)){this.trigger("empty-history",this)}});if(this.logger){this.on("all",function(e){this.log(this+"",arguments)},this)}},errorHandler:function(g,j,f,i,h){var e=this._parseErrorMessage(g,j,f,i,h);if(j&&j.status===0&&j.readyState===0){}else{if(j&&j.status===502){}else{if(!this.$el.find(this.msgsSelector).is(":visible")){this.once("rendered",function(){this.displayMessage("error",e.message,e.details)})}else{this.displayMessage("error",e.message,e.details)}}}},_parseErrorMessage:function(h,l,g,k,j){var f=Galaxy.currUser,e={message:this._bePolite(k),details:{user:(f instanceof User)?(f.toJSON()):(f+""),source:(h instanceof Backbone.Model)?(h.toJSON()):(h+""),xhr:l,options:(l)?(_.omit(g,"xhr")):(g)}};_.extend(e.details,j||{});if(l&&_.isFunction(l.getAllResponseHeaders)){var i=l.getAllResponseHeaders();i=_.compact(i.split("\n"));i=_.map(i,function(m){return m.split(": 
")});e.details.xhr.responseHeaders=_.object(i)}return e},_bePolite:function(e){e=e||_l("An error occurred while getting updates from the server");return e+". "+_l("Please contact a Galaxy administrator if the problem persists.")},loadCurrentHistory:function(f){var e=this;return this.loadHistoryWithHDADetails("current",f).then(function(h,g){e.trigger("current-history",e)})},switchToHistory:function(h,g){var e=this,f=function(){return jQuery.post(galaxy_config.root+"api/histories/"+h+"/set_as_current")};return this.loadHistoryWithHDADetails(h,g,f).then(function(j,i){e.trigger("switched-history",e)})},createNewHistory:function(g){if(!Galaxy||!Galaxy.currUser||Galaxy.currUser.isAnonymous()){this.displayMessage("error",_l("You must be logged in to create histories"));return $.when()}var e=this,f=function(){return jQuery.post(galaxy_config.root+"api/histories",{current:true})};return this.loadHistory(undefined,g,f).then(function(i,h){e.trigger("new-history",e)})},loadHistoryWithHDADetails:function(h,g,f,j){var e=this,i=function(k){return e.getExpandedHdaIds(k.id)};return this.loadHistory(h,g,f,j,i)},loadHistory:function(h,g,f,k,i){this.trigger("loading-history",this);g=g||{};var e=this;var j=d.History.getHistoryData(h,{historyFn:f,hdaFn:k,hdaDetailIds:g.initiallyExpanded||i});return this._loadHistoryFromXHR(j,g).fail(function(n,l,m){e.trigger("error",e,n,g,_l("An error was encountered while "+l),{historyId:h,history:m||{}})}).always(function(){e.trigger("loading-done",e)})},_loadHistoryFromXHR:function(g,f){var e=this;g.then(function(h,i){e.setModel(h,i,f)});g.fail(function(i,h){e.render()});return g},setModel:function(g,e,f){f=f||{};if(this.model){this.model.clearUpdateTimeout();this.stopListening(this.model);this.stopListening(this.model.hdas)}this.hdaViews={};if(Galaxy&&Galaxy.currUser){g.user=Galaxy.currUser.toJSON()}this.model=new 
d.History(g,e,f);this._setUpWebStorage(f.initiallyExpanded,f.show_deleted,f.show_hidden);this._setUpModelEventHandlers();this.trigger("new-model",this);this.render();return this},refreshHdas:function(f,e){if(this.model){return this.model.refresh(f,e)}return $.when()},_setUpWebStorage:function(f,e,g){this.storage=new PersistentStorage(this._getStorageKey(this.model.get("id")),{expandedHdas:{},show_deleted:false,show_hidden:false});this.log(this+" (prev) storage:",JSON.stringify(this.storage.get(),null,2));if(f){this.storage.set("exandedHdas",f)}if((e===true)||(e===false)){this.storage.set("show_deleted",e)}if((g===true)||(g===false)){this.storage.set("show_hidden",g)}this.show_deleted=this.storage.get("show_deleted");this.show_hidden=this.storage.get("show_hidden");this.trigger("new-storage",this.storage,this);this.log(this+" (init'd) storage:",this.storage.get())},_getStorageKey:function(e){if(!e){throw new Error("_getStorageKey needs valid id: "+e)}return("history:"+e)},clearWebStorage:function(){for(var e in sessionStorage){if(e.indexOf("history:")===0){sessionStorage.removeItem(e)}}},getStoredOptions:function(f){if(!f||f==="current"){return(this.storage)?(this.storage.get()):({})}var e=sessionStorage.getItem(this._getStorageKey(f));return(e===null)?({}):(JSON.parse(e))},getExpandedHdaIds:function(e){var f=this.getStoredOptions(e).expandedHdas;return((_.isEmpty(f))?([]):(_.keys(f)))},_setUpModelEventHandlers:function(){this.model.on("error 
error:hdas",function(f,h,e,g){this.errorHandler(f,h,e,g)},this);this.model.on("change:nice_size",this.updateHistoryDiskSize,this);if(Galaxy&&Galaxy.quotaMeter){this.listenTo(this.model,"change:nice_size",function(){Galaxy.quotaMeter.update()})}this.model.hdas.on("add",this.addHdaView,this);this.model.hdas.on("change:deleted",this.handleHdaDeletionChange,this);this.model.hdas.on("change:visible",this.handleHdaVisibleChange,this);this.model.hdas.on("change:purged",function(e){this.model.fetch()},this);this.model.hdas.on("state:ready",function(f,g,e){if((!f.get("visible"))&&(!this.storage.get("show_hidden"))){this.removeHdaView(this.hdaViews[f.id])}},this)},addHdaView:function(h){this.log("add."+this,h);var f=this;if(!h.isVisible(this.storage.get("show_deleted"),this.storage.get("show_hidden"))){return}$({}).queue([function g(j){var i=f.$el.find(f.emptyMsgSelector);if(i.is(":visible")){i.fadeOut(f.fxSpeed,j)}else{j()}},function e(j){f.scrollToTop();var i=f.$el.find(f.datasetsSelector);f.createHdaView(h).$el.hide().prependTo(i).slideDown(f.fxSpeed)}])},createHdaView:function(g){var f=g.get("id"),e=this.storage.get("expandedHdas").get(f),h=new this.HDAView({model:g,expanded:e,hasUser:this.model.hasUser(),logger:this.logger});this._setUpHdaListeners(h);this.hdaViews[f]=h;return h.render()},_setUpHdaListeners:function(f){var e=this;f.on("body-expanded",function(g){e.storage.get("expandedHdas").set(g,true)});f.on("body-collapsed",function(g){e.storage.get("expandedHdas").deleteKey(g)});f.on("error",function(h,j,g,i){e.errorHandler(h,j,g,i)})},handleHdaDeletionChange:function(e){if(e.get("deleted")&&!this.storage.get("show_deleted")){this.removeHdaView(this.hdaViews[e.id])}},handleHdaVisibleChange:function(e){if(e.hidden()&&!this.storage.get("show_hidden")){this.removeHdaView(this.hdaViews[e.id])}},removeHdaView:function(f){if(!f){return}var e=this;f.$el.fadeOut(e.fxSpeed,function(){f.off();f.remove();delete 
e.hdaViews[f.model.id];if(_.isEmpty(e.hdaViews)){e.$el.find(e.emptyMsgSelector).fadeIn(e.fxSpeed,function(){e.trigger("empty-history",e)})}})},render:function(g,h){g=(g===undefined)?(this.fxSpeed):(g);var e=this,f;if(this.model){f=this.renderModel()}else{f=this.renderWithoutModel()}$(e).queue("fx",[function(i){if(g&&e.$el.is(":visible")){e.$el.fadeOut(g,i)}else{i()}},function(i){e.$el.empty();if(f){e.$el.append(f.children())}i()},function(i){if(g&&!e.$el.is(":visible")){e.$el.fadeIn(g,i)}else{i()}},function(i){if(h){h.call(this)}e.trigger("rendered",this);i()}]);return this},renderWithoutModel:function(){var e=$("<div/>"),f=$("<div/>").addClass("message-container").css({"margin-left":"4px","margin-right":"4px"});return e.append(f)},renderModel:function(){var e=$("<div/>");if(!Galaxy||!Galaxy.currUser||Galaxy.currUser.isAnonymous()){e.append(c.templates.anonHistoryPanel(this.model.toJSON()))}else{e.append(c.templates.historyPanel(this.model.toJSON()));this._renderTags(e);this._renderAnnotation(e)}this._setUpBehaviours(e);this.renderHdas(e);return e},_renderTags:function(e){this.tagsEditor=new TagsEditor({model:this.model,el:e.find(".history-controls .tags-display"),onshowFirstTime:function(){this.render()},$activator:faIconButton({title:_l("Edit history tags"),classes:"history-tag-btn",faIcon:"fa-tags"}).appendTo(e.find(".history-secondary-actions"))})},_renderAnnotation:function(e){this.annotationEditor=new AnnotationEditor({model:this.model,el:e.find(".history-controls .annotation-display"),onshowFirstTime:function(){this.render()},$activator:faIconButton({title:_l("Edit history tags"),classes:"history-annotate-btn",faIcon:"fa-comment"}).appendTo(e.find(".history-secondary-actions"))})},_setUpBehaviours:function(e){e=e||this.$el;e.find("[title]").tooltip({placement:"bottom"});if(!this.model||!Galaxy.currUser||Galaxy.currUser.isAnonymous()){return}var 
f=this;e.find(".history-name").make_text_editable({on_finish:function(g){e.find(".history-name").text(g);f.model.save({name:g}).fail(function(){e.find(".history-name").text(f.model.previous("name"))})}})},renderHdas:function(g){g=g||this.$el;this.hdaViews={};var f=this,e=g.find(this.datasetsSelector),h=this.model.hdas.getVisible(this.storage.get("show_deleted"),this.storage.get("show_hidden"),this.filters);e.empty();if(h.length){h.each(function(i){e.prepend(f.createHdaView(i).$el)});g.find(this.emptyMsgSelector).hide()}else{g.find(this.emptyMsgSelector).show()}return this.hdaViews},events:{"click .message-container":"clearMessages"},updateHistoryDiskSize:function(){this.$el.find(".history-size").text(this.model.get("nice_size"))},collapseAllHdaBodies:function(){_.each(this.hdaViews,function(e){e.toggleBodyVisibility(null,false)});this.storage.set("expandedHdas",{})},toggleShowDeleted:function(){this.storage.set("show_deleted",!this.storage.get("show_deleted"));this.render();return this.storage.get("show_deleted")},toggleShowHidden:function(){this.storage.set("show_hidden",!this.storage.get("show_hidden"));this.render();return this.storage.get("show_hidden")},renderSearchControls:function(){var e=this;e.model.hdas.fetchAllDetails({silent:true});function g(h){e.searchFor=h;e.filters=[function(i){return i.matchesAll(e.searchFor)}];e.trigger("search:searching",h,e);e.renderHdas()}function f(){e.searchFor="";e.filters=[];e.trigger("search:clear",e);e.renderHdas()}return searchInput({initialVal:e.searchFor,name:"history-search",placeholder:"search datasets",classes:"history-search",onsearch:g,onclear:f}).addClass("history-search-controls").css("padding","0px 0px 8px 0px")},toggleSearchControls:function(){var 
e=this.$el.find(".history-search-controls");if(!e.size()){e=this.renderSearchControls().hide();this.$el.find(".history-title").before(e)}e.slideToggle(this.fxSpeed)},showSelect:function(e){_.each(this.hdaViews,function(f){f.showSelect(e)})},hideSelect:function(e){_.each(this.hdaViews,function(f){f.hideSelect(e)})},getSelectedHdaViews:function(){return _.filter(this.hdaViews,function(e){return e.selected})},showLoadingIndicator:function(f,e,g){e=(e!==undefined)?(e):(this.fxSpeed);if(!this.indicator){this.indicator=new LoadingIndicator(this.$el,this.$el.parent())}if(!this.$el.is(":visible")){this.indicator.show(0,g)}else{this.$el.fadeOut(e);this.indicator.show(f,e,g)}},hideLoadingIndicator:function(e,f){e=(e!==undefined)?(e):(this.fxSpeed);if(this.indicator){this.indicator.hide(e,f)}},displayMessage:function(j,k,i){var g=this;this.scrollToTop();var h=this.$el.find(this.msgsSelector),e=$("<div/>").addClass(j+"message").html(k);if(!_.isEmpty(i)){var f=$('<a href="javascript:void(0)">Details</a>').click(function(){Galaxy.modal.show(g.messageToModalOptions(j,k,i));return false});e.append(" ",f)}return h.html(e)},messageToModalOptions:function(i,k,h){var e=this,j=$("<div/>"),g={title:"Details"};function f(l){l=_.omit(l,_.functions(l));return["<table>",_.map(l,function(n,m){n=(_.isObject(n))?(f(n)):(n);return'<tr><td style="vertical-align: top; color: grey">'+m+'</td><td style="padding-left: 8px">'+n+"</td></tr>"}).join(""),"</table>"].join("")}if(_.isObject(h)){g.body=j.append(f(h))}else{g.body=j.html(h)}g.buttons={Ok:function(){Galaxy.modal.hide();e.clearMessages()}};return g},clearMessages:function(){var e=this.$el.find(this.msgsSelector);e.empty()},scrollPosition:function(){return this.$el.parent().scrollTop()},scrollTo:function(e){this.$el.parent().scrollTop(e)},scrollToTop:function(){this.$el.parent().scrollTop(0);return this},scrollIntoView:function(f,g){if(!g){this.$el.parent().parent().scrollTop(f);return this}var 
e=window,h=this.$el.parent().parent(),j=$(e).innerHeight(),i=(j/2)-(g/2);$(h).scrollTop(f-i);return this},scrollToId:function(f){if((!f)||(!this.hdaViews[f])){return this}var e=this.hdaViews[f].$el;this.scrollIntoView(e.offset().top,e.outerHeight());return this},scrollToHid:function(e){var f=this.model.hdas.getByHid(e);if(!f){return this}return this.scrollToId(f.id)},connectToQuotaMeter:function(e){if(!e){return this}this.listenTo(e,"quota:over",this.showQuotaMessage);this.listenTo(e,"quota:under",this.hideQuotaMessage);this.on("rendered rendered:initial",function(){if(e&&e.isOverQuota()){this.showQuotaMessage()}});return this},showQuotaMessage:function(){var e=this.$el.find(".quota-message");if(e.is(":hidden")){e.slideDown(this.fxSpeed)}},hideQuotaMessage:function(){var e=this.$el.find(".quota-message");if(!e.is(":hidden")){e.slideUp(this.fxSpeed)}},connectToOptionsMenu:function(e){if(!e){return this}this.on("new-storage",function(g,f){if(e&&g){e.findItemByHtml(_l("Include Deleted Datasets")).checked=g.get("show_deleted");e.findItemByHtml(_l("Include Hidden Datasets")).checked=g.get("show_hidden")}});return this},toString:function(){return"HistoryPanel("+((this.model)?(this.model.get("name")):(""))+")"}});c.templates={historyPanel:Handlebars.templates["template-history-historyPanel"],anonHistoryPanel:Handlebars.templates["template-history-historyPanel-anon"]};return{HistoryPanel:c}});
\ No newline at end of file
diff -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 -r a39f452a7f657f3b43dce87d08e143ed6f02867e static/scripts/packed/mvc/ui.js
--- a/static/scripts/packed/mvc/ui.js
+++ b/static/scripts/packed/mvc/ui.js
@@ -1,1 +1,1 @@
-var IconButton=Backbone.Model.extend({defaults:{title:"",icon_class:"",on_click:null,menu_options:null,is_menu_button:true,id:null,href:null,target:null,enabled:true,visible:true,tooltip_config:{}}});var IconButtonView=Backbone.View.extend({initialize:function(){this.model.attributes.tooltip_config={placement:"bottom"};this.model.bind("change",this.render,this)},render:function(){this.$el.tooltip("hide");var a=this.template(this.model.toJSON());a.tooltip(this.model.get("tooltip_config"));this.$el.replaceWith(a);this.setElement(a);return this},events:{click:"click"},click:function(a){if(_.isFunction(this.model.get("on_click"))){this.model.get("on_click")(a);return false}return true},template:function(b){var a='title="'+b.title+'" class="icon-button';if(b.is_menu_button){a+=" menu-button"}a+=" "+b.icon_class;if(!b.enabled){a+="_disabled"}a+='"';if(b.id){a+=' id="'+b.id+'"'}a+=' href="'+b.href+'"';if(b.target){a+=' target="'+b.target+'"'}if(!b.visible){a+=' style="display: none;"'}if(b.enabled){a="<a "+a+"/>"}else{a="<span "+a+"/>"}return $(a)}});var IconButtonCollection=Backbone.Collection.extend({model:IconButton});var IconButtonMenuView=Backbone.View.extend({tagName:"div",initialize:function(){this.render()},render:function(){var a=this;this.collection.each(function(d){var b=$("<a/>").attr("href","javascript:void(0)").attr("title",d.attributes.title).addClass("icon-button menu-button").addClass(d.attributes.icon_class).appendTo(a.$el).click(d.attributes.on_click);if(d.attributes.tooltip_config){b.tooltip(d.attributes.tooltip_config)}var c=d.get("options");if(c){make_popupmenu(b,c)}});return this}});var create_icon_buttons_menu=function(b,a){if(!a){a={}}var c=new IconButtonCollection(_.map(b,function(d){return new IconButton(_.extend(d,a))}));return new IconButtonMenuView({collection:c})};var Grid=Backbone.Collection.extend({});var GridView=Backbone.View.extend({});var 
PopupMenu=Backbone.View.extend({initialize:function(b,a){this.$button=b||$("<div/>");this.options=a||[];var c=this;this.$button.click(function(d){c._renderAndShow(d);return false})},_renderAndShow:function(a){this.render();this.$el.appendTo("body");this.$el.css(this._getShownPosition(a));this._setUpCloseBehavior();this.$el.show()},render:function(){this.$el.addClass("popmenu-wrapper").hide().css({position:"absolute"}).html(this.template(this.$button.attr("id"),this.options));if(this.options.length){var a=this;this.$el.find("li").each(function(c,b){var d=a.options[c];if(d.func){$(this).children("a.popupmenu-option").click(function(e){d.func.call(a,e,d)})}})}return this},template:function(b,a){return['<ul id="',b,'-menu" class="dropdown-menu">',this._templateOptions(a),"</ul>"].join("")},_templateOptions:function(a){if(!a.length){return"<li>(no options)</li>"}return _.map(a,function(d){if(d.divider){return'<li class="divider"></li>'}else{if(d.header){return['<li class="head"><a href="javascript:void(0);">',d.html,"</a></li>"].join("")}}var c=d.href||"javascript:void(0);",e=(d.target)?(' target="'+d.target+'"'):(""),b=(d.checked)?('<span class="fa fa-check"></span>'):("");return['<li><a class="popupmenu-option" href="',c,'"',e,">",b,d.html,"</a></li>"].join("")}).join("")},_getShownPosition:function(b){var c=this.$el.width();var a=b.pageX-c/2;a=Math.min(a,$(document).scrollLeft()+$(window).width()-c-5);a=Math.max(a,$(document).scrollLeft()+5);return{top:b.pageY,left:a}},_setUpCloseBehavior:function(){var b=this;var a=function(c){c.one("click.close_popup",function(){b.remove()})};a($(window.document));a($(window.top.document));_.each(window.top.frames,function(c){a($(c.document))})},addItem:function(b,a){a=(a>=0)?a:this.options.length;this.options.splice(a,0,b);return this},removeItem:function(a){if(a>=0){this.options.splice(a,1)}return this},findIndexByHtml:function(b){for(var 
a=0;a<this.options.length;a++){if(_.has(this.options[a],"html")&&(this.options[a].html===b)){return a}}return null},findItemByHtml:function(a){return this.options[(this.findIndexByHtml(a))]},toString:function(){return"PopupMenu"}});PopupMenu.make_popupmenu=function(b,c){var a=[];_.each(c,function(f,d){var e={html:d};if(f===null){e.header=true}else{if(jQuery.type(f)==="function"){e.func=f}}a.push(e)});return new PopupMenu($(b),a)};PopupMenu.convertLinksToOptions=function(c,a){c=$(c);a=a||"a";var b=[];c.find(a).each(function(g,e){var f={},d=$(g);f.html=d.text();if(d.attr("href")){var j=d.attr("href"),k=d.attr("target"),h=d.attr("confirm");f.func=function(){if((h)&&(!confirm(h))){return}switch(k){case"_parent":window.parent.location=j;break;case"_top":window.top.location=j;break;default:window.location=j}}}b.push(f)});return b};PopupMenu.fromExistingDom=function(d,c,a){d=$(d);c=$(c);var b=PopupMenu.convertLinksToOptions(c,a);c.remove();return new PopupMenu(d,b)};PopupMenu.make_popup_menus=function(c,b,d){c=c||document;b=b||"div[popupmenu]";d=d||function(e,f){return"#"+e.attr("popupmenu")};var a=[];$(c).find(b).each(function(){var e=$(this),f=$(c).find(d(e,c));a.push(PopupMenu.fromDom(f,e));f.addClass("popup")});return a};var faIconButton=function(a){a=a||{};a.tooltipConfig=a.tooltipConfig||{placement:"bottom"};a.classes=["icon-btn"].concat(a.classes||[]);if(a.disabled){a.classes.push("disabled")}var b=['<a class="',a.classes.join(" "),'"',((a.title)?(' title="'+a.title+'"'):("")),((a.target)?(' target="'+a.target+'"'):("")),' href="',((a.href)?(a.href):("javascript:void(0);")),'">','<span class="fa ',a.faIcon,'"></span>',"</a>"].join("");var c=$(b).tooltip(a.tooltipConfig);if(_.isFunction(a.onclick)){c.click(a.onclick)}return c};
\ No newline at end of file
+var IconButton=Backbone.Model.extend({defaults:{title:"",icon_class:"",on_click:null,menu_options:null,is_menu_button:true,id:null,href:null,target:null,enabled:true,visible:true,tooltip_config:{}}});var IconButtonView=Backbone.View.extend({initialize:function(){this.model.attributes.tooltip_config={placement:"bottom"};this.model.bind("change",this.render,this)},render:function(){this.$el.tooltip("hide");var a=this.template(this.model.toJSON());a.tooltip(this.model.get("tooltip_config"));this.$el.replaceWith(a);this.setElement(a);return this},events:{click:"click"},click:function(a){if(_.isFunction(this.model.get("on_click"))){this.model.get("on_click")(a);return false}return true},template:function(b){var a='title="'+b.title+'" class="icon-button';if(b.is_menu_button){a+=" menu-button"}a+=" "+b.icon_class;if(!b.enabled){a+="_disabled"}a+='"';if(b.id){a+=' id="'+b.id+'"'}a+=' href="'+b.href+'"';if(b.target){a+=' target="'+b.target+'"'}if(!b.visible){a+=' style="display: none;"'}if(b.enabled){a="<a "+a+"/>"}else{a="<span "+a+"/>"}return $(a)}});var IconButtonCollection=Backbone.Collection.extend({model:IconButton});var IconButtonMenuView=Backbone.View.extend({tagName:"div",initialize:function(){this.render()},render:function(){var a=this;this.collection.each(function(d){var b=$("<a/>").attr("href","javascript:void(0)").attr("title",d.attributes.title).addClass("icon-button menu-button").addClass(d.attributes.icon_class).appendTo(a.$el).click(d.attributes.on_click);if(d.attributes.tooltip_config){b.tooltip(d.attributes.tooltip_config)}var c=d.get("options");if(c){make_popupmenu(b,c)}});return this}});var create_icon_buttons_menu=function(b,a){if(!a){a={}}var c=new IconButtonCollection(_.map(b,function(d){return new IconButton(_.extend(d,a))}));return new IconButtonMenuView({collection:c})};var Grid=Backbone.Collection.extend({});var GridView=Backbone.View.extend({});var 
PopupMenu=Backbone.View.extend({initialize:function(b,a){this.$button=b||$("<div/>");this.options=a||[];var c=this;this.$button.click(function(d){c._renderAndShow(d);return false})},_renderAndShow:function(a){this.render();this.$el.appendTo("body");this.$el.css(this._getShownPosition(a));this._setUpCloseBehavior();this.$el.show()},render:function(){this.$el.addClass("popmenu-wrapper").hide().css({position:"absolute"}).html(this.template(this.$button.attr("id"),this.options));if(this.options.length){var a=this;this.$el.find("li").each(function(c,b){var d=a.options[c];if(d.func){$(this).children("a.popupmenu-option").click(function(e){d.func.call(a,e,d)})}})}return this},template:function(b,a){return['<ul id="',b,'-menu" class="dropdown-menu">',this._templateOptions(a),"</ul>"].join("")},_templateOptions:function(a){if(!a.length){return"<li>(no options)</li>"}return _.map(a,function(d){if(d.divider){return'<li class="divider"></li>'}else{if(d.header){return['<li class="head"><a href="javascript:void(0);">',d.html,"</a></li>"].join("")}}var c=d.href||"javascript:void(0);",e=(d.target)?(' target="'+d.target+'"'):(""),b=(d.checked)?('<span class="fa fa-check"></span>'):("");return['<li><a class="popupmenu-option" href="',c,'"',e,">",b,d.html,"</a></li>"].join("")}).join("")},_getShownPosition:function(b){var c=this.$el.width();var a=b.pageX-c/2;a=Math.min(a,$(document).scrollLeft()+$(window).width()-c-5);a=Math.max(a,$(document).scrollLeft()+5);return{top:b.pageY,left:a}},_setUpCloseBehavior:function(){var b=this;var a=function(c){c.one("click.close_popup",function(){b.remove()})};a($(window.document));a($(window.top.document));_.each(window.top.frames,function(c){a($(c.document))})},addItem:function(b,a){a=(a>=0)?a:this.options.length;this.options.splice(a,0,b);return this},removeItem:function(a){if(a>=0){this.options.splice(a,1)}return this},findIndexByHtml:function(b){for(var 
a=0;a<this.options.length;a++){if(_.has(this.options[a],"html")&&(this.options[a].html===b)){return a}}return null},findItemByHtml:function(a){return this.options[(this.findIndexByHtml(a))]},toString:function(){return"PopupMenu"}});PopupMenu.make_popupmenu=function(b,c){var a=[];_.each(c,function(f,d){var e={html:d};if(f===null){e.header=true}else{if(jQuery.type(f)==="function"){e.func=f}}a.push(e)});return new PopupMenu($(b),a)};PopupMenu.convertLinksToOptions=function(c,a){c=$(c);a=a||"a";var b=[];c.find(a).each(function(g,e){var f={},d=$(g);f.html=d.text();if(d.attr("href")){var j=d.attr("href"),k=d.attr("target"),h=d.attr("confirm");f.func=function(){if((h)&&(!confirm(h))){return}switch(k){case"_parent":window.parent.location=j;break;case"_top":window.top.location=j;break;default:window.location=j}}}b.push(f)});return b};PopupMenu.fromExistingDom=function(d,c,a){d=$(d);c=$(c);var b=PopupMenu.convertLinksToOptions(c,a);c.remove();return new PopupMenu(d,b)};PopupMenu.make_popup_menus=function(c,b,d){c=c||document;b=b||"div[popupmenu]";d=d||function(e,f){return"#"+e.attr("popupmenu")};var a=[];$(c).find(b).each(function(){var e=$(this),f=$(c).find(d(e,c));a.push(PopupMenu.fromDom(f,e));f.addClass("popup")});return a};var faIconButton=function(a){a=a||{};a.tooltipConfig=a.tooltipConfig||{placement:"bottom"};a.classes=["icon-btn"].concat(a.classes||[]);if(a.disabled){a.classes.push("disabled")}var b=['<a class="',a.classes.join(" "),'"',((a.title)?(' title="'+a.title+'"'):("")),((a.target)?(' target="'+a.target+'"'):("")),' href="',((a.href)?(a.href):("javascript:void(0);")),'">','<span class="fa ',a.faIcon,'"></span>',"</a>"].join("");var c=$(b).tooltip(a.tooltipConfig);if(_.isFunction(a.onclick)){c.click(a.onclick)}return c};var searchInput=function(k){var 
a=27,h=13,i=$("<div/>"),b={initialVal:"",name:"search",placeholder:"search",classes:"",onclear:function(){},onsearch:function(l){},minSearchLen:0,escWillClear:true,oninit:function(){}};if(jQuery.type(k)==="object"){k=jQuery.extend(true,b,k)}function d(l){var m=$(this).parent().children("input");m.val("");m.trigger("clear:searchInput");k.onclear()}function j(m,l){$(this).trigger("search:searchInput",l);k.onsearch(l)}function c(){return['<input type="text" name="',k.name,'" placeholder="',k.placeholder,'" ','class="search-query ',k.classes,'" ',"/>"].join("")}function g(){return $(c()).css({width:"100%","padding-right":"24px"}).focus(function(l){$(this).select()}).keyup(function(m){if(m.which===a&&k.escWillClear){d.call(this,m)}else{var l=$(this).val();if((m.which===h)||(k.minSearchLen&&l.length>=k.minSearchLen)){j.call(this,m,l)}else{if(!l.length){d.call(this,m)}}}}).val(k.initialVal)}function f(){return'<span class="search-clear fa fa-times-circle"></span>'}function e(){return $(f()).css({position:"absolute",right:"15px","font-size":"1.4em","line-height":"23px",color:"grey"}).click(function(l){d.call(this,l)})}return i.append([g(),e()])};
\ No newline at end of file
diff -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 -r a39f452a7f657f3b43dce87d08e143ed6f02867e static/style/blue/base.css
--- a/static/style/blue/base.css
+++ b/static/style/blue/base.css
@@ -1363,7 +1363,7 @@
input[type="submit"].btn-primary,button.btn-primary{color:#fff;background-color:#5f6990;border-color:#0e0f15}input[type="submit"].btn-primary:hover,button.btn-primary:hover,input[type="submit"].btn-primary:focus,button.btn-primary:focus,input[type="submit"].btn-primary:active,button.btn-primary:active,input[type="submit"].btn-primary.active,button.btn-primary.active,.open .dropdown-toggleinput[type="submit"].btn-primary,.open .dropdown-togglebutton.btn-primary{color:#fff;background-color:#4e5777;border-color:#000}
input[type="submit"].btn-primary:active,button.btn-primary:active,input[type="submit"].btn-primary.active,button.btn-primary.active,.open .dropdown-toggleinput[type="submit"].btn-primary,.open .dropdown-togglebutton.btn-primary{background-image:none}
input[type="submit"].btn-primary.disabled,button.btn-primary.disabled,input[type="submit"].btn-primary[disabled],button.btn-primary[disabled],fieldset[disabled] input[type="submit"].btn-primary,fieldset[disabled] button.btn-primary,input[type="submit"].btn-primary.disabled:hover,button.btn-primary.disabled:hover,input[type="submit"].btn-primary[disabled]:hover,button.btn-primary[disabled]:hover,fieldset[disabled] input[type="submit"].btn-primary:hover,fieldset[disabled] button.btn-primary:hover,input[type="submit"].btn-primary.disabled:focus,button.btn-primary.disabled:focus,input[type="submit"].btn-primary[disabled]:focus,button.btn-primary[disabled]:focus,fieldset[disabled] input[type="submit"].btn-primary:focus,fieldset[disabled] button.btn-primary:focus,input[type="submit"].btn-primary.disabled:active,button.btn-primary.disabled:active,input[type="submit"].btn-primary[disabled]:active,button.btn-primary[disabled]:active,fieldset[disabled] input[type="submit"].btn-primary:active,fieldset[disabled] button.btn-primary:active,input[type="submit"].btn-primary.disabled.active,button.btn-primary.disabled.active,input[type="submit"].btn-primary[disabled].active,button.btn-primary[disabled].active,fieldset[disabled] input[type="submit"].btn-primary.active,fieldset[disabled] button.btn-primary.active{background-color:#5f6990;border-color:#0e0f15}
-.search-query{display:inline-block;padding:4px;font-size:12px;line-height:1.428571429;color:#555;border:1px solid #aaa;padding-left:14px !important;padding-right:14px !important;margin-bottom:0;-webkit-border-radius:14px;-moz-border-radius:14px;border-radius:14px;max-width:auto}
+.search-query{display:inline-block;padding:4px;font-size:12px;line-height:1.428571429;color:#555;border:1px solid #aaa;padding-left:14px !important;padding-right:14px;margin-bottom:0;-webkit-border-radius:14px;-moz-border-radius:14px;border-radius:14px;max-width:auto}
.search-query:focus{border-color:rgba(24,132,218,0.8);-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);outline:0;outline:thin dotted \9;}
.search-spinner{position:absolute;display:none;right:6px;top:9px}
#search-clear-btn{position:absolute;right:6px;top:5px;display:block;font-size:1.4em;text-decoration:none;color:#888;font-family:FontAwesome;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}#search-clear-btn:before{content:"\f057"}
diff -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 -r a39f452a7f657f3b43dce87d08e143ed6f02867e static/style/src/less/base.less
--- a/static/style/src/less/base.less
+++ b/static/style/src/less/base.less
@@ -721,7 +721,7 @@
color: @gray;
border: 1px solid @input-border;
padding-left: 14px !important;
- padding-right: 14px !important;
+ padding-right: 14px;
margin-bottom: 0; // remove the default margin on all inputs
.border-radius(14px);
max-width: auto;
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: Dave Bouvier: Functional tests for tool dependency XML verification. Functional tests for detecting missing env.sh files due to tool dependency installation errors.
by commits-noreply@bitbucket.org 21 Nov '13
by commits-noreply@bitbucket.org 21 Nov '13
21 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/253f888144aa/
Changeset: 253f888144aa
User: Dave Bouvier
Date: 2013-11-21 17:12:13
Summary: Functional tests for tool dependency XML verification. Functional tests for detecting missing env.sh files due to tool dependency installation errors.
Affected #: 6 files
diff -r d448ed76962c8b36fde83e5cd8dec91e97c9d812 -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 test/tool_shed/base/twilltestcase.py
--- a/test/tool_shed/base/twilltestcase.py
+++ b/test/tool_shed/base/twilltestcase.py
@@ -1,6 +1,15 @@
+import common
+import string
+import os
+import re
+import test_db_util
+import simplejson
+import logging
+import time
+import tempfile
+import tarfile
import galaxy.webapps.tool_shed.util.hgweb_config
import galaxy.model as galaxy_model
-import common, string, os, re, test_db_util, simplejson, logging, time
import galaxy.util as util
from tool_shed.util import shed_util_common as suc
from base.twilltestcase import tc, from_json_string, TwillTestCase, security, urllib
@@ -44,7 +53,7 @@
url = '/repository_review/create_component?operation=create'
self.visit_url( url )
self.submit_form( 1, 'create_component_button', **kwd )
-
+
def browse_category( self, category, strings_displayed=[], strings_not_displayed=[] ):
url = '/repository/browse_valid_categories?sort=name&operation=valid_repositories_by_category&id=%s' % \
self.security.encode_id( category.id )
@@ -479,7 +488,17 @@
else:
string = string.replace( character, replacement )
return string
-
+
+ def export_capsule( self, repository ):
+ url = '/repository/export?repository_id=%s&changeset_revision=%s' % \
+ ( self.security.encode_id( repository.id ), self.get_repository_tip( repository ) )
+ self.visit_url( url )
+ self.submit_form( 'export_repository', 'export_repository_button' )
+ fd, capsule_filename = tempfile.mkstemp()
+ os.close( fd )
+ file( capsule_filename, 'w' ).write( self.last_page() )
+ return capsule_filename
+
def fill_review_form( self, review_contents_dict, strings_displayed=[], strings_not_displayed=[] ):
kwd = dict()
changed = False
@@ -727,7 +746,17 @@
tc.fv( "user_access", "allow_push", '+%s' % username )
tc.submit( 'user_access_button' )
self.check_for_strings( post_submit_strings_displayed, post_submit_strings_not_displayed )
-
+
+ def import_capsule( self, filename, strings_displayed=[], strings_not_displayed=[],
+ strings_displayed_after_submit=[], strings_not_displayed_after_submit=[] ):
+ url = '/repository/upload_capsule'
+ self.visit_url( url )
+ tc.formfile( 'upload_capsule', 'file_data', filename )
+ tc.submit( 'upload_capsule_button' )
+ self.check_for_strings( strings_displayed, strings_not_displayed )
+ self.submit_form( 'import_capsule', 'import_capsule_button' )
+ self.check_for_strings( strings_displayed_after_submit, strings_not_displayed_after_submit )
+
def import_workflow( self, repository, workflow_name, strings_displayed=[], strings_not_displayed=[] ):
url = '/admin_toolshed/import_workflow?repository_id=%s&workflow_name=%s' % \
( self.security.encode_id( repository.id ), tool_shed_encode( workflow_name ) )
diff -r d448ed76962c8b36fde83e5cd8dec91e97c9d812 -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 test/tool_shed/functional/test_0480_tool_dependency_xml_verification.py
--- /dev/null
+++ b/test/tool_shed/functional/test_0480_tool_dependency_xml_verification.py
@@ -0,0 +1,75 @@
+from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
+import tool_shed.base.test_db_util as test_db_util
+
+import logging
+log = logging.getLogger( __name__ )
+
+category_name = 'Test 0480 Tool dependency definition validation'
+category_description = 'Test script 0480 for validating tool dependency definitions.'
+repository_name = 'package_invalid_tool_dependency_xml_1_0_0'
+repository_description = "Contains a tool dependency definition that should return an error."
+repository_long_description = "This repository is in the test suite 0480"
+
+'''
+
+1. Create a repository package_invalid_tool_dependency_xml_1_0_0
+2. Upload a tool_dependencies.xml file to the repository with no <actions> tags around the <action> tags.
+3. Verify error message is displayed.
+
+'''
+
+class TestDependencyDefinitionValidation( ShedTwillTestCase ):
+ '''Test the tool shed's tool dependency XML validation.'''
+
+ def test_0000_initiate_users_and_category( self ):
+ """Create necessary user accounts and login as an admin user."""
+ self.logout()
+ self.login( email=common.admin_email, username=common.admin_username )
+ admin_user = test_db_util.get_user( common.admin_email )
+ assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
+ admin_user_private_role = test_db_util.get_private_role( admin_user )
+ self.create_category( name=category_name, description=category_description )
+ self.logout()
+ self.login( email=common.test_user_2_email, username=common.test_user_2_name )
+ test_user_2 = test_db_util.get_user( common.test_user_2_email )
+ assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
+ test_user_2_private_role = test_db_util.get_private_role( test_user_2 )
+ self.logout()
+ self.login( email=common.test_user_1_email, username=common.test_user_1_name )
+ test_user_1 = test_db_util.get_user( common.test_user_1_email )
+ assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
+ test_user_1_private_role = test_db_util.get_private_role( test_user_1 )
+
+ def test_0005_create_tool_dependency_repository( self ):
+ '''Create and populate package_invalid_tool_dependency_xml_1_0_0.'''
+ '''
+ This is step 1 - Create a repository package_invalid_tool_dependency_xml_1_0_0.
+
+ Create a repository named package_invalid_tool_dependency_xml_1_0_0 that will contain only a single file named tool_dependencies.xml.
+ '''
+ category = test_db_util.get_category_by_name( category_name )
+ repository = self.get_or_create_repository( name=repository_name,
+ description=repository_description,
+ long_description=repository_long_description,
+ owner=common.test_user_1_name,
+ category_id=self.security.encode_id( category.id ),
+ strings_displayed=[] )
+ self.upload_file( repository,
+ filename='0480_files/tool_dependencies.xml',
+ filepath=None,
+ valid_tools_only=False,
+ uncompress_file=False,
+ remove_repo_files_not_in_tar=False,
+ commit_message='Populate package_invalid_tool_dependency_xml_1_0_0 with an improperly defined tool dependency.',
+ strings_displayed=[ 'package cannot be installed because', 'missing either an <actions> tag set' ],
+ strings_not_displayed=[] )
+
+ def test_0010_populate_tool_dependency_repository( self ):
+ '''Verify package_invalid_tool_dependency_xml_1_0_0.'''
+ '''
+ This is step 3 - Verify repository. The uploaded tool dependency XML should not have resulted in a new changeset.
+ '''
+ repository = test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
+ assert self.repository_is_new( repository ), 'Uploading an incorrectly defined tool_dependencies.xml resulted in a changeset being generated.'
+
+
\ No newline at end of file
diff -r d448ed76962c8b36fde83e5cd8dec91e97c9d812 -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 test/tool_shed/functional/test_1440_missing_env_sh_files.py
--- /dev/null
+++ b/test/tool_shed/functional/test_1440_missing_env_sh_files.py
@@ -0,0 +1,130 @@
+from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
+import tool_shed.base.test_db_util as test_db_util
+
+import logging
+log = logging.getLogger( __name__ )
+
+category_name = 'Test 1440 Tool dependency missing env.sh'
+category_description = 'Test script 1440 for detection of missing environment settings.'
+package_repository_name = 'package_env_sh_1_0_1440'
+tool_repository_name = 'filter_1440'
+package_repository_description = 'Repository that should result in an env.sh file, but does not.'
+tool_repository_description = 'Galaxy filtering tool.'
+package_repository_long_description = '%s: %s' % ( package_repository_name, package_repository_description )
+tool_repository_long_description = '%s: %s' % ( tool_repository_name, tool_repository_description )
+
+'''
+1. Create a tool dependency type repository that reliably fails to install successfully. This repository should define
+ an action that would have created an env.sh file on success, resulting in an env.sh file that should exist, but is missing.
+
+2. Create a repository that defines a complex repository dependency in the repository created in step 1, with prior_install_required
+ and set_environment_for_install.
+
+3. Attempt to install the second repository into a galaxy instance, verify that it is installed but missing tool dependencies.
+
+'''
+
+
class TestMissingEnvSh( ShedTwillTestCase ):
    '''Test installing a repository that should create an env.sh file, but does not.'''

    def test_0000_initiate_users_and_category( self ):
        """Create necessary user accounts and login as an admin user."""
        self.logout()
        self.login( email=common.admin_email, username=common.admin_username )
        admin_user = test_db_util.get_user( common.admin_email )
        assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
        admin_user_private_role = test_db_util.get_private_role( admin_user )
        self.create_category( name=category_name, description=category_description )
        self.logout()
        self.login( email=common.test_user_2_email, username=common.test_user_2_name )
        test_user_2 = test_db_util.get_user( common.test_user_2_email )
        assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
        test_user_2_private_role = test_db_util.get_private_role( test_user_2 )
        self.logout()
        # End the test logged in as test_user_1, who owns the repositories created below.
        self.login( email=common.test_user_1_email, username=common.test_user_1_name )
        test_user_1 = test_db_util.get_user( common.test_user_1_email )
        assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
        test_user_1_private_role = test_db_util.get_private_role( test_user_1 )

    def test_0005_create_package_repository( self ):
        '''Create and populate package_env_sh_1_0_1440.

        This is step 1 - Create repository package_env_sh_1_0_1440.

        Create and populate a repository that is designed to fail a tool dependency installation. This tool dependency should
        also define one or more environment variables.
        '''
        category = test_db_util.get_category_by_name( category_name )
        repository = self.get_or_create_repository( name=package_repository_name,
                                                    description=package_repository_description,
                                                    long_description=package_repository_long_description,
                                                    owner=common.test_user_1_name,
                                                    category_id=self.security.encode_id( category.id ),
                                                    strings_displayed=[] )
        # Upload the failing tool dependency definition to the package_env_sh_1_0_1440 repository.
        self.upload_file( repository,
                          filename='1440_files/dependency_definition/tool_dependencies.xml',
                          filepath=None,
                          valid_tools_only=True,
                          uncompress_file=False,
                          remove_repo_files_not_in_tar=False,
                          commit_message='Populate package_env_sh_1_0_1440 with a broken tool dependency definition.',
                          strings_displayed=[],
                          strings_not_displayed=[] )

    def test_0010_create_filter_repository( self ):
        '''Create and populate filter_1440.

        This is step 2 - Create a repository that defines a complex repository dependency on the repository created in
        step 1, with prior_install_required and set_environment_for_install.
        '''
        category = test_db_util.get_category_by_name( category_name )
        repository = self.get_or_create_repository( name=tool_repository_name,
                                                    description=tool_repository_description,
                                                    long_description=tool_repository_long_description,
                                                    owner=common.test_user_1_name,
                                                    category_id=self.security.encode_id( category.id ),
                                                    strings_displayed=[] )
        # Upload the filtering tool tarball to the filter_1440 repository.
        self.upload_file( repository,
                          filename='filtering/filtering_2.2.0.tar',
                          filepath=None,
                          valid_tools_only=True,
                          uncompress_file=False,
                          remove_repo_files_not_in_tar=False,
                          commit_message='Populate filter_1440 with the filtering tool.',
                          strings_displayed=[],
                          strings_not_displayed=[] )
        # Upload the complex repository dependency definition, making filter_1440 depend on package_env_sh_1_0_1440.
        self.upload_file( repository,
                          filename='1440_files/complex_dependency/tool_dependencies.xml',
                          filepath=None,
                          valid_tools_only=True,
                          uncompress_file=False,
                          remove_repo_files_not_in_tar=False,
                          commit_message='Populate filter_1440 with a dependency on package_env_sh_1_0_1440.',
                          strings_displayed=[],
                          strings_not_displayed=[] )

    def test_0015_install_filter_repository( self ):
        '''Install the filter_1440 repository to galaxy.

        This is step 3 - Attempt to install the second repository into a galaxy instance, verify that it is installed but
        missing tool dependencies.
        '''
        self.galaxy_logout()
        self.galaxy_login( email=common.admin_email, username=common.admin_username )
        # Both the tool repository and its package dependency should be listed on the post-submit page.
        post_submit_strings_displayed = [ 'filter_1440', 'package_env_sh_1_0_1440' ]
        self.install_repository( 'filter_1440',
                                 common.test_user_1_name,
                                 category_name,
                                 install_tool_dependencies=True,
                                 post_submit_strings_displayed=post_submit_strings_displayed )

    def test_0020_verify_missing_tool_dependency( self ):
        '''Verify that the filter_1440 repository is installed and missing tool dependencies.'''
        repository = test_db_util.get_installed_repository_by_name_owner( 'filter_1440', common.test_user_1_name )
        strings_displayed = [ 'Missing tool dependencies' ]
        self.display_installed_repository_manage_page( repository, strings_displayed=strings_displayed )
        assert len( repository.missing_tool_dependencies ) == 1, 'filter_1440 should have a missing tool dependency, but does not.'
+
diff -r d448ed76962c8b36fde83e5cd8dec91e97c9d812 -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 test/tool_shed/test_data/0480_files/tool_dependencies.xml
--- /dev/null
+++ b/test/tool_shed/test_data/0480_files/tool_dependencies.xml
@@ -0,0 +1,14 @@
<?xml version="1.0"?>
<!-- Test fixture for test_0480: an INTENTIONALLY invalid tool dependency definition. The <action>
     tags below are deliberately not wrapped in an <actions> tag set, so the tool shed should flag
     this package as uninstallable ("missing either an <actions> tag set"). Do not "fix" this file. -->
<tool_dependency>
    <package name="invalid_tool_dependency_xml" version="1.0.0">
        <install version="1.0">
            <action type="download_file">file://./</action>
            <action type="shell_command">/bin/true</action>
            <action type="set_environment">
                <environment_variable action="set_to" name="INSTALL_DIR">$INSTALL_DIR</environment_variable>
            </action>
        </install>
        <readme>
        </readme>
    </package>
</tool_dependency>
\ No newline at end of file
diff -r d448ed76962c8b36fde83e5cd8dec91e97c9d812 -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 test/tool_shed/test_data/1440_files/complex_dependency/tool_dependencies.xml
--- /dev/null
+++ b/test/tool_shed/test_data/1440_files/complex_dependency/tool_dependencies.xml
@@ -0,0 +1,6 @@
<?xml version='1.0' encoding='utf-8'?>
<!-- Test fixture for test_1440: complex repository dependency making filter_1440 depend on the
     deliberately-failing package_env_sh_1_0_1440 repository.
     NOTE(review): the test plan mentions prior_install_required and set_environment_for_install,
     but this <repository> tag defines neither attribute - confirm whether that is intended. -->
<tool_dependency>
    <package name="failure" version="1.0.0">
        <repository name="package_env_sh_1_0_1440" owner="user1" />
    </package>
</tool_dependency>
\ No newline at end of file
diff -r d448ed76962c8b36fde83e5cd8dec91e97c9d812 -r 253f888144aaa4ae4eadaf15de493b3144fc5cd3 test/tool_shed/test_data/1440_files/dependency_definition/tool_dependencies.xml
--- /dev/null
+++ b/test/tool_shed/test_data/1440_files/dependency_definition/tool_dependencies.xml
@@ -0,0 +1,14 @@
<?xml version='1.0' encoding='utf-8'?>
<!-- Test fixture for test_1440: a tool dependency that reliably fails to install. The
     shell_command action runs `false`, so installation fails before the set_environment action
     that would have recorded TEST_SUCCEEDED - producing the expected missing env.sh situation. -->
<tool_dependency>
    <package name="failure" version="1.0.0">
        <install version="1.0">
            <actions>
                <action type="shell_command">false</action>
                <action type="set_environment">
                    <environment_variable name="TEST_SUCCEEDED" action="prepend_to">true</environment_variable>
                </action>
            </actions>
        </install>
        <readme></readme>
    </package>
</tool_dependency>
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Code cleanup and minor fixes in prep scripts for tool shed nightly test runs and its functional test framework.
by commits-noreply@bitbucket.org 21 Nov '13
by commits-noreply@bitbucket.org 21 Nov '13
21 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d448ed76962c/
Changeset: d448ed76962c
User: greg
Date: 2013-11-21 16:28:22
Summary: Code cleanup and minor fixes in prep scripts for tool shed nightly test runs and its functional test framework.
Affected #: 3 files
diff -r e982fcaaaab558e6d4ecb3750d2ec721d5fd861d -r d448ed76962c8b36fde83e5cd8dec91e97c9d812 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -1,91 +1,69 @@
#!/usr/bin/env python
+import ConfigParser
+import logging
+import os
+import shutil
+import sys
+import tempfile
+import time
-import os, sys, logging, tempfile
+from datetime import datetime
+from optparse import OptionParser
+from time import strftime
new_path = [ os.path.join( os.getcwd(), "lib" ), os.path.join( os.getcwd(), "test" ) ]
-new_path.extend( sys.path[1:] )
+new_path.extend( sys.path[ 1: ] )
sys.path = new_path
+from galaxy import eggs
+eggs.require( "SQLAlchemy >= 0.4" )
+eggs.require( 'mercurial' )
+
+import galaxy.webapps.tool_shed.config as tool_shed_config
+import galaxy.webapps.tool_shed.model.mapping
+
+from base.util import get_test_environment
+from base.util import get_database_version
+from base.util import get_repository_current_revision
+from galaxy.model.orm import and_, not_, select
+from mercurial import hg
+from mercurial import ui
+from mercurial import __version__
+from tool_shed.util.shed_util_common import clone_repository
+from tool_shed.util.shed_util_common import get_configured_ui
+
log = logging.getLogger()
log.setLevel( 10 )
log.addHandler( logging.StreamHandler( sys.stdout ) )
-from galaxy import eggs
-import pkg_resources
-eggs.require( "SQLAlchemy >= 0.4" )
-eggs.require( 'mercurial' )
-from mercurial import hg, ui, commands, __version__
+assert sys.version_info[ :2 ] >= ( 2, 6 )
-import time, ConfigParser, shutil
-from datetime import datetime, timedelta
-from time import strftime
-from optparse import OptionParser
-import galaxy.webapps.tool_shed.config as tool_shed_config
-import galaxy.webapps.tool_shed.model.mapping
-import sqlalchemy as sa
-from galaxy.model.orm import and_, not_, select
-from galaxy.util.json import from_json_string, to_json_string
-from galaxy.web import url_for
-from galaxy.tools import parameters
-from tool_shed.util.shed_util_common import clone_repository, get_configured_ui
-
-from base.util import get_test_environment, get_database_version, get_repository_current_revision
-
-assert sys.version_info[:2] >= ( 2, 4 )
-
-def main():
- '''Script that checks repositories to see if the tools contained within them have functional tests defined.'''
- parser = OptionParser()
- parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
- parser.add_option( "-s",
- "--section",
- action="store",
- dest="section",
- default='server:main',
- help="which .ini file section to extract the host and port from" )
- parser.add_option(
- "-v", "--verbose",
- action="count", dest="verbosity",
- default=1,
- help="Control the amount of detail in the log output.")
- parser.add_option(
- "--verbosity", action="store", dest="verbosity",
- metavar='VERBOSITY',
- type="int", help="Control the amount of detail in the log output. --verbosity=1 is "
- "the same as -v")
- ( options, args ) = parser.parse_args()
- try:
- ini_file = args[0]
- except IndexError:
- print "Usage: python %s <tool shed .ini file> [options]" % sys.argv[ 0 ]
- exit( 127 )
- config_parser = ConfigParser.ConfigParser( {'here':os.getcwd()} )
- config_parser.read( ini_file )
- config_dict = {}
- for key, value in config_parser.items( "app:main" ):
- config_dict[key] = value
- config = tool_shed_config.Configuration( **config_dict )
-
- config_section = options.section
- now = strftime( "%Y-%m-%d %H:%M:%S" )
- print "#############################################################################"
- print "# %s - Checking repositories for tools with functional tests." % now
- print "# This tool shed is configured to listen on %s:%s." % ( config_parser.get( config_section, 'host' ), config_parser.get( config_section, 'port' ) )
- app = FlagRepositoriesApplication( config )
-
- if options.info_only:
- print "# Displaying info only ( --info_only )"
- if options.verbosity:
- print "# Displaying extra information ( --verbosity = %d )" % options.verbosity
-
- check_and_flag_repositories( app, info_only=options.info_only, verbosity=options.verbosity )
+class FlagRepositoriesApplication( object ):
+ """Encapsulates the state of a Universe application"""
+ def __init__( self, config ):
+ if config.database_connection is False:
+ config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % config.database
+ # Setup the database engine and ORM
+ self.model = galaxy.webapps.tool_shed.model.mapping.init( config.file_path, config.database_connection, engine_options={}, create_tables=False )
+ self.hgweb_config_manager = self.model.hgweb_config_manager
+ self.hgweb_config_manager.hgweb_config_dir = config.hgweb_config_dir
+ print "# Using configured hgweb.config file: ", self.hgweb_config_manager.hgweb_config
+ @property
+ def sa_session( self ):
+ """
+ Returns a SQLAlchemy session -- currently just gets the current
+ session from the threadlocal session context, but this is provided
+ to allow migration toward a more SQLAlchemy 0.4 style of use.
+ """
+ return self.model.context.current
+ def shutdown( self ):
+ pass
def check_and_flag_repositories( app, info_only=False, verbosity=1 ):
- '''
+ """
This method will iterate through all records in the repository_metadata table, checking each one for tool metadata,
- then checking the tool metadata for tests.
- Each tool's metadata should look something like:
+ then checking the tool metadata for tests. Each tool's metadata should look something like:
{
"add_to_tool_panel": true,
"description": "",
@@ -111,94 +89,11 @@
"version": "1.2.3",
"version_string_cmd": null
}
-
If the "tests" attribute is missing or empty, this script will mark the metadata record (which is specific to a changeset revision of a repository)
not to be tested. If each "tools" attribute has at least one valid "tests" entry, this script will do nothing, and leave it available for the install
and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision
not to be tested.
-
- TODO: Update this dict structure with the recently added components.
-
- If any error is encountered, the script will update the repository_metadata.tool_test_results attribute following this structure:
- {
- "test_environment":
- {
- "galaxy_revision": "9001:abcd1234",
- "galaxy_database_version": "114",
- "tool_shed_revision": "9001:abcd1234",
- "tool_shed_mercurial_version": "2.3.1",
- "tool_shed_database_version": "17",
- "python_version": "2.7.2",
- "architecture": "x86_64",
- "system": "Darwin 12.2.0"
- },
- "passed_tests":
- [
- {
- "test_id": "The test ID, generated by twill",
- "tool_id": "The tool ID that was tested",
- "tool_version": "The tool version that was tested",
- },
- ]
- "failed_tests":
- [
- {
- "test_id": "The test ID, generated by twill",
- "tool_id": "The tool ID that was tested",
- "tool_version": "The tool version that was tested",
- "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
- "traceback": "The captured traceback."
- },
- ]
- "installation_errors":
- {
- 'tool_dependencies':
- [
- {
- 'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
- 'name': 'Name of the tool dependency.',
- 'version': 'Version if this is a package, otherwise blank.',
- 'error_message': 'The error message returned when installation was attempted.',
- },
- ],
- 'repository_dependencies':
- [
- {
- 'tool_shed': 'The tool shed that this repository was installed from.',
- 'name': 'The name of the repository that failed to install.',
- 'owner': 'Owner of the failed repository.',
- 'changeset_revision': 'Changeset revision of the failed repository.',
- 'error_message': 'The error message that was returned when the repository failed to install.',
- },
- ],
- 'current_repository':
- [
- {
- 'tool_shed': 'The tool shed that this repository was installed from.',
- 'name': 'The name of the repository that failed to install.',
- 'owner': 'Owner of the failed repository.',
- 'changeset_revision': 'Changeset revision of the failed repository.',
- 'error_message': 'The error message that was returned when the repository failed to install.',
- },
- ],
- {
- "name": "The name of the repository.",
- "owner": "The owner of the repository.",
- "changeset_revision": "The changeset revision of the repository.",
- "error_message": "The message stored in tool_dependency.error_message."
- },
- }
- "missing_test_components":
- [
- {
- "tool_id": "The tool ID that missing components.",
- "tool_version": "The version of the tool."
- "tool_guid": "The guid of the tool."
- "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
- },
- ]
- }
- '''
+ """
start = time.time()
skip_metadata_ids = []
checked_repository_ids = []
@@ -214,49 +109,68 @@
# Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
# since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
# because it's redundant to test a revision that a user can't install.
- for metadata_record in app.sa_session.query( app.model.RepositoryMetadata ) \
- .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
- app.model.RepositoryMetadata.table.c.includes_tools == True,
- app.model.RepositoryMetadata.table.c.do_not_test == False,
- not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
+ for repository_metadata in app.sa_session.query( app.model.RepositoryMetadata ) \
+ .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
+ app.model.RepositoryMetadata.table.c.includes_tools == True,
+ app.model.RepositoryMetadata.table.c.do_not_test == False,
+ not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
records_checked += 1
- # Initialize the repository_status dict with the test environment, but leave the test_errors empty.
- repository_status = {}
- if metadata_record.tool_test_results:
- repository_status = metadata_record.tool_test_results
- # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows,
- # or tests incorrectly labeled as invalid.
- missing_test_components = []
- if 'test_environment' in repository_status:
- repository_status[ 'test_environment' ] = get_test_environment( repository_status[ 'test_environment' ] )
+ # Create the repository_status dictionary, using the dictionary from the previous test run if available.
+ if repository_metadata.tool_test_results:
+ repository_status = repository_metadata.tool_test_results
else:
- repository_status[ 'test_environment' ] = get_test_environment()
- repository_status[ 'test_environment' ][ 'tool_shed_database_version' ] = get_database_version( app )
- repository_status[ 'test_environment' ][ 'tool_shed_mercurial_version' ] = __version__.version
- repository_status[ 'test_environment' ][ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
- name = metadata_record.repository.name
- owner = metadata_record.repository.user.username
- changeset_revision = str( metadata_record.changeset_revision )
- if metadata_record.repository.id not in checked_repository_ids:
- checked_repository_ids.append( metadata_record.repository.id )
+ repository_status = {}
+ # Initialize the repository_status dictionary with the information about the current test environment.
+ last_test_environment = repository_status.get( 'test_environment', None )
+ if last_test_environment is None:
+ test_environment = get_test_environment()
+ else:
+ test_environment = get_test_environment( last_test_environment )
+ test_environment[ 'tool_shed_database_version' ] = get_database_version( app )
+ test_environment[ 'tool_shed_mercurial_version' ] = __version__.version
+ test_environment[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
+ repository_status[ 'test_environment' ] = test_environment
+ # Check the next repository revision.
+ changeset_revision = str( repository_metadata.changeset_revision )
+ name = repository.name
+ owner = repository.user.username
+ metadata = repository_metadata.metadata
+ repository = repository_metadata.repository
+ if repository.id not in checked_repository_ids:
+ checked_repository_ids.append( repository.id )
if verbosity >= 1:
print '# -------------------------------------------------------------------------------------------'
print '# Now checking revision %s of %s, owned by %s.' % ( changeset_revision, name, owner )
# If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
# only repositories that contain tools.
- if 'tools' not in metadata_record.metadata:
- continue
- else:
+ tool_dicts = repository_metadata.get( 'tools', None )
+ """
+ "{"tools":
+ [{"add_to_tool_panel": true,
+ "description": "data on any column using simple expressions",
+ "guid": "localhost:9009/repos/iuc/filtering_with_empty_test_tags/Filter1/1.1.0",
+ "id": "Filter1",
+ "name": "Filter",
+ "requirements": [],
+ "tests":
+ [{"inputs": [], "name": "Test-1", "outputs": [], "required_files": []},
+ {"inputs": [], "name": "Test-2", "outputs": [], "required_files": []}],
+ "tool_config": "database/community_files/000/repo_6/filtering.xml",
+ "tool_type": "default",
+ "version": "1.1.0",
+ "version_string_cmd": null}]}"
+ """
+ if tool_dicts is not None:
has_test_data = False
testable_revision_found = False
# Clone the repository up to the changeset revision we're checking.
- repo_dir = metadata_record.repository.repo_path( app )
+ repo_dir = repository_metadata.repository.repo_path( app )
repo = hg.repository( get_configured_ui(), repo_dir )
work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-cafr" )
cloned_ok, error_message = clone_repository( repo_dir, work_dir, changeset_revision )
if cloned_ok:
# Iterate through all the directories in the cloned changeset revision and determine whether there's a
- # directory named test-data. If this directory is not present, update the metadata record for the changeset
+ # directory named test-data. If this directory is not present update the metadata record for the changeset
# revision we're checking.
for root, dirs, files in os.walk( work_dir ):
if '.hg' in dirs:
@@ -266,18 +180,18 @@
test_data_path = os.path.join( root, dirs[ dirs.index( 'test-data' ) ] )
break
if verbosity >= 1:
- if not has_test_data:
+ if has_test_data:
+ print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ else:
print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
- else:
- print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
( changeset_revision, name, owner )
- # Loop through all the tools in this metadata record, checking each one for defined functional tests.
- for tool_metadata in metadata_record.metadata[ 'tools' ]:
+ # Inspect each tool_dict for defined functional tests.
+ for tool_dict in tool_dicts:
tool_count += 1
- tool_id = tool_metadata[ 'id' ]
- tool_version = tool_metadata[ 'version' ]
- tool_guid = tool_metadata[ 'guid' ]
+ tool_id = tool_dict[ 'id' ]
+ tool_version = tool_dict[ 'version' ]
+ tool_guid = tool_dict[ 'guid' ]
if verbosity >= 2:
print "# Checking tool ID '%s' in changeset revision %s of %s." % \
( tool_id, changeset_revision, name )
@@ -285,7 +199,7 @@
# not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
# automated functional test framework produces.
tool_has_tests = False
- defined_test_dicts = tool_metadata.get( 'tests', None )
+ defined_test_dicts = tool_dict.get( 'tests', None )
if defined_test_dicts is not None:
# We need to inspect the <test> tags because the following tags...
# <tests>
@@ -321,7 +235,7 @@
missing_test_files = []
has_test_files = False
if tool_has_tests and has_test_data:
- missing_test_files = check_for_missing_test_files( tool_metadata[ 'tests' ], test_data_path )
+ missing_test_files = check_for_missing_test_files( defined_test_dicts, test_data_path )
if missing_test_files:
if verbosity >= 2:
print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
@@ -337,90 +251,7 @@
if missing_test_files:
failure_reason += 'One or more test files are missing for tool %s: %s' % ( tool_id, ', '.join( missing_test_files ) )
problem_found = True
- test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid,
- missing_components=failure_reason )
- # The repository_metadata.tool_test_results attribute should always have the following structure:
- # {
- # "test_environment":
- # {
- # "galaxy_revision": "9001:abcd1234",
- # "galaxy_database_version": "114",
- # "tool_shed_revision": "9001:abcd1234",
- # "tool_shed_mercurial_version": "2.3.1",
- # "tool_shed_database_version": "17",
- # "python_version": "2.7.2",
- # "architecture": "x86_64",
- # "system": "Darwin 12.2.0"
- # },
- # "passed_tests":
- # [
- # {
- # "test_id": "The test ID, generated by twill",
- # "tool_id": "The tool ID that was tested",
- # "tool_version": "The tool version that was tested",
- # },
- # ]
- # "failed_tests":
- # [
- # {
- # "test_id": "The test ID, generated by twill",
- # "tool_id": "The tool ID that was tested",
- # "tool_version": "The tool version that was tested",
- # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
- # "traceback": "The captured traceback."
- # },
- # ]
- # "installation_errors":
- # {
- # 'tool_dependencies':
- # [
- # {
- # 'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
- # 'name': 'Name of the tool dependency.',
- # 'version': 'Version if this is a package, otherwise blank.',
- # 'error_message': 'The error message returned when installation was attempted.',
- # },
- # ],
- # 'repository_dependencies':
- # [
- # {
- # 'tool_shed': 'The tool shed that this repository was installed from.',
- # 'name': 'The name of the repository that failed to install.',
- # 'owner': 'Owner of the failed repository.',
- # 'changeset_revision': 'Changeset revision of the failed repository.',
- # 'error_message': 'The error message that was returned when the repository failed to install.',
- # },
- # ],
- # 'current_repository':
- # [
- # {
- # 'tool_shed': 'The tool shed that this repository was installed from.',
- # 'name': 'The name of the repository that failed to install.',
- # 'owner': 'Owner of the failed repository.',
- # 'changeset_revision': 'Changeset revision of the failed repository.',
- # 'error_message': 'The error message that was returned when the repository failed to install.',
- # },
- # ],
- # {
- # "name": "The name of the repository.",
- # "owner": "The owner of the repository.",
- # "changeset_revision": "The changeset revision of the repository.",
- # "error_message": "The message stored in tool_dependency.error_message."
- # },
- # }
- # "missing_test_components":
- # [
- # {
- # "tool_id": "The tool ID that missing components.",
- # "tool_version": "The version of the tool."
- # "tool_guid": "The guid of the tool."
- # "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
- # },
- # ]
- # }
- #
- # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
- # than the list relevant to what it is testing.
+ test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid, missing_components=failure_reason )
# Only append this error dict if it hasn't already been added.
if problem_found:
if test_errors not in missing_test_components:
@@ -443,8 +274,7 @@
if 'missing_components' in invalid_test:
print '# %s' % invalid_test[ 'missing_components' ]
if not info_only:
- # If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
- # on which this script was run.
+ # The repository_metadata.time_last_tested column is not changed by this script since no testing is performed here.
if missing_test_components:
# If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been
# found in this revision, and:
@@ -456,14 +286,13 @@
# changeset revision will be created, either of which will be automatically checked and flagged as appropriate.
# In the install and test script, this behavior is slightly different, since we do want to always run functional
# tests on the most recent downloadable changeset revision.
- if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ) and not testable_revision_found:
- metadata_record.do_not_test = True
- metadata_record.tools_functionally_correct = False
- metadata_record.missing_test_components = True
+ if should_set_do_not_test_flag( app, repository_metadata.repository, changeset_revision ) and not testable_revision_found:
+ repository_metadata.do_not_test = True
+ repository_metadata.tools_functionally_correct = False
+ repository_metadata.missing_test_components = True
repository_status[ 'missing_test_components' ] = missing_test_components
- metadata_record.tool_test_results = repository_status
- metadata_record.time_last_tested = datetime.utcnow()
- app.sa_session.add( metadata_record )
+ repository_metadata.tool_test_results = repository_status
+ app.sa_session.add( repository_metadata )
app.sa_session.flush()
stop = time.time()
print '# -------------------------------------------------------------------------------------------'
@@ -477,14 +306,6 @@
print "# Elapsed time: ", stop - start
print "#############################################################################"
-def get_repo_changelog_tuples( repo_path ):
- repo = hg.repository( ui.ui(), repo_path )
- changelog_tuples = []
- for changeset in repo.changelog:
- ctx = repo.changectx( changeset )
- changelog_tuples.append( ( ctx.rev(), str( ctx ) ) )
- return changelog_tuples
-
def check_for_missing_test_files( test_definition, test_data_path ):
'''Process the tool's functional test definitions and check for each file specified as an input or output.'''
missing_test_files = []
@@ -500,6 +321,14 @@
missing_test_files.append( required_file )
return missing_test_files
+def get_repo_changelog_tuples( repo_path ):
+ repo = hg.repository( ui.ui(), repo_path )
+ changelog_tuples = []
+ for changeset in repo.changelog:
+ ctx = repo.changectx( changeset )
+ changelog_tuples.append( ( ctx.rev(), str( ctx ) ) )
+ return changelog_tuples
+
def is_most_recent_downloadable_revision( app, repository, changeset_revision, downloadable_revisions ):
# Get a list of ( numeric revision, changeset hash ) tuples from the changelog.
changelog = get_repo_changelog_tuples( repository.repo_path( app ) )
@@ -513,6 +342,41 @@
return True
return False
+def main():
+ '''Script that checks repositories to see if the tools contained within them have functional tests defined.'''
+ parser = OptionParser()
+ parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
+ parser.add_option( "-s", "--section", action="store", dest="section", default='server:main',
+                       help=".ini file section from which to extract the host and port" )
+ parser.add_option( "-v", "--verbose", action="count", dest="verbosity", default=1, help="Control the amount of detail in the log output.")
+ parser.add_option( "--verbosity", action="store", dest="verbosity", metavar='VERBOSITY', type="int",
+ help="Control the amount of detail in the log output. --verbosity=1 is the same as -v" )
+ ( options, args ) = parser.parse_args()
+ try:
+ ini_file = args[ 0 ]
+ except IndexError:
+ print "Usage: python %s <tool shed .ini file> [options]" % sys.argv[ 0 ]
+ exit( 127 )
+ config_parser = ConfigParser.ConfigParser( {'here':os.getcwd() } )
+ config_parser.read( ini_file )
+ config_dict = {}
+ for key, value in config_parser.items( "app:main" ):
+ config_dict[key] = value
+ config = tool_shed_config.Configuration( **config_dict )
+ config_section = options.section
+ now = strftime( "%Y-%m-%d %H:%M:%S" )
+
+ print "#############################################################################"
+ print "# %s - Checking repositories for tools with functional tests." % now
+ print "# This tool shed is configured to listen on %s:%s." % ( config_parser.get( config_section, 'host' ),
+ config_parser.get( config_section, 'port' ) )
+ app = FlagRepositoriesApplication( config )
+ if options.info_only:
+ print "# Displaying info only ( --info_only )"
+ if options.verbosity:
+ print "# Displaying extra information ( --verbosity = %d )" % options.verbosity
+ check_and_flag_repositories( app, info_only=options.info_only, verbosity=options.verbosity )
+
def should_set_do_not_test_flag( app, repository, changeset_revision ):
'''
Returns True if:
@@ -524,11 +388,11 @@
flagged as appropriate. In the install and test script, this behavior is slightly different, since we do want to always run functional tests
on the most recent downloadable changeset revision.
'''
- metadata_records = app.sa_session.query( app.model.RepositoryMetadata ) \
+ repository_revisions = app.sa_session.query( app.model.RepositoryMetadata ) \
.filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
app.model.RepositoryMetadata.table.c.repository_id == repository.id ) ) \
.all()
- downloadable_revisions = [ metadata_record.changeset_revision for metadata_record in metadata_records ]
+ downloadable_revisions = [ repository_metadata.changeset_revision for repository_metadata in repository_revisions ]
is_latest_revision = is_most_recent_downloadable_revision( app, repository, changeset_revision, downloadable_revisions )
if len( downloadable_revisions ) == 1:
return True
@@ -539,26 +403,87 @@
else:
return False
-
-class FlagRepositoriesApplication( object ):
- """Encapsulates the state of a Universe application"""
- def __init__( self, config ):
- if config.database_connection is False:
- config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % config.database
- # Setup the database engine and ORM
- self.model = galaxy.webapps.tool_shed.model.mapping.init( config.file_path, config.database_connection, engine_options={}, create_tables=False )
- self.hgweb_config_manager = self.model.hgweb_config_manager
- self.hgweb_config_manager.hgweb_config_dir = config.hgweb_config_dir
- print "# Using configured hgweb.config file: ", self.hgweb_config_manager.hgweb_config
- @property
- def sa_session( self ):
- """
- Returns a SQLAlchemy session -- currently just gets the current
- session from the threadlocal session context, but this is provided
- to allow migration toward a more SQLAlchemy 0.4 style of use.
- """
- return self.model.context.current
- def shutdown( self ):
- pass
-
-if __name__ == "__main__": main()
+if __name__ == "__main__":
+ # The repository_metadata.tool_test_results json value should have the following structure:
+ # {
+ # "test_environment":
+ # {
+ # "galaxy_revision": "9001:abcd1234",
+ # "galaxy_database_version": "114",
+ # "tool_shed_revision": "9001:abcd1234",
+ # "tool_shed_mercurial_version": "2.3.1",
+ # "tool_shed_database_version": "17",
+ # "python_version": "2.7.2",
+ # "architecture": "x86_64",
+ # "system": "Darwin 12.2.0"
+ # },
+ # "passed_tests":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # },
+ # ]
+ # "failed_tests":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # "traceback": "The captured traceback."
+ # },
+ # ]
+ # "installation_errors":
+ # {
+ # 'tool_dependencies':
+ # [
+ # {
+ # 'type': 'Type of tool dependency, e.g. package, set_environment, etc.',
+ # 'name': 'Name of the tool dependency.',
+ # 'version': 'Version if this is a package, otherwise blank.',
+ # 'error_message': 'The error message returned when installation was attempted.',
+ # },
+ # ],
+ # 'repository_dependencies':
+ # [
+ # {
+ # 'tool_shed': 'The tool shed that this repository was installed from.',
+ # 'name': 'The name of the repository that failed to install.',
+ # 'owner': 'Owner of the failed repository.',
+ # 'changeset_revision': 'Changeset revision of the failed repository.',
+ # 'error_message': 'The error message that was returned when the repository failed to install.',
+ # },
+ # ],
+ # 'current_repository':
+ # [
+ # {
+ # 'tool_shed': 'The tool shed that this repository was installed from.',
+ # 'name': 'The name of the repository that failed to install.',
+ # 'owner': 'Owner of the failed repository.',
+ # 'changeset_revision': 'Changeset revision of the failed repository.',
+ # 'error_message': 'The error message that was returned when the repository failed to install.',
+ # },
+ # ],
+ # {
+ # "name": "The name of the repository.",
+ # "owner": "The owner of the repository.",
+ # "changeset_revision": "The changeset revision of the repository.",
+ # "error_message": "The message stored in tool_dependency.error_message."
+ # },
+ # }
+ # "missing_test_components":
+ # [
+ # {
+    #                 "tool_id": "The tool ID that is missing components.",
+ # "tool_version": "The version of the tool."
+ # "tool_guid": "The guid of the tool."
+ # "missing_components": "Which components are missing, e.g. the test data filename, or the test-data directory."
+ # },
+ # ]
+ # }
+ #
+ # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
+ # than the list relevant to what it is testing.
+ main()
diff -r e982fcaaaab558e6d4ecb3750d2ec721d5fd861d -r d448ed76962c8b36fde83e5cd8dec91e97c9d812 lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
--- a/lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
+++ b/lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
@@ -86,7 +86,7 @@
now = strftime( "%Y-%m-%d %H:%M:%S" )
print "#############################################################################"
- print "# %s - Validating repositories of type %s on %s..." % ( TOOL_DEPENDENCY_DEFINITION, now, config_parser.get( config_section, 'host' ) )
+ print "# %s - Validating repositories of type %s on %s..." % ( now, TOOL_DEPENDENCY_DEFINITION, config_parser.get( config_section, 'host' ) )
print "# This tool shed is configured to listen on %s:%s" % ( config_parser.get( config_section, 'host' ), config_parser.get( config_section, 'port' ) )
app = RepositoriesApplication( config )
@@ -175,14 +175,13 @@
invalid_metadata += 1
if not info_only:
repository_metadata.tool_test_results = repository_status
- repository_metadata.time_last_tested = datetime.utcnow()
app.sa_session.add( repository_metadata )
app.sa_session.flush()
stop = time.time()
print '# -------------------------------------------------------------------------------------------'
print '# Checked %d repository revisions.' % records_checked
print '# %d revisions found with valid tool dependency definition metadata.' % valid_metadata
- print '# %d revisions found with valid tool dependency definition metadata.' % invalid_metadata
+ print '# %d revisions found with invalid tool dependency definition metadata.' % invalid_metadata
if info_only:
print '# Database not updated with any information from this run.'
print "# Elapsed time: ", stop - start
diff -r e982fcaaaab558e6d4ecb3750d2ec721d5fd861d -r d448ed76962c8b36fde83e5cd8dec91e97c9d812 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -120,8 +120,8 @@
def get_webapp_global_conf():
"""Get the global_conf dictionary sent as the first argument to app_factory.
"""
- # (was originally sent 'dict()') - nothing here for now except static settings
- global_conf = dict()
+ # (was originally sent '{}') - nothing here for now except static settings
+ global_conf = {}
if STATIC_ENABLED:
global_conf.update( get_static_settings() )
return global_conf
@@ -229,7 +229,7 @@
class ReportResults( Plugin ):
'''Simple Nose plugin to record the IDs of all tests run, regardless of success.'''
name = "reportresults"
- passed = dict()
+ passed = {}
def options( self, parser, env=os.environ ):
super( ReportResults, self ).options( parser, env=env )
@@ -479,7 +479,7 @@
tool_test_results = repository_metadata.get( 'tool_test_results', {} )
# If, for some reason, the script that checks for functional tests has not run, tool_test_results will be None.
if tool_test_results is None:
- return dict()
+ return {}
return tool_test_results
def install_repository( repository_info_dict ):
@@ -581,14 +581,16 @@
return exclude_list
def register_test_result( url, metadata_id, test_results_dict, repository_info_dict, params ):
- '''
- Update the repository metadata tool_test_results and appropriate flags using the API.
- '''
+ '''Update the repository metadata tool_test_results and appropriate flags using the API.'''
params[ 'tool_test_results' ] = test_results_dict
if '-info_only' in sys.argv or 'GALAXY_INSTALL_TEST_INFO_ONLY' in os.environ:
return {}
else:
- return update( tool_shed_api_key, '%s' % ( suc.url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', metadata_id ) ), params, return_formatted=False )
+ url = '%s' % ( suc.url_join( galaxy_tool_shed_url,'api', 'repository_revisions', metadata_id ) )
+ return update( tool_shed_api_key,
+ url,
+ params,
+ return_formatted=False )
def remove_generated_tests( app ):
# Delete any configured tool functional tests from the test_toolbox.__dict__, otherwise nose will find them
@@ -644,7 +646,7 @@
return result, test_config.plugins._plugins
def show_summary_output( repository_info_dicts ):
- repositories_by_owner = dict()
+ repositories_by_owner = {}
for repository in repository_info_dicts:
if repository[ 'owner' ] not in repositories_by_owner:
repositories_by_owner[ repository[ 'owner' ] ] = []
@@ -706,6 +708,7 @@
def main():
# ---- Configuration ------------------------------------------------------
+ # TODO: refactor this very large main method into smaller, more maintainable components.
galaxy_test_host = os.environ.get( 'GALAXY_INSTALL_TEST_HOST', default_galaxy_test_host )
galaxy_test_port = os.environ.get( 'GALAXY_INSTALL_TEST_PORT', str( default_galaxy_test_port_max ) )
@@ -730,7 +733,7 @@
galaxy_migrated_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_MIGRATED_TOOL_CONF', os.path.join( galaxy_test_tmp_dir, 'test_migrated_tool_conf.xml' ) )
galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF', os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) )
galaxy_shed_tools_dict = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE', os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( dict() ) )
+ file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
if 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' in os.environ:
tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' )
else:
@@ -946,8 +949,8 @@
"user_id": "529fd61ab1c6cc36"
}
"""
- repository_status = dict()
- params = dict()
+ repository_status = {}
+ params = {}
repository_id = str( repository_info_dict.get( 'repository_id', None ) )
changeset_revision = str( repository_info_dict.get( 'changeset_revision', None ) )
metadata_revision_id = repository_info_dict.get( 'id', None )
@@ -1027,7 +1030,7 @@
# Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
# does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
# and the tools_functionally_correct flag to False, as well as updating tool_test_results.
- file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( dict() ) )
+ file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( {} ) )
has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file, from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
# The repository_status dict should always have the following structure:
# {
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0