15 new commits in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/commits/d1aad7b7405c/ Changeset: d1aad7b7405c User: jmchilton Date: 2014-09-03 14:43:25 Summary: Small tweaks to some functional test tools. Add data labels so the tools work in the workflow editor, and add conditional switches to some tools with collection params and multiple input data parameters to test some state-y logic in the workflow editor. Affected #: 4 files diff -r 8e699330dfcd889b02acccb058f16b28c95ac848 -r d1aad7b7405c82c9898e21662affd9e9c9ac7e3b test/functional/tools/collection_mixed_param.xml --- a/test/functional/tools/collection_mixed_param.xml +++ b/test/functional/tools/collection_mixed_param.xml @@ -3,8 +3,19 @@ cat #for $f in $f1# ${f} #end for# $f2 >> $out1; </command><inputs> - <param name="f1" type="data_collection" collection_type="paired" /> - <param name="f2" type="data" format="txt" /> + <param name="f1" type="data_collection" collection_type="paired" label="collection param" /> + <param name="f2" type="data" format="txt" label="data param" /> + <conditional name="advanced"> + <param name="full" type="select" label="Parameter Settings"> + <option value="no">Use defaults</option> + <option value="yes">Full parameter list</option> + </param> + <when value="yes"> + <param name="advanced_threshold" type="integer" value="8" /> + </when> + <when value="no"> + </when> + </conditional></inputs><outputs><data format="txt" name="out1" /> diff -r 8e699330dfcd889b02acccb058f16b28c95ac848 -r d1aad7b7405c82c9898e21662affd9e9c9ac7e3b test/functional/tools/collection_paired_test.xml --- a/test/functional/tools/collection_paired_test.xml +++ b/test/functional/tools/collection_paired_test.xml @@ -3,7 +3,7 @@ cat $f1.forward $f1['reverse'] >> $out1; </command><inputs> - <param name="f1" type="data_collection" collection_type="paired" /> + <param name="f1" type="data_collection" collection_type="paired" label="Input pair" /></inputs><outputs><data format="txt" name="out1" /> diff -r 8e699330dfcd889b02acccb058f16b28c95ac848 -r d1aad7b7405c82c9898e21662affd9e9c9ac7e3b test/functional/tools/multi_data_param.xml --- a/test/functional/tools/multi_data_param.xml +++ b/test/functional/tools/multi_data_param.xml @@ -4,8 +4,19 @@ cat #for $f in $f2# ${f} #end for# >> $out2 </command><inputs> - <param name="f1" type="data" format="txt" multiple="true" /> - <param name="f2" type="data" format="txt" multiple="true" /> + <param name="f1" type="data" format="txt" multiple="true" label="Data 1" /> + <param name="f2" type="data" format="txt" multiple="true" label="Data 2" /> + <conditional name="advanced"> + <param name="full" type="select" label="Parameter Settings"> + <option value="no">Use defaults</option> + <option value="yes">Full parameter list</option> + </param> + <when value="yes"> + <param name="advanced_threshold" type="integer" value="8" /> + </when> + <when value="no"> + </when> + </conditional></inputs><outputs><data format="txt" name="out1" /> diff -r 8e699330dfcd889b02acccb058f16b28c95ac848 -r d1aad7b7405c82c9898e21662affd9e9c9ac7e3b test/qunit/tests/workflow_editor_tests.js --- a/test/qunit/tests/workflow_editor_tests.js +++ b/test/qunit/tests/workflow_editor_tests.js @@ -803,11 +803,12 @@ return node; }, addOutput: function( terminal, connected ) { + var self = this; var connectedOutput = this.newOutputTerminal(); - var inputTerminal = this.newInputTerminal(); var node = terminal.node; if( connected ) { with_workflow_global( function() { + var inputTerminal = self.newInputTerminal(); new Connector(
inputTerminal, connectedOutput ); } ); } @@ -818,10 +819,11 @@ return this.addOutput( terminal, true ); }, addConnectedInput: function( terminal ) { + var self = this; var connectedInput = this.newInputTerminal(); - var outputTerminal = this.newOutputTerminal(); var node = terminal.node; with_workflow_global( function() { + var outputTerminal = self.newOutputTerminal(); new Connector( connectedInput, outputTerminal ); } ); this._addTerminalTo( connectedInput, node.input_terminals ); https://bitbucket.org/galaxy/galaxy-central/commits/b63d8c689f25/ Changeset: b63d8c689f25 User: jmchilton Date: 2014-09-03 14:43:25 Summary: Add functional test tool demonstrating special parameters. :( Affected #: 2 files diff -r d1aad7b7405c82c9898e21662affd9e9c9ac7e3b -r b63d8c689f25c7423b72c11455703a90b64ff42d test/functional/tools/samples_tool_conf.xml --- a/test/functional/tools/samples_tool_conf.xml +++ b/test/functional/tools/samples_tool_conf.xml @@ -18,6 +18,7 @@ <tool file="parallelism_optional.xml" /><tool file="implicit_default_conds.xml" /><tool file="multi_data_param.xml" /> + <tool file="special_params.xml" /><tool file="collection_paired_test.xml" /><tool file="collection_nested_test.xml" /><tool file="collection_mixed_param.xml" /> diff -r d1aad7b7405c82c9898e21662affd9e9c9ac7e3b -r b63d8c689f25c7423b72c11455703a90b64ff42d test/functional/tools/special_params.xml --- /dev/null +++ b/test/functional/tools/special_params.xml @@ -0,0 +1,36 @@ +<tool id="special_params" name="special_params" version="1.0.0"> + <command>echo $__root_dir__ > out_root_dir; + echo $__datatypes_config__ > out_datatypes_config; + echo $__admin_users__ > out_admin_users; + echo $__user_email__ > out_user_email + </command> + <inputs> + <param name="ignored" type="integer" value="0" /> + </inputs> + <outputs> + <data format="txt" name="out_root_dir" from_work_dir="out_root_dir" /> + <data format="txt" name="out_datatypes_config" from_work_dir="out_datatypes_config" /> + <data format="txt" name="out_admin_users" from_work_dir="out_admin_users" /> + <data format="txt" name="out_user_email" from_work_dir="out_user_email" /> + </outputs> + <tests> + <test> + <output name="out_root_dir"> + <!-- Is an absolute path. --> + <assert_contents><has_line_matching expression="^\/.*$" /></assert_contents> + </output> + <output name="out_datatypes_config"> + <!-- Is an absolute path. --> + <assert_contents><has_line_matching expression="^\/.*$" /></assert_contents> + </output> + <output name="out_admin_users"> + <!-- Has at least one e-mail address. --> + <assert_contents><has_text text="@" /></assert_contents> + </output> + <output name="out_user_email"> + <!-- Looks like an e-mail address. --> + <assert_contents><has_line_matching expression="[^@]+@[^@]+\.[^@]+" /></assert_contents> + </output> + </test> + </tests> +</tool> https://bitbucket.org/galaxy/galaxy-central/commits/7dc4090783da/ Changeset: 7dc4090783da User: jmchilton Date: 2014-09-03 14:43:25 Summary: Add functional test tool demonstrating setting output dataset formats. Directly setting the format attribute, setting the format to 'input' (ambiguous, non-deterministic and should be deprecated IMO), using format_source, and using change_format actions.
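For reference, these mechanisms resolve in a fixed order - the declared format attribute first, then 'input', then format_source, then change_format. A rough Python sketch of that precedence (illustrative only; the function and parameter names here are hypothetical, not Galaxy's API):

    def resolve_output_format(declared, last_input_ext,
                              format_source_ext=None, change_format_ext=None):
        # Illustrative helper only - shows precedence, not real Galaxy code.
        ext = declared
        if ext == "input":
            # 'input' resolves to the extension of the last input on the form,
            # which is why it is ambiguous when inputs have differing types.
            ext = last_input_ext
        if format_source_ext is not None:
            # format_source copies the extension of one named input dataset.
            ext = format_source_ext
        if change_format_ext is not None:
            # A matching <change_format><when> clause overrides everything else.
            ext = change_format_ext
        return ext

    print(resolve_output_format("input", "fastqsanger"))  # -> fastqsanger

The determine_output_format helper extracted later in this series (changeset b7c657b77647) applies the same resolution order to the real tool objects.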
Affected #: 3 files diff -r b63d8c689f25c7423b72c11455703a90b64ff42d -r 7dc4090783daea490af1e8b2ad41a3573eaa9eba test/functional/tools/output_format.xml --- /dev/null +++ b/test/functional/tools/output_format.xml @@ -0,0 +1,72 @@ +<tool id="output_format" name="output_format" version="1.0.0"> + <command> + echo "test" > 1; + echo "test" > 2; + echo "test" > 3; + echo "test" > 4; + echo "test" > 5; + </command> + <inputs> + <param name="input_data_1" type="data" format="data" /> + <param name="input_data_2" type="data" format="data" /> + <param name="input_text" type="text" value="1" /> + </inputs> + <outputs> + <data format="txt" from_work_dir="1" name="direct_output" /> + <!-- TODO: fixme, following input gets random type fastqsanger or + fastqsolexa. --> + <data format="input" from_work_dir="2" name="input_based_output" /> + <data format="txt" from_work_dir="3" name="format_source_1_output" format_source="input_data_1" /> + <data format="txt" from_work_dir="4" name="format_source_2_output" format_source="input_data_2" /> + <data format="txt" from_work_dir="5" name="change_format_output"> + <change_format> + <when input="input_text" value="foo" format="fastqsolexa" /> + <when input="input_text" value="bar" format="fastqillumina" /> + </change_format> + </data> + </outputs> + <tests> + <test> + <param name="input_data_1" value="1.fastqsanger" ftype="fastqsanger" /> + <param name="input_data_2" value="1.fastqsolexa" ftype="fastqsolexa" /> + <param name="input_text" value="foo" /> + <output name="direct_output" ftype="txt"> + <assert_contents><has_line line="test" /></assert_contents> + </output> + <!-- In this case input_based_output ftype is "randomly" either + fastqsanger or fastqsolexa --> + <output name="format_source_1_output" ftype="fastqsanger"> + <assert_contents><has_line line="test" /></assert_contents> + </output> + <output name="format_source_2_output" ftype="fastqsolexa"> + <assert_contents><has_line line="test" /></assert_contents> + </output> + <!-- input_text == foo => format set to fastqsolexa --> + <output name="change_format_output" ftype="fastqsolexa"> + <assert_contents><has_line line="test" /></assert_contents> + </output> + </test> + <test> + <param name="input_data_1" value="1.fastqsanger" ftype="fastqsanger" /> + <param name="input_data_2" value="1.fastqsanger" ftype="fastqsanger" /> + <param name="input_text" value="bar" /> + <output name="input_based_output" ftype="fastqsanger"> + <assert_contents><has_line line="test" /></assert_contents> + </output> + <!-- input_text == bar => format set to fastqillumina --> + <output name="change_format_output" ftype="fastqillumina"> + <assert_contents><has_line line="test" /></assert_contents> + </output> + </test> + <test> + <param name="input_data_1" value="1.fastqsanger" ftype="fastqsanger" /> + <param name="input_data_2" value="1.fastqsanger" ftype="fastqsanger" /> + <param name="input_text" value="not_foo_or_bar" /> + <!-- input_text doesn't match any when, default to explicitly declared + type.
--> + <output name="change_format_output" ftype="txt"> + <assert_contents><has_line line="test" /></assert_contents> + </output> + </test> + </tests> +</tool> diff -r b63d8c689f25c7423b72c11455703a90b64ff42d -r 7dc4090783daea490af1e8b2ad41a3573eaa9eba test/functional/tools/sample_datatypes_conf.xml --- a/test/functional/tools/sample_datatypes_conf.xml +++ b/test/functional/tools/sample_datatypes_conf.xml @@ -4,5 +4,11 @@ <datatype extension="velvet" type="galaxy.datatypes.assembly:Velvet" display_in_upload="true"/><datatype extension="txt" type="galaxy.datatypes.data:Text" display_in_upload="true"/><datatype extension="tabular" type="galaxy.datatypes.tabular:Tabular" display_in_upload="true"/> + <datatype extension="fasta" type="galaxy.datatypes.sequence:Fasta" display_in_upload="true" /> + <datatype extension="fastq" type="galaxy.datatypes.sequence:Fastq" display_in_upload="true" /> + <datatype extension="fastqsanger" type="galaxy.datatypes.sequence:FastqSanger" display_in_upload="true" /> + <datatype extension="fastqsolexa" type="galaxy.datatypes.sequence:FastqSolexa" display_in_upload="true" /> + <datatype extension="fastqcssanger" type="galaxy.datatypes.sequence:FastqCSSanger" display_in_upload="true" /> + <datatype extension="fastqillumina" type="galaxy.datatypes.sequence:FastqIllumina" display_in_upload="true" /></registration></datatypes> \ No newline at end of file diff -r b63d8c689f25c7423b72c11455703a90b64ff42d -r 7dc4090783daea490af1e8b2ad41a3573eaa9eba test/functional/tools/samples_tool_conf.xml --- a/test/functional/tools/samples_tool_conf.xml +++ b/test/functional/tools/samples_tool_conf.xml @@ -12,6 +12,7 @@ <tool file="composite_output.xml" /><tool file="metadata.xml" /><tool file="output_order.xml" /> + <tool file="output_format.xml" /><tool file="disambiguate_repeats.xml" /><tool file="min_repeat.xml" /><tool file="parallelism.xml" /> https://bitbucket.org/galaxy/galaxy-central/commits/3632736a88ea/ Changeset: 3632736a88ea User: jmchilton Date: 2014-09-03 14:43:25 Summary: Add functional test tool demonstrating output filters. Add new 'expect_num_outputs' attribute to the 'test' element in tool XML to verify the number of outputs produced - needed to test output filtering. Affected #: 5 files diff -r 7dc4090783daea490af1e8b2ad41a3573eaa9eba -r 3632736a88ea3541bf64d8ba616e65db94c42b84 lib/galaxy/tools/test.py --- a/lib/galaxy/tools/test.py +++ b/lib/galaxy/tools/test.py @@ -50,6 +50,9 @@ self.required_files = [] self.inputs = [] self.outputs = [] + self.num_outputs = None # By default do not make assertions on + # the number of outputs - but to test filtering + # allow explicitly stating the number of outputs. self.error = False self.exception = None @@ -122,6 +125,10 @@ self.__parse_inputs_elems( test_elem, i ) self.outputs = parse_output_elems( test_elem ) + num_outputs = test_elem.get( 'expect_num_outputs', None ) + if num_outputs: + num_outputs = int( num_outputs ) + self.num_outputs = num_outputs except Exception, e: self.error = True self.exception = e diff -r 7dc4090783daea490af1e8b2ad41a3573eaa9eba -r 3632736a88ea3541bf64d8ba616e65db94c42b84 test/base/interactor.py --- a/test/base/interactor.py +++ b/test/base/interactor.py @@ -236,6 +236,9 @@ for output in datasets_object[ 'outputs' ]: outputs_dict[ index ] = outputs_dict[ output.get("output_name") ] = output index += 1 + # Adding each item twice (once with index for backward compat), + # overriding length to reflect the real number of outputs.
+ outputs_dict.__len__ = lambda: index return outputs_dict def output_hid( self, output_data ): diff -r 7dc4090783daea490af1e8b2ad41a3573eaa9eba -r 3632736a88ea3541bf64d8ba616e65db94c42b84 test/functional/test_toolbox.py --- a/test/functional/test_toolbox.py +++ b/test/functional/test_toolbox.py @@ -49,7 +49,13 @@ def _verify_outputs( self, testdef, history, shed_tool_id, data_list, galaxy_interactor ): maxseconds = testdef.maxseconds - + if testdef.num_outputs is not None: + expected = testdef.num_outputs + actual = len( data_list ) + if expected != actual: + message_template = "Incorrect number of outputs - expected %d, found %s." + message = message_template % ( expected, actual ) + raise Exception( message ) for output_index, output_tuple in enumerate(testdef.outputs): # Get the correct hid name, outfile, attributes = output_tuple diff -r 7dc4090783daea490af1e8b2ad41a3573eaa9eba -r 3632736a88ea3541bf64d8ba616e65db94c42b84 test/functional/tools/output_filter.xml --- /dev/null +++ b/test/functional/tools/output_filter.xml @@ -0,0 +1,46 @@ +<tool id="output_filter" name="output_filter" version="1.0.0"> + <command> + echo "test" > 1; + echo "test" > 2; + echo "test" > 3; + echo "test" > 4; + echo "test" > 5; + </command> + <inputs> + <param name="produce_out_1" type="boolean" truevalue="true" falsevalue="false" checked="False" label="Do Filter 1" /> + <param name="filter_text_1" type="text" value="1" /> + </inputs> + <outputs> + <data format="txt" from_work_dir="1" name="out_1"> + <filter>produce_out_1 is True</filter> + </data> + <data format="txt" from_work_dir="2" name="out_2"> + <filter>filter_text_1 in ["foo", "bar"]</filter> + <!-- Must pass all filters... --> + <filter>filter_text_1 == "foo"</filter> + </data> + <data format="txt" from_work_dir="3" name="out_3"> + </data> + </outputs> + <tests> + <test expect_num_outputs="3"> + <param name="produce_out_1" value="true" /> + <param name="filter_text_1" value="foo" /> + <output name="out_1"><assert_contents><has_line line="test" /></assert_contents></output> + <output name="out_2"><assert_contents><has_line line="test" /></assert_contents></output> + <output name="out_3"><assert_contents><has_line line="test" /></assert_contents></output> + </test> + <test expect_num_outputs="2"> + <param name="produce_out_1" value="true" /> + <param name="filter_text_1" value="bar" /><!-- fails second filter in out2 --> + <output name="out_1"><assert_contents><has_line line="test" /></assert_contents></output> + <output name="out_3"><assert_contents><has_line line="test" /></assert_contents></output> + </test> + <test expect_num_outputs="1"> + <param name="produce_out_1" value="false" /> + <param name="filter_text_1" value="not_foo_or_bar" /> + <output name="out_3"><assert_contents><has_line line="test" /></assert_contents></output> + </test> + </tests> +</tool> + diff -r 7dc4090783daea490af1e8b2ad41a3573eaa9eba -r 3632736a88ea3541bf64d8ba616e65db94c42b84 test/functional/tools/samples_tool_conf.xml --- a/test/functional/tools/samples_tool_conf.xml +++ b/test/functional/tools/samples_tool_conf.xml @@ -13,6 +13,7 @@ <tool file="metadata.xml" /><tool file="output_order.xml" /><tool file="output_format.xml" /> + <tool file="output_filter.xml" /><tool file="disambiguate_repeats.xml" /><tool file="min_repeat.xml" /><tool file="parallelism.xml" /> https://bitbucket.org/galaxy/galaxy-central/commits/2287d2496a71/ Changeset: 2287d2496a71 User: jmchilton Date: 2014-09-03 14:43:25 Summary: PEP-8 fixes for tools/actions/__init__.py.
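For readers who don't track pycodestyle rule names, the substantive changes below - beyond dropping unused imports - are of two kinds, sketched here with made-up variables:

    data = "an output dataset"
    tool_id = 'CONVERTER_fasta_to_len'
    # E711: compare against None with 'is' / 'is not', never '==' / '!='.
    assert data is not None
    # E225: put whitespace around comparison operators.
    needs_hack = tool_id != 'CONVERTER_fasta_to_len'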
Affected #: 1 file diff -r 3632736a88ea3541bf64d8ba616e65db94c42b84 -r 2287d2496a71b05c6c595984c90721c3c743afe6 lib/galaxy/tools/actions/__init__.py --- a/lib/galaxy/tools/actions/__init__.py +++ b/lib/galaxy/tools/actions/__init__.py @@ -1,18 +1,13 @@ -import os - from galaxy.exceptions import ObjectInvalid from galaxy.model import LibraryDatasetDatasetAssociation from galaxy import model from galaxy.tools.parameters import DataToolParameter from galaxy.tools.parameters import DataCollectionToolParameter from galaxy.tools.parameters.wrapped import WrappedParameters -from galaxy.util.json import from_json_string from galaxy.util.json import to_json_string from galaxy.util.none_like import NoneDataset from galaxy.util.odict import odict from galaxy.util.template import fill_template -from galaxy.util import listify -from galaxy.util.json import to_json_string from galaxy.web import url_for import logging @@ -194,8 +189,7 @@ input_dbkey = data.dbkey # Collect chromInfo dataset and add as parameters to incoming - db_datasets = {} - ( chrom_info, db_dataset ) = trans.app.genome_builds.get_chrom_info( input_dbkey, trans=trans, custom_build_hack_get_len_from_fasta_conversion=tool.id!='CONVERTER_fasta_to_len' ) + ( chrom_info, db_dataset ) = trans.app.genome_builds.get_chrom_info( input_dbkey, trans=trans, custom_build_hack_get_len_from_fasta_conversion=tool.id != 'CONVERTER_fasta_to_len' ) if db_dataset: inp_data.update( { "chromInfo": db_dataset } ) incoming[ "chromInfo" ] = chrom_info @@ -238,7 +232,7 @@ if name in incoming: dataid = incoming[name] data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dataid ) - assert data != None + assert data is not None out_data[name] = data else: # the type should match the input https://bitbucket.org/galaxy/galaxy-central/commits/942b4bffc427/ Changeset: 942b4bffc427 User: jmchilton Date: 2014-09-03 14:43:25 Summary: Unit test for galaxy.tools.action to ensure all datasets get same object store id. Enables subsequent refactoring. Affected #: 1 file diff -r 2287d2496a71b05c6c595984c90721c3c743afe6 -r 942b4bffc4277d090e6cd788e706706f56aa1d62 test/unit/tools/test_actions.py --- a/test/unit/tools/test_actions.py +++ b/test/unit/tools/test_actions.py @@ -25,6 +25,20 @@ </tool> ''' +# Tool with two outputs - used to verify all datasets within same job get same +# object store id. 
+TWO_OUTPUTS = '''<tool id="test_tool" name="Test Tool"> + <command>echo "$param1" > $out1</command> + <inputs> + <param type="text" name="param1" value="" /> + </inputs> + <outputs> + <data name="out1" format="data" label="Output ($param1)" /> + <data name="out2" format="data" label="Output 2 ($param1)" /> + </outputs> +</tool> +''' + def test_on_text_for_names(): def assert_on_text_is( expected, *names ): @@ -80,6 +94,11 @@ ) self.assertEquals( output[ "out1" ].name, "Test Tool on data 2 and data 1" ) + def test_object_store_ids( self ): + _, output = self._simple_execute( contents=TWO_OUTPUTS ) + self.assertEquals( output[ "out1" ].name, "Output (moo)" ) + self.assertEquals( output[ "out2" ].name, "Output 2 (moo)" ) + def test_params_wrapped( self ): hda1 = self.__add_dataset() _, output = self._simple_execute( @@ -142,6 +161,14 @@ def __init__( self ): self.created_datasets = [] + self.first_create = True + self.object_store_id = "mycoolid" def create( self, dataset ): self.created_datasets.append( dataset ) + if self.first_create: + self.first_create = False + assert dataset.object_store_id is None + dataset.object_store_id = self.object_store_id + else: + assert dataset.object_store_id == self.object_store_id https://bitbucket.org/galaxy/galaxy-central/commits/8a1951d3cf45/ Changeset: 8a1951d3cf45 User: jmchilton Date: 2014-09-03 14:43:25 Summary: Small galaxy.tools.actions refactor related to object store... Breaks up the big loop in execute a little bit but will enable a much bigger breakup in subsequent refactoring because the loop will no longer have to update the object_store_id local variable - that will be tracked in a new object. Affected #: 1 file diff -r 942b4bffc4277d090e6cd788e706706f56aa1d62 -r 8a1951d3cf45c6e3b74abffe9c90c22d28fd955e lib/galaxy/tools/actions/__init__.py --- a/lib/galaxy/tools/actions/__init__.py +++ b/lib/galaxy/tools/actions/__init__.py @@ -213,7 +213,7 @@ # datasets first, then create the associations parent_to_child_pairs = [] child_dataset_names = set() - object_store_id = None + object_store_populator = ObjectStorePopulator( trans.app ) for name, output in tool.outputs.items(): for filter in output.filters: try: @@ -275,15 +275,9 @@ trans.sa_session.add( data ) trans.sa_session.flush() trans.app.security_agent.set_all_dataset_permissions( data.dataset, output_permissions ) - # Create an empty file immediately. The first dataset will be - # created in the "default" store, all others will be created in - # the same store as the first. - data.dataset.object_store_id = object_store_id - try: - trans.app.object_store.create( data.dataset ) - except ObjectInvalid: - raise Exception('Unable to create output dataset: object store is full') - object_store_id = data.dataset.object_store_id # these will be the same thing after the first output + + object_store_populator.set_object_store_id( data ) + # This may not be neccesary with the new parent/child associations data.designation = name # Copy metadata from one of the inputs if requested.
@@ -362,7 +356,7 @@ job.add_input_dataset( name, None ) for name, dataset in out_data.iteritems(): job.add_output_dataset( name, dataset ) - job.object_store_id = object_store_id + job.object_store_id = object_store_populator.object_store_id if job_params: job.params = to_json_string( job_params ) job.set_handler(tool.get_job_handler(job_params)) @@ -443,6 +437,27 @@ return name +class ObjectStorePopulator( object ): + """ Small helper for interacting with the object store and making sure all + datasets from a job end up with the same object_store_id. + """ + + def __init__( self, app ): + self.object_store = app.object_store + self.object_store_id = None + + def set_object_store_id( self, data ): + # Create an empty file immediately. The first dataset will be + # created in the "default" store, all others will be created in + # the same store as the first. + data.dataset.object_store_id = self.object_store_id + try: + self.object_store.create( data.dataset ) + except ObjectInvalid: + raise Exception('Unable to create output dataset: object store is full') + self.object_store_id = data.dataset.object_store_id # these will be the same thing after the first output + + def on_text_for_names( input_names ): # input_names may contain duplicates... this is because the first value in # multiple input dataset parameters will appear twice once as param_name https://bitbucket.org/galaxy/galaxy-central/commits/3f2c5f313087/ Changeset: 3f2c5f313087 User: jmchilton Date: 2014-09-03 14:43:25 Summary: Refactor tool action loop allowing reuse of job dataset creation logic. Reusing this logic will be useful in the context of creating datasets belonging to output collections. This also breaks up the big loop slightly and allows a small reduction in cyclomatic complexity in subsequent refactoring. Affected #: 1 file diff -r 8a1951d3cf45c6e3b74abffe9c90c22d28fd955e -r 3f2c5f313087c63eabf8e9e251d3c5fe03db9868 lib/galaxy/tools/actions/__init__.py --- a/lib/galaxy/tools/actions/__init__.py +++ b/lib/galaxy/tools/actions/__init__.py @@ -214,6 +214,89 @@ parent_to_child_pairs = [] child_dataset_names = set() object_store_populator = ObjectStorePopulator( trans.app ) + + def handle_output( name, output ): + if output.parent: + parent_to_child_pairs.append( ( output.parent, name ) ) + child_dataset_names.add( name ) + ## What is the following hack for? Need to document under what + ## conditions can the following occur? (james@bx.psu.edu) + # HACK: the output data has already been created + # this happens i.e. 
as a result of the async controller + if name in incoming: + dataid = incoming[name] + data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dataid ) + assert data is not None + out_data[name] = data + else: + # the type should match the input + ext = output.format + if ext == "input": + ext = input_ext + if output.format_source is not None and output.format_source in inp_data: + try: + input_dataset = inp_data[output.format_source] + input_extension = input_dataset.ext + ext = input_extension + except Exception: + pass + + #process change_format tags + if output.change_format: + for change_elem in output.change_format: + for when_elem in change_elem.findall( 'when' ): + check = when_elem.get( 'input', None ) + if check is not None: + try: + if '$' not in check: + #allow a simple name or more complex specifications + check = '${%s}' % check + if str( fill_template( check, context=wrapped_params.params ) ) == when_elem.get( 'value', None ): + ext = when_elem.get( 'format', ext ) + except: # bad tag input value; possibly referencing a param within a different conditional when block or other nonexistent grouping construct + continue + else: + check = when_elem.get( 'input_dataset', None ) + if check is not None: + check = inp_data.get( check, None ) + if check is not None: + if str( getattr( check, when_elem.get( 'attribute' ) ) ) == when_elem.get( 'value', None ): + ext = when_elem.get( 'format', ext ) + data = trans.app.model.HistoryDatasetAssociation( extension=ext, create_dataset=True, sa_session=trans.sa_session ) + if output.hidden: + data.visible = False + # Commit the dataset immediately so it gets database assigned unique id + trans.sa_session.add( data ) + trans.sa_session.flush() + trans.app.security_agent.set_all_dataset_permissions( data.dataset, output_permissions ) + + object_store_populator.set_object_store_id( data ) + + # This may not be neccesary with the new parent/child associations + data.designation = name + # Copy metadata from one of the inputs if requested. + if output.metadata_source: + data.init_meta( copy_from=inp_data[output.metadata_source] ) + else: + data.init_meta() + # Take dbkey from LAST input + data.dbkey = str(input_dbkey) + # Set state + # FIXME: shouldn't this be NEW until the job runner changes it? + data.state = data.states.QUEUED + data.blurb = "queued" + # Set output label + data.name = self.get_output_name( output, data, tool, on_text, trans, incoming, history, wrapped_params.params, job_params ) + # Store output + out_data[ name ] = data + if output.actions: + #Apply pre-job tool-output-dataset actions; e.g. setting metadata, changing format + output_action_params = dict( out_data ) + output_action_params.update( incoming ) + output.actions.apply_action( data, output_action_params ) + # Store all changes to database + trans.sa_session.flush() + for name, output in tool.outputs.items(): for filter in output.filters: try: @@ -221,87 +304,8 @@ break # do not create this dataset except Exception, e: log.debug( 'Dataset output filter failed: %s' % e ) - else: # all filters passed - if output.parent: - parent_to_child_pairs.append( ( output.parent, name ) ) - child_dataset_names.add( name ) - ## What is the following hack for? Need to document under what - ## conditions can the following occur? (james@bx.psu.edu) - # HACK: the output data has already been created - # this happens i.e. 
as a result of the async controller - if name in incoming: - dataid = incoming[name] - data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dataid ) - assert data is not None - out_data[name] = data - else: - # the type should match the input - ext = output.format - if ext == "input": - ext = input_ext - if output.format_source is not None and output.format_source in inp_data: - try: - input_dataset = inp_data[output.format_source] - input_extension = input_dataset.ext - ext = input_extension - except Exception, e: - pass - - #process change_format tags - if output.change_format: - for change_elem in output.change_format: - for when_elem in change_elem.findall( 'when' ): - check = when_elem.get( 'input', None ) - if check is not None: - try: - if '$' not in check: - #allow a simple name or more complex specifications - check = '${%s}' % check - if str( fill_template( check, context=wrapped_params.params ) ) == when_elem.get( 'value', None ): - ext = when_elem.get( 'format', ext ) - except: # bad tag input value; possibly referencing a param within a different conditional when block or other nonexistent grouping construct - continue - else: - check = when_elem.get( 'input_dataset', None ) - if check is not None: - check = inp_data.get( check, None ) - if check is not None: - if str( getattr( check, when_elem.get( 'attribute' ) ) ) == when_elem.get( 'value', None ): - ext = when_elem.get( 'format', ext ) - data = trans.app.model.HistoryDatasetAssociation( extension=ext, create_dataset=True, sa_session=trans.sa_session ) - if output.hidden: - data.visible = False - # Commit the dataset immediately so it gets database assigned unique id - trans.sa_session.add( data ) - trans.sa_session.flush() - trans.app.security_agent.set_all_dataset_permissions( data.dataset, output_permissions ) - - object_store_populator.set_object_store_id( data ) - - # This may not be neccesary with the new parent/child associations - data.designation = name - # Copy metadata from one of the inputs if requested. - if output.metadata_source: - data.init_meta( copy_from=inp_data[output.metadata_source] ) - else: - data.init_meta() - # Take dbkey from LAST input - data.dbkey = str(input_dbkey) - # Set state - # FIXME: shouldn't this be NEW until the job runner changes it? - data.state = data.states.QUEUED - data.blurb = "queued" - # Set output label - data.name = self.get_output_name( output, data, tool, on_text, trans, incoming, history, wrapped_params.params, job_params ) - # Store output - out_data[ name ] = data - if output.actions: - #Apply pre-job tool-output-dataset actions; e.g. setting metadata, changing format - output_action_params = dict( out_data ) - output_action_params.update( incoming ) - output.actions.apply_action( data, output_action_params ) - # Store all changes to database - trans.sa_session.flush() + else: + handle_output( name, output ) # Add all the top-level (non-child) datasets to the history unless otherwise specified for name in out_data.keys(): if name not in child_dataset_names and name not in incoming: # don't add children; or already existing datasets, i.e. async created https://bitbucket.org/galaxy/galaxy-central/commits/b7c657b77647/ Changeset: b7c657b77647 User: jmchilton Date: 2014-09-03 14:43:25 Summary: Unit tests for tool action code determining output formats. With refactoring to reduce cyclomatic complexity. Also renaming 'input_ext' to what it actually is: 'random_input_ext'.
We should fix that or at least issue a huge warning if we detect 'input' could reasonably have been different things. Affected #: 2 files diff -r 3f2c5f313087c63eabf8e9e251d3c5fe03db9868 -r b7c657b77647164126ffd75ba1bb8692fb808eff lib/galaxy/tools/actions/__init__.py --- a/lib/galaxy/tools/actions/__init__.py +++ b/lib/galaxy/tools/actions/__init__.py @@ -229,39 +229,7 @@ assert data is not None out_data[name] = data else: - # the type should match the input - ext = output.format - if ext == "input": - ext = input_ext - if output.format_source is not None and output.format_source in inp_data: - try: - input_dataset = inp_data[output.format_source] - input_extension = input_dataset.ext - ext = input_extension - except Exception: - pass - - #process change_format tags - if output.change_format: - for change_elem in output.change_format: - for when_elem in change_elem.findall( 'when' ): - check = when_elem.get( 'input', None ) - if check is not None: - try: - if '$' not in check: - #allow a simple name or more complex specifications - check = '${%s}' % check - if str( fill_template( check, context=wrapped_params.params ) ) == when_elem.get( 'value', None ): - ext = when_elem.get( 'format', ext ) - except: # bad tag input value; possibly referencing a param within a different conditional when block or other nonexistent grouping construct - continue - else: - check = when_elem.get( 'input_dataset', None ) - if check is not None: - check = inp_data.get( check, None ) - if check is not None: - if str( getattr( check, when_elem.get( 'attribute' ) ) ) == when_elem.get( 'value', None ): - ext = when_elem.get( 'format', ext ) + ext = determine_output_format( output, wrapped_params.params, inp_data, input_ext ) data = trans.app.model.HistoryDatasetAssociation( extension=ext, create_dataset=True, sa_session=trans.sa_session ) if output.hidden: data.visible = False @@ -484,3 +452,51 @@ else: on_text = "" return on_text + + +def determine_output_format(output, parameter_context, input_datasets, random_input_ext): + """ Determines the output format for a dataset based on an abstract + description of the output (galaxy.tools.ToolOutput), the parameter + wrappers, a map of the input datasets (name => HDA), and the last input + extension in the tool form. + + TODO: Don't deal with XML here - move this logic into ToolOutput. + TODO: Make the input extension used deterministic instead of random.
+ """ + # the type should match the input + ext = output.format + if ext == "input": + ext = random_input_ext + if output.format_source is not None and output.format_source in input_datasets: + try: + input_dataset = input_datasets[output.format_source] + input_extension = input_dataset.ext + ext = input_extension + except Exception: + pass + + #process change_format tags + if output.change_format: + for change_elem in output.change_format: + print change_elem + for when_elem in change_elem.findall( 'when' ): + check = when_elem.get( 'input', None ) + print check + if check is not None: + try: + if '$' not in check: + #allow a simple name or more complex specifications + check = '${%s}' % check + if str( fill_template( check, context=parameter_context ) ) == when_elem.get( 'value', None ): + ext = when_elem.get( 'format', ext ) + except: # bad tag input value; possibly referencing a param within a different conditional when block or other nonexistent grouping construct + continue + else: + check = when_elem.get( 'input_dataset', None ) + if check is not None: + check = input_datasets.get( check, None ) + if check is not None: + if str( getattr( check, when_elem.get( 'attribute' ) ) ) == when_elem.get( 'value', None ): + ext = when_elem.get( 'format', ext ) + + return ext diff -r 3f2c5f313087c63eabf8e9e251d3c5fe03db9868 -r b7c657b77647164126ffd75ba1bb8692fb808eff test/unit/tools/test_actions.py --- a/test/unit/tools/test_actions.py +++ b/test/unit/tools/test_actions.py @@ -1,8 +1,11 @@ import unittest from galaxy import model +from galaxy.tools import ToolOutput from galaxy.tools.actions import DefaultToolAction from galaxy.tools.actions import on_text_for_names +from galaxy.tools.actions import determine_output_format +from elementtree.ElementTree import XML import tools_support @@ -135,6 +138,57 @@ ) +def test_determine_output_format(): + # Test simple case of explicitly defined output with no changes. + direct_output = quick_output("txt") + __assert_output_format_is("txt", direct_output) + + # Test if format is "input" (which just uses the last input on the form.) + input_based_output = quick_output("input") + __assert_output_format_is("fastq", input_based_output, [("i1", "fasta"), ("i2", "fastq")]) + + # Test using format_source (testing a couple different positions) + input_based_output = quick_output("txt", format_source="i1") + __assert_output_format_is("fasta", input_based_output, [("i1", "fasta"), ("i2", "fastq")]) + + input_based_output = quick_output("txt", format_source="i2") + __assert_output_format_is("fastq", input_based_output, [("i1", "fasta"), ("i2", "fastq")]) + + change_format_xml = """<data><change_format> + <when input="options_type.output_type" value="solexa" format="fastqsolexa" /> + <when input="options_type.output_type" value="illumina" format="fastqillumina" /> + </change_format></data>""" + + change_format_output = quick_output("fastq", change_format_xml=change_format_xml) + # Test maching a change_format when. 
+ __assert_output_format_is("fastqillumina", change_format_output, param_context={"options_type": {"output_type": "illumina"}} ) + # Test change_format but no match + __assert_output_format_is("fastq", change_format_output, param_context={"options_type": {"output_type": "sanger"}} ) + + +def __assert_output_format_is( expected, output, input_extensions=[], param_context=[] ): + inputs = {} + last_ext = "data" + for name, ext in input_extensions: + hda = model.HistoryDatasetAssociation(extension=ext) + inputs[ name ] = hda + last_ext = ext + + actual_format = determine_output_format( output, param_context, inputs, last_ext ) + assert actual_format == expected, "Actual format %s, does not match expected %s" % (actual_format, expected) + + +def quick_output(format, format_source=None, change_format_xml=None): + test_output = ToolOutput( "test_output" ) + test_output.format = format + test_output.format_source = format_source + if change_format_xml: + test_output.change_format = XML(change_format_xml) + else: + test_output.change_format = None + return test_output + + class MockTrans( object ): def __init__( self, app, history, user=None ): https://bitbucket.org/galaxy/galaxy-central/commits/3e485075985b/ Changeset: 3e485075985b User: jmchilton Date: 2014-09-03 14:43:25 Summary: Break up tool action loop by refactoring filtering logic out. Affected #: 1 file diff -r b7c657b77647164126ffd75ba1bb8692fb808eff -r 3e485075985bb492f83fdd49ee5d5042e6404d80 lib/galaxy/tools/actions/__init__.py --- a/lib/galaxy/tools/actions/__init__.py +++ b/lib/galaxy/tools/actions/__init__.py @@ -266,13 +266,7 @@ trans.sa_session.flush() for name, output in tool.outputs.items(): - for filter in output.filters: - try: - if not eval( filter.text.strip(), globals(), incoming ): - break # do not create this dataset - except Exception, e: - log.debug( 'Dataset output filter failed: %s' % e ) - else: + if not filter_output(output, incoming): handle_output( name, output ) # Add all the top-level (non-child) datasets to the history unless otherwise specified for name in out_data.keys(): @@ -454,6 +448,16 @@ return on_text +def filter_output(output, incoming): + for filter in output.filters: + try: + if not eval( filter.text.strip(), globals(), incoming ): + return True # do not create this dataset + except Exception, e: + log.debug( 'Dataset output filter failed: %s' % e ) + return False + + def determine_output_format(output, parameter_context, input_datasets, random_input_ext): """ Determines the output format for a dataset based on an abstract description of the output (galaxy.tools.ToolOutput), the parameter https://bitbucket.org/galaxy/galaxy-central/commits/ea812c16cbf9/ Changeset: ea812c16cbf9 User: jmchilton Date: 2014-09-03 14:43:25 Summary: Small optimization for implicit collection creation from tool execution. Don't write out encode identifier, add to dict, and then reload object in dataset collection service - just pass through the object in the dictionary as is. Affected #: 2 files diff -r 3e485075985bb492f83fdd49ee5d5042e6404d80 -r ea812c16cbf9674b2ba153041c3dd33bf36b7fe2 lib/galaxy/dataset_collections/__init__.py --- a/lib/galaxy/dataset_collections/__init__.py +++ b/lib/galaxy/dataset_collections/__init__.py @@ -54,8 +54,12 @@ ): """ """ - if element_identifiers: + # Trust embedded, newly created objects created by tool subsystem. 
+ trusted_identifiers = implicit_collection_info is not None + + if element_identifiers and not trusted_identifiers: validate_input_element_identifiers( element_identifiers ) + dataset_collection = self.__create_dataset_collection( trans=trans, collection_type=collection_type, diff -r 3e485075985bb492f83fdd49ee5d5042e6404d80 -r ea812c16cbf9674b2ba153041c3dd33bf36b7fe2 lib/galaxy/dataset_collections/structure.py --- a/lib/galaxy/dataset_collections/structure.py +++ b/lib/galaxy/dataset_collections/structure.py @@ -78,7 +78,7 @@ child_identifiers[ "name" ] = identifier element_identifiers.append( child_identifiers ) else: - element_identifiers.append( dict( name=identifier, src="hda", id=trans.security.encode_id( datasets[ 0 ].id ) ) ) + element_identifiers.append( dict( name=identifier, __object__=datasets[ 0 ] ) ) datasets = datasets[ len( child ): ] https://bitbucket.org/galaxy/galaxy-central/commits/148b770b37aa/ Changeset: 148b770b37aa User: jmchilton Date: 2014-09-03 14:43:25 Summary: Refactor some collection-y tool methods ahead of collection outputs. Tool mapping "structure" logic no longer assumes tool outputs are datasets - it could now also potentially handle logic related to the structure of output collections. These variable names will make more sense down the road when tools that explicitly create collections exist and can be mapped over. Affected #: 4 files diff -r ea812c16cbf9674b2ba153041c3dd33bf36b7fe2 -r 148b770b37aa202facde8c79bc49bbd875f92ddc lib/galaxy/dataset_collections/__init__.py --- a/lib/galaxy/dataset_collections/__init__.py +++ b/lib/galaxy/dataset_collections/__init__.py @@ -74,7 +74,7 @@ if implicit_collection_info: for input_name, input_collection in implicit_collection_info[ "implicit_inputs" ]: dataset_collection_instance.add_implicit_input_collection( input_name, input_collection ) - for output_dataset in implicit_collection_info.get( "outputs_datasets" ): + for output_dataset in implicit_collection_info.get( "outputs" ): output_dataset.hidden_beneath_collection_instance = dataset_collection_instance trans.sa_session.add( output_dataset ) diff -r ea812c16cbf9674b2ba153041c3dd33bf36b7fe2 -r 148b770b37aa202facde8c79bc49bbd875f92ddc lib/galaxy/dataset_collections/structure.py --- a/lib/galaxy/dataset_collections/structure.py +++ b/lib/galaxy/dataset_collections/structure.py @@ -3,6 +3,8 @@ import logging log = logging.getLogger( __name__ ) +from .type_description import map_over_collection_type + class Leaf( object ): @@ -70,21 +72,27 @@ def __len__( self ): return sum( [ len( c[ 1 ] ) for c in self.children ] ) - def element_identifiers_for_datasets( self, trans, datasets ): + def element_identifiers_for_outputs( self, trans, outputs ): element_identifiers = [] + elements_collection_type = None for identifier, child in self.children: if isinstance( child, Tree ): - child_identifiers = child.element_identifiers_for_datasets( trans, datasets[ 0:len( child ) ] ) + child_identifiers = child.element_identifiers_for_outputs( trans, outputs[ 0:len( child ) ] ) child_identifiers[ "name" ] = identifier element_identifiers.append( child_identifiers ) + elements_collection_type = child_identifiers[ "collection_type" ] else: - element_identifiers.append( dict( name=identifier, __object__=datasets[ 0 ] ) ) + output_object = outputs[ 0 ] + element_identifiers.append( dict( name=identifier, __object__=output_object ) ) + if hasattr( output_object, "collection_type" ): + elements_collection_type = output_object.collection_type - datasets = datasets[ len( child ): ] +
outputs = outputs[ len( child ): ] + collection_type = map_over_collection_type( self.collection_type_description.rank_collection_type(), elements_collection_type ) return dict( src="new_collection", - collection_type=self.collection_type_description.collection_type, + collection_type=collection_type, element_identifiers=element_identifiers, ) diff -r ea812c16cbf9674b2ba153041c3dd33bf36b7fe2 -r 148b770b37aa202facde8c79bc49bbd875f92ddc lib/galaxy/dataset_collections/type_description.py --- a/lib/galaxy/dataset_collections/type_description.py +++ b/lib/galaxy/dataset_collections/type_description.py @@ -104,3 +104,16 @@ def __str__( self ): return "CollectionTypeDescription[%s]" % self.collection_type + + +def map_over_collection_type( mapped_over_collection_type, target_collection_type ): + if hasattr( mapped_over_collection_type, 'collection_type' ): + mapped_over_collection_type = mapped_over_collection_type.collection_type + + if not target_collection_type: + return mapped_over_collection_type + else: + if hasattr( target_collection_type, 'collection_type' ): + target_collection_type = target_collection_type.collection_type + + return "%s:%s" % (mapped_over_collection_type, target_collection_type) diff -r ea812c16cbf9674b2ba153041c3dd33bf36b7fe2 -r 148b770b37aa202facde8c79bc49bbd875f92ddc lib/galaxy/tools/execute.py --- a/lib/galaxy/tools/execute.py +++ b/lib/galaxy/tools/execute.py @@ -46,14 +46,14 @@ self.failed_jobs = 0 self.execution_errors = [] self.output_datasets = [] - self.output_datasets_by_output_name = collections.defaultdict(list) + self.outputs_by_output_name = collections.defaultdict(list) self.created_collections = {} def record_success( self, job, outputs ): self.successful_jobs.append( job ) self.output_datasets.extend( outputs ) for output_name, output_dataset in outputs: - self.output_datasets_by_output_name[ output_name ].append( output_dataset ) + self.outputs_by_output_name[ output_name ].append( output_dataset ) def record_error( self, error ): self.failed_jobs += 1 @@ -82,19 +82,19 @@ collections = {} implicit_inputs = list(self.collection_info.collections.iteritems()) - for output_name, outputs_datasets in self.output_datasets_by_output_name.iteritems(): - if not len( structure ) == len( outputs_datasets ): + for output_name, outputs in self.outputs_by_output_name.iteritems(): + if not len( structure ) == len( outputs ): # Output does not have the same structure, if all jobs were # successfully submitted this shouldn't have happened. 
log.warn( "Problem matching up datasets while attempting to create implicit dataset collections") continue output = self.tool.outputs[ output_name ] - element_identifiers_for_datasets = structure.element_identifiers_for_datasets( trans, outputs_datasets ) + element_identifiers = structure.element_identifiers_for_outputs( trans, outputs ) implicit_collection_info = dict( implicit_inputs=implicit_inputs, implicit_output_name=output_name, - outputs_datasets=outputs_datasets + outputs=outputs ) try: output_collection_name = self.tool_action.get_output_name( @@ -110,12 +110,14 @@ except Exception: output_collection_name = "%s across %s" % ( self.tool.name, on_text ) + child_element_identifiers = element_identifiers[ "element_identifiers" ] + collection_type = element_identifiers[ "collection_type" ] collection = trans.app.dataset_collections_service.create( trans=trans, parent=history, name=output_collection_name, - element_identifiers=element_identifiers_for_datasets[ "element_identifiers" ], - collection_type=structure.collection_type_description.collection_type, + element_identifiers=child_element_identifiers, + collection_type=collection_type, implicit_collection_info=implicit_collection_info, ) collections[ output_name ] = collection https://bitbucket.org/galaxy/galaxy-central/commits/8b0230532488/ Changeset: 8b0230532488 User: jmchilton Date: 2014-09-03 14:43:25 Summary: Fix unit tests broken with 9f63d96. Affected #: 3 files diff -r 148b770b37aa202facde8c79bc49bbd875f92ddc -r 8b02305324881c506162b5ba60b7512c01d951cf test/unit/jobs/test_job_wrapper.py --- a/test/unit/jobs/test_job_wrapper.py +++ b/test/unit/jobs/test_job_wrapper.py @@ -137,7 +137,9 @@ integrated_datatypes_configs=os.path.join(test_directory, "datatypes_conf.xml"), ), ) - self.job_config = Bunch() + self.job_config = Bunch( + dynamic_params=None, + ) self.model = Bunch(context=MockContext(model_objects)) diff -r 148b770b37aa202facde8c79bc49bbd875f92ddc -r 8b02305324881c506162b5ba60b7512c01d951cf test/unit/jobs/test_mapper.py --- a/test/unit/jobs/test_mapper.py +++ b/test/unit/jobs/test_mapper.py @@ -119,6 +119,7 @@ def __init__( self ): self.rule_response = None + self.dynamic_params = None def get_destination( self, rep ): # Called to transform dynamic job destination rule response diff -r 148b770b37aa202facde8c79bc49bbd875f92ddc -r 8b02305324881c506162b5ba60b7512c01d951cf test/unit/tools_support.py --- a/test/unit/tools_support.py +++ b/test/unit/tools_support.py @@ -103,7 +103,9 @@ ) # Setup some attributes for downstream extension by specific tests. - self.job_config = Bunch() + self.job_config = Bunch( + dynamic_params=None, + ) # Two ways to handle model layer, one is to stub out some objects that # have an interface similar to real model (mock_model) and can keep https://bitbucket.org/galaxy/galaxy-central/commits/3d73e68aa3fb/ Changeset: 3d73e68aa3fb User: jmchilton Date: 2014-09-03 14:43:25 Summary: Delete incorrect unit test added in d15f9a1. Don't know what I was thinking - it doesn't look like that method ever had that behavior. 
Affected #: 1 file diff -r 8b02305324881c506162b5ba60b7512c01d951cf -r 3d73e68aa3fb0e9300e2eeb655de3d09b9c31d67 test/unit/workflows/test_modules.py --- a/test/unit/workflows/test_modules.py +++ b/test/unit/workflows/test_modules.py @@ -22,13 +22,6 @@ assert not tool_module.get_errors() -def test_missing_tool_has_errors(): - trans = MockTrans() - tool_dict = { "type": "tool", "tool_id": "cat1" } - tool_module = modules.module_factory.from_dict( trans, tool_dict ) - assert tool_module.get_errors() - - def test_cannot_create_tool_modules_for_missing_tools(): trans = MockTrans() exception = False https://bitbucket.org/galaxy/galaxy-central/commits/012e6eb76568/ Changeset: 012e6eb76568 User: jmchilton Date: 2014-09-03 14:43:25 Summary: Fix workflow rendering unit test when run in suite. It was stubbing out connection stuff which worked fine when running the test on its own - but after any test sets up the SQLAlchemy mappings this breaks down because events are trying to fire. This just uses actual model classes - which is just as easy anyway really. Affected #: 1 file diff -r 3d73e68aa3fb0e9300e2eeb655de3d09b9c31d67 -r 012e6eb765684020eb597d8925e4ca5b58b9d904 test/unit/workflows/test_render.py --- a/test/unit/workflows/test_render.py +++ b/test/unit/workflows/test_render.py @@ -1,5 +1,4 @@ from galaxy import model -from galaxy.util import bunch from galaxy.workflow import render @@ -18,6 +17,12 @@ workflow.steps.append( workflow_step ) return workflow_step + def connection( **kwds ): + conn = model.WorkflowStepConnection() + for key, value in kwds.iteritems(): + setattr(conn, key, value) + return conn + step_0 = add_step( type="data_input", order_index=0, @@ -32,12 +37,13 @@ input_connections=[], position={"top": 6, "left": 4} ) + step_2 = add_step( type="tool", tool_id="cat1", order_index=2, input_connections=[ - bunch.Bunch(input_name="input1", output_step=step_0, output_name="di1") + connection(input_name="input1", output_step=step_0, output_name="di1") ], position={"top": 13, "left": 10} ) @@ -46,7 +52,7 @@ tool_id="cat1", order_index=3, input_connections=[ - bunch.Bunch(input_name="input1", output_step=step_0, output_name="di1") + connection(input_name="input1", output_step=step_0, output_name="di1") ], position={"top": 33, "left": 103} ) Repository URL: https://bitbucket.org/galaxy/galaxy-central/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.