commit/galaxy-central: 3 new changesets
3 new commits in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/commits/767d9f567b6f/ Changeset: 767d9f567b6f User: jmchilton Date: 2014-02-22 05:09:57 Summary: Unit tests for various aspects of data tool parameter handling. Test that optional datasets can be used in tool evaluation (in test_evaluation.py). Add test_data_parameters.py, which tests many assorted DataToolParameter behaviors. Test various paths to DataToolParameter.to_python - including the recently enhanced ability to use optional datasets with 'multiple=True' data parameters. Test filtering on datatypes and implicit conversion options (both existing conversions and new ones), both when building HTML forms and when picking initial values for workflows. Test special handling of hidden datasets. Test picking initial datasets when optional, and preventing repeats in subsequent calls. Affected #: 2 files diff -r 2403d6f67500ec35b16ce604142d98766286b65a -r 767d9f567b6fa5f6554ec2180b4f1249587f1473 test/unit/tools/test_data_parameters.py --- /dev/null +++ b/test/unit/tools/test_data_parameters.py @@ -0,0 +1,223 @@ +from unittest import TestCase + +from galaxy import model +from galaxy.util import bunch +from galaxy.tools.parameters import basic + +from elementtree.ElementTree import XML + +import tools_support + + +class DataToolParameterTestCase( TestCase, tools_support.UsesApp ): + + def test_to_python_none_values( self ): + assert None is self.param.to_python( None, self.app ) + assert 'None' == self.param.to_python( 'None', self.app ) + assert '' == self.param.to_python( '', self.app ) + + def test_to_python_hda( self ): + hda = self._new_hda() + as_python = self.param.to_python( hda.id, self.app ) + assert hda == as_python + + def test_to_python_multi_hdas( self ): + hda1 = self._new_hda() + hda2 = self._new_hda() + as_python = self.param.to_python( "%s,%s" % ( hda1.id, hda2.id ), self.app ) + assert as_python == [ hda1, hda2 ] + + def test_to_python_multi_none( self ): + self.multiple = True + hda = self._new_hda() + # The 'Selection is Optional' value (None) may be selected along with + # real datasets; not sure the UI should really allow this, but it is + # easy enough to just filter it out.
+ assert [hda] == self.param.to_python( '%s,None' % hda.id, self.app ) + + def test_field_filter_on_types( self ): + hda1 = MockHistoryDatasetAssociation( name="hda1", id=1 ) + hda2 = MockHistoryDatasetAssociation( name="hda2", id=2 ) + self.stub_active_datasets( hda1, hda2 ) + field = self.param.get_html_field( trans=self.trans ) + assert len( field.options ) == 2 + assert field.options[ 0 ][ 0 ] == "1: hda1" + assert field.options[ 1 ][ 0 ] == "2: hda2" + + assert field.options[ 1 ][ 2 ] # Last one selected + assert not field.options[ 0 ][ 2 ] # Others not selected + + hda2.datatype_matches = False + field = self.param.get_html_field( trans=self.trans ) + assert len( field.options ) == 1 + assert field.options[ 0 ][ 2 ] is True # Last one selected + + def test_field_display_hidden_hdas_only_if_selected( self ): + hda1 = MockHistoryDatasetAssociation( name="hda1", id=1 ) + hda2 = MockHistoryDatasetAssociation( name="hda2", id=2 ) + self.stub_active_datasets( hda1, hda2 ) + hda1.visible = False + hda2.visible = False + field = self.param.get_html_field( trans=self.trans, value=hda2 ) + assert len( field.options ) == 1 # hda1 not an option, not visible or selected + assert field.options[ 0 ][ 0 ] == "2: (hidden) hda2" + + def test_field_implicit_conversion_new( self ): + hda1 = MockHistoryDatasetAssociation( name="hda1", id=1 ) + hda1.datatype_matches = False + hda1.conversion_destination = ( "tabular", None ) + self.stub_active_datasets( hda1 ) + field = self.param.get_html_field( trans=self.trans ) + assert len( field.options ) == 1 + assert field.options[ 0 ][ 0 ] == "1: (as tabular) hda1" + assert field.options[ 0 ][ 1 ] == 1 + + def test_field_implicit_conversion_existing( self ): + hda1 = MockHistoryDatasetAssociation( name="hda1", id=1 ) + hda1.datatype_matches = False + hda1.conversion_destination = ( "tabular", MockHistoryDatasetAssociation( name="hda1converted", id=2 ) ) + self.stub_active_datasets( hda1 ) + field = self.param.get_html_field( trans=self.trans ) + assert len( field.options ) == 1 + assert field.options[ 0 ][ 0 ] == "1: (as tabular) hda1" + # This is difference with previous test, value is existing + # hda id not new one. 
+ assert field.options[ 0 ][ 1 ] == 2 + + def test_field_multiple( self ): + self.multiple = True + field = self.param.get_html_field( trans=self.trans ) + assert field.multiple + + def test_field_empty_selection( self ): + field = self.param.get_html_field( trans=self.trans ) + assert len( field.options ) == 0 + + def test_field_empty_selection_optional( self ): + self.optional = True + field = self.param.get_html_field( trans=self.trans ) + assert len( field.options ) == 1 + option = field.options[ 0 ] + assert option[ 0 ] == "Selection is Optional" + assert option[ 1 ] == "None" + assert option[ 2 ] is True + + def test_get_initial_value_prevents_repeats( self ): + hda1 = MockHistoryDatasetAssociation( name="hda1", id=1 ) + hda2 = MockHistoryDatasetAssociation( name="hda2", id=2 ) + self.stub_active_datasets( hda1, hda2 ) + already_used = [] + assert hda2 == self.param.get_initial_value_from_history_prevent_repeats( self.trans, {}, already_used ) + assert hda1 == self.param.get_initial_value_from_history_prevent_repeats( self.trans, {}, already_used ) + + def test_get_initial_value_is_empty_string_if_no_match( self ): + hda1 = MockHistoryDatasetAssociation( name="hda1", id=1 ) + hda1.visible = False + hda2 = MockHistoryDatasetAssociation( name="hda2", id=2 ) + hda2.visible = False + self.stub_active_datasets( hda1, hda2 ) + assert '' == self.param.get_initial_value( self.trans, {} ) + + def test_get_initial_none_when_optional( self ): + self.optional = True + hda1 = MockHistoryDatasetAssociation( name="hda1", id=1 ) + hda2 = MockHistoryDatasetAssociation( name="hda2", id=2 ) + self.stub_active_datasets( hda1, hda2 ) + assert self.param.get_initial_value( self.trans, {} ) is None + + def test_get_initial_with_previously_converted_data( self ): + hda1 = MockHistoryDatasetAssociation( name="hda1", id=1 ) + hda1.datatype_matches = False + converted = MockHistoryDatasetAssociation( name="hda1converted", id=2 ) + hda1.conversion_destination = ( "tabular", converted ) + self.stub_active_datasets( hda1 ) + assert converted == self.param.get_initial_value( self.trans, {} ) + + def test_get_initial_with_to_be_converted_data( self ): + hda1 = MockHistoryDatasetAssociation( name="hda1", id=1 ) + hda1.datatype_matches = False + hda1.conversion_destination = ( "tabular", None ) + self.stub_active_datasets( hda1 ) + assert hda1 == self.param.get_initial_value( self.trans, {} ) + + def _new_hda( self ): + hda = model.HistoryDatasetAssociation() + hda.visible = True + hda.dataset = model.Dataset() + self.app.model.context.add( hda ) + self.app.model.context.flush( ) + return hda + + def setUp( self ): + self.setup_app( mock_model=False ) + self.mock_tool = bunch.Bunch( + app=self.app, + tool_type="default", + ) + self.test_history = model.History() + self.app.model.context.add( self.test_history ) + self.app.model.context.flush() + self.trans = bunch.Bunch( + app=self.app, + get_history=lambda: self.test_history, + get_current_user_roles=lambda: [], + workflow_building_mode=False, + webapp=bunch.Bunch( name="galaxy" ), + ) + self.multiple = False + self.optional = False + self._param = None + + def stub_active_datasets( self, *hdas ): + self.test_history._active_datasets_children_and_roles = hdas + + @property + def param( self ): + if not self._param: + multi_text = "" + if self.multiple: + multi_text = 'multiple="True"' + optional_text = "" + if self.optional: + optional_text = 'optional="True"' + template_xml = '''<param name="data2" type="data" ext="txt" %s %s></param>''' + self.param_xml = XML( 
template_xml % ( multi_text, optional_text ) ) + self._param = basic.DataToolParameter( self.mock_tool, self.param_xml ) + + return self._param + + +class MockHistoryDatasetAssociation( object ): + """ Fake HistoryDatasetAssociation stubbed out for testing datatype + matching and related behavior. + """ + + def __init__( self, test_dataset=None, name="Test Dataset", id=1 ): + if not test_dataset: + test_dataset = model.Dataset() + self.states = model.HistoryDatasetAssociation.states + self.deleted = False + self.dataset = test_dataset + self.visible = True + self.datatype_matches = True + self.conversion_destination = ( None, None ) + self.datatype = bunch.Bunch( + matches_any=lambda formats: self.datatype_matches, + ) + self.dbkey = "hg19" + self.implicitly_converted_parent_datasets = False + + self.name = name + self.hid = id + self.id = id + self.children = [] + + @property + def state( self ): + return self.dataset.state + + def get_dbkey( self ): + return self.dbkey + + def find_conversion_destination( self, formats ): + return self.conversion_destination diff -r 2403d6f67500ec35b16ce604142d98766286b65a -r 767d9f567b6fa5f6554ec2180b4f1249587f1473 test/unit/tools/test_evaluation.py --- a/test/unit/tools/test_evaluation.py +++ b/test/unit/tools/test_evaluation.py @@ -20,6 +20,7 @@ from galaxy.tools.parameters.grouping import ConditionalWhen from galaxy.tools.parameters.basic import IntegerToolParameter from galaxy.tools.parameters.basic import SelectToolParameter +from galaxy.tools.parameters.basic import DataToolParameter from elementtree.ElementTree import XML # Import after model, to ensure elementtree @@ -78,6 +79,18 @@ command_line, extra_filenames = self.evaluator.build( ) self.assertEquals( command_line, "prog1 --thresh=4 --test_param=true" ) + def test_evaluation_of_optional_datasets( self ): + # Make sure optional datasets don't cause evaluation to break and + # that they evaluate in Cheetah templates as 'None'. + select_xml = XML('''<param name="input1" type="data" optional="true"></param>''') + parameter = DataToolParameter( self.tool, select_xml ) + self.job.parameters = [ JobParameter( name="input1", value=u'null' ) ] + self.tool.set_params( { "input1": parameter } ) + self.tool._command_line = "prog1 --opt_input='${input1}'" + self._set_compute_environment() + command_line, extra_filenames = self.evaluator.build( ) + self.assertEquals( command_line, "prog1 --opt_input='None'" ) + def test_evaluation_with_path_rewrites_wrapped( self ): self.tool.check_values = True self.__test_evaluation_with_path_rewrites() https://bitbucket.org/galaxy/galaxy-central/commits/9051ceb43593/ Changeset: 9051ceb43593 User: jmchilton Date: 2014-02-22 05:09:57 Summary: Rearrange unit tests. Move tests that were in test/unit but logically belong in test/unit/tools or test/unit/jobs into those directories.
Affected #: 8 files diff -r 767d9f567b6fa5f6554ec2180b4f1249587f1473 -r 9051ceb435937348e6417fb96eb9611474496828 test/unit/jobs/test_command_factory.py --- /dev/null +++ b/test/unit/jobs/test_command_factory.py @@ -0,0 +1,156 @@ +from os import getcwd +from unittest import TestCase + +from galaxy.jobs.command_factory import build_command +from galaxy.util.bunch import Bunch + +MOCK_COMMAND_LINE = "/opt/galaxy/tools/bowtie /mnt/galaxyData/files/000/input000.dat" +TEST_METADATA_LINE = "set_metadata_and_stuff.sh" +TEST_FILES_PATH = "file_path" + + +class TestCommandFactory(TestCase): + + def setUp(self): + self.job_wrapper = MockJobWrapper() + self.workdir_outputs = [] + + def workdir_outputs(job_wrapper, **kwds): + assert job_wrapper == self.job_wrapper + return self.workdir_outputs + + self.runner = Bunch(app=Bunch(model=Bunch(Dataset=Bunch(file_path=TEST_FILES_PATH))), get_work_dir_outputs=workdir_outputs) + self.include_metadata = False + self.include_work_dir_outputs = True + + def test_simplest_command(self): + self.include_work_dir_outputs = False + self.__assert_command_is( MOCK_COMMAND_LINE ) + + def test_shell_commands(self): + self.include_work_dir_outputs = False + dep_commands = [". /opt/galaxy/tools/bowtie/default/env.sh"] + self.job_wrapper.dependency_shell_commands = dep_commands + self.__assert_command_is( "%s; %s" % (dep_commands[0], MOCK_COMMAND_LINE) ) + + def test_remote_dependency_resolution(self): + self.include_work_dir_outputs = False + dep_commands = [". /opt/galaxy/tools/bowtie/default/env.sh"] + self.job_wrapper.dependency_shell_commands = dep_commands + self.__assert_command_is(MOCK_COMMAND_LINE, remote_command_params=dict(dependency_resolution="remote")) + + def test_explicit_local_dependency_resolution(self): + self.include_work_dir_outputs = False + dep_commands = [". /opt/galaxy/tools/bowtie/default/env.sh"] + self.job_wrapper.dependency_shell_commands = dep_commands + self.__assert_command_is("%s; %s" % (dep_commands[0], MOCK_COMMAND_LINE), + remote_command_params=dict(dependency_resolution="local")) + + def test_task_prepare_inputs(self): + self.include_work_dir_outputs = False + self.job_wrapper.prepare_input_files_cmds = ["/opt/split1", "/opt/split2"] + self.__assert_command_is( "/opt/split1; /opt/split2; %s" % MOCK_COMMAND_LINE ) + + def test_workdir_outputs(self): + self.include_work_dir_outputs = True + self.workdir_outputs = [("foo", "bar")] + self.__assert_command_is( '%s; return_code=$?; if [ -f foo ] ; then cp foo bar ; fi; sh -c "exit $return_code"' % MOCK_COMMAND_LINE ) + + def test_set_metadata_skipped_if_unneeded(self): + self.include_metadata = True + self.include_work_dir_outputs = False + self.__assert_command_is( MOCK_COMMAND_LINE ) + + def test_set_metadata(self): + self._test_set_metadata() + + def test_strips_trailing_semicolons(self): + self.job_wrapper.command_line = "%s;" % MOCK_COMMAND_LINE + self._test_set_metadata() + + def _test_set_metadata(self): + self.include_metadata = True + self.include_work_dir_outputs = False + self.job_wrapper.metadata_line = TEST_METADATA_LINE + expected_command = '%s; return_code=$?; cd %s; %s; sh -c "exit $return_code"' % (MOCK_COMMAND_LINE, getcwd(), TEST_METADATA_LINE) + self.__assert_command_is( expected_command ) + + def test_empty_metadata(self): + """ + As produced by TaskWrapper. + """ + self.include_metadata = True + self.include_work_dir_outputs = False + self.job_wrapper.metadata_line = ' ' + # Empty metadata command do not touch command line. 
+ expected_command = '%s' % (MOCK_COMMAND_LINE) + self.__assert_command_is( expected_command ) + + def test_metadata_kwd_defaults(self): + configured_kwds = self.__set_metadata_with_kwds() + assert configured_kwds['exec_dir'] == getcwd() + assert configured_kwds['tmp_dir'] == self.job_wrapper.working_directory + assert configured_kwds['dataset_files_path'] == TEST_FILES_PATH + assert configured_kwds['output_fnames'] == ['output1'] + + def test_metadata_kwds_overrride(self): + configured_kwds = self.__set_metadata_with_kwds( + exec_dir="/path/to/remote/galaxy", + tmp_dir="/path/to/remote/staging/directory/job1", + dataset_files_path="/path/to/remote/datasets/", + output_fnames=['/path/to/remote_output1'], + ) + assert configured_kwds['exec_dir'] == "/path/to/remote/galaxy" + assert configured_kwds['tmp_dir'] == "/path/to/remote/staging/directory/job1" + assert configured_kwds['dataset_files_path'] == "/path/to/remote/datasets/" + assert configured_kwds['output_fnames'] == ['/path/to/remote_output1'] + + def __set_metadata_with_kwds(self, **kwds): + self.include_metadata = True + self.include_work_dir_outputs = False + self.job_wrapper.metadata_line = TEST_METADATA_LINE + if kwds: + self.__command(remote_command_params=dict(metadata_kwds=kwds)) + else: + self.__command() + return self.job_wrapper.configured_external_metadata_kwds + + def __assert_command_is(self, expected_command, **command_kwds): + command = self.__command(**command_kwds) + self.assertEqual(command, expected_command) + + def __command(self, **extra_kwds): + kwds = dict( + runner=self.runner, + job_wrapper=self.job_wrapper, + include_metadata=self.include_metadata, + include_work_dir_outputs=self.include_work_dir_outputs, + **extra_kwds + ) + return build_command(**kwds) + + +class MockJobWrapper(object): + + def __init__(self): + self.write_version_cmd = None + self.command_line = MOCK_COMMAND_LINE + self.dependency_shell_commands = [] + self.metadata_line = None + self.configured_external_metadata_kwds = None + self.working_directory = "job1" + self.prepare_input_files_cmds = None + + def get_command_line(self): + return self.command_line + + @property + def requires_setting_metadata(self): + return self.metadata_line is not None + + def setup_external_metadata(self, *args, **kwds): + self.configured_external_metadata_kwds = kwds + return self.metadata_line + + def get_output_fnames(self): + return ["output1"] diff -r 767d9f567b6fa5f6554ec2180b4f1249587f1473 -r 9051ceb435937348e6417fb96eb9611474496828 test/unit/jobs/test_job_output_checker.py --- /dev/null +++ b/test/unit/jobs/test_job_output_checker.py @@ -0,0 +1,62 @@ +from unittest import TestCase +from galaxy.util.bunch import Bunch +from galaxy.jobs.output_checker import check_output +from galaxy.jobs.error_level import StdioErrorLevel + + +class OutputCheckerTestCase( TestCase ): + + def setUp( self ): + self.tool = Bunch( + stdio_regexes=[], + stdio_exit_codes=[], + ) + self.job = Bunch( + stdout=None, + stderr=None, + get_id_tag=lambda: "test_id", + ) + self.stdout = '' + self.stderr = '' + self.tool_exit_code = None + + def test_default_no_stderr_success( self ): + self.__assertSuccessful() + + def test_default_stderr_failure( self ): + self.stderr = 'foo' + self.__assertNotSuccessful() + + def test_exit_code_error( self ): + mock_exit_code = Bunch( range_start=1, range_end=1, error_level=StdioErrorLevel.FATAL, desc=None ) + self.tool.stdio_exit_codes.append( mock_exit_code ) + self.tool_exit_code = 1 + self.__assertNotSuccessful() + + def test_exit_code_success( 
self ): + mock_exit_code = Bunch( range_start=1, range_end=1, error_level=StdioErrorLevel.FATAL, desc=None ) + self.tool.stdio_exit_codes.append( mock_exit_code ) + self.tool_exit_code = 0 + self.__assertSuccessful() + + def test_problematic_strings( self ): + problematic_str = '\x80abc' + regex_rule = Bunch( match=r'.abc', stdout_match=False, stderr_match=True, error_level=StdioErrorLevel.FATAL, desc=None ) + self.tool.stdio_regexes = [ regex_rule ] + self.stderr = problematic_str + self.__assertNotSuccessful() + + problematic_str = '\x80abc' + regex_rule = Bunch( match=r'.abcd', stdout_match=False, stderr_match=True, error_level=StdioErrorLevel.FATAL, desc=None ) + self.tool.stdio_regexes = [ regex_rule ] + self.stderr = problematic_str + self.__assertSuccessful() + + def __assertSuccessful( self ): + self.assertTrue( self.__check_output() ) + + def __assertNotSuccessful( self ): + self.assertFalse( self.__check_output() ) + + def __check_output( self ): + return check_output( self.tool, self.stdout, self.stderr, self.tool_exit_code, self.job ) diff -r 767d9f567b6fa5f6554ec2180b4f1249587f1473 -r 9051ceb435937348e6417fb96eb9611474496828 test/unit/test_command_factory.py --- a/test/unit/test_command_factory.py +++ /dev/null @@ -1,156 +0,0 @@ -from os import getcwd -from unittest import TestCase - -from galaxy.jobs.command_factory import build_command -from galaxy.util.bunch import Bunch - -MOCK_COMMAND_LINE = "/opt/galaxy/tools/bowtie /mnt/galaxyData/files/000/input000.dat" -TEST_METADATA_LINE = "set_metadata_and_stuff.sh" -TEST_FILES_PATH = "file_path" - - -class TestCommandFactory(TestCase): - - def setUp(self): - self.job_wrapper = MockJobWrapper() - self.workdir_outputs = [] - - def workdir_outputs(job_wrapper, **kwds): - assert job_wrapper == self.job_wrapper - return self.workdir_outputs - - self.runner = Bunch(app=Bunch(model=Bunch(Dataset=Bunch(file_path=TEST_FILES_PATH))), get_work_dir_outputs=workdir_outputs) - self.include_metadata = False - self.include_work_dir_outputs = True - - def test_simplest_command(self): - self.include_work_dir_outputs = False - self.__assert_command_is( MOCK_COMMAND_LINE ) - - def test_shell_commands(self): - self.include_work_dir_outputs = False - dep_commands = [". /opt/galaxy/tools/bowtie/default/env.sh"] - self.job_wrapper.dependency_shell_commands = dep_commands - self.__assert_command_is( "%s; %s" % (dep_commands[0], MOCK_COMMAND_LINE) ) - - def test_remote_dependency_resolution(self): - self.include_work_dir_outputs = False - dep_commands = [". /opt/galaxy/tools/bowtie/default/env.sh"] - self.job_wrapper.dependency_shell_commands = dep_commands - self.__assert_command_is(MOCK_COMMAND_LINE, remote_command_params=dict(dependency_resolution="remote")) - - def test_explicit_local_dependency_resolution(self): - self.include_work_dir_outputs = False - dep_commands = [". 
/opt/galaxy/tools/bowtie/default/env.sh"] - self.job_wrapper.dependency_shell_commands = dep_commands - self.__assert_command_is("%s; %s" % (dep_commands[0], MOCK_COMMAND_LINE), - remote_command_params=dict(dependency_resolution="local")) - - def test_task_prepare_inputs(self): - self.include_work_dir_outputs = False - self.job_wrapper.prepare_input_files_cmds = ["/opt/split1", "/opt/split2"] - self.__assert_command_is( "/opt/split1; /opt/split2; %s" % MOCK_COMMAND_LINE ) - - def test_workdir_outputs(self): - self.include_work_dir_outputs = True - self.workdir_outputs = [("foo", "bar")] - self.__assert_command_is( '%s; return_code=$?; if [ -f foo ] ; then cp foo bar ; fi; sh -c "exit $return_code"' % MOCK_COMMAND_LINE ) - - def test_set_metadata_skipped_if_unneeded(self): - self.include_metadata = True - self.include_work_dir_outputs = False - self.__assert_command_is( MOCK_COMMAND_LINE ) - - def test_set_metadata(self): - self._test_set_metadata() - - def test_strips_trailing_semicolons(self): - self.job_wrapper.command_line = "%s;" % MOCK_COMMAND_LINE - self._test_set_metadata() - - def _test_set_metadata(self): - self.include_metadata = True - self.include_work_dir_outputs = False - self.job_wrapper.metadata_line = TEST_METADATA_LINE - expected_command = '%s; return_code=$?; cd %s; %s; sh -c "exit $return_code"' % (MOCK_COMMAND_LINE, getcwd(), TEST_METADATA_LINE) - self.__assert_command_is( expected_command ) - - def test_empty_metadata(self): - """ - As produced by TaskWrapper. - """ - self.include_metadata = True - self.include_work_dir_outputs = False - self.job_wrapper.metadata_line = ' ' - # Empty metadata command do not touch command line. - expected_command = '%s' % (MOCK_COMMAND_LINE) - self.__assert_command_is( expected_command ) - - def test_metadata_kwd_defaults(self): - configured_kwds = self.__set_metadata_with_kwds() - assert configured_kwds['exec_dir'] == getcwd() - assert configured_kwds['tmp_dir'] == self.job_wrapper.working_directory - assert configured_kwds['dataset_files_path'] == TEST_FILES_PATH - assert configured_kwds['output_fnames'] == ['output1'] - - def test_metadata_kwds_overrride(self): - configured_kwds = self.__set_metadata_with_kwds( - exec_dir="/path/to/remote/galaxy", - tmp_dir="/path/to/remote/staging/directory/job1", - dataset_files_path="/path/to/remote/datasets/", - output_fnames=['/path/to/remote_output1'], - ) - assert configured_kwds['exec_dir'] == "/path/to/remote/galaxy" - assert configured_kwds['tmp_dir'] == "/path/to/remote/staging/directory/job1" - assert configured_kwds['dataset_files_path'] == "/path/to/remote/datasets/" - assert configured_kwds['output_fnames'] == ['/path/to/remote_output1'] - - def __set_metadata_with_kwds(self, **kwds): - self.include_metadata = True - self.include_work_dir_outputs = False - self.job_wrapper.metadata_line = TEST_METADATA_LINE - if kwds: - self.__command(remote_command_params=dict(metadata_kwds=kwds)) - else: - self.__command() - return self.job_wrapper.configured_external_metadata_kwds - - def __assert_command_is(self, expected_command, **command_kwds): - command = self.__command(**command_kwds) - self.assertEqual(command, expected_command) - - def __command(self, **extra_kwds): - kwds = dict( - runner=self.runner, - job_wrapper=self.job_wrapper, - include_metadata=self.include_metadata, - include_work_dir_outputs=self.include_work_dir_outputs, - **extra_kwds - ) - return build_command(**kwds) - - -class MockJobWrapper(object): - - def __init__(self): - self.write_version_cmd = None - self.command_line 
= MOCK_COMMAND_LINE - self.dependency_shell_commands = [] - self.metadata_line = None - self.configured_external_metadata_kwds = None - self.working_directory = "job1" - self.prepare_input_files_cmds = None - - def get_command_line(self): - return self.command_line - - @property - def requires_setting_metadata(self): - return self.metadata_line is not None - - def setup_external_metadata(self, *args, **kwds): - self.configured_external_metadata_kwds = kwds - return self.metadata_line - - def get_output_fnames(self): - return ["output1"] diff -r 767d9f567b6fa5f6554ec2180b4f1249587f1473 -r 9051ceb435937348e6417fb96eb9611474496828 test/unit/test_job_output_checker.py --- a/test/unit/test_job_output_checker.py +++ /dev/null @@ -1,62 +0,0 @@ -from unittest import TestCase -from galaxy.util.bunch import Bunch -from galaxy.jobs.output_checker import check_output -from galaxy.jobs.error_level import StdioErrorLevel - - -class OutputCheckerTestCase( TestCase ): - - def setUp( self ): - self.tool = Bunch( - stdio_regexes=[], - stdio_exit_codes=[], - ) - self.job = Bunch( - stdout=None, - stderr=None, - get_id_tag=lambda: "test_id", - ) - self.stdout = '' - self.stderr = '' - self.tool_exit_code = None - - def test_default_no_stderr_success( self ): - self.__assertSuccessful() - - def test_default_stderr_failure( self ): - self.stderr = 'foo' - self.__assertNotSuccessful() - - def test_exit_code_error( self ): - mock_exit_code = Bunch( range_start=1, range_end=1, error_level=StdioErrorLevel.FATAL, desc=None ) - self.tool.stdio_exit_codes.append( mock_exit_code ) - self.tool_exit_code = 1 - self.__assertNotSuccessful() - - def test_exit_code_success( self ): - mock_exit_code = Bunch( range_start=1, range_end=1, error_level=StdioErrorLevel.FATAL, desc=None ) - self.tool.stdio_exit_codes.append( mock_exit_code ) - self.tool_exit_code = 0 - self.__assertSuccessful() - - def test_problematic_strings( self ): - problematic_str = '\x80abc' - regex_rule = Bunch( match=r'.abc', stdout_match=False, stderr_match=True, error_level=StdioErrorLevel.FATAL, desc=None ) - self.tool.stdio_regexes = [ regex_rule ] - self.stderr = problematic_str - self.__assertNotSuccessful() - - problematic_str = '\x80abc' - regex_rule = Bunch( match=r'.abcd', stdout_match=False, stderr_match=True, error_level=StdioErrorLevel.FATAL, desc=None ) - self.tool.stdio_regexes = [ regex_rule ] - self.stderr = problematic_str - self.__assertSuccessful() - - def __assertSuccessful( self ): - self.assertTrue( self.__check_output() ) - - def __assertNotSuccessful( self ): - self.assertFalse( self.__check_output() ) - - def __check_output( self ): - return check_output( self.tool, self.stdout, self.stderr, self.tool_exit_code, self.job ) diff -r 767d9f567b6fa5f6554ec2180b4f1249587f1473 -r 9051ceb435937348e6417fb96eb9611474496828 test/unit/test_tool_deps.py --- a/test/unit/test_tool_deps.py +++ /dev/null @@ -1,378 +0,0 @@ -import tempfile -import os.path -from stat import S_IXUSR -from os import makedirs, stat, symlink, chmod, environ -from shutil import rmtree -from galaxy.tools.deps import DependencyManager, INDETERMINATE_DEPENDENCY -from galaxy.tools.deps.resolvers.galaxy_packages import GalaxyPackageDependency -from galaxy.tools.deps.resolvers.modules import ModuleDependencyResolver, ModuleDependency -from galaxy.util.bunch import Bunch -from contextlib import contextmanager -from subprocess import Popen, PIPE - - -def test_tool_dependencies(): - # Setup directories - - with __test_base_path() as base_path: - for name, version, sub in [ ( 
"dep1", "1.0", "env.sh" ), ( "dep1", "2.0", "bin" ), ( "dep2", "1.0", None ) ]: - if sub == "bin": - p = os.path.join( base_path, name, version, "bin" ) - else: - p = os.path.join( base_path, name, version ) - try: - makedirs( p ) - except: - pass - if sub == "env.sh": - __touch( os.path.join( p, "env.sh" ) ) - - dm = DependencyManager( default_base_path=base_path ) - dependency = dm.find_dep( "dep1", "1.0" ) - assert dependency.script == os.path.join( base_path, 'dep1', '1.0', 'env.sh' ) - assert dependency.path == os.path.join( base_path, 'dep1', '1.0' ) - assert dependency.version == "1.0" - dependency = dm.find_dep( "dep1", "2.0" ) - assert dependency.script == None - assert dependency.path == os.path.join( base_path, 'dep1', '2.0' ) - assert dependency.version == "2.0" - - ## Test default versions - symlink( os.path.join( base_path, 'dep1', '2.0'), os.path.join( base_path, 'dep1', 'default' ) ) - dependency = dm.find_dep( "dep1", None ) - assert dependency.version == "2.0" - - ## Test default resolve will be fall back on default package dependency - ## when using the default resolver. - dependency = dm.find_dep( "dep1", "2.1" ) - assert dependency.version == "2.0" # 2.0 is defined as default_version - - -TEST_REPO_USER = "devteam" -TEST_REPO_NAME = "bwa" -TEST_REPO_CHANGESET = "12abcd41223da" -TEST_VERSION = "0.5.9" - - -def test_toolshed_set_enviornment_requiremetns(): - with __test_base_path() as base_path: - test_repo = __build_test_repo('set_environment') - dm = DependencyManager( default_base_path=base_path ) - env_settings_dir = os.path.join(base_path, "environment_settings", TEST_REPO_NAME, TEST_REPO_USER, TEST_REPO_NAME, TEST_REPO_CHANGESET) - os.makedirs(env_settings_dir) - dependency = dm.find_dep( TEST_REPO_NAME, version=None, type='set_environment', installed_tool_dependencies=[test_repo] ) - assert dependency.version == None - assert dependency.script == os.path.join(env_settings_dir, "env.sh") - - -def test_toolshed_package_requirements(): - with __test_base_path() as base_path: - test_repo = __build_test_repo('package', version=TEST_VERSION) - dm = DependencyManager( default_base_path=base_path ) - package_dir = __build_ts_test_package(base_path) - dependency = dm.find_dep( TEST_REPO_NAME, version=TEST_VERSION, type='package', installed_tool_dependencies=[test_repo] ) - assert dependency.version == TEST_VERSION - assert dependency.script == os.path.join(package_dir, "env.sh") - - -def test_toolshed_tools_fallback_on_manual_dependencies(): - with __test_base_path() as base_path: - dm = DependencyManager( default_base_path=base_path ) - test_repo = __build_test_repo('package', version=TEST_VERSION) - env_path = __setup_galaxy_package_dep(base_path, "dep1", "1.0") - dependency = dm.find_dep( "dep1", version="1.0", type='package', installed_tool_dependencies=[test_repo] ) - assert dependency.version == "1.0" - assert dependency.script == env_path - - -def test_toolshed_greater_precendence(): - with __test_base_path() as base_path: - dm = DependencyManager( default_base_path=base_path ) - test_repo = __build_test_repo('package', version=TEST_VERSION) - ts_package_dir = __build_ts_test_package(base_path) - gx_env_path = __setup_galaxy_package_dep(base_path, TEST_REPO_NAME, TEST_VERSION) - ts_env_path = os.path.join(ts_package_dir, "env.sh") - dependency = dm.find_dep( TEST_REPO_NAME, version=TEST_VERSION, type='package', installed_tool_dependencies=[test_repo] ) - assert dependency.script != gx_env_path # Not the galaxy path, it should be the tool shed path used. 
- assert dependency.script == ts_env_path - - -def __build_ts_test_package(base_path, script_contents=''): - package_dir = os.path.join(base_path, TEST_REPO_NAME, TEST_VERSION, TEST_REPO_USER, TEST_REPO_NAME, TEST_REPO_CHANGESET) - __touch(os.path.join(package_dir, 'env.sh'), script_contents) - return package_dir - - -def test_module_dependency_resolver(): - with __test_base_path() as temp_directory: - module_script = os.path.join(temp_directory, "modulecmd") - __write_script(module_script, '''#!/bin/sh -cat %s/example_output 1>&2; -''' % temp_directory) - with open(os.path.join(temp_directory, "example_output"), "w") as f: - # Subset of module avail from MSI cluster. - f.write(''' --------------------------- /soft/modules/modulefiles --------------------------- -JAGS/3.2.0-gcc45 -JAGS/3.3.0-gcc4.7.2 -ProbABEL/0.1-3 -ProbABEL/0.1-9e -R/2.12.2 -R/2.13.1 -R/2.14.1 -R/2.15.0 -R/2.15.1 -R/3.0.1(default) -abokia-blast/2.0.2-130524/ompi_intel -abokia-blast/2.0.2-130630/ompi_intel - ---------------------------- /soft/intel/modulefiles ---------------------------- -advisor/2013/update1 intel/11.1.075 mkl/10.2.1.017 -advisor/2013/update2 intel/11.1.080 mkl/10.2.5.035 -advisor/2013/update3 intel/12.0 mkl/10.2.7.041 -''') - resolver = ModuleDependencyResolver(None, modulecmd=module_script) - module = resolver.resolve( name="R", version=None, type="package" ) - assert module.module_name == "R" - assert module.module_version == None - - module = resolver.resolve( name="R", version="3.0.1", type="package" ) - assert module.module_name == "R" - assert module.module_version == "3.0.1" - - module = resolver.resolve( name="R", version="3.0.4", type="package" ) - assert module == INDETERMINATE_DEPENDENCY - - -def test_module_dependency(): - with __test_base_path() as temp_directory: - ## Create mock modulecmd script that just exports a variable - ## the way modulecmd sh load would, but also validate correct - ## module name and version are coming through. - mock_modulecmd = os.path.join(temp_directory, 'modulecmd') - __write_script(mock_modulecmd, '''#!/bin/sh -if [ $3 != "foomodule/1.0" ]; -then - exit 1 -fi -echo 'FOO="bar"' -''') - resolver = Bunch(modulecmd=mock_modulecmd) - dependency = ModuleDependency(resolver, "foomodule", "1.0") - __assert_foo_exported( dependency.shell_commands( Bunch( type="package" ) ) ) - - -def __write_script(path, contents): - with open(path, 'w') as f: - f.write(contents) - st = stat(path) - chmod(path, st.st_mode | S_IXUSR) - - -def test_galaxy_dependency_object_script(): - with __test_base_path() as base_path: - ## Create env.sh file that just exports variable FOO and verify it - ## shell_commands export it correctly. - env_path = __setup_galaxy_package_dep(base_path, TEST_REPO_NAME, TEST_VERSION, "export FOO=\"bar\"") - dependency = GalaxyPackageDependency(env_path, os.path.dirname(env_path), TEST_VERSION) - __assert_foo_exported( dependency.shell_commands( Bunch( type="package" ) ) ) - - -def test_shell_commands_built(): - ## Test that dependency manager builds valid shell commands for a list of - ## requirements. 
- with __test_base_path() as base_path: - dm = DependencyManager( default_base_path=base_path ) - __setup_galaxy_package_dep( base_path, TEST_REPO_NAME, TEST_VERSION, contents="export FOO=\"bar\"" ) - mock_requirements = [ Bunch(type="package", version=TEST_VERSION, name=TEST_REPO_NAME ) ] - commands = dm.dependency_shell_commands( mock_requirements ) - __assert_foo_exported( commands ) - - -def __assert_foo_exported( commands ): - command = ["bash", "-c", "%s; echo \"$FOO\"" % "".join(commands)] - process = Popen(command, stdout=PIPE) - output = process.communicate()[0].strip() - assert output == 'bar', "Command %s exports FOO as %s, not bar" % (command, output) - - -def __setup_galaxy_package_dep(base_path, name, version, contents=""): - dep_directory = os.path.join( base_path, name, version ) - env_path = os.path.join( dep_directory, "env.sh" ) - __touch( env_path, contents ) - return env_path - - -def __touch( fname, data=None ): - dirname = os.path.dirname( fname ) - if not os.path.exists( dirname ): - makedirs( dirname ) - f = open( fname, 'w' ) - try: - if data: - f.write( data ) - finally: - f.close() - - -def __build_test_repo(type, version=None): - return Bunch( - owner=TEST_REPO_USER, - name=TEST_REPO_NAME, - type=type, - version=version, - tool_shed_repository=Bunch( - owner=TEST_REPO_USER, - name=TEST_REPO_NAME, - installed_changeset_revision=TEST_REPO_CHANGESET - ) - ) - - -@contextmanager -def __test_base_path(): - base_path = tempfile.mkdtemp() - try: - yield base_path - finally: - rmtree(base_path) - - -def test_parse(): - with __parse_resolvers('''<dependency_resolvers> - <tool_shed_packages /> - <galaxy_packages /> -</dependency_resolvers> -''') as dependency_resolvers: - assert 'ToolShed' in dependency_resolvers[0].__class__.__name__ - assert 'Galaxy' in dependency_resolvers[1].__class__.__name__ - - with __parse_resolvers('''<dependency_resolvers> - <galaxy_packages /> - <tool_shed_packages /> -</dependency_resolvers> -''') as dependency_resolvers: - assert 'Galaxy' in dependency_resolvers[0].__class__.__name__ - assert 'ToolShed' in dependency_resolvers[1].__class__.__name__ - - with __parse_resolvers('''<dependency_resolvers> - <galaxy_packages /> - <tool_shed_packages /> - <galaxy_packages versionless="true" /> -</dependency_resolvers> -''') as dependency_resolvers: - assert not dependency_resolvers[0].versionless - assert dependency_resolvers[2].versionless - - with __parse_resolvers('''<dependency_resolvers> - <galaxy_packages /> - <tool_shed_packages /> - <galaxy_packages base_path="/opt/galaxy/legacy/"/> -</dependency_resolvers> -''') as dependency_resolvers: - # Unspecified base_paths are both default_base_paths - assert dependency_resolvers[0].base_path == dependency_resolvers[1].base_path - # Can specify custom base path... - assert dependency_resolvers[2].base_path == "/opt/galaxy/legacy" - # ... that is different from the default. 
- assert dependency_resolvers[0].base_path != dependency_resolvers[2].base_path - - -def test_uses_tool_shed_dependencies(): - with __dependency_manager('''<dependency_resolvers> - <galaxy_packages /> -</dependency_resolvers> -''') as dm: - assert not dm.uses_tool_shed_dependencies() - - with __dependency_manager('''<dependency_resolvers> - <tool_shed_packages /> -</dependency_resolvers> -''') as dm: - assert dm.uses_tool_shed_dependencies() - - -def test_config_module_defaults(): - with __parse_resolvers('''<dependency_resolvers> - <modules prefetch="false" /> -</dependency_resolvers> -''') as dependency_resolvers: - module_resolver = dependency_resolvers[0] - assert module_resolver.module_checker.__class__.__name__ == "AvailModuleChecker" - - -def test_config_modulepath(): - # Test reads and splits MODULEPATH if modulepath is not specified. - with __parse_resolvers('''<dependency_resolvers> - <modules find_by="directory" modulepath="/opt/modules/modulefiles:/usr/local/modules/modulefiles" /> -</dependency_resolvers> -''') as dependency_resolvers: - assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles", "/usr/local/modules/modulefiles"] - - -def test_config_MODULEPATH(): - # Test reads and splits MODULEPATH if modulepath is not specified. - with __environ({"MODULEPATH": "/opt/modules/modulefiles:/usr/local/modules/modulefiles"}): - with __parse_resolvers('''<dependency_resolvers> - <modules find_by="directory" /> -</dependency_resolvers> -''') as dependency_resolvers: - assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles", "/usr/local/modules/modulefiles"] - - -def test_config_MODULESHOME(): - # Test fallbacks to read MODULESHOME if modulepath is not specified and - # neither is MODULEPATH. - with __environ({"MODULESHOME": "/opt/modules"}, remove="MODULEPATH"): - with __parse_resolvers('''<dependency_resolvers> - <modules find_by="directory" /> -</dependency_resolvers> -''') as dependency_resolvers: - assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles"] - - -def test_config_module_directory_searcher(): - with __parse_resolvers('''<dependency_resolvers> - <modules find_by="directory" modulepath="/opt/Modules/modulefiles" /> -</dependency_resolvers> -''') as dependency_resolvers: - module_resolver = dependency_resolvers[0] - assert module_resolver.module_checker.directories == ["/opt/Modules/modulefiles"] - - -@contextmanager -def __environ(values, remove=[]): - """ - Modify the environment for a test, adding/updating values in dict `values` and - removing any environment variables mentioned in list `remove`. 
- """ - new_keys = set(environ.keys()) - set(values.keys()) - old_environ = environ.copy() - try: - environ.update(values) - for to_remove in remove: - try: - del environ[remove] - except KeyError: - pass - yield - finally: - environ.update(old_environ) - for key in new_keys: - del environ[key] - - -@contextmanager -def __parse_resolvers(xml_content): - with __dependency_manager(xml_content) as dm: - yield dm.dependency_resolvers - - -@contextmanager -def __dependency_manager(xml_content): - with __test_base_path() as base_path: - f = tempfile.NamedTemporaryFile() - f.write(xml_content) - f.flush() - dm = DependencyManager( default_base_path=base_path, conf_file=f.name ) - yield dm diff -r 767d9f567b6fa5f6554ec2180b4f1249587f1473 -r 9051ceb435937348e6417fb96eb9611474496828 test/unit/test_tool_loader.py --- a/test/unit/test_tool_loader.py +++ /dev/null @@ -1,191 +0,0 @@ -from tempfile import mkdtemp -from shutil import rmtree -import os - -from galaxy.util import parse_xml -from galaxy.tools.loader import template_macro_params, load_tool - -def test_loader(): - - class TestToolDirectory(object): - def __init__(self): - self.temp_directory = mkdtemp() - - def __enter__(self): - return self - - def __exit__(self, type, value, tb): - rmtree(self.temp_directory) - - def write(self, contents, name="tool.xml"): - open(os.path.join(self.temp_directory, name), "w").write(contents) - - def load(self, name="tool.xml", preprocess=True): - if preprocess: - loader = load_tool - else: - loader = parse_xml - return loader(os.path.join(self.temp_directory, name)) - - ## Test simple macro replacement. - with TestToolDirectory() as tool_dir: - tool_dir.write(''' -<tool> - <expand macro="inputs" /> - <macros> - <macro name="inputs"> - <inputs /> - </macro> - </macros> -</tool>''') - xml = tool_dir.load(preprocess=False) - assert xml.find("inputs") is None - xml = tool_dir.load(preprocess=True) - assert xml.find("inputs") is not None - - # Test importing macros from external files - with TestToolDirectory() as tool_dir: - tool_dir.write(''' -<tool> - <expand macro="inputs" /> - <macros> - <import>external.xml</import> - </macros> -</tool>''') - - tool_dir.write(''' -<macros> - <macro name="inputs"> - <inputs /> - </macro> -</macros>''', name="external.xml") - xml = tool_dir.load(preprocess=False) - assert xml.find("inputs") is None - xml = tool_dir.load(preprocess=True) - assert xml.find("inputs") is not None - - # Test macros with unnamed yield statements. - with TestToolDirectory() as tool_dir: - tool_dir.write(''' -<tool> - <expand macro="inputs"> - <input name="first_input" /> - </expand> - <macros> - <macro name="inputs"> - <inputs> - <yield /> - </inputs> - </macro> - </macros> -</tool>''') - xml = tool_dir.load() - assert xml.find("inputs").find("input").get("name") == "first_input" - - # Test recursive macro applications. - with TestToolDirectory() as tool_dir: - tool_dir.write(''' -<tool> - <expand macro="inputs"> - <input name="first_input" /> - <expand macro="second" /> - </expand> - <macros> - <macro name="inputs"> - <inputs> - <yield /> - </inputs> - </macro> - <macro name="second"> - <input name="second_input" /> - </macro> - </macros> -</tool>''') - xml = tool_dir.load() - assert xml.find("inputs").findall("input")[1].get("name") == "second_input" - - # Test recursive macro applications. 
- with TestToolDirectory() as tool_dir: - tool_dir.write(''' -<tool> - <expand macro="inputs"> - <input name="first_input" /> - <expand macro="second" /> - </expand> - <macros> - <macro name="inputs"> - <inputs> - <yield /> - </inputs> - </macro> - <macro name="second"> - <expand macro="second_delegate" /> - </macro> - <macro name="second_delegate"> - <input name="second_input" /> - </macro> - </macros> -</tool>''') - xml = tool_dir.load() - assert xml.find("inputs").findall("input")[1].get("name") == "second_input" - - # Test <xml> is shortcut for macro type="xml" - with TestToolDirectory() as tool_dir: - tool_dir.write(''' -<tool> - <expand macro="inputs" /> - <macros> - <xml name="inputs"> - <inputs /> - </xml> - </macros> -</tool>''') - xml = tool_dir.load() - assert xml.find("inputs") is not None - - with TestToolDirectory() as tool_dir: - tool_dir.write(''' -<tool> - <command interpreter="python">tool_wrapper.py - #include source=$tool_params - </command> - <macros> - <template name="tool_params">-a 1 -b 2</template> - </macros> -</tool> -''') - xml = tool_dir.load() - params_dict = template_macro_params(xml.getroot()) - assert params_dict['tool_params'] == "-a 1 -b 2" - - with TestToolDirectory() as tool_dir: - tool_dir.write(''' -<tool> - <macros> - <token name="@CITATION@">The citation.</token> - </macros> - <help>@CITATION@</help> - <another> - <tag /> - </another> -</tool> -''') - xml = tool_dir.load() - help_el = xml.find("help") - assert help_el.text == "The citation.", help_el.text - - with TestToolDirectory() as tool_dir: - tool_dir.write(''' -<tool> - <macros> - <token name="@TAG_VAL@">The value.</token> - </macros> - <another> - <tag value="@TAG_VAL@" /> - </another> -</tool> -''') - xml = tool_dir.load() - tag_el = xml.find("another").find("tag") - value = tag_el.get('value') - assert value == "The value.", value diff -r 767d9f567b6fa5f6554ec2180b4f1249587f1473 -r 9051ceb435937348e6417fb96eb9611474496828 test/unit/tools/test_tool_deps.py --- /dev/null +++ b/test/unit/tools/test_tool_deps.py @@ -0,0 +1,378 @@ +import tempfile +import os.path +from stat import S_IXUSR +from os import makedirs, stat, symlink, chmod, environ +from shutil import rmtree +from galaxy.tools.deps import DependencyManager, INDETERMINATE_DEPENDENCY +from galaxy.tools.deps.resolvers.galaxy_packages import GalaxyPackageDependency +from galaxy.tools.deps.resolvers.modules import ModuleDependencyResolver, ModuleDependency +from galaxy.util.bunch import Bunch +from contextlib import contextmanager +from subprocess import Popen, PIPE + + +def test_tool_dependencies(): + # Setup directories + + with __test_base_path() as base_path: + for name, version, sub in [ ( "dep1", "1.0", "env.sh" ), ( "dep1", "2.0", "bin" ), ( "dep2", "1.0", None ) ]: + if sub == "bin": + p = os.path.join( base_path, name, version, "bin" ) + else: + p = os.path.join( base_path, name, version ) + try: + makedirs( p ) + except: + pass + if sub == "env.sh": + __touch( os.path.join( p, "env.sh" ) ) + + dm = DependencyManager( default_base_path=base_path ) + dependency = dm.find_dep( "dep1", "1.0" ) + assert dependency.script == os.path.join( base_path, 'dep1', '1.0', 'env.sh' ) + assert dependency.path == os.path.join( base_path, 'dep1', '1.0' ) + assert dependency.version == "1.0" + dependency = dm.find_dep( "dep1", "2.0" ) + assert dependency.script == None + assert dependency.path == os.path.join( base_path, 'dep1', '2.0' ) + assert dependency.version == "2.0" + + ## Test default versions + symlink( os.path.join( base_path, 'dep1', 
'2.0'), os.path.join( base_path, 'dep1', 'default' ) ) + dependency = dm.find_dep( "dep1", None ) + assert dependency.version == "2.0" + + ## Test default resolve will be fall back on default package dependency + ## when using the default resolver. + dependency = dm.find_dep( "dep1", "2.1" ) + assert dependency.version == "2.0" # 2.0 is defined as default_version + + +TEST_REPO_USER = "devteam" +TEST_REPO_NAME = "bwa" +TEST_REPO_CHANGESET = "12abcd41223da" +TEST_VERSION = "0.5.9" + + +def test_toolshed_set_enviornment_requiremetns(): + with __test_base_path() as base_path: + test_repo = __build_test_repo('set_environment') + dm = DependencyManager( default_base_path=base_path ) + env_settings_dir = os.path.join(base_path, "environment_settings", TEST_REPO_NAME, TEST_REPO_USER, TEST_REPO_NAME, TEST_REPO_CHANGESET) + os.makedirs(env_settings_dir) + dependency = dm.find_dep( TEST_REPO_NAME, version=None, type='set_environment', installed_tool_dependencies=[test_repo] ) + assert dependency.version == None + assert dependency.script == os.path.join(env_settings_dir, "env.sh") + + +def test_toolshed_package_requirements(): + with __test_base_path() as base_path: + test_repo = __build_test_repo('package', version=TEST_VERSION) + dm = DependencyManager( default_base_path=base_path ) + package_dir = __build_ts_test_package(base_path) + dependency = dm.find_dep( TEST_REPO_NAME, version=TEST_VERSION, type='package', installed_tool_dependencies=[test_repo] ) + assert dependency.version == TEST_VERSION + assert dependency.script == os.path.join(package_dir, "env.sh") + + +def test_toolshed_tools_fallback_on_manual_dependencies(): + with __test_base_path() as base_path: + dm = DependencyManager( default_base_path=base_path ) + test_repo = __build_test_repo('package', version=TEST_VERSION) + env_path = __setup_galaxy_package_dep(base_path, "dep1", "1.0") + dependency = dm.find_dep( "dep1", version="1.0", type='package', installed_tool_dependencies=[test_repo] ) + assert dependency.version == "1.0" + assert dependency.script == env_path + + +def test_toolshed_greater_precendence(): + with __test_base_path() as base_path: + dm = DependencyManager( default_base_path=base_path ) + test_repo = __build_test_repo('package', version=TEST_VERSION) + ts_package_dir = __build_ts_test_package(base_path) + gx_env_path = __setup_galaxy_package_dep(base_path, TEST_REPO_NAME, TEST_VERSION) + ts_env_path = os.path.join(ts_package_dir, "env.sh") + dependency = dm.find_dep( TEST_REPO_NAME, version=TEST_VERSION, type='package', installed_tool_dependencies=[test_repo] ) + assert dependency.script != gx_env_path # Not the galaxy path, it should be the tool shed path used. + assert dependency.script == ts_env_path + + +def __build_ts_test_package(base_path, script_contents=''): + package_dir = os.path.join(base_path, TEST_REPO_NAME, TEST_VERSION, TEST_REPO_USER, TEST_REPO_NAME, TEST_REPO_CHANGESET) + __touch(os.path.join(package_dir, 'env.sh'), script_contents) + return package_dir + + +def test_module_dependency_resolver(): + with __test_base_path() as temp_directory: + module_script = os.path.join(temp_directory, "modulecmd") + __write_script(module_script, '''#!/bin/sh +cat %s/example_output 1>&2; +''' % temp_directory) + with open(os.path.join(temp_directory, "example_output"), "w") as f: + # Subset of module avail from MSI cluster. 
+ f.write(''' +-------------------------- /soft/modules/modulefiles --------------------------- +JAGS/3.2.0-gcc45 +JAGS/3.3.0-gcc4.7.2 +ProbABEL/0.1-3 +ProbABEL/0.1-9e +R/2.12.2 +R/2.13.1 +R/2.14.1 +R/2.15.0 +R/2.15.1 +R/3.0.1(default) +abokia-blast/2.0.2-130524/ompi_intel +abokia-blast/2.0.2-130630/ompi_intel + +--------------------------- /soft/intel/modulefiles ---------------------------- +advisor/2013/update1 intel/11.1.075 mkl/10.2.1.017 +advisor/2013/update2 intel/11.1.080 mkl/10.2.5.035 +advisor/2013/update3 intel/12.0 mkl/10.2.7.041 +''') + resolver = ModuleDependencyResolver(None, modulecmd=module_script) + module = resolver.resolve( name="R", version=None, type="package" ) + assert module.module_name == "R" + assert module.module_version == None + + module = resolver.resolve( name="R", version="3.0.1", type="package" ) + assert module.module_name == "R" + assert module.module_version == "3.0.1" + + module = resolver.resolve( name="R", version="3.0.4", type="package" ) + assert module == INDETERMINATE_DEPENDENCY + + +def test_module_dependency(): + with __test_base_path() as temp_directory: + ## Create mock modulecmd script that just exports a variable + ## the way modulecmd sh load would, but also validate correct + ## module name and version are coming through. + mock_modulecmd = os.path.join(temp_directory, 'modulecmd') + __write_script(mock_modulecmd, '''#!/bin/sh +if [ $3 != "foomodule/1.0" ]; +then + exit 1 +fi +echo 'FOO="bar"' +''') + resolver = Bunch(modulecmd=mock_modulecmd) + dependency = ModuleDependency(resolver, "foomodule", "1.0") + __assert_foo_exported( dependency.shell_commands( Bunch( type="package" ) ) ) + + +def __write_script(path, contents): + with open(path, 'w') as f: + f.write(contents) + st = stat(path) + chmod(path, st.st_mode | S_IXUSR) + + +def test_galaxy_dependency_object_script(): + with __test_base_path() as base_path: + ## Create env.sh file that just exports variable FOO and verify it + ## shell_commands export it correctly. + env_path = __setup_galaxy_package_dep(base_path, TEST_REPO_NAME, TEST_VERSION, "export FOO=\"bar\"") + dependency = GalaxyPackageDependency(env_path, os.path.dirname(env_path), TEST_VERSION) + __assert_foo_exported( dependency.shell_commands( Bunch( type="package" ) ) ) + + +def test_shell_commands_built(): + ## Test that dependency manager builds valid shell commands for a list of + ## requirements. 
+ with __test_base_path() as base_path: + dm = DependencyManager( default_base_path=base_path ) + __setup_galaxy_package_dep( base_path, TEST_REPO_NAME, TEST_VERSION, contents="export FOO=\"bar\"" ) + mock_requirements = [ Bunch(type="package", version=TEST_VERSION, name=TEST_REPO_NAME ) ] + commands = dm.dependency_shell_commands( mock_requirements ) + __assert_foo_exported( commands ) + + +def __assert_foo_exported( commands ): + command = ["bash", "-c", "%s; echo \"$FOO\"" % "".join(commands)] + process = Popen(command, stdout=PIPE) + output = process.communicate()[0].strip() + assert output == 'bar', "Command %s exports FOO as %s, not bar" % (command, output) + + +def __setup_galaxy_package_dep(base_path, name, version, contents=""): + dep_directory = os.path.join( base_path, name, version ) + env_path = os.path.join( dep_directory, "env.sh" ) + __touch( env_path, contents ) + return env_path + + +def __touch( fname, data=None ): + dirname = os.path.dirname( fname ) + if not os.path.exists( dirname ): + makedirs( dirname ) + f = open( fname, 'w' ) + try: + if data: + f.write( data ) + finally: + f.close() + + +def __build_test_repo(type, version=None): + return Bunch( + owner=TEST_REPO_USER, + name=TEST_REPO_NAME, + type=type, + version=version, + tool_shed_repository=Bunch( + owner=TEST_REPO_USER, + name=TEST_REPO_NAME, + installed_changeset_revision=TEST_REPO_CHANGESET + ) + ) + + +@contextmanager +def __test_base_path(): + base_path = tempfile.mkdtemp() + try: + yield base_path + finally: + rmtree(base_path) + + +def test_parse(): + with __parse_resolvers('''<dependency_resolvers> + <tool_shed_packages /> + <galaxy_packages /> +</dependency_resolvers> +''') as dependency_resolvers: + assert 'ToolShed' in dependency_resolvers[0].__class__.__name__ + assert 'Galaxy' in dependency_resolvers[1].__class__.__name__ + + with __parse_resolvers('''<dependency_resolvers> + <galaxy_packages /> + <tool_shed_packages /> +</dependency_resolvers> +''') as dependency_resolvers: + assert 'Galaxy' in dependency_resolvers[0].__class__.__name__ + assert 'ToolShed' in dependency_resolvers[1].__class__.__name__ + + with __parse_resolvers('''<dependency_resolvers> + <galaxy_packages /> + <tool_shed_packages /> + <galaxy_packages versionless="true" /> +</dependency_resolvers> +''') as dependency_resolvers: + assert not dependency_resolvers[0].versionless + assert dependency_resolvers[2].versionless + + with __parse_resolvers('''<dependency_resolvers> + <galaxy_packages /> + <tool_shed_packages /> + <galaxy_packages base_path="/opt/galaxy/legacy/"/> +</dependency_resolvers> +''') as dependency_resolvers: + # Unspecified base_paths are both default_base_paths + assert dependency_resolvers[0].base_path == dependency_resolvers[1].base_path + # Can specify custom base path... + assert dependency_resolvers[2].base_path == "/opt/galaxy/legacy" + # ... that is different from the default. 
+ assert dependency_resolvers[0].base_path != dependency_resolvers[2].base_path + + +def test_uses_tool_shed_dependencies(): + with __dependency_manager('''<dependency_resolvers> + <galaxy_packages /> +</dependency_resolvers> +''') as dm: + assert not dm.uses_tool_shed_dependencies() + + with __dependency_manager('''<dependency_resolvers> + <tool_shed_packages /> +</dependency_resolvers> +''') as dm: + assert dm.uses_tool_shed_dependencies() + + +def test_config_module_defaults(): + with __parse_resolvers('''<dependency_resolvers> + <modules prefetch="false" /> +</dependency_resolvers> +''') as dependency_resolvers: + module_resolver = dependency_resolvers[0] + assert module_resolver.module_checker.__class__.__name__ == "AvailModuleChecker" + + +def test_config_modulepath(): + # Test reads and splits MODULEPATH if modulepath is not specified. + with __parse_resolvers('''<dependency_resolvers> + <modules find_by="directory" modulepath="/opt/modules/modulefiles:/usr/local/modules/modulefiles" /> +</dependency_resolvers> +''') as dependency_resolvers: + assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles", "/usr/local/modules/modulefiles"] + + +def test_config_MODULEPATH(): + # Test reads and splits MODULEPATH if modulepath is not specified. + with __environ({"MODULEPATH": "/opt/modules/modulefiles:/usr/local/modules/modulefiles"}): + with __parse_resolvers('''<dependency_resolvers> + <modules find_by="directory" /> +</dependency_resolvers> +''') as dependency_resolvers: + assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles", "/usr/local/modules/modulefiles"] + + +def test_config_MODULESHOME(): + # Test fallbacks to read MODULESHOME if modulepath is not specified and + # neither is MODULEPATH. + with __environ({"MODULESHOME": "/opt/modules"}, remove="MODULEPATH"): + with __parse_resolvers('''<dependency_resolvers> + <modules find_by="directory" /> +</dependency_resolvers> +''') as dependency_resolvers: + assert dependency_resolvers[0].module_checker.directories == ["/opt/modules/modulefiles"] + + +def test_config_module_directory_searcher(): + with __parse_resolvers('''<dependency_resolvers> + <modules find_by="directory" modulepath="/opt/Modules/modulefiles" /> +</dependency_resolvers> +''') as dependency_resolvers: + module_resolver = dependency_resolvers[0] + assert module_resolver.module_checker.directories == ["/opt/Modules/modulefiles"] + + +@contextmanager +def __environ(values, remove=[]): + """ + Modify the environment for a test, adding/updating values in dict `values` and + removing any environment variables mentioned in list `remove`. 
+ """ + new_keys = set(environ.keys()) - set(values.keys()) + old_environ = environ.copy() + try: + environ.update(values) + for to_remove in remove: + try: + del environ[remove] + except KeyError: + pass + yield + finally: + environ.update(old_environ) + for key in new_keys: + del environ[key] + + +@contextmanager +def __parse_resolvers(xml_content): + with __dependency_manager(xml_content) as dm: + yield dm.dependency_resolvers + + +@contextmanager +def __dependency_manager(xml_content): + with __test_base_path() as base_path: + f = tempfile.NamedTemporaryFile() + f.write(xml_content) + f.flush() + dm = DependencyManager( default_base_path=base_path, conf_file=f.name ) + yield dm diff -r 767d9f567b6fa5f6554ec2180b4f1249587f1473 -r 9051ceb435937348e6417fb96eb9611474496828 test/unit/tools/test_tool_loader.py --- /dev/null +++ b/test/unit/tools/test_tool_loader.py @@ -0,0 +1,191 @@ +from tempfile import mkdtemp +from shutil import rmtree +import os + +from galaxy.util import parse_xml +from galaxy.tools.loader import template_macro_params, load_tool + +def test_loader(): + + class TestToolDirectory(object): + def __init__(self): + self.temp_directory = mkdtemp() + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + rmtree(self.temp_directory) + + def write(self, contents, name="tool.xml"): + open(os.path.join(self.temp_directory, name), "w").write(contents) + + def load(self, name="tool.xml", preprocess=True): + if preprocess: + loader = load_tool + else: + loader = parse_xml + return loader(os.path.join(self.temp_directory, name)) + + ## Test simple macro replacement. + with TestToolDirectory() as tool_dir: + tool_dir.write(''' +<tool> + <expand macro="inputs" /> + <macros> + <macro name="inputs"> + <inputs /> + </macro> + </macros> +</tool>''') + xml = tool_dir.load(preprocess=False) + assert xml.find("inputs") is None + xml = tool_dir.load(preprocess=True) + assert xml.find("inputs") is not None + + # Test importing macros from external files + with TestToolDirectory() as tool_dir: + tool_dir.write(''' +<tool> + <expand macro="inputs" /> + <macros> + <import>external.xml</import> + </macros> +</tool>''') + + tool_dir.write(''' +<macros> + <macro name="inputs"> + <inputs /> + </macro> +</macros>''', name="external.xml") + xml = tool_dir.load(preprocess=False) + assert xml.find("inputs") is None + xml = tool_dir.load(preprocess=True) + assert xml.find("inputs") is not None + + # Test macros with unnamed yield statements. + with TestToolDirectory() as tool_dir: + tool_dir.write(''' +<tool> + <expand macro="inputs"> + <input name="first_input" /> + </expand> + <macros> + <macro name="inputs"> + <inputs> + <yield /> + </inputs> + </macro> + </macros> +</tool>''') + xml = tool_dir.load() + assert xml.find("inputs").find("input").get("name") == "first_input" + + # Test recursive macro applications. + with TestToolDirectory() as tool_dir: + tool_dir.write(''' +<tool> + <expand macro="inputs"> + <input name="first_input" /> + <expand macro="second" /> + </expand> + <macros> + <macro name="inputs"> + <inputs> + <yield /> + </inputs> + </macro> + <macro name="second"> + <input name="second_input" /> + </macro> + </macros> +</tool>''') + xml = tool_dir.load() + assert xml.find("inputs").findall("input")[1].get("name") == "second_input" + + # Test recursive macro applications. 
+    with TestToolDirectory() as tool_dir:
+        tool_dir.write('''
+<tool>
+    <expand macro="inputs">
+        <input name="first_input" />
+        <expand macro="second" />
+    </expand>
+    <macros>
+        <macro name="inputs">
+            <inputs>
+                <yield />
+            </inputs>
+        </macro>
+        <macro name="second">
+            <expand macro="second_delegate" />
+        </macro>
+        <macro name="second_delegate">
+            <input name="second_input" />
+        </macro>
+    </macros>
+</tool>''')
+        xml = tool_dir.load()
+        assert xml.find("inputs").findall("input")[1].get("name") == "second_input"
+
+    # Test <xml> is a shortcut for macro type="xml".
+    with TestToolDirectory() as tool_dir:
+        tool_dir.write('''
+<tool>
+    <expand macro="inputs" />
+    <macros>
+        <xml name="inputs">
+            <inputs />
+        </xml>
+    </macros>
+</tool>''')
+        xml = tool_dir.load()
+        assert xml.find("inputs") is not None
+
+    with TestToolDirectory() as tool_dir:
+        tool_dir.write('''
+<tool>
+    <command interpreter="python">tool_wrapper.py
+    #include source=$tool_params
+    </command>
+    <macros>
+        <template name="tool_params">-a 1 -b 2</template>
+    </macros>
+</tool>
+''')
+        xml = tool_dir.load()
+        params_dict = template_macro_params(xml.getroot())
+        assert params_dict['tool_params'] == "-a 1 -b 2"
+
+    with TestToolDirectory() as tool_dir:
+        tool_dir.write('''
+<tool>
+    <macros>
+        <token name="@CITATION@">The citation.</token>
+    </macros>
+    <help>@CITATION@</help>
+    <another>
+        <tag />
+    </another>
+</tool>
+''')
+        xml = tool_dir.load()
+        help_el = xml.find("help")
+        assert help_el.text == "The citation.", help_el.text
+
+    with TestToolDirectory() as tool_dir:
+        tool_dir.write('''
+<tool>
+    <macros>
+        <token name="@TAG_VAL@">The value.</token>
+    </macros>
+    <another>
+        <tag value="@TAG_VAL@" />
+    </another>
+</tool>
+''')
+        xml = tool_dir.load()
+        tag_el = xml.find("another").find("tag")
+        value = tag_el.get('value')
+        assert value == "The value.", value

https://bitbucket.org/galaxy/galaxy-central/commits/fac578cca758/
Changeset: fac578cca758
User: jmchilton
Date: 2014-02-22 05:09:57
Summary: Introduce DatasetMatcher to simplify DataToolParameter...

This reduces code duplication related to dataset_collectors now and abstracts out important functionality I reuse downstream to collect dataset collections. It was originally added in 28d43f4 as DatasetParamContext and backed out right away. I have reworked it so that it no longer breaks implicit conversion, the relevant classes and methods have less generic names, and it has a healthy set of test cases. In addition to basic tests on matching datasets to parameters, selections, and implicit conversions, these tests include testing of dataset security in conjunction with data_destination tools as well as filtering data parameters on other data parameters.
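To make the new API concrete, here is a rough sketch of the consumption pattern this changeset enables (an illustration only, not code from the commit; the field, history, and parameter objects are assumed to be the usual Galaxy objects supplied by the caller, and the option labels are simplified):

    from galaxy.tools.parameters.dataset_matcher import DatasetMatcher

    def add_dataset_options( field, history, trans, param, value, other_values ):
        # DatasetMatcher centralizes state, security, filter, and datatype checks.
        dataset_matcher = DatasetMatcher( trans, param, value, other_values )
        for hda in history.active_datasets_children_and_roles:
            hda_match = dataset_matcher.hda_match( hda )
            if not hda_match:
                # Errored, inaccessible, filtered out, or datatype mismatch.
                continue
            selected = dataset_matcher.selected( hda )
            if hda_match.implicit_conversion:
                # hda_match.hda may be a previously converted dataset and
                # target_ext is the datatype the HDA would be presented as.
                label = "%s: (as %s) %s" % ( hda.hid, hda_match.target_ext, hda.name )
            else:
                label = "%s: %s" % ( hda.hid, hda.name )
            field.add_option( label, hda_match.hda.id, selected )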
Affected #: 3 files

diff -r 9051ceb435937348e6417fb96eb9611474496828 -r fac578cca75887d811e67993a655fed20c0d8713 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -16,6 +16,7 @@
 from sanitize import ToolParameterSanitizer
 import validation
 import dynamic_options
+from .dataset_matcher import DatasetMatcher
 # For BaseURLToolParameter
 from galaxy.web import url_for
 from galaxy.model.item_attrs import Dictifiable
@@ -1610,52 +1611,36 @@
         self.conversions.append( ( name, conv_extensions, conv_types ) )

     def get_html_field( self, trans=None, value=None, other_values={} ):
-        filter_value = None
-        if self.options:
-            try:
-                filter_value = self.options.get_options( trans, other_values )[0][0]
-            except IndexError:
-                pass # no valid options
         history = self._get_history( trans )
         if value is not None:
             if type( value ) != list:
                 value = [ value ]
+        dataset_matcher = DatasetMatcher( trans, self, value, other_values )
         field = form_builder.SelectField( self.name, self.multiple, None, self.refresh_on_change, refresh_on_change_values=self.refresh_on_change_values )
         # CRUCIAL: the dataset_collector function needs to be local to DataToolParameter.get_html_field()
         def dataset_collector( hdas, parent_hid ):
-            current_user_roles = trans.get_current_user_roles()
             for i, hda in enumerate( hdas ):
                 hda_name = hda.name
                 if parent_hid is not None:
                     hid = "%s.%d" % ( parent_hid, i + 1 )
                 else:
                     hid = str( hda.hid )
-                if not hda.dataset.state in [galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED] and \
-                    ( hda.visible or ( value and hda in value and not hda.implicitly_converted_parent_datasets ) ) and \
-                    trans.app.security_agent.can_access_dataset( current_user_roles, hda.dataset ):
-                    # If we are sending data to an external application, then we need to make sure there are no roles
-                    # associated with the dataset that restrict it's access from "public".
-                    if self.tool and self.tool.tool_type == 'data_destination' and not trans.app.security_agent.dataset_is_public( hda.dataset ):
-                        continue
-                    if self.options and self._options_filter_attribute( hda ) != filter_value:
-                        continue
-                    if hda.datatype.matches_any( self.formats ):
-                        selected = ( value and ( hda in value ) )
-                        if hda.visible:
-                            hidden_text = ""
-                        else:
-                            hidden_text = " (hidden)"
-                        field.add_option( "%s:%s %s" % ( hid, hidden_text, hda_name ), hda.id, selected )
+                hda_match = dataset_matcher.hda_match( hda )
+                if not hda_match:
+                    continue
+                if not hda_match.implicit_conversion:
+                    selected = dataset_matcher.selected( hda )
+                    if hda.visible:
+                        hidden_text = ""
                     else:
-                        target_ext, converted_dataset = hda.find_conversion_destination( self.formats )
-                        if target_ext:
-                            if converted_dataset:
-                                hda = converted_dataset
-                            if not trans.app.security_agent.can_access_dataset( current_user_roles, hda.dataset ):
-                                continue
-                            selected = ( value and ( hda in value ) )
-                            field.add_option( "%s: (as %s) %s" % ( hid, target_ext, hda_name ), hda.id, selected )
+                        hidden_text = " (hidden)"
+                    field.add_option( "%s:%s %s" % ( hid, hidden_text, hda_name ), hda.id, selected )
+                else:
+                    hda = hda_match.hda # Get converted dataset
+                    target_ext = hda_match.target_ext
+                    selected = dataset_matcher.selected( hda )
+                    field.add_option( "%s: (as %s) %s" % ( hid, target_ext, hda_name ), hda.id, selected )
                 # Also collect children via association object
                 dataset_collector( hda.children, hid )
         dataset_collector( history.active_datasets_children_and_roles, None )
@@ -1685,30 +1670,18 @@
         if trans is None or trans.workflow_building_mode or trans.webapp.name == 'tool_shed':
             return DummyDataset()
         history = self._get_history( trans, history )
+        dataset_matcher = DatasetMatcher( trans, self, None, context )
         if self.optional:
             return None
         most_recent_dataset = []
-        filter_value = None
-        if self.options:
-            try:
-                filter_value = self.options.get_options( trans, context )[0][0]
-            except IndexError:
-                pass # no valid options
         def dataset_collector( datasets ):
             for i, data in enumerate( datasets ):
-                if data.visible and not data.deleted and data.state not in [data.states.ERROR, data.states.DISCARDED]:
-                    is_valid = False
-                    if data.datatype.matches_any( self.formats ):
-                        is_valid = True
-                    else:
-                        target_ext, converted_dataset = data.find_conversion_destination( self.formats )
-                        if target_ext:
-                            is_valid = True
-                            if converted_dataset:
-                                data = converted_dataset
-                    if not is_valid or ( self.options and self._options_filter_attribute( data ) != filter_value ):
+                if data.visible and dataset_matcher.hda_accessible( data, check_security=False ):
+                    match = dataset_matcher.valid_hda_match( data, check_security=False )
+                    if not match or dataset_matcher.filter( match.hda ):
                         continue
+                    data = match.hda
                     most_recent_dataset.append(data)
                 # Also collect children via association object
                 dataset_collector( data.children )

diff -r 9051ceb435937348e6417fb96eb9611474496828 -r fac578cca75887d811e67993a655fed20c0d8713 lib/galaxy/tools/parameters/dataset_matcher.py
--- /dev/null
+++ b/lib/galaxy/tools/parameters/dataset_matcher.py
@@ -0,0 +1,126 @@
+import galaxy.model
+
+from logging import getLogger
+log = getLogger( __name__ )
+
+ROLES_UNSET = object()
+INVALID_STATES = [ galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED ]
+
+
+class DatasetMatcher( object ):
+    """ Utility class to aid DataToolParameter and similar classes in reasoning
+    about what HDAs could match or are selected for a parameter and value.
+
+    Goal here is to both encapsulate and reuse logic related to filtering,
+    datatype matching, hiding errored datasets, finding implicit conversions,
+    and permission handling.
+    """
+
+    def __init__( self, trans, param, value, other_values ):
+        self.trans = trans
+        self.param = param
+        self.tool = param.tool
+        self.value = value
+        self.current_user_roles = ROLES_UNSET
+        filter_value = None
+        if param.options:
+            try:
+                filter_value = param.options.get_options( trans, other_values )[0][0]
+            except IndexError:
+                pass # no valid options
+        self.filter_value = filter_value
+
+    def hda_accessible( self, hda, check_security=True ):
+        """ Does this HDA correspond to a dataset that is in a valid state
+        and is accessible to the user?
+        """
+        dataset = hda.dataset
+        state_valid = dataset.state not in INVALID_STATES
+        return state_valid and ( not check_security or self.__can_access_dataset( dataset ) )
+
+    def valid_hda_match( self, hda, check_implicit_conversions=True, check_security=False ):
+        """ Return False if this parameter cannot be matched to the supplied
+        HDA, otherwise return a description of the match (either an
+        HdaDirectMatch describing a direct match or an HdaImplicitMatch
+        describing an implicit conversion).
+        """
+        if self.filter( hda ):
+            return False
+        formats = self.param.formats
+        if hda.datatype.matches_any( formats ):
+            return HdaDirectMatch( hda )
+        if not check_implicit_conversions:
+            return False
+        target_ext, converted_dataset = hda.find_conversion_destination( formats )
+        if target_ext:
+            if converted_dataset:
+                hda = converted_dataset
+            if check_security and not self.__can_access_dataset( hda.dataset ):
+                return False
+            return HdaImplicitMatch( hda, target_ext )
+        return False
+
+    def hda_match( self, hda, check_implicit_conversions=True ):
+        """ If the HDA is accessible, return information about whether it
+        could match this parameter and if so how. See valid_hda_match for
+        more information.
+        """
+        accessible = self.hda_accessible( hda )
+        if accessible and ( hda.visible or ( self.selected( hda ) and not hda.implicitly_converted_parent_datasets ) ):
+            # If we are sending data to an external application, then we need to make sure there are no roles
+            # associated with the dataset that restrict its access from "public".
+            require_public = self.tool and self.tool.tool_type == 'data_destination'
+            if require_public and not self.trans.app.security_agent.dataset_is_public( hda.dataset ):
+                return False
+            if self.filter( hda ):
+                return False
+            return self.valid_hda_match( hda, check_implicit_conversions=check_implicit_conversions )
+
+    def selected( self, hda ):
+        """ Given the value for this DataToolParameter, is this HDA "selected"?
+        """
+        value = self.value
+        return value and hda in value
+
+    def filter( self, hda ):
+        """ Filter out this value based on other values for the job (if
+        applicable).
+        """
+        param = self.param
+        return param.options and param._options_filter_attribute( hda ) != self.filter_value
+
+    def __can_access_dataset( self, dataset ):
+        # Lazily cache current_user_roles.
+        if self.current_user_roles is ROLES_UNSET:
+            self.current_user_roles = self.trans.get_current_user_roles()
+        return self.trans.app.security_agent.can_access_dataset( self.current_user_roles, dataset )
+
+
+class HdaDirectMatch( object ):
+    """ Supplied HDA was a valid option directly (did not need to find an
+    implicit conversion).
+ """ + + def __init__( self, hda ): + self.hda = hda + + @property + def implicit_conversion( self ): + return False + + +class HdaImplicitMatch( object ): + """ Supplied HDA was a valid option directly (did not need to find implicit + conversion). + """ + + def __init__( self, hda, target_ext ): + self.hda = hda + self.target_ext = target_ext + + @property + def implicit_conversion( self ): + return True + + +__all__ = [ DatasetMatcher ] diff -r 9051ceb435937348e6417fb96eb9611474496828 -r fac578cca75887d811e67993a655fed20c0d8713 test/unit/tools/test_dataset_matcher.py --- /dev/null +++ b/test/unit/tools/test_dataset_matcher.py @@ -0,0 +1,167 @@ +from unittest import TestCase + +from galaxy import model +from galaxy.util import bunch +from galaxy.tools.parameters import basic +from galaxy.tools.parameters import dataset_matcher + +from elementtree.ElementTree import XML + +import tools_support +from .test_data_parameters import MockHistoryDatasetAssociation + + +class DatasetMatcherTestCase( TestCase, tools_support.UsesApp ): + + def test_hda_accessible( self ): + # Cannot access errored or discard datasets. + self.mock_hda.dataset.state = model.Dataset.states.ERROR + assert not self.test_context.hda_accessible( self.mock_hda ) + + self.mock_hda.dataset.state = model.Dataset.states.DISCARDED + assert not self.test_context.hda_accessible( self.mock_hda ) + + # Can access datasets in other states. + self.mock_hda.dataset.state = model.Dataset.states.OK + assert self.test_context.hda_accessible( self.mock_hda ) + + self.mock_hda.dataset.state = model.Dataset.states.QUEUED + assert self.test_context.hda_accessible( self.mock_hda ) + + # Cannot access dataset if security agent says no. + self.app.security_agent.can_access_dataset = lambda roles, dataset: False + assert not self.test_context.hda_accessible( self.mock_hda ) + + def test_selected( self ): + self.test_context.value = [] + assert not self.test_context.selected( self.mock_hda ) + + self.test_context.value = [ self.mock_hda ] + assert self.test_context.selected( self.mock_hda ) + + def test_hda_mismatches( self ): + # Datasets not visible are not "valid" for param. + self.mock_hda.visible = False + assert not self.test_context.hda_match( self.mock_hda ) + + # Datasets that don't match datatype are not valid. + self.mock_hda.visible = True + self.mock_hda.datatype_matches = False + assert not self.test_context.hda_match( self.mock_hda ) + + def test_valid_hda_direct_match( self ): + # Datasets that visible and matching are valid + self.mock_hda.visible = True + self.mock_hda.datatype_matches = True + hda_match = self.test_context.hda_match( self.mock_hda, check_implicit_conversions=False ) + assert hda_match + + # Match is not a conversion and so matching hda is the same hda + # supplied. + assert not hda_match.implicit_conversion + assert hda_match.hda == self.mock_hda + + def test_valid_hda_implicit_convered( self ): + # Find conversion returns an HDA to an already implicitly converted + # dataset. + self.mock_hda.datatype_matches = False + converted_hda = model.HistoryDatasetAssociation() + self.mock_hda.conversion_destination = ( "tabular", converted_hda ) + hda_match = self.test_context.hda_match( self.mock_hda ) + + assert hda_match + assert hda_match.implicit_conversion + assert hda_match.hda == converted_hda + assert hda_match.target_ext == "tabular" + + def test_hda_match_implicit_can_convert( self ): + # Find conversion returns a target extension to convert to, but not + # a previously implicitly converted dataset. 
+        self.mock_hda.datatype_matches = False
+        self.mock_hda.conversion_destination = ( "tabular", None )
+        hda_match = self.test_context.hda_match( self.mock_hda )
+
+        assert hda_match
+        assert hda_match.implicit_conversion
+        assert hda_match.hda == self.mock_hda
+        assert hda_match.target_ext == "tabular"
+
+    def test_hda_match_properly_skips_conversion( self ):
+        self.mock_hda.datatype_matches = False
+        self.mock_hda.conversion_destination = ( "tabular", bunch.Bunch() )
+        hda_match = self.test_context.hda_match( self.mock_hda, check_implicit_conversions=False )
+        assert not hda_match
+
+    def test_data_destination_tools_require_public( self ):
+        self.tool.tool_type = "data_destination"
+
+        # Public datasets are okay and valid.
+        self.app.security_agent.dataset_is_public = lambda dataset: True
+        hda_match = self.test_context.hda_match( self.mock_hda )
+        assert hda_match
+
+        # Non-public datasets are not valid.
+        self.app.security_agent.dataset_is_public = lambda dataset: False
+        hda_match = self.test_context.hda_match( self.mock_hda )
+        assert not hda_match
+
+    def test_filtered_hda_unmatched_key( self ):
+        self.filtered_param = True
+        data1_val = model.HistoryDatasetAssociation()
+        data1_val.dbkey = "hg18"
+        self.other_values = { "data1": data1_val }
+        assert self.test_context.filter_value == "hg18"
+
+        # mock_hda is hg19, other is hg18 so should not be a "valid hda".
+        hda_match = self.test_context.hda_match( self.mock_hda )
+        assert not hda_match
+
+    def test_filtered_hda_matched_key( self ):
+        self.filtered_param = True
+        data1_val = model.HistoryDatasetAssociation()
+        data1_val.dbkey = "hg19"
+        self.other_values = { "data1": data1_val }
+
+        # Other param value and this dataset are both hg19, should be valid.
+        hda_match = self.test_context.hda_match( self.mock_hda )
+        assert hda_match
+
+    def setUp( self ):
+        self.setup_app()
+        self.mock_hda = MockHistoryDatasetAssociation()
+        self.tool = bunch.Bunch(
+            app=self.app,
+            tool_type="default",
+        )
+        self.current_user_roles = []
+        self.other_values = {}
+
+        # Reset lazily generated stuff.
+        self.filtered_param = False
+        self._test_context = None
+        self.param = None
+
+    @property
+    def test_context( self ):
+        if self._test_context is None:
+            option_xml = ""
+            if self.filtered_param:
+                option_xml = '''<options><filter type="data_meta" ref="data1" key="dbkey" /></options>'''
+            param_xml = XML( '''<param name="data2" type="data" ext="txt">%s</param>''' % option_xml )
+            self.param = basic.DataToolParameter(
+                tool=self.tool,
+                elem=param_xml,
+            )
+            self._test_context = dataset_matcher.DatasetMatcher(
+                trans=bunch.Bunch(
+                    app=self.app,
+                    get_current_user_roles=lambda: self.current_user_roles,
+                    workflow_building_mode=True,
+                ),
+                param=self.param,
+                value=[],
+                other_values=self.other_values
+            )
+        return self._test_context

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving this
because you have the service enabled, addressing the recipient of this
email.