4 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e032250c3f2e/
Changeset: e032250c3f2e
User: jmchilton
Date: 2014-08-27 22:32:26
Summary: Update readme for functional test tools directory.
Affected #: 1 file
diff -r a66a80701913bb99e8abd12235c1ca82dc508f57 -r
e032250c3f2eef09002a00d6d1035de0928e58dd test/functional/tools/README.txt
--- a/test/functional/tools/README.txt
+++ b/test/functional/tools/README.txt
@@ -1,3 +1,15 @@
-This directory contains tools only useful for testing the tool test framework
-and demonstrating it features. Run the driver script 'run_functional_tests.sh'
-with '-framework' as first argument to run through these tests.
+This directory contains tools only useful for testing and
+demonstrating aspects of the tool syntax. Run the test driver script
+'run_tests.sh' with the '-framework' as first argument to run through
+these tests. Pass in an '-id' along with one of these tool ids to test
+a single tool.
+
+Some API tests use these tools to test various features of the API,
+tool, and workflow subsystems. Pass the argument
+'-with_framework_test_tools' to 'run_tests.sh' in addition to
'-api'
+to ensure these tools get loaded during the testing process.
+
+Finally, to play around with these tools interactively - simply
+replace the 'universe_wsgi.ini' option 'tool_config_file' with:
+
+tool_config_file = test/functional/tools/samples_tool_conf.xml
https://bitbucket.org/galaxy/galaxy-central/commits/14230488f497/
Changeset: 14230488f497
User: jmchilton
Date: 2014-08-27 22:32:26
Summary: Add some more basic test tools for building simple workflows for testing.
Affected #: 4 files
diff -r e032250c3f2eef09002a00d6d1035de0928e58dd -r
14230488f49758d1abe748cd7500f9719a3390b6 test/functional/tools/for_workflows/cat.xml
--- /dev/null
+++ b/test/functional/tools/for_workflows/cat.xml
@@ -0,0 +1,19 @@
+<tool id="cat" name="Concatenate datasets (for test
workflows)">
+ <description>tail-to-head</description>
+ <command>
+ cat $input1 #for $q in $queries# ${q.input2} #end for# > $out_file1
+ </command>
+ <inputs>
+ <param name="input1" type="data" label="Concatenate
Dataset"/>
+ <repeat name="queries" title="Dataset">
+ <param name="input2" type="data"
label="Select" />
+ </repeat>
+ </inputs>
+ <outputs>
+ <data name="out_file1" format="input"
metadata_source="input1"/>
+ </outputs>
+ <tests>
+ </tests>
+ <help>
+ </help>
+</tool>
diff -r e032250c3f2eef09002a00d6d1035de0928e58dd -r
14230488f49758d1abe748cd7500f9719a3390b6 test/functional/tools/for_workflows/cat_list.xml
--- /dev/null
+++ b/test/functional/tools/for_workflows/cat_list.xml
@@ -0,0 +1,16 @@
+<tool id="cat_list" name="Concatenate dataset list (for test
workflows)">
+ <description>tail-to-head</description>
+ <command>
+ cat #for $q in $input1# $q #end for# > $out_file1
+ </command>
+ <inputs>
+ <param name="input1" type="data" label="Concatenate
Dataset" multiple="true" />
+ </inputs>
+ <outputs>
+ <data name="out_file1" format="input"
metadata_source="input1"/>
+ </outputs>
+ <tests>
+ </tests>
+ <help>
+ </help>
+</tool>
diff -r e032250c3f2eef09002a00d6d1035de0928e58dd -r
14230488f49758d1abe748cd7500f9719a3390b6 test/functional/tools/for_workflows/head.xml
--- /dev/null
+++ b/test/functional/tools/for_workflows/head.xml
@@ -0,0 +1,13 @@
+<tool id="head" name="Select first">
+ <description>lines from a dataset</description>
+ <command>head $input $lineNum > $out_file1</command>
+ <inputs>
+ <param name="lineNum" size="5" type="integer"
value="10" label="Select first" help="lines"/>
+ <param format="txt" name="input" type="data"
label="from"/>
+ </inputs>
+ <outputs>
+ <data format="input" name="out_file1"
metadata_source="input"/>
+ </outputs>
+ <help>
+ </help>
+</tool>
diff -r e032250c3f2eef09002a00d6d1035de0928e58dd -r
14230488f49758d1abe748cd7500f9719a3390b6 test/functional/tools/samples_tool_conf.xml
--- a/test/functional/tools/samples_tool_conf.xml
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -23,4 +23,10 @@
<tool file="collection_mixed_param.xml" /><tool
file="collection_two_paired.xml" /><tool
file="collection_optional_param.xml" />
-</toolbox>
\ No newline at end of file
+
+ <!-- Tools interesting only for building up test workflows. -->
+ <tool file="for_workflows/cat.xml" />
+ <tool file="for_workflows/cat_list.xml" />
+ <tool file="for_workflows/head.xml" />
+
+</toolbox>
https://bitbucket.org/galaxy/galaxy-central/commits/c692332612dd/
Changeset: c692332612dd
User: jmchilton
Date: 2014-08-27 22:32:26
Summary: Remove workflow test that accidentally depended on tool not in -central.
Oops.
Affected #: 3 files
diff -r 14230488f49758d1abe748cd7500f9719a3390b6 -r
c692332612dd5c8d7eff798cdba4f2e91d4ff5a1 test/api/helpers.py
--- a/test/api/helpers.py
+++ b/test/api/helpers.py
@@ -11,7 +11,6 @@
# Simple workflow that takes an input and filters with random lines twice in a
# row - first grabbing 8 lines at random and then 6.
workflow_random_x2_str = resource_string( __name__, "test_workflow_2.ga" )
-workflow_two_paired_str = resource_string( __name__,
"test_workflow_two_paired.ga" )
DEFAULT_HISTORY_TIMEOUT = 10 # Secs to wait on history to turn ok
@@ -161,9 +160,6 @@
def load_random_x2_workflow( self, name ):
return self.load_workflow( name, content=workflow_random_x2_str )
- def load_two_paired_workflow( self, name ):
- return self.load_workflow( name, content=workflow_two_paired_str )
-
def simple_workflow( self, name, **create_kwds ):
workflow = self.load_workflow( name )
return self.create_workflow( workflow, **create_kwds )
diff -r 14230488f49758d1abe748cd7500f9719a3390b6 -r
c692332612dd5c8d7eff798cdba4f2e91d4ff5a1 test/api/test_workflow_two_paired.ga
--- a/test/api/test_workflow_two_paired.ga
+++ /dev/null
@@ -1,116 +0,0 @@
-{
- "a_galaxy_workflow": "true",
- "annotation": "",
- "format-version": "0.1",
- "name": "MultipairTest223",
- "steps": {
- "0": {
- "annotation": "",
- "id": 0,
- "input_connections": {},
- "inputs": [
- {
- "description": "",
- "name": "f1"
- }
- ],
- "name": "Input dataset collection",
- "outputs": [],
- "position": {
- "left": 302.3333435058594,
- "top": 330
- },
- "tool_errors": null,
- "tool_id": null,
- "tool_state": "{\"collection_type\":
\"paired\", \"name\": \"f1\"}",
- "tool_version": null,
- "type": "data_collection_input",
- "user_outputs": []
- },
- "1": {
- "annotation": "",
- "id": 1,
- "input_connections": {},
- "inputs": [
- {
- "description": "",
- "name": "f2"
- }
- ],
- "name": "Input dataset collection",
- "outputs": [],
- "position": {
- "left": 288.3333435058594,
- "top": 446
- },
- "tool_errors": null,
- "tool_id": null,
- "tool_state": "{\"collection_type\":
\"paired\", \"name\": \"f2\"}",
- "tool_version": null,
- "type": "data_collection_input",
- "user_outputs": []
- },
- "2": {
- "annotation": "",
- "id": 2,
- "input_connections": {
- "kind|f1": {
- "id": 0,
- "output_name": "output"
- },
- "kind|f2": {
- "id": 1,
- "output_name": "output"
- }
- },
- "inputs": [],
- "name": "collection_two_paired",
- "outputs": [
- {
- "name": "out1",
- "type": "txt"
- }
- ],
- "position": {
- "left": 782.3333740234375,
- "top": 200
- },
- "post_job_actions": {},
- "tool_errors": null,
- "tool_id": "collection_two_paired",
- "tool_state": "{\"__page__\": 0, \"kind\":
\"{\\\"f1\\\": null, \\\"f2\\\": null,
\\\"collection_type\\\": \\\"paired\\\",
\\\"__current_case__\\\": 0}\", \"__rerun_remap_job_id__\":
null}",
- "tool_version": "0.1.0",
- "type": "tool",
- "user_outputs": []
- },
- "3": {
- "annotation": "",
- "id": 3,
- "input_connections": {
- "cond1|input1": {
- "id": 2,
- "output_name": "out1"
- }
- },
- "inputs": [],
- "name": "Concatenate datasets",
- "outputs": [
- {
- "name": "out_file1",
- "type": "input"
- }
- ],
- "position": {
- "left": 1239.3333740234375,
- "top": 108.97916793823242
- },
- "post_job_actions": {},
- "tool_errors": null,
- "tool_id": "cat2",
- "tool_state": "{\"__page__\": 0,
\"__rerun_remap_job_id__\": null, \"cond1\":
\"{\\\"datatype\\\": \\\"txt\\\", \\\"input1\\\": null,
\\\"__current_case__\\\": 0}\"}",
- "tool_version": "1.0.0",
- "type": "tool",
- "user_outputs": []
- }
- }
-}
\ No newline at end of file
diff -r 14230488f49758d1abe748cd7500f9719a3390b6 -r
c692332612dd5c8d7eff798cdba4f2e91d4ff5a1 test/api/test_workflows.py
--- a/test/api/test_workflows.py
+++ b/test/api/test_workflows.py
@@ -149,26 +149,6 @@
run_workflow_response = self._post( "workflows", data=workflow_request
)
self._assert_status_code_is( run_workflow_response, 403 )
- @skip_without_tool( "cat1" )
- @skip_without_tool( "collection_two_paired" )
- def test_run_workflow_collection_params( self ):
- workflow = self.workflow_populator.load_two_paired_workflow(
name="test_for_run_two_paired" )
- workflow_id = self.workflow_populator.create_workflow( workflow )
- history_id = self.dataset_populator.new_history()
- hdca1 = self.dataset_collection_populator.create_pair_in_history( history_id,
contents=["1 2 3", "4 5 6"] ).json()
- hdca2 = self.dataset_collection_populator.create_pair_in_history( history_id,
contents=["7 8 9", "0 a b"] ).json()
- self.dataset_populator.wait_for_history( history_id, assert_ok=True )
- label_map = { "f1": self._ds_entry( hdca1 ), "f2":
self._ds_entry( hdca2 ) }
- workflow_request = dict(
- history="hist_id=%s" % history_id,
- workflow_id=workflow_id,
- ds_map=self._build_ds_map( workflow_id, label_map ),
- )
- run_workflow_response = self._post( "workflows", data=workflow_request
)
- self._assert_status_code_is( run_workflow_response, 200 )
- self.dataset_populator.wait_for_history( history_id, assert_ok=True )
- self.assertEquals("1 2 3\n4 5 6\n7 8 9\n0 a b\n",
self.dataset_populator.get_history_dataset_content( history_id ) )
-
def test_workflow_stability( self ):
# Run this index stability test with following command:
# ./run_tests.sh
test/api/test_workflows.py:WorkflowsApiTestCase.test_workflow_stability
https://bitbucket.org/galaxy/galaxy-central/commits/f36c6fc80e8c/
Changeset: f36c6fc80e8c
User: jmchilton
Date: 2014-08-27 22:32:26
Summary: Add workflow test demonstrating failure when multiple lists with different
identifiers are used in workflows.
Affected #: 3 files
diff -r c692332612dd5c8d7eff798cdba4f2e91d4ff5a1 -r
f36c6fc80e8cd2bc6a5c6f77f0806aa7271e65f0 test/api/helpers.py
--- a/test/api/helpers.py
+++ b/test/api/helpers.py
@@ -160,6 +160,12 @@
def load_random_x2_workflow( self, name ):
return self.load_workflow( name, content=workflow_random_x2_str )
+ def load_workflow_from_resource( self, name, filename=None ):
+ if filename is None:
+ filename = "%s.ga" % name
+ content = resource_string( __name__, filename )
+ return self.load_workflow( name, content=content )
+
def simple_workflow( self, name, **create_kwds ):
workflow = self.load_workflow( name )
return self.create_workflow( workflow, **create_kwds )
@@ -319,8 +325,14 @@
def list_identifiers( self, history_id, contents=None ):
count = 3 if not contents else len( contents )
- hdas = self.__datasets( history_id, count=count, contents=contents )
- hda_to_identifier = lambda ( i, hda ): dict( name="data%d" % ( i + 1 ),
src="hda", id=hda[ "id" ] )
+ # Contents can be a list of strings (with name auto-assigned here) or a list of
+ # 2-tuples of form (name, dataset_content).
+ if contents and isinstance(contents[0], tuple):
+ hdas = self.__datasets( history_id, count=count, contents=[c[1] for c in
contents] )
+ hda_to_identifier = lambda ( i, hda ): dict( name=contents[i][0],
src="hda", id=hda[ "id" ] )
+ else:
+ hdas = self.__datasets( history_id, count=count, contents=contents )
+ hda_to_identifier = lambda ( i, hda ): dict( name="data%d" % ( i +
1 ), src="hda", id=hda[ "id" ] )
element_identifiers = map( hda_to_identifier, enumerate( hdas ) )
return element_identifiers
diff -r c692332612dd5c8d7eff798cdba4f2e91d4ff5a1 -r
f36c6fc80e8cd2bc6a5c6f77f0806aa7271e65f0 test/api/test_workflow_matching_lists.ga
--- /dev/null
+++ b/test/api/test_workflow_matching_lists.ga
@@ -0,0 +1,117 @@
+{
+ "a_galaxy_workflow": "true",
+ "annotation": "",
+ "format-version": "0.1",
+ "name": "test_workflow_matching_lists",
+ "steps": {
+ "0": {
+ "annotation": "",
+ "id": 0,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "",
+ "name": "list1"
+ }
+ ],
+ "name": "Input dataset collection",
+ "outputs": [],
+ "position": {
+ "left": 139.833336353302,
+ "top": 162.33334398269653
+ },
+ "tool_errors": null,
+ "tool_id": null,
+ "tool_state": "{\"collection_type\":
\"list\", \"name\": \"list1\"}",
+ "tool_version": null,
+ "type": "data_collection_input",
+ "user_outputs": []
+ },
+ "1": {
+ "annotation": "",
+ "id": 1,
+ "input_connections": {},
+ "inputs": [
+ {
+ "description": "",
+ "name": "list2"
+ }
+ ],
+ "name": "Input dataset collection",
+ "outputs": [],
+ "position": {
+ "left": 141.864586353302,
+ "top": 272.3680577278137
+ },
+ "tool_errors": null,
+ "tool_id": null,
+ "tool_state": "{\"collection_type\":
\"list\", \"name\": \"list2\"}",
+ "tool_version": null,
+ "type": "data_collection_input",
+ "user_outputs": []
+ },
+ "2": {
+ "annotation": "",
+ "id": 2,
+ "input_connections": {
+ "input1": {
+ "id": 0,
+ "output_name": "output"
+ },
+ "queries_0|input2": {
+ "id": 1,
+ "output_name": "output"
+ }
+ },
+ "inputs": [],
+ "name": "Concatenate datasets (for test workflows)",
+ "outputs": [
+ {
+ "name": "out_file1",
+ "type": "input"
+ }
+ ],
+ "position": {
+ "left": 453.40974473953247,
+ "top": 203.4097294807434
+ },
+ "post_job_actions": {},
+ "tool_errors": null,
+ "tool_id": "cat",
+ "tool_state": "{\"__page__\": 0,
\"__rerun_remap_job_id__\": null, \"input1\": \"null\",
\"queries\": \"[{\\\"input2\\\": null,
\\\"__index__\\\": 0}]\"}",
+ "tool_version": "1.0.0",
+ "type": "tool",
+ "user_outputs": []
+ },
+ "3": {
+ "annotation": "",
+ "id": 3,
+ "input_connections": {
+ "input1": {
+ "id": 2,
+ "output_name": "out_file1"
+ }
+ },
+ "inputs": [],
+ "name": "Concatenate dataset list (for test workflows)",
+ "outputs": [
+ {
+ "name": "out_file1",
+ "type": "input"
+ }
+ ],
+ "position": {
+ "left": 828.93061876297,
+ "top": 217.4201512336731
+ },
+ "post_job_actions": {},
+ "tool_errors": null,
+ "tool_id": "cat_list",
+ "tool_state": "{\"__page__\": 0,
\"__rerun_remap_job_id__\": null, \"input1\":
\"null\"}",
+ "tool_version": "1.0.0",
+ "type": "tool",
+ "user_outputs": []
+ }
+ },
+ "uuid": "54aadd3b-9d2b-436a-acfa-246a8c251651"
+}
\ No newline at end of file
diff -r c692332612dd5c8d7eff798cdba4f2e91d4ff5a1 -r
f36c6fc80e8cd2bc6a5c6f77f0806aa7271e65f0 test/api/test_workflows.py
--- a/test/api/test_workflows.py
+++ b/test/api/test_workflows.py
@@ -149,6 +149,26 @@
run_workflow_response = self._post( "workflows", data=workflow_request
)
self._assert_status_code_is( run_workflow_response, 403 )
+ @skip_without_tool( "cat" )
+ @skip_without_tool( "cat_list" )
+ def test_workflow_run_with_matching_lists( self ):
+ workflow = self.workflow_populator.load_workflow_from_resource(
"test_workflow_matching_lists" )
+ workflow_id = self.workflow_populator.create_workflow( workflow )
+ history_id = self.dataset_populator.new_history()
+ hdca1 = self.dataset_collection_populator.create_list_in_history( history_id,
contents=[("sample1-1", "1 2 3"), ("sample2-1", "7 8
9")] ).json()
+ hdca2 = self.dataset_collection_populator.create_list_in_history( history_id,
contents=[("sample1-2", "4 5 6"), ("sample2-2", "0 a
b")] ).json()
+ self.dataset_populator.wait_for_history( history_id, assert_ok=True )
+ label_map = { "list1": self._ds_entry( hdca1 ), "list2":
self._ds_entry( hdca2 ) }
+ workflow_request = dict(
+ history="hist_id=%s" % history_id,
+ workflow_id=workflow_id,
+ ds_map=self._build_ds_map( workflow_id, label_map ),
+ )
+ run_workflow_response = self._post( "workflows", data=workflow_request
)
+ self._assert_status_code_is( run_workflow_response, 200 )
+ self.dataset_populator.wait_for_history( history_id, assert_ok=True )
+ self.assertEquals("1 2 3\n4 5 6\n7 8 9\n0 a b\n",
self.dataset_populator.get_history_dataset_content( history_id ) )
+
def test_workflow_stability( self ):
# Run this index stability test with following command:
# ./run_tests.sh
test/api/test_workflows.py:WorkflowsApiTestCase.test_workflow_stability
Repository URL:
https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from
bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.