galaxy-commits
September 2014
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/89b6cf2df4de/
Changeset: 89b6cf2df4de
User: jmchilton
Date: 2014-09-06 00:55:21
Summary: Tighten permissions for workflow invocations.
Sharing a workflow with a user was previously sufficient to grant access to all invocations of that workflow. This isn't a huge problem, since the information potentially leaking out was limited to invocation counts, various encoded ids, and update times. Still, I think no information about invocations should be available as a result of sharing a workflow - and upcoming changes to Galaxy will result in much more information being made available via the workflow invocation API.
Affected #: 3 files
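For illustration only (this snippet is not part of the changeset), a minimal client-side sketch of the behaviour the new test asserts, using a placeholder Galaxy URL, API keys, and encoded ids: the owner can still read invocation details via the usage endpoint, while any other user - even one the workflow is shared with - now gets a 403.

    import requests

    GALAXY_URL = "https://galaxy.example.org"   # placeholder instance
    OWNER_KEY = "owner-api-key"                 # placeholder API keys
    OTHER_KEY = "other-user-api-key"
    workflow_id = "ebfb8f50c6abde6d"            # placeholder encoded ids
    invocation_id = "2891970512fa2d5a"

    url = "%s/api/workflows/%s/usage/%s" % (GALAXY_URL, workflow_id, invocation_id)

    # The workflow's owner can read the invocation details.
    assert requests.get(url, params={"key": OWNER_KEY}).status_code == 200

    # Any other user - even one the workflow is shared with - is rejected.
    assert requests.get(url, params={"key": OTHER_KEY}).status_code == 403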
diff -r 2604f7623fb459f3cbc6b3e30551e1a8ce0ea1b2 -r 89b6cf2df4deafc0740372bbeaff5ea0f954cd5d lib/galaxy/managers/workflows.py
--- /dev/null
+++ b/lib/galaxy/managers/workflows.py
@@ -0,0 +1,64 @@
+from galaxy import model
+from galaxy import exceptions
+
+
+class WorkflowsManager( object ):
+ """ Handle CRUD type operaitons related to workflows. More interesting
+ stuff regarding workflow execution, step sorting, etc... can be found in
+ the galaxy.workflow module.
+ """
+
+ def __init__( self, app ):
+ self.app = app
+
+ def check_security( self, trans, has_workflow, check_ownership=True, check_accessible=True):
+ """ check accessibility or ownership of workflows, storedworkflows, and
+ workflowinvocations. Throw an exception or returns True if user has
+ needed level of access.
+ """
+ if not check_ownership or check_accessible:
+ return True
+
+ # If given an invocation follow to workflow...
+ if isinstance( has_workflow, model.WorkflowInvocation ):
+ has_workflow = has_workflow.workflow
+
+ # stored workflow contains security stuff - follow that workflow to
+ # that unless given a stored workflow.
+ if hasattr( has_workflow, "stored_workflow" ):
+ stored_workflow = has_workflow.stored_workflow
+ else:
+ stored_workflow = has_workflow
+
+ if stored_workflow.user != trans.user and not trans.user_is_admin():
+ if check_ownership:
+ raise exceptions.ItemOwnershipException()
+ # else check_accessible...
+ if trans.sa_session.query( model.StoredWorkflowUserShareAssociation ).filter_by(user=trans.user, stored_workflow=stored_workflow ).count() == 0:
+ raise exceptions.ItemAccessibilityException()
+
+ return True
+
+ def get_invocation( self, trans, decoded_invocation_id ):
+ try:
+ workflow_invocation = trans.sa_session.query(
+ self.app.model.WorkflowInvocation
+ ).get( decoded_invocation_id )
+ except Exception:
+ raise exceptions.ObjectNotFound()
+ self.check_security( trans, workflow_invocation, check_ownership=True, check_accessible=False )
+ return workflow_invocation
+
+ def build_invocations_query( self, trans, decoded_stored_workflow_id ):
+ try:
+ stored_workflow = trans.sa_session.query(
+ self.app.model.StoredWorkflow
+ ).get( decoded_stored_workflow_id )
+ except Exception:
+ raise exceptions.ObjectNotFound()
+ self.check_security( trans, stored_workflow, check_ownership=True, check_accessible=False )
+ return trans.sa_session.query(
+ model.WorkflowInvocation
+ ).filter_by(
+ workflow_id=stored_workflow.latest_workflow_id
+ )
diff -r 2604f7623fb459f3cbc6b3e30551e1a8ce0ea1b2 -r 89b6cf2df4deafc0740372bbeaff5ea0f954cd5d lib/galaxy/webapps/galaxy/api/workflows.py
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -10,6 +10,7 @@
from galaxy import exceptions, util
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.managers import histories
+from galaxy.managers import workflows
from galaxy.web import _future_expose_api as expose_api
from galaxy.web.base.controller import BaseAPIController, url_for, UsesStoredWorkflowMixin
from galaxy.web.base.controller import UsesHistoryMixin
@@ -26,6 +27,7 @@
def __init__( self, app ):
super( BaseAPIController, self ).__init__( app )
self.history_manager = histories.HistoryManager()
+ self.workflow_manager = workflows.WorkflowsManager( app )
@expose_api
def index(self, trans, **kwd):
@@ -377,15 +379,8 @@
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
- try:
- stored_workflow = trans.sa_session.query(self.app.model.StoredWorkflow).get(trans.security.decode_id(workflow_id))
- except Exception:
- raise exceptions.ObjectNotFound()
- # check to see if user has permissions to selected workflow
- if stored_workflow.user != trans.user and not trans.user_is_admin():
- if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
- raise exceptions.ItemOwnershipException()
- results = trans.sa_session.query(self.app.model.WorkflowInvocation).filter_by(workflow_id=stored_workflow.latest_workflow_id)
+ decoded_stored_workflow_invocation_id = self.__decode_id( trans, workflow_id )
+ results = self.workflow_manager.build_invocations_query( trans, decoded_stored_workflow_invocation_id )
out = []
for r in results:
out.append( self.encode_all_ids( trans, r.to_dict(), True) )
@@ -405,20 +400,10 @@
:raises: exceptions.MessageException, exceptions.ObjectNotFound
"""
-
- try:
- stored_workflow = trans.sa_session.query(self.app.model.StoredWorkflow).get(trans.security.decode_id(workflow_id))
- except Exception:
- raise exceptions.ObjectNotFound()
- # check to see if user has permissions to selected workflow
- if stored_workflow.user != trans.user and not trans.user_is_admin():
- if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
- raise exceptions.ItemOwnershipException()
- results = trans.sa_session.query(self.app.model.WorkflowInvocation).filter_by(workflow_id=stored_workflow.latest_workflow_id)
- results = results.filter_by(id=trans.security.decode_id(usage_id))
- out = results.first()
- if out is not None:
- return self.encode_all_ids( trans, out.to_dict('element'), True)
+ decoded_workflow_invocation_id = self.__decode_id( trans, usage_id )
+ workflow_invocation = self.workflow_manager.get_invocation( trans, decoded_workflow_invocation_id )
+ if workflow_invocation:
+ return self.encode_all_ids( trans, workflow_invocation.to_dict('element'), True)
return None
def __get_stored_accessible_workflow( self, trans, workflow_id ):
diff -r 2604f7623fb459f3cbc6b3e30551e1a8ce0ea1b2 -r 89b6cf2df4deafc0740372bbeaff5ea0f954cd5d test/api/test_workflows.py
--- a/test/api/test_workflows.py
+++ b/test/api/test_workflows.py
@@ -506,22 +506,16 @@
self._assert_has_keys( pja, "action_type", "output_name", "action_arguments" )
@skip_without_tool( "cat1" )
+ def test_only_own_invocations_accessible( self ):
+ workflow_id, usage = self._run_workflow_once_get_invocation( "test_usage")
+ with self._different_user():
+ usage_details_response = self._get( "workflows/%s/usage/%s" % ( workflow_id, usage[ "id" ] ) )
+ self._assert_status_code_is( usage_details_response, 403 )
+
+ @skip_without_tool( "cat1" )
def test_invocation_usage( self ):
- workflow = self.workflow_populator.load_workflow( name="test_usage" )
- workflow_request, history_id = self._setup_workflow_run( workflow )
- workflow_id = workflow_request[ "workflow_id" ]
- response = self._get( "workflows/%s/usage" % workflow_id )
- self._assert_status_code_is( response, 200 )
- assert len( response.json() ) == 0
- run_workflow_response = self._post( "workflows", data=workflow_request )
- self._assert_status_code_is( run_workflow_response, 200 )
-
- response = self._get( "workflows/%s/usage" % workflow_id )
- self._assert_status_code_is( response, 200 )
- usages = response.json()
- assert len( usages ) == 1
-
- usage_details_response = self._get( "workflows/%s/usage/%s" % ( workflow_id, usages[ 0 ][ "id" ] ) )
+ workflow_id, usage = self._run_workflow_once_get_invocation( "test_usage")
+ usage_details_response = self._get( "workflows/%s/usage/%s" % ( workflow_id, usage[ "id" ] ) )
self._assert_status_code_is( usage_details_response, 200 )
usage_details = usage_details_response.json()
# Assert some high-level things about the structure of data returned.
@@ -544,6 +538,22 @@
# renamed to 'the_new_name'.
assert "the_new_name" in map( lambda hda: hda[ "name" ], contents )
+ def _run_workflow_once_get_invocation( self, name ):
+ workflow = self.workflow_populator.load_workflow( name=name )
+ workflow_request, history_id = self._setup_workflow_run( workflow )
+ workflow_id = workflow_request[ "workflow_id" ]
+ response = self._get( "workflows/%s/usage" % workflow_id )
+ self._assert_status_code_is( response, 200 )
+ assert len( response.json() ) == 0
+ run_workflow_response = self._post( "workflows", data=workflow_request )
+ self._assert_status_code_is( run_workflow_response, 200 )
+
+ response = self._get( "workflows/%s/usage" % workflow_id )
+ self._assert_status_code_is( response, 200 )
+ usages = response.json()
+ assert len( usages ) == 1
+ return workflow_id, usages[ 0 ]
+
def _setup_workflow_run( self, workflow, inputs_by='step_id', history_id=None ):
uploaded_workflow_id = self.workflow_populator.create_workflow( workflow )
if not history_id:
https://bitbucket.org/galaxy/galaxy-central/commits/14953d248e9b/
Changeset: 14953d248e9b
User: jmchilton
Date: 2014-09-06 00:55:21
Summary: Update workflow invocation to_dict for recent collection workflow changes.
There may now be multiple WorkflowInvocationSteps for each WorkflowStep when steps are mapped over collections - so having to_dict build a dictionary of this information keyed on step order_index is problematic, because only one WorkflowInvocationStep can be represented per step. Instead, just return a flat list of all of the invocation steps - which contains the same information. This is a backward-incompatible API change for the workflow invocation API.
Also update the input mapping stuff with logic for dealing with data collection inputs.
Affected #: 2 files
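For illustration only (the values below are made up), the shape change this implies for the invocation API response - 'steps' goes from a dict keyed on order_index to a flat list, and collection inputs are tagged with src 'hdca':

    # Old 'element' view: one entry per step, keyed by str(order_index), so a
    # step mapped over a collection could only surface one invocation step.
    old_style = {
        "steps": {
            "0": {"order_index": 0, "workflow_step_id": 10, "id": 100},
            "1": {"order_index": 1, "workflow_step_id": 11, "id": 101},
        },
        "inputs": {"0": {"id": 55, "src": "hda"}},
    }

    # New 'element' view: a flat list, so a collection-mapped step can
    # contribute several invocation steps; collection inputs use src="hdca".
    new_style = {
        "steps": [
            {"order_index": 0, "workflow_step_id": 10, "id": 100},
            {"order_index": 1, "workflow_step_id": 11, "id": 101},
            {"order_index": 1, "workflow_step_id": 11, "id": 102},
        ],
        "inputs": {"0": {"id": 77, "src": "hdca"}},
    }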
diff -r 89b6cf2df4deafc0740372bbeaff5ea0f954cd5d -r 14953d248e9b404589c1e81494517f47c1004e49 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -3081,20 +3081,22 @@
def to_dict( self, view='collection', value_mapper=None ):
rval = super( WorkflowInvocation, self ).to_dict( view=view, value_mapper=value_mapper )
if view == 'element':
- steps = {}
+ steps = []
for step in self.steps:
v = step.to_dict()
- steps[str(v['order_index'])] = v
+ steps.append( v )
rval['steps'] = steps
inputs = {}
for step in self.steps:
if step.workflow_step.type == 'tool':
for step_input in step.workflow_step.input_connections:
- if step_input.output_step.type == 'data_input':
+ output_step_type = step_input.output_step.type
+ if output_step_type in [ 'data_input', 'data_collection_input' ]:
+ src = "hda" if output_step_type == 'data_input' else 'hdca'
for job_input in step.job.input_datasets:
if job_input.name == step_input.input_name:
- inputs[str(step_input.output_step.order_index)] = { "id": job_input.dataset_id, "src": "hda"}
+ inputs[str(step_input.output_step.order_index)] = { "id": job_input.dataset_id, "src": src }
rval['inputs'] = inputs
return rval
diff -r 89b6cf2df4deafc0740372bbeaff5ea0f954cd5d -r 14953d248e9b404589c1e81494517f47c1004e49 test/api/test_workflows.py
--- a/test/api/test_workflows.py
+++ b/test/api/test_workflows.py
@@ -520,8 +520,8 @@
usage_details = usage_details_response.json()
# Assert some high-level things about the structure of data returned.
self._assert_has_keys( usage_details, "inputs", "steps" )
- for step in usage_details[ "steps" ].values():
- self._assert_has_keys( step, "workflow_step_id", "order_index" )
+ for step in usage_details[ "steps" ]:
+ self._assert_has_keys( step, "workflow_step_id", "order_index", "id" )
@skip_without_tool( "cat1" )
def test_post_job_action( self ):
https://bitbucket.org/galaxy/galaxy-central/commits/54efa2c365f4/
Changeset: 54efa2c365f4
User: jmchilton
Date: 2014-09-06 00:55:21
Summary: Refactoring out methods usable for downstream work on scheduling.
Affected #: 2 files
diff -r 14953d248e9b404589c1e81494517f47c1004e49 -r 54efa2c365f4e2c867d56e44fb202ec2262e1122 lib/galaxy/webapps/galaxy/api/workflows.py
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -383,7 +383,7 @@
results = self.workflow_manager.build_invocations_query( trans, decoded_stored_workflow_invocation_id )
out = []
for r in results:
- out.append( self.encode_all_ids( trans, r.to_dict(), True) )
+ out.append( self.__encode_invocation( trans, r ) )
return out
@expose_api
@@ -403,7 +403,7 @@
decoded_workflow_invocation_id = self.__decode_id( trans, usage_id )
workflow_invocation = self.workflow_manager.get_invocation( trans, decoded_workflow_invocation_id )
if workflow_invocation:
- return self.encode_all_ids( trans, workflow_invocation.to_dict('element'), True)
+ return self.__encode_invocation( trans, workflow_invocation )
return None
def __get_stored_accessible_workflow( self, trans, workflow_id ):
@@ -435,6 +435,13 @@
raise exceptions.ObjectNotFound( "No such workflow found." )
return stored_workflow
+ def __encode_invocation( self, trans, invocation, view="element" ):
+ return self.encode_all_ids(
+ trans,
+ invocation.to_dict( view ),
+ True
+ )
+
def __decode_id( self, trans, workflow_id, model_type="workflow" ):
try:
return trans.security.decode_id( workflow_id )
diff -r 14953d248e9b404589c1e81494517f47c1004e49 -r 54efa2c365f4e2c867d56e44fb202ec2262e1122 test/api/test_workflows.py
--- a/test/api/test_workflows.py
+++ b/test/api/test_workflows.py
@@ -515,9 +515,7 @@
@skip_without_tool( "cat1" )
def test_invocation_usage( self ):
workflow_id, usage = self._run_workflow_once_get_invocation( "test_usage")
- usage_details_response = self._get( "workflows/%s/usage/%s" % ( workflow_id, usage[ "id" ] ) )
- self._assert_status_code_is( usage_details_response, 200 )
- usage_details = usage_details_response.json()
+ usage_details = self._invocation_details( workflow_id, usage[ "id" ] )
# Assert some high-level things about the structure of data returned.
self._assert_has_keys( usage_details, "inputs", "steps" )
for step in usage_details[ "steps" ]:
@@ -538,6 +536,12 @@
# renamed to 'the_new_name'.
assert "the_new_name" in map( lambda hda: hda[ "name" ], contents )
+ def _invocation_details( self, workflow_id, invocation_id ):
+ invocation_details_response = self._get( "workflows/%s/usage/%s" % ( workflow_id, invocation_id ) )
+ self._assert_status_code_is( invocation_details_response, 200 )
+ invocation_details = invocation_details_response.json()
+ return invocation_details
+
def _run_workflow_once_get_invocation( self, name ):
workflow = self.workflow_populator.load_workflow( name=name )
workflow_request, history_id = self._setup_workflow_run( workflow )
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/25656020ffb4/
Changeset: 25656020ffb4
Branch: job_admin
User: dannon
Date: 2014-09-05 21:44:56
Summary: Close feature branch
Affected #: 0 files
https://bitbucket.org/galaxy/galaxy-central/commits/62c2f16bcdeb/
Changeset: 62c2f16bcdeb
Branch: update-hist-export
User: dannon
Date: 2014-09-05 21:45:04
Summary: Close feature branch
Affected #: 0 files
commit/galaxy-central: dannon: Merged in kellrott/galaxy-farm/update-hist-export (pull request #472)
by commits-noreply@bitbucket.org 05 Sep '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/2604f7623fb4/
Changeset: 2604f7623fb4
User: dannon
Date: 2014-09-05 21:43:36
Summary: Merged in kellrott/galaxy-farm/update-hist-export (pull request #472)
Updating history import export
Affected #: 8 files
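For illustration only (not part of the changeset), a small sketch of the job timestamp round-trip introduced below - export writes isoformat() strings and import parses them with an explicit format string; note that isoformat() drops the fractional part when microseconds are zero, which is presumably why the import wraps the parse in try/except:

    import datetime

    created = datetime.datetime(2014, 9, 5, 21, 43, 36, 123456)
    exported = created.isoformat()                  # '2014-09-05T21:43:36.123456'
    parsed = datetime.datetime.strptime(exported, "%Y-%m-%dT%H:%M:%S.%f")
    assert parsed == created

    # With zero microseconds isoformat() omits '.%f', so the same format
    # string raises ValueError - hence the defensive try/except on import.
    no_micro = datetime.datetime(2014, 9, 5, 21, 43, 36).isoformat()
    try:
        datetime.datetime.strptime(no_micro, "%Y-%m-%dT%H:%M:%S.%f")
    except ValueError:
        pass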
diff -r 6866bba8b7e28e25820ae0ca73c9159b91bd922e -r 2604f7623fb459f3cbc6b3e30551e1a8ce0ea1b2 lib/galaxy/tools/imp_exp/__init__.py
--- a/lib/galaxy/tools/imp_exp/__init__.py
+++ b/lib/galaxy/tools/imp_exp/__init__.py
@@ -3,6 +3,7 @@
import logging
import tempfile
import json
+import datetime
from galaxy import model
from galaxy.tools.parameters.basic import UnvalidatedValue
from galaxy.web.framework.helpers import to_unicode
@@ -134,6 +135,11 @@
datasets_attrs_file_name = os.path.join( archive_dir, 'datasets_attrs.txt')
datasets_attr_str = read_file_contents( datasets_attrs_file_name )
datasets_attrs = from_json_string( datasets_attr_str )
+
+ if os.path.exists( datasets_attrs_file_name + ".provenance" ):
+ provenance_attr_str = read_file_contents( datasets_attrs_file_name + ".provenance" )
+ provenance_attrs = from_json_string( provenance_attr_str )
+ datasets_attrs += provenance_attrs
# Get counts of how often each dataset file is used; a file can
# be linked to multiple dataset objects (HDAs).
@@ -162,7 +168,14 @@
history=new_history,
create_dataset=True,
sa_session=self.sa_session )
- hda.state = hda.states.OK
+ if 'uuid' in dataset_attrs:
+ hda.dataset.uuid = dataset_attrs["uuid"]
+ if dataset_attrs.get('exported', True) == False:
+ hda.state = hda.states.DISCARDED
+ hda.deleted = True
+ hda.purged = True
+ else:
+ hda.state = hda.states.OK
self.sa_session.add( hda )
self.sa_session.flush()
new_history.add_dataset( hda, genome_build=None )
@@ -171,17 +184,18 @@
#permissions = trans.app.security_agent.history_get_default_permissions( new_history )
#trans.app.security_agent.set_all_dataset_permissions( hda.dataset, permissions )
self.sa_session.flush()
-
- # Do security check and move/copy dataset data.
- temp_dataset_file_name = \
- os.path.abspath( os.path.join( archive_dir, dataset_attrs['file_name'] ) )
- if not file_in_dir( temp_dataset_file_name, os.path.join( archive_dir, "datasets" ) ):
- raise Exception( "Invalid dataset path: %s" % temp_dataset_file_name )
- if datasets_usage_counts[ temp_dataset_file_name ] == 1:
- shutil.move( temp_dataset_file_name, hda.file_name )
- else:
- datasets_usage_counts[ temp_dataset_file_name ] -= 1
- shutil.copyfile( temp_dataset_file_name, hda.file_name )
+ if dataset_attrs.get('exported', True) == True:
+ # Do security check and move/copy dataset data.
+ temp_dataset_file_name = \
+ os.path.abspath( os.path.join( archive_dir, dataset_attrs['file_name'] ) )
+ if not file_in_dir( temp_dataset_file_name, os.path.join( archive_dir, "datasets" ) ):
+ raise Exception( "Invalid dataset path: %s" % temp_dataset_file_name )
+ if datasets_usage_counts[ temp_dataset_file_name ] == 1:
+ shutil.move( temp_dataset_file_name, hda.file_name )
+ else:
+ datasets_usage_counts[ temp_dataset_file_name ] -= 1
+ shutil.copyfile( temp_dataset_file_name, hda.file_name )
+ hda.dataset.set_total_size() #update the filesize record in the database
# Set tags, annotations.
if user:
@@ -225,10 +239,21 @@
# TODO: set session?
# imported_job.session = trans.get_galaxy_session().id
imported_job.history = new_history
+ imported_job.imported = True
imported_job.tool_id = job_attrs[ 'tool_id' ]
imported_job.tool_version = job_attrs[ 'tool_version' ]
imported_job.set_state( job_attrs[ 'state' ] )
- imported_job.imported = True
+ imported_job.info = job_attrs.get('info', None)
+ imported_job.exit_code = job_attrs.get('exit_code', None)
+ imported_job.traceback = job_attrs.get('traceback', None)
+ imported_job.stdout = job_attrs.get('stdout', None)
+ imported_job.stderr = job_attrs.get('stderr', None)
+ imported_job.command_line = job_attrs.get('command_line', None)
+ try:
+ imported_job.create_time = datetime.datetime.strptime(job_attrs["create_time"], "%Y-%m-%dT%H:%M:%S.%f")
+ imported_job.update_time = datetime.datetime.strptime(job_attrs["update_time"], "%Y-%m-%dT%H:%M:%S.%f")
+ except:
+ pass
self.sa_session.add( imported_job )
self.sa_session.flush()
@@ -266,6 +291,16 @@
if output_hda:
imported_job.add_output_dataset( output_hda.name, output_hda )
+ # Connect jobs to input datasets.
+ if 'input_mapping' in job_attrs:
+ for input_name, input_hid in job_attrs[ 'input_mapping' ].items():
+ #print "%s job has input dataset %i" % (imported_job.id, input_hid)
+ input_hda = self.sa_session.query( model.HistoryDatasetAssociation ) \
+ .filter_by( history=new_history, hid=input_hid ).first()
+ if input_hda:
+ imported_job.add_input_dataset( input_name, input_hda )
+
+
self.sa_session.flush()
# Done importing.
@@ -323,7 +358,7 @@
def default( self, obj ):
""" Encode an HDA, default encoding for everything else. """
if isinstance( obj, trans.app.model.HistoryDatasetAssociation ):
- return {
+ rval = {
"__HistoryDatasetAssociation__": True,
"create_time": obj.create_time.__str__(),
"update_time": obj.update_time.__str__(),
@@ -339,9 +374,17 @@
"deleted": obj.deleted,
"visible": obj.visible,
"file_name": obj.file_name,
+ "uuid" : ( lambda uuid: str( uuid ) if uuid else None )( obj.dataset.uuid ),
"annotation": to_unicode( getattr( obj, 'annotation', '' ) ),
"tags": get_item_tag_dict( obj ),
}
+ if not obj.visible and not include_hidden:
+ rval['exported'] = False
+ elif obj.deleted and not include_deleted:
+ rval['exported'] = False
+ else:
+ rval['exported'] = True
+ return rval
if isinstance( obj, UnvalidatedValue ):
return obj.__str__()
return json.JSONEncoder.default( self, obj )
@@ -374,19 +417,23 @@
datasets = self.get_history_datasets( trans, history )
included_datasets = []
datasets_attrs = []
+ provenance_attrs = []
for dataset in datasets:
- if not dataset.visible and not include_hidden:
- continue
- if dataset.deleted and not include_deleted:
- continue
dataset.annotation = self.get_item_annotation_str( trans.sa_session, history.user, dataset )
- datasets_attrs.append( dataset )
- included_datasets.append( dataset )
+ if (not dataset.visible and not include_hidden) or (dataset.deleted and not include_deleted):
+ provenance_attrs.append( dataset )
+ else:
+ datasets_attrs.append( dataset )
+ included_datasets.append( dataset )
datasets_attrs_filename = tempfile.NamedTemporaryFile( dir=temp_output_dir ).name
datasets_attrs_out = open( datasets_attrs_filename, 'w' )
datasets_attrs_out.write( to_json_string( datasets_attrs, cls=HistoryDatasetAssociationEncoder ) )
datasets_attrs_out.close()
jeha.datasets_attrs_filename = datasets_attrs_filename
+
+ provenance_attrs_out = open( datasets_attrs_filename + ".provenance", 'w' )
+ provenance_attrs_out.write( to_json_string( provenance_attrs, cls=HistoryDatasetAssociationEncoder ) )
+ provenance_attrs_out.close()
#
# Write jobs attributes file.
@@ -422,6 +469,15 @@
job_attrs[ 'tool_id' ] = job.tool_id
job_attrs[ 'tool_version' ] = job.tool_version
job_attrs[ 'state' ] = job.state
+ job_attrs[ 'info' ] = job.info
+ job_attrs[ 'traceback' ] = job.traceback
+ job_attrs[ 'command_line' ] = job.command_line
+ job_attrs[ 'stderr' ] = job.stderr
+ job_attrs[ 'stdout' ] = job.stdout
+ job_attrs[ 'exit_code' ] = job.exit_code
+ job_attrs[ 'create_time' ] = job.create_time.isoformat()
+ job_attrs[ 'update_time' ] = job.update_time.isoformat()
+
# Get the job's parameters
try:
@@ -438,11 +494,14 @@
# -- Get input, output datasets. --
input_datasets = []
+ input_mapping = {}
for assoc in job.input_datasets:
# Optional data inputs will not have a dataset.
if assoc.dataset:
input_datasets.append( assoc.dataset.hid )
+ input_mapping[assoc.name] = assoc.dataset.hid
job_attrs[ 'input_datasets' ] = input_datasets
+ job_attrs[ 'input_mapping'] = input_mapping
output_datasets = [ assoc.dataset.hid for assoc in job.output_datasets ]
job_attrs[ 'output_datasets' ] = output_datasets
diff -r 6866bba8b7e28e25820ae0ca73c9159b91bd922e -r 2604f7623fb459f3cbc6b3e30551e1a8ce0ea1b2 lib/galaxy/tools/imp_exp/export_history.py
--- a/lib/galaxy/tools/imp_exp/export_history.py
+++ b/lib/galaxy/tools/imp_exp/export_history.py
@@ -44,12 +44,13 @@
# Add datasets to archive and update dataset attributes.
# TODO: security check to ensure that files added are in Galaxy dataset directory?
for dataset_attrs in datasets_attrs:
- dataset_file_name = dataset_attrs[ 'file_name' ] # Full file name.
- dataset_archive_name = os.path.join( 'datasets',
- get_dataset_filename( dataset_attrs[ 'name' ], dataset_attrs[ 'extension' ] ) )
- history_archive.add( dataset_file_name, arcname=dataset_archive_name )
- # Update dataset filename to be archive name.
- dataset_attrs[ 'file_name' ] = dataset_archive_name
+ if dataset_attrs['exported']:
+ dataset_file_name = dataset_attrs[ 'file_name' ] # Full file name.
+ dataset_archive_name = os.path.join( 'datasets',
+ get_dataset_filename( dataset_attrs[ 'name' ], dataset_attrs[ 'extension' ] ) )
+ history_archive.add( dataset_file_name, arcname=dataset_archive_name )
+ # Update dataset filename to be archive name.
+ dataset_attrs[ 'file_name' ] = dataset_archive_name
# Rewrite dataset attributes file.
datasets_attrs_out = open( datasets_attrs_file, 'w' )
@@ -59,6 +60,8 @@
# Finish archive.
history_archive.add( history_attrs_file, arcname="history_attrs.txt" )
history_archive.add( datasets_attrs_file, arcname="datasets_attrs.txt" )
+ if os.path.exists( datasets_attrs_file + ".provenance" ):
+ history_archive.add( datasets_attrs_file + ".provenance", arcname="datasets_attrs.txt.provenance" )
history_archive.add( jobs_attrs_file, arcname="jobs_attrs.txt" )
history_archive.close()
diff -r 6866bba8b7e28e25820ae0ca73c9159b91bd922e -r 2604f7623fb459f3cbc6b3e30551e1a8ce0ea1b2 lib/galaxy/webapps/galaxy/api/histories.py
--- a/lib/galaxy/webapps/galaxy/api/histories.py
+++ b/lib/galaxy/webapps/galaxy/api/histories.py
@@ -312,6 +312,8 @@
check_ownership=False, check_accessible=True )
jeha = history.latest_export
up_to_date = jeha and jeha.up_to_date
+ if 'force' in kwds:
+ up_to_date = False #Temp hack to force rebuild everytime during dev
if not up_to_date:
# Need to create new JEHA + job.
gzip = kwds.get( "gzip", True )
diff -r 6866bba8b7e28e25820ae0ca73c9159b91bd922e -r 2604f7623fb459f3cbc6b3e30551e1a8ce0ea1b2 lib/galaxy/webapps/galaxy/controllers/history.py
--- a/lib/galaxy/webapps/galaxy/controllers/history.py
+++ b/lib/galaxy/webapps/galaxy/controllers/history.py
@@ -1118,7 +1118,7 @@
#TODO: used in this file and index.mako
@web.expose
- def export_archive( self, trans, id=None, gzip=True, include_hidden=False, include_deleted=False ):
+ def export_archive( self, trans, id=None, gzip=True, include_hidden=False, include_deleted=False, preview=False ):
""" Export a history to an archive. """
#
# Get history to export.
@@ -1139,7 +1139,13 @@
jeha = history.latest_export
if jeha and jeha.up_to_date:
if jeha.ready:
- return self.serve_ready_history_export( trans, jeha )
+ if preview:
+ url = url_for( controller='history', action="export_archive", id=id, qualified=True )
+ return trans.show_message( "History Ready: '%(n)s'. Use this link to download \
+ the archive or import it to another Galaxy server: \
+ <a href='%(u)s'>%(u)s</a>" % ( { 'n' : history.name, 'u' : url } ) )
+ else:
+ return self.serve_ready_history_export( trans, jeha )
elif jeha.preparing:
return trans.show_message( "Still exporting history %(n)s; please check back soon. Link: <a href='%(s)s'>%(s)s</a>" \
% ( { 'n' : history.name, 's' : url_for( controller='history', action="export_archive", id=id, qualified=True ) } ) )
diff -r 6866bba8b7e28e25820ae0ca73c9159b91bd922e -r 2604f7623fb459f3cbc6b3e30551e1a8ce0ea1b2 templates/show_params.mako
--- a/templates/show_params.mako
+++ b/templates/show_params.mako
@@ -107,6 +107,7 @@
<tbody><%
encoded_hda_id = trans.security.encode_id( hda.id )
+ encoded_history_id = trans.security.encode_id( hda.history_id )
%><tr><td>Name:</td><td>${hda.name | h}</td></tr><tr><td>Created:</td><td>${hda.create_time.strftime(trans.app.config.pretty_datetime_format)}</td></tr>
@@ -120,6 +121,7 @@
<tr><td>Tool Standard Error:</td><td><a href="${h.url_for( controller='dataset', action='stderr', dataset_id=encoded_hda_id )}">stderr</a></td></tr><tr><td>Tool Exit Code:</td><td>${job.exit_code | h}</td></tr><tr><td>API ID:</td><td>${encoded_hda_id}</td></tr>
+ <tr><td>History ID:</td><td>${encoded_history_id}</td></tr>
%if hda.dataset.uuid:
<tr><td>UUID:</td><td>${hda.dataset.uuid}</td></tr>
%endif
diff -r 6866bba8b7e28e25820ae0ca73c9159b91bd922e -r 2604f7623fb459f3cbc6b3e30551e1a8ce0ea1b2 templates/webapps/galaxy/root/index.mako
--- a/templates/webapps/galaxy/root/index.mako
+++ b/templates/webapps/galaxy/root/index.mako
@@ -105,7 +105,7 @@
galaxy_main.location = "${h.url_for( controller='history', action='citations' )}";
},
"${_("Export to File")}": function() {
- galaxy_main.location = "${h.url_for( controller='history', action='export_archive' )}";
+ galaxy_main.location = "${h.url_for( controller='history', action='export_archive', preview=True )}";
},
"${_("Delete")}": function() {
if ( confirm( "Really delete the current history?" ) ) {
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7d7048ac4f87/
Changeset: 7d7048ac4f87
Branch: coding_snp_2bit_locfile
User: dannon
Date: 2014-09-05 21:26:10
Summary: Close feature branch
Affected #: 0 files
commit/galaxy-central: dannon: Merged in lance_parsons/galaxy-central-pull-requests/coding_snp_2bit_locfile (pull request #487)
by commits-noreply@bitbucket.org 05 Sep '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/6866bba8b7e2/
Changeset: 6866bba8b7e2
User: dannon
Date: 2014-09-05 21:25:38
Summary: Merged in lance_parsons/galaxy-central-pull-requests/coding_snp_2bit_locfile (pull request #487)
Allow specification of complete filename of 2bit files in codingSnps.pl
Affected #: 1 file
diff -r 92238b01c946c03b93f4b77d0729b24684940c89 -r 6866bba8b7e28e25820ae0ca73c9159b91bd922e tools/evolution/codingSnps.pl
--- a/tools/evolution/codingSnps.pl
+++ b/tools/evolution/codingSnps.pl
@@ -564,6 +564,8 @@
if ($nibDir eq 'Galaxy') {
print STDERR "Failed to find sequence directory in locfile $locFile\n";
}
- $nibDir .= "/$build.2bit"; #we want full path and filename
+ # lparsons: allow specification of full filename in loc file for greater flexibility
+ unless ($nibDir =~ /(.*)\.2bit$/) { $nibDir .= "/$build.2bit"; }
+ #$nibDir .= "/$build.2bit"; #we want full path and filename
}
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/b04e01c4b78a/
Changeset: b04e01c4b78a
Branch: coding_snp_2bit_locfile
User: lance_parsons
Date: 2014-09-04 22:02:54
Summary: Allow specification of complete filename of 2bit files in codingSnps.pl
Affected #: 1 file
diff -r 9b6cccb3af2d6b13a1a1da7def87dcc19172be88 -r b04e01c4b78ac308414c5047327713899195ce9d tools/evolution/codingSnps.pl
--- a/tools/evolution/codingSnps.pl
+++ b/tools/evolution/codingSnps.pl
@@ -564,6 +564,8 @@
if ($nibDir eq 'Galaxy') {
print STDERR "Failed to find sequence directory in locfile $locFile\n";
}
- $nibDir .= "/$build.2bit"; #we want full path and filename
+ # lparsons: allow specification of full filename in loc file for greater flexibility
+ unless ($nibDir =~ /(.*)\.2bit/) { $nibDir .= "/$build.2bit"; }
+ #$nibDir .= "/$build.2bit"; #we want full path and filename
}
https://bitbucket.org/galaxy/galaxy-central/commits/ecaabded6e50/
Changeset: ecaabded6e50
Branch: coding_snp_2bit_locfile
User: lance_parsons
Date: 2014-09-05 16:55:38
Summary: Made regex more specific
Affected #: 1 file
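For illustration only (the tool itself is Perl; the paths below are hypothetical), a Python sketch of why the anchor matters - without '$', any path merely containing '.2bit' is treated as a full 2bit filename:

    import re

    unanchored = re.compile(r"(.*)\.2bit")
    anchored = re.compile(r"(.*)\.2bit$")

    full_file = "/depot/data/hg19/hg19.2bit"      # should be used as-is
    directory = "/depot/data/hg19.2bit_mirror"    # should get "/$build.2bit" appended

    assert unanchored.search(full_file) and anchored.search(full_file)
    assert unanchored.search(directory)               # false positive
    assert anchored.search(directory) is None         # correctly falls through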
diff -r b04e01c4b78ac308414c5047327713899195ce9d -r ecaabded6e5027bb92ee5559e226a82dbde192e3 tools/evolution/codingSnps.pl
--- a/tools/evolution/codingSnps.pl
+++ b/tools/evolution/codingSnps.pl
@@ -565,7 +565,7 @@
print STDERR "Failed to find sequence directory in locfile $locFile\n";
}
# lparsons: allow specification of full filename in loc file for greater flexibility
- unless ($nibDir =~ /(.*)\.2bit/) { $nibDir .= "/$build.2bit"; }
+ unless ($nibDir =~ /(.*)\.2bit$/) { $nibDir .= "/$build.2bit"; }
#$nibDir .= "/$build.2bit"; #we want full path and filename
}
https://bitbucket.org/galaxy/galaxy-central/commits/6866bba8b7e2/
Changeset: 6866bba8b7e2
User: dannon
Date: 2014-09-05 21:25:38
Summary: Merged in lance_parsons/galaxy-central-pull-requests/coding_snp_2bit_locfile (pull request #487)
Allow specification of complete filename of 2bit files in codingSnps.pl
Affected #: 1 file
diff -r 92238b01c946c03b93f4b77d0729b24684940c89 -r 6866bba8b7e28e25820ae0ca73c9159b91bd922e tools/evolution/codingSnps.pl
--- a/tools/evolution/codingSnps.pl
+++ b/tools/evolution/codingSnps.pl
@@ -564,6 +564,8 @@
if ($nibDir eq 'Galaxy') {
print STDERR "Failed to find sequence directory in locfile $locFile\n";
}
- $nibDir .= "/$build.2bit"; #we want full path and filename
+ # lparsons: allow specification of full filename in loc file for greater flexibility
+ unless ($nibDir =~ /(.*)\.2bit$/) { $nibDir .= "/$build.2bit"; }
+ #$nibDir .= "/$build.2bit"; #we want full path and filename
}
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/0119b0d6befb/
Changeset: 0119b0d6befb
User: jmchilton
Date: 2014-09-05 21:19:18
Summary: More discovered dataset testing tweaks.
Affected #: 2 files
diff -r 77e633e926043524691dcc193f3e7d59809d06a8 -r 0119b0d6befbb77f7ae177ccf036109d97fa53f1 lib/galaxy/tools/parameters/output_collect.py
--- a/lib/galaxy/tools/parameters/output_collect.py
+++ b/lib/galaxy/tools/parameters/output_collect.py
@@ -186,7 +186,8 @@
return self.pattern.replace( DATASET_ID_TOKEN, token_replacement )
def match( self, dataset_instance, filename ):
- re_match = re.match( self.pattern_for_dataset( dataset_instance ), filename )
+ pattern = self.pattern_for_dataset( dataset_instance )
+ re_match = re.match( pattern, filename )
match_object = None
if re_match:
match_object = CollectedDatasetMatch( re_match, self )
diff -r 77e633e926043524691dcc193f3e7d59809d06a8 -r 0119b0d6befbb77f7ae177ccf036109d97fa53f1 test/functional/tools/multi_output_configured.xml
--- a/test/functional/tools/multi_output_configured.xml
+++ b/test/functional/tools/multi_output_configured.xml
@@ -1,4 +1,4 @@
-<tool id="multi_output_configured" name="Multi_Output_Configured" description="multi_output_configured" force_history_refresh="True" version="0.1.0">
+<tool id="multi_output_configured" name="Multi_Output_Configured" description="multi_output_configured" version="0.1.0"><command>
echo "Hello" > $report;
mkdir subdir1;
@@ -12,6 +12,9 @@
echo "Foo" > subdir3/Foo;
echo "mapped reads" > split_bam_.MAPPED.bam;
echo "unmapped reads" > split_bam_.UNMAPPED.bam;
+ echo "1" > sample1.report.tsv;
+ echo "2" > sample2.report.tsv;
+ echo "3" > sample3.report.tsv;
</command><inputs><param name="num_param" type="integer" value="7" />
@@ -23,6 +26,7 @@
<discover_datasets pattern="CUSTOM_(?P<designation>.+)\.(?P<ext>.+)" directory="subdir2" visible="true" /><discover_datasets pattern="__designation__" directory="subdir3" ext="input" visible="true" /><discover_datasets pattern="split_bam_\.(?P<designation>([A-Z-])\w+)\.bam" ext="txt" visible="true" />
+ <discover_datasets pattern="(?P<designation>.+)\.report\.tsv" ext="tabular" visible="true" /></data></outputs><tests>
@@ -54,6 +58,9 @@
<discovered_dataset designation="UNMAPPED" ftype="txt"><assert_contents><has_line line="unmapped reads" /></assert_contents></discovered_dataset>
+ <discovered_dataset designation="sample1" ftype="tabular">
+ <assert_contents><has_line line="1" /></assert_contents>
+ </discovered_dataset></output></test></tests>
https://bitbucket.org/galaxy/galaxy-central/commits/8b450e9886e1/
Changeset: 8b450e9886e1
User: jmchilton
Date: 2014-09-05 21:19:18
Summary: Switch default test interactor to API.
This has dozens of advantages; the main Tool Shed is now running tests against the API and no one has complained.
If you find regressions in test cases as part of this switch, please report them and set interactor="twill" on your test cases to revert to the old runner until the problem is fixed. Eventually the twill interactor for tool functional tests will be eliminated altogether.
Affected #: 1 file
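For illustration only (a minimal sketch, not the actual parser), the fallback this commit changes: test cases that do not declare an interactor now run through the API-based runner, while interactor="twill" on a test case opts back into the old behaviour.

    DEFAULT_INTERACTOR = "api"   # was "twill" before this changeset

    def resolve_interactor(declared=None):
        """Interactor a tool test case will use if none is declared."""
        return declared or DEFAULT_INTERACTOR

    assert resolve_interactor() == "api"            # new default
    assert resolve_interactor("twill") == "twill"   # explicit per-test opt-out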
diff -r 0119b0d6befbb77f7ae177ccf036109d97fa53f1 -r 8b450e9886e1e8d62201bf307d79e2f8e3458fbd lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -14,7 +14,7 @@
DEFAULT_FTYPE = 'auto'
DEFAULT_DBKEY = 'hg17'
-DEFAULT_INTERACTOR = "twill" # Default mechanism test code uses for interacting with Galaxy instance.
+DEFAULT_INTERACTOR = "api" # Default mechanism test code uses for interacting with Galaxy instance.
DEFAULT_MAX_SECS = 120
https://bitbucket.org/galaxy/galaxy-central/commits/92238b01c946/
Changeset: 92238b01c946
User: jmchilton
Date: 2014-09-05 21:19:18
Summary: Add another example tool - to illustrate three ways to collect files for concatenation.
Affected #: 3 files
diff -r 8b450e9886e1e8d62201bf307d79e2f8e3458fbd -r 92238b01c946c03b93f4b77d0729b24684940c89 test/functional/tools/for_workflows/cat_collection.xml
--- /dev/null
+++ b/test/functional/tools/for_workflows/cat_collection.xml
@@ -0,0 +1,16 @@
+<tool id="cat_collection" name="Concatenate dataset collection (for test workflows)">
+ <description>tail-to-head</description>
+ <command>
+ cat #for $q in $input1# $q #end for# > $out_file1
+ </command>
+ <inputs>
+ <param name="input1" type="data_collection" label="Concatenate Dataset" collection_type="paired" />
+ </inputs>
+ <outputs>
+ <data name="out_file1" format="input" />
+ </outputs>
+ <tests>
+ </tests>
+ <help>
+ </help>
+</tool>
diff -r 8b450e9886e1e8d62201bf307d79e2f8e3458fbd -r 92238b01c946c03b93f4b77d0729b24684940c89 test/functional/tools/for_workflows/cat_list.xml
--- a/test/functional/tools/for_workflows/cat_list.xml
+++ b/test/functional/tools/for_workflows/cat_list.xml
@@ -1,4 +1,4 @@
-<tool id="cat_list" name="Concatenate dataset list (for test workflows)">
+<tool id="cat_list" name="Concatenate multiple datasets (for test workflows)"><description>tail-to-head</description><command>
cat #for $q in $input1# $q #end for# > $out_file1
diff -r 8b450e9886e1e8d62201bf307d79e2f8e3458fbd -r 92238b01c946c03b93f4b77d0729b24684940c89 test/functional/tools/samples_tool_conf.xml
--- a/test/functional/tools/samples_tool_conf.xml
+++ b/test/functional/tools/samples_tool_conf.xml
@@ -28,8 +28,13 @@
<tool file="collection_optional_param.xml" /><!-- Tools interesting only for building up test workflows. -->
+
+ <!-- Next three tools demonstrate concatenating multiple datasets
+ with a repeat, multiple datasets with a multiple input data
+ parameter, and multiple datasets from a collection. --><tool file="for_workflows/cat.xml" /><tool file="for_workflows/cat_list.xml" />
+ <tool file="for_workflows/cat_collection.xml" /><tool file="for_workflows/head.xml" /></toolbox>
commit/galaxy-central: natefoo: Handle Pulsar job stop when setting metadata remotely.
by commits-noreply@bitbucket.org 05 Sep '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/77e633e92604/
Changeset: 77e633e92604
User: natefoo
Date: 2014-09-05 17:41:05
Summary: Handle Pulsar job stop when setting metadata remotely.
Affected #: 1 file
diff -r be2c86442b3e28407d7b845a90420c3feb3fe859 -r 77e633e926043524691dcc193f3e7d59809d06a8 lib/galaxy/jobs/runners/pulsar.py
--- a/lib/galaxy/jobs/runners/pulsar.py
+++ b/lib/galaxy/jobs/runners/pulsar.py
@@ -416,8 +416,9 @@
def stop_job( self, job ):
#if our local job has JobExternalOutputMetadata associated, then our primary job has to have already finished
+ client = self.get_client( job.destination_params, job.job_runner_external_id )
job_ext_output_metadata = job.get_external_output_metadata()
- if job_ext_output_metadata:
+ if not PulsarJobRunner.__remote_metadata( client ) and job_ext_output_metadata:
pid = job_ext_output_metadata[0].job_runner_external_pid # every JobExternalOutputMetadata has a pid set, we just need to take from one of them
if pid in [ None, '' ]:
log.warning( "stop_job(): %s: no PID in database for job, unable to stop" % job.id )
commit/galaxy-central: guerler: ToolForm: Additional buttons, fixes and improvements, genome build selector, first commit for multiple dataset and collection selector
by commits-noreply@bitbucket.org 05 Sep '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/be2c86442b3e/
Changeset: be2c86442b3e
User: guerler
Date: 2014-09-05 17:24:55
Summary: ToolForm: Additional buttons, fixes and improvements, genome build selector, first commit for multiple dataset and collection selector
Affected #: 13 files
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/mvc/tools/tools-datasets.js
--- a/static/scripts/mvc/tools/tools-datasets.js
+++ b/static/scripts/mvc/tools/tools-datasets.js
@@ -6,7 +6,7 @@
this.currHistoryContents = new HISTORY_CONTENTS.HistoryContents({});
// identify current history id
- this.currHistoryContents.historyId = "f597429621d6eb2b";//Galaxy.currHistoryPanel.model.id;
+ this.currHistoryContents.historyId = options.history_id;
// make request
var self = this;
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/mvc/tools/tools-form.js
--- a/static/scripts/mvc/tools/tools-form.js
+++ b/static/scripts/mvc/tools/tools-form.js
@@ -40,7 +40,8 @@
// initialize datasets
this.datasets = new ToolDatasets({
- success: function() {
+ history_id : this.options.history_id,
+ success : function() {
self._initializeToolForm();
}
});
@@ -48,8 +49,40 @@
// initialize tool form
_initializeToolForm: function() {
+ // link this
+ var self = this;
+
+ // create question button
+ var button_question = new Ui.ButtonIcon({
+ icon : 'fa-question-circle',
+ title : 'Question?',
+ tooltip : 'Ask a question about this tool (Biostar)',
+ onclick : function() {
+ window.open(self.options.biostar_url + '/p/new/post/');
+ }
+ });
+
+ // create search button
+ var button_search = new Ui.ButtonIcon({
+ icon : 'fa-search',
+ title : 'Search',
+ tooltip : 'Search help for this tool (Biostar)',
+ onclick : function() {
+ window.open(self.options.biostar_url + '/t/' + self.options.id + '/');
+ }
+ });
+
+ // create share button
+ var button_share = new Ui.ButtonIcon({
+ icon : 'fa-share',
+ title : 'Share',
+ tooltip : 'Share this tool',
+ onclick : function() {
+ prompt('Copy to clipboard: Ctrl+C, Enter', galaxy_config.root + 'root?tool_id=' + self.options.id);
+ }
+ });
+
// fetch model and render form
- var self = this;
this.model.fetch({
error: function(response) {
console.debug('tools-form::_initializeToolForm() : Attempt to fetch tool model failed.');
@@ -72,9 +105,20 @@
console.log(self.tree.create(self));
}
})
+ },
+ operations: {
+ button_question: button_question,
+ button_search: button_search,
+ button_share: button_share
}
});
+ // configure button selection
+ if(!self.options.biostar_url) {
+ button_question.$el.hide();
+ button_search.$el.hide();
+ }
+
// create message
self.message = new Ui.Message();
self.portlet.append(self.message.$el);
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/mvc/tools/tools-section.js
--- a/static/scripts/mvc/tools/tools-section.js
+++ b/static/scripts/mvc/tools/tools-section.js
@@ -1,5 +1,5 @@
-define(['utils/utils', 'mvc/ui/ui-table', 'mvc/ui/ui-misc', 'mvc/ui/ui-tabs'],
- function(Utils, Table, Ui, Tabs) {
+define(['utils/utils', 'mvc/ui/ui-table', 'mvc/ui/ui-misc', 'mvc/ui/ui-tabs', 'mvc/tools/tools-select-dataset'],
+ function(Utils, Table, Ui, Tabs, SelectDataset) {
// create form view
var View = Backbone.View.extend({
@@ -202,7 +202,7 @@
// dataset column
case 'data_column':
- field = this._field_column(input_def);
+ field = this._field_data_colum(input_def);
break;
// conditional select field
@@ -229,11 +229,20 @@
case 'boolean':
field = this._field_boolean(input_def);
break;
-
- // default
- default:
+ }
+
+ // pick a generic field if specific mapping failed
+ if (!field) {
+ if (input_def.options) {
+ // assign select field
+ field = this._field_select(input_def);
+ } else {
+ // assign text field
field = this._field_text(input_def);
- console.debug('tools-form::_addRow() : Unmatched field type (' + field_type + ').');
+ }
+
+ // log
+ console.debug('tools-form::_addRow() : Auto matched field type (' + field_type + ').');
}
// set field value
@@ -319,28 +328,15 @@
// get element id
var id = input_def.id;
- // get datasets
- var datasets = this.app.datasets.filterType();
-
- // configure options fields
- var options = [];
- for (var i in datasets) {
- options.push({
- label: datasets[i].get('name'),
- value: datasets[i].get('id')
- });
- }
-
// select field
- return new Ui.Select.View({
+ return new SelectDataset.View(this.app, {
id : 'field-' + id,
- data : options,
- value : options[0].value,
+ extensions : input_def.extensions,
multiple : input_def.multiple,
onchange : function(value) {
// pick the first dataset if multiple might be selected
// TODO: iterate over all datasets and filter common/consistent columns
- if (input_def.multiple) {
+ if (value instanceof Array) {
value = value[0];
}
@@ -429,26 +425,22 @@
break;
}
- // force checkboxes if multiple has been selected
- if (input_def.multiple) {
- SelectClass = Ui.Checkbox;
- }
-
// select field
return new SelectClass.View({
id : 'field-' + input_def.id,
- data : options
+ data : options,
+ multiple: input_def.multiple
});
},
- // column selection field
- _field_column : function (input_def) {
+ // column field
+ _field_data_colum : function (input_def) {
return new Ui.Select.View({
id : 'field-' + input_def.id,
multiple: input_def.multiple
});
},
-
+
// text input field
_field_text : function(input_def) {
return new Ui.Input({
@@ -457,13 +449,20 @@
});
},
- // integer field
+ // slider field
_field_slider: function(input_def) {
+ // calculate step size
+ var step = 1;
+ if (input_def.type == 'float') {
+ step = (input_def.max - input_def.min) / 10000;
+ }
+
+ // create slider
return new Ui.Slider.View({
id : 'field-' + input_def.id,
min : input_def.min || 0,
max : input_def.max || 1000,
- decimal : input_def.type == 'float'
+ step : step
});
},
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/mvc/tools/tools-select-dataset.js
--- /dev/null
+++ b/static/scripts/mvc/tools/tools-select-dataset.js
@@ -0,0 +1,109 @@
+// dependencies
+define(['utils/utils', 'mvc/ui/ui-misc', 'mvc/ui/ui-tabs'], function(Utils, Ui, Tabs) {
+
+var View = Backbone.View.extend({
+ // initialize
+ initialize : function(app, options) {
+ // link this
+ var self = this;
+
+ // get datasets
+ var datasets = app.datasets.filterType();
+
+ // configure options fields
+ var select_data = [];
+ for (var i in datasets) {
+ select_data.push({
+ label: datasets[i].get('name'),
+ value: datasets[i].get('id')
+ });
+ }
+
+ // create select field
+ this.select = new Ui.Select.View({
+ data : select_data,
+ value : select_data[0].value,
+ onchange : function() {
+ self.trigger('change');
+ }
+ });
+
+ // create select field for multiple files
+ this.select_multiple = new Ui.Select.View({
+ multiple : true,
+ data : select_data,
+ value : select_data[0].value,
+ onchange : function() {
+ self.trigger('change');
+ }
+ });
+
+
+ // create select field for multiple files
+ this.select_collection = new Ui.Select.View({
+ data : select_data,
+ value : select_data[0].value,
+ onchange : function() {
+ self.trigger('change');
+ }
+ });
+
+ // add change event. fires on trigger
+ this.on('change', function() {
+ if (options.onchange) {
+ options.onchange(self.value());
+ }
+ });
+
+ // tabs
+ this.tabs = new Tabs.View();
+
+ // add tab
+ this.tabs.add({
+ id : 'single',
+ title : 'Select a dataset',
+ $el : this.select.$el
+ });
+
+ // add tab
+ this.tabs.add({
+ id : 'multiple',
+ title : 'Select multiple datasets',
+ $el : this.select_multiple.$el
+ });
+
+ // add tab
+ this.tabs.add({
+ id : 'collection',
+ title : 'Select a dataset collection',
+ $el : this.select_collection.$el
+ });
+
+ // add element
+ this.setElement(this.tabs.$el);
+ },
+
+ // value
+ value : function (new_value) {
+ var current_tab = this.tabs.current();
+ switch(current_tab) {
+ case 'multiple' :
+ return this.select_multiple.value();
+ case 'collection' :
+ return this.select_collection.value();
+ default :
+ return this.select.value();
+ }
+ },
+
+ // render
+ update: function(options) {
+ this.select.update(options);
+ }
+});
+
+return {
+ View: View
+}
+
+});
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/mvc/ui/ui-slider.js
--- a/static/scripts/mvc/ui/ui-slider.js
+++ b/static/scripts/mvc/ui/ui-slider.js
@@ -8,8 +8,7 @@
value : '',
min : 1,
max : 100,
- step : 0.1,
- decimal : false
+ step : 1
},
// initialize
@@ -29,11 +28,6 @@
// backup integer field
this.$text = this.$el.find('#text');
- // set step size
- if (!this.options.decimal) {
- this.options.step = 1;
- }
-
// load slider plugin
this.$slider.slider(this.options);
@@ -46,7 +40,7 @@
this.$text.on('keydown', function (event) {
var v = event.which;
if (!(v == 13 || v == 8 || v == 37 || v == 39 || v == 189 || (v >= 48 && v <= 57)
- || (self.options.decimal && $(this).val().indexOf('.') == -1) && v == 190)) {
+ || (self.options.step != 1 && $(this).val().indexOf('.') == -1) && v == 190)) {
event.preventDefault();
}
});
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/mvc/ui/ui-tabs.js
--- a/static/scripts/mvc/ui/ui-tabs.js
+++ b/static/scripts/mvc/ui/ui-tabs.js
@@ -71,6 +71,11 @@
return _.size(this.list);
},
+ // front
+ current: function() {
+ return this.$el.find('.tab-pane.active').attr('id');
+ },
+
// append
add: function(options) {
// self
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/packed/mvc/tools/tools-datasets.js
--- a/static/scripts/packed/mvc/tools/tools-datasets.js
+++ b/static/scripts/packed/mvc/tools/tools-datasets.js
@@ -1,1 +1,1 @@
-define(["mvc/history/history-contents"],function(a){return Backbone.Model.extend({initialize:function(c){this.currHistoryContents=new a.HistoryContents({});this.currHistoryContents.historyId="f597429621d6eb2b";var b=this;var d=this.currHistoryContents.fetchAllDetails().done(function(){console.debug("tools-datasets::initialize() - Completed.");c.success&&c.success()}).fail(function(){console.debug("tools-datasets::initialize() - Ajax request failed.")})},filterType:function(b){return this.currHistoryContents.filter(function(c){var d=c.get("history_content_type");var e=c.get("file_ext");return d==="dataset"})},filter:function(b){return _.first(this.currHistoryContents.filter(function(c){return c.get("id")===b}))}})});
\ No newline at end of file
+define(["mvc/history/history-contents"],function(a){return Backbone.Model.extend({initialize:function(c){this.currHistoryContents=new a.HistoryContents({});this.currHistoryContents.historyId=c.history_id;var b=this;var d=this.currHistoryContents.fetchAllDetails().done(function(){console.debug("tools-datasets::initialize() - Completed.");c.success&&c.success()}).fail(function(){console.debug("tools-datasets::initialize() - Ajax request failed.")})},filterType:function(b){return this.currHistoryContents.filter(function(c){var d=c.get("history_content_type");var e=c.get("file_ext");return d==="dataset"})},filter:function(b){return _.first(this.currHistoryContents.filter(function(c){return c.get("id")===b}))}})});
\ No newline at end of file
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/packed/mvc/tools/tools-form.js
--- a/static/scripts/packed/mvc/tools/tools-form.js
+++ b/static/scripts/packed/mvc/tools/tools-form.js
@@ -1,1 +1,1 @@
-define(["mvc/ui/ui-portlet","mvc/ui/ui-misc","mvc/citation/citation-model","mvc/citation/citation-view","mvc/tools","mvc/tools/tools-template","mvc/tools/tools-datasets","mvc/tools/tools-section","mvc/tools/tools-tree"],function(g,k,i,a,f,d,h,j,c){var e=Backbone.Model.extend({initialize:function(l){this.url=galaxy_config.root+"api/tools/"+l.id+"?io_details=true"}});var b=Backbone.View.extend({main_el:"body",initialize:function(m){var l=this;this.options=m;this.model=new e({id:m.id});this.tree=new c(this);this.field_list={};this.input_list={};this.datasets=new h({success:function(){l._initializeToolForm()}})},_initializeToolForm:function(){var l=this;this.model.fetch({error:function(m){console.debug("tools-form::_initializeToolForm() : Attempt to fetch tool model failed.")},success:function(){l.inputs=l.model.get("inputs");l.portlet=new g.View({icon:"fa-wrench",title:"<b>"+l.model.get("name")+"</b> "+l.model.get("description"),buttons:{execute:new k.ButtonIcon({icon:"fa-check",tooltip:"Execute the tool",title:"Execute",floating:"clear",onclick:function(){console.log(l.tree.create(l))}})}});l.message=new k.Message();l.portlet.append(l.message.$el);$(l.main_el).append(l.portlet.$el);if(l.options.help!=""){$(l.main_el).append(d.help(l.options.help))}if(l.options.citations){$(l.main_el).append(d.citations());var m=new i.ToolCitationCollection();m.tool_id=l.options.id;var n=new a.CitationListView({collection:m});n.render();m.fetch()}l.setElement(l.portlet.content());l.section=new j.View(l,{inputs:l.model.get("inputs")});l.portlet.append(l.section.$el);l.refresh()}})},refresh:function(){this.tree.refresh();for(var l in this.field_list){this.field_list[l].trigger("change")}console.debug("tools-form::refresh() - Recreated tree structure. Refresh.")}});return{View:b}});
\ No newline at end of file
+define(["mvc/ui/ui-portlet","mvc/ui/ui-misc","mvc/citation/citation-model","mvc/citation/citation-view","mvc/tools","mvc/tools/tools-template","mvc/tools/tools-datasets","mvc/tools/tools-section","mvc/tools/tools-tree"],function(g,k,i,a,f,d,h,j,c){var e=Backbone.Model.extend({initialize:function(l){this.url=galaxy_config.root+"api/tools/"+l.id+"?io_details=true"}});var b=Backbone.View.extend({main_el:"body",initialize:function(m){var l=this;this.options=m;this.model=new e({id:m.id});this.tree=new c(this);this.field_list={};this.input_list={};this.datasets=new h({history_id:this.options.history_id,success:function(){l._initializeToolForm()}})},_initializeToolForm:function(){var m=this;var n=new k.ButtonIcon({icon:"fa-question-circle",title:"Question?",tooltip:"Ask a question about this tool (Biostar)",onclick:function(){window.open(m.options.biostar_url+"/p/new/post/")}});var o=new k.ButtonIcon({icon:"fa-search",title:"Search",tooltip:"Search help for this tool (Biostar)",onclick:function(){window.open(m.options.biostar_url+"/t/"+m.options.id+"/")}});var l=new k.ButtonIcon({icon:"fa-share",title:"Share",tooltip:"Share this tool",onclick:function(){prompt("Copy to clipboard: Ctrl+C, Enter",galaxy_config.root+"root?tool_id="+m.options.id)}});this.model.fetch({error:function(p){console.debug("tools-form::_initializeToolForm() : Attempt to fetch tool model failed.")},success:function(){m.inputs=m.model.get("inputs");m.portlet=new g.View({icon:"fa-wrench",title:"<b>"+m.model.get("name")+"</b> "+m.model.get("description"),buttons:{execute:new k.ButtonIcon({icon:"fa-check",tooltip:"Execute the tool",title:"Execute",floating:"clear",onclick:function(){console.log(m.tree.create(m))}})},operations:{button_question:n,button_search:o,button_share:l}});if(!m.options.biostar_url){n.$el.hide();o.$el.hide()}m.message=new k.Message();m.portlet.append(m.message.$el);$(m.main_el).append(m.portlet.$el);if(m.options.help!=""){$(m.main_el).append(d.help(m.options.help))}if(m.options.citations){$(m.main_el).append(d.citations());var p=new i.ToolCitationCollection();p.tool_id=m.options.id;var q=new a.CitationListView({collection:p});q.render();p.fetch()}m.setElement(m.portlet.content());m.section=new j.View(m,{inputs:m.model.get("inputs")});m.portlet.append(m.section.$el);m.refresh()}})},refresh:function(){this.tree.refresh();for(var l in this.field_list){this.field_list[l].trigger("change")}console.debug("tools-form::refresh() - Recreated tree structure. Refresh.")}});return{View:b}});
\ No newline at end of file
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/packed/mvc/tools/tools-section.js
--- a/static/scripts/packed/mvc/tools/tools-section.js
+++ b/static/scripts/packed/mvc/tools/tools-section.js
@@ -1,1 +1,1 @@
-define(["utils/utils","mvc/ui/ui-table","mvc/ui/ui-misc","mvc/ui/ui-tabs"],function(c,b,e,a){var d=Backbone.View.extend({initialize:function(g,f){this.app=g;this.inputs=f.inputs;f.cls_tr="section-row";this.table=new b.View(f);this.setElement(this.table.$el);this.render()},render:function(){this.table.delAll();for(var f in this.inputs){this._add(this.inputs[f])}},_add:function(h){var g=this;var f=jQuery.extend(true,{},h);f.id=c.uuid();this.app.input_list[f.id]=f;var i=f.type;switch(i){case"conditional":this._addConditional(f);break;case"repeat":this._addRepeat(f);break;default:this._addRow(i,f)}},_addConditional:function(f){f.label=f.test_param.label;f.value=f.test_param.value;this._addRow("conditional",f);for(var h in f.cases){var g=f.id+"-section-"+h;var j=new d(this.app,{inputs:f.cases[h].inputs,cls:"ui-table-plain"});this.table.add("");this.table.add(j.$el);this.table.append(g)}},_addRepeat:function(f){var g=this;var k=new a.View({title_new:"Add "+f.title,max:f.max,onnew:function(){var i=f.id+"-section-"+c.uuid();var m=new d(g.app,{inputs:f.inputs,cls:"ui-table-plain"});k.add({id:i,title:f.title,$el:m.$el,ondel:function(){k.del(i);k.retitle(f.title);g.app.refresh()}});k.retitle(f.title);k.show(i);g.app.refresh()}});for(var j=0;j<f.min;j++){var h=f.id+"-section-"+c.uuid();var l=new d(g.app,{inputs:f.inputs,cls:"ui-table-plain"});k.add({id:h,title:f.title,$el:l.$el})}k.retitle(f.title);this.table.add("");this.table.add(k.$el);this.table.append(f.id)},_addRow:function(h,f){var j=f.id;var g=null;switch(h){case"text":g=this._field_text(f);break;case"select":g=this._field_select(f);break;case"data":g=this._field_data(f);break;case"data_column":g=this._field_column(f);break;case"conditional":g=this._field_conditional(f);break;case"hidden":g=this._field_hidden(f);break;case"integer":g=this._field_slider(f);break;case"float":g=this._field_slider(f);break;case"boolean":g=this._field_boolean(f);break;default:g=this._field_text(f);console.debug("tools-form::_addRow() : Unmatched field type ("+h+").")}if(f.value!==undefined){g.value(f.value)}this.app.field_list[j]=g;var i=$("<div/>");i.append(g.$el);if(f.help){i.append('<div class="ui-table-form-info">'+f.help+"</div>")}this.table.add('<span class="ui-table-form-title">'+f.label+"</span>","20%");this.table.add(i);this.table.append(j)},_field_conditional:function(f){var g=this;var h=[];for(var j in f.test_param.options){var k=f.test_param.options[j];h.push({label:k[0],value:k[1]})}return new e.Select.View({id:"field-"+f.id,data:h,onchange:function(s){for(var q in f.cases){var m=f.cases[q];var p=f.id+"-section-"+q;var l=g.table.get(p);var o=false;for(var n in m.inputs){var r=m.inputs[n].type;if(r&&r!=="hidden"){o=true;break}}if(m.value==s&&o){l.fadeIn("fast")}else{l.hide()}}}})},_field_data:function(f){var g=this;var l=f.id;var k=this.app.datasets.filterType();var h=[];for(var j in k){h.push({label:k[j].get("name"),value:k[j].get("id")})}return new e.Select.View({id:"field-"+l,data:h,value:h[0].value,onchange:function(u){var s=g.app.tree.findReferences(l,"data_column");var n=g.app.datasets.filter(u);if(n&&s.length>0){console.debug("tool-form::field_data() - Selected dataset "+u+".");var w=n.get("metadata_column_types");if(!w){console.debug("tool-form::field_data() - FAILED: Could not find metadata for dataset "+u+".")}for(var p in s){var q=g.app.input_list[s[p]];var r=g.app.field_list[s[p]];if(!q||!r){console.debug("tool-form::field_data() - FAILED: Column not found.")}var o=q.numerical;var m=[];for(var v in w){var 
t=w[v];if(t=="int"||t=="float"||!o){m.push({label:"Column: "+(parseInt(v)+1)+" ["+w[v]+"]",value:v})}}if(r){r.update(m);if(!r.exists(r.value())){r.value(r.first())}}}}else{console.debug("tool-form::field_data() - FAILED: Could not find dataset "+u+".")}}})},_field_select:function(f){var g=[];for(var h in f.options){var j=f.options[h];g.push({label:j[0],value:j[1]})}var k=e.Select;switch(f.display){case"checkboxes":k=e.Checkbox;break;case"radio":k=e.RadioButton;break}if(f.multiple){k=e.Checkbox}return new k.View({id:"field-"+f.id,data:g})},_field_column:function(f){return new e.Select.View({id:"field-"+f.id,multiple:f.multiple})},_field_text:function(f){return new e.Input({id:"field-"+f.id,area:f.area})},_field_slider:function(f){return new e.Slider.View({id:"field-"+f.id,min:f.min||0,max:f.max||1000,decimal:f.type=="float"})},_field_hidden:function(f){return new e.Hidden({id:"field-"+f.id})},_field_boolean:function(f){return new e.RadioButton.View({id:"field-"+f.id,data:[{label:"Yes",value:true},{label:"No",value:false}]})}});return{View:d}});
\ No newline at end of file
+define(["utils/utils","mvc/ui/ui-table","mvc/ui/ui-misc","mvc/ui/ui-tabs","mvc/tools/tools-select-dataset"],function(d,b,f,a,c){var e=Backbone.View.extend({initialize:function(h,g){this.app=h;this.inputs=g.inputs;g.cls_tr="section-row";this.table=new b.View(g);this.setElement(this.table.$el);this.render()},render:function(){this.table.delAll();for(var g in this.inputs){this._add(this.inputs[g])}},_add:function(i){var h=this;var g=jQuery.extend(true,{},i);g.id=d.uuid();this.app.input_list[g.id]=g;var j=g.type;switch(j){case"conditional":this._addConditional(g);break;case"repeat":this._addRepeat(g);break;default:this._addRow(j,g)}},_addConditional:function(g){g.label=g.test_param.label;g.value=g.test_param.value;this._addRow("conditional",g);for(var j in g.cases){var h=g.id+"-section-"+j;var k=new e(this.app,{inputs:g.cases[j].inputs,cls:"ui-table-plain"});this.table.add("");this.table.add(k.$el);this.table.append(h)}},_addRepeat:function(g){var h=this;var l=new a.View({title_new:"Add "+g.title,max:g.max,onnew:function(){var i=g.id+"-section-"+d.uuid();var n=new e(h.app,{inputs:g.inputs,cls:"ui-table-plain"});l.add({id:i,title:g.title,$el:n.$el,ondel:function(){l.del(i);l.retitle(g.title);h.app.refresh()}});l.retitle(g.title);l.show(i);h.app.refresh()}});for(var k=0;k<g.min;k++){var j=g.id+"-section-"+d.uuid();var m=new e(h.app,{inputs:g.inputs,cls:"ui-table-plain"});l.add({id:j,title:g.title,$el:m.$el})}l.retitle(g.title);this.table.add("");this.table.add(l.$el);this.table.append(g.id)},_addRow:function(i,g){var k=g.id;var h=null;switch(i){case"text":h=this._field_text(g);break;case"select":h=this._field_select(g);break;case"data":h=this._field_data(g);break;case"data_column":h=this._field_data_colum(g);break;case"conditional":h=this._field_conditional(g);break;case"hidden":h=this._field_hidden(g);break;case"integer":h=this._field_slider(g);break;case"float":h=this._field_slider(g);break;case"boolean":h=this._field_boolean(g);break}if(!h){if(g.options){h=this._field_select(g)}else{h=this._field_text(g)}console.debug("tools-form::_addRow() : Auto matched field type ("+i+").")}if(g.value!==undefined){h.value(g.value)}this.app.field_list[k]=h;var j=$("<div/>");j.append(h.$el);if(g.help){j.append('<div class="ui-table-form-info">'+g.help+"</div>")}this.table.add('<span class="ui-table-form-title">'+g.label+"</span>","20%");this.table.add(j);this.table.append(k)},_field_conditional:function(g){var h=this;var j=[];for(var k in g.test_param.options){var l=g.test_param.options[k];j.push({label:l[0],value:l[1]})}return new f.Select.View({id:"field-"+g.id,data:j,onchange:function(t){for(var r in g.cases){var n=g.cases[r];var q=g.id+"-section-"+r;var m=h.table.get(q);var p=false;for(var o in n.inputs){var s=n.inputs[o].type;if(s&&s!=="hidden"){p=true;break}}if(n.value==t&&p){m.fadeIn("fast")}else{m.hide()}}}})},_field_data:function(g){var h=this;var i=g.id;return new c.View(this.app,{id:"field-"+i,extensions:g.extensions,multiple:g.multiple,onchange:function(r){if(r instanceof Array){r=r[0]}var p=h.app.tree.findReferences(i,"data_column");var k=h.app.datasets.filter(r);if(k&&p.length>0){console.debug("tool-form::field_data() - Selected dataset "+r+".");var t=k.get("metadata_column_types");if(!t){console.debug("tool-form::field_data() - FAILED: Could not find metadata for dataset "+r+".")}for(var m in p){var n=h.app.input_list[p[m]];var o=h.app.field_list[p[m]];if(!n||!o){console.debug("tool-form::field_data() - FAILED: Column not found.")}var l=n.numerical;var j=[];for(var s in t){var 
q=t[s];if(q=="int"||q=="float"||!l){j.push({label:"Column: "+(parseInt(s)+1)+" ["+t[s]+"]",value:s})}}if(o){o.update(j);if(!o.exists(o.value())){o.value(o.first())}}}}else{console.debug("tool-form::field_data() - FAILED: Could not find dataset "+r+".")}}})},_field_select:function(g){var h=[];for(var j in g.options){var k=g.options[j];h.push({label:k[0],value:k[1]})}var l=f.Select;switch(g.display){case"checkboxes":l=f.Checkbox;break;case"radio":l=f.RadioButton;break}return new l.View({id:"field-"+g.id,data:h,multiple:g.multiple})},_field_data_colum:function(g){return new f.Select.View({id:"field-"+g.id,multiple:g.multiple})},_field_text:function(g){return new f.Input({id:"field-"+g.id,area:g.area})},_field_slider:function(g){var h=1;if(g.type=="float"){h=(g.max-g.min)/10000}return new f.Slider.View({id:"field-"+g.id,min:g.min||0,max:g.max||1000,step:h})},_field_hidden:function(g){return new f.Hidden({id:"field-"+g.id})},_field_boolean:function(g){return new f.RadioButton.View({id:"field-"+g.id,data:[{label:"Yes",value:true},{label:"No",value:false}]})}});return{View:e}});
\ No newline at end of file
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/packed/mvc/tools/tools-select-dataset.js
--- /dev/null
+++ b/static/scripts/packed/mvc/tools/tools-select-dataset.js
@@ -0,0 +1,1 @@
+define(["utils/utils","mvc/ui/ui-misc","mvc/ui/ui-tabs"],function(b,d,a){var c=Backbone.View.extend({initialize:function(k,f){var e=this;var h=k.datasets.filterType();var j=[];for(var g in h){j.push({label:h[g].get("name"),value:h[g].get("id")})}this.select=new d.Select.View({data:j,value:j[0].value,onchange:function(){e.trigger("change")}});this.select_multiple=new d.Select.View({multiple:true,data:j,value:j[0].value,onchange:function(){e.trigger("change")}});this.select_collection=new d.Select.View({data:j,value:j[0].value,onchange:function(){e.trigger("change")}});this.on("change",function(){if(f.onchange){f.onchange(e.value())}});this.tabs=new a.View();this.tabs.add({id:"single",title:"Select a dataset",$el:this.select.$el});this.tabs.add({id:"multiple",title:"Select multiple datasets",$el:this.select_multiple.$el});this.tabs.add({id:"collection",title:"Select a dataset collection",$el:this.select_collection.$el});this.setElement(this.tabs.$el)},value:function(e){var f=this.tabs.current();switch(f){case"multiple":return this.select_multiple.value();case"collection":return this.select_collection.value();default:return this.select.value()}},update:function(e){this.select.update(e)}});return{View:c}});
\ No newline at end of file
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/packed/mvc/ui/ui-slider.js
--- a/static/scripts/packed/mvc/ui/ui-slider.js
+++ b/static/scripts/packed/mvc/ui/ui-slider.js
@@ -1,1 +1,1 @@
-define(["utils/utils"],function(a){var b=Backbone.View.extend({optionsDefault:{value:"",min:1,max:100,step:0.1,decimal:false},initialize:function(d){var c=this;this.options=a.merge(d,this.optionsDefault);this.setElement(this._template(this.options));this.$slider=this.$el.find("#slider");this.$text=this.$el.find("#text");if(!this.options.decimal){this.options.step=1}this.$slider.slider(this.options);this.$text.on("change",function(){c.value($(this).val())});this.$text.on("keydown",function(f){var e=f.which;if(!(e==13||e==8||e==37||e==39||e==189||(e>=48&&e<=57)||(c.options.decimal&&$(this).val().indexOf(".")==-1)&&e==190)){f.preventDefault()}});this.$slider.on("slide",function(e,f){c.value(f.value)})},value:function(c){if(c!==undefined){c=Math.max(Math.min(c,this.options.max),this.options.min);if(this.options.onchange){this.options.onchange(c)}this.$slider.slider("value",c);this.$text.val(c)}return this.$text.val()},_template:function(c){return'<div id="'+c.id+'" style=""><input id="text" type="text" class="ui-input" style="width: 50px; float: left"/><div id="slider" style="width: calc(100% - 60px); float: left; top: 8px; left: 10px;"/></div>'}});return{View:b}});
\ No newline at end of file
+define(["utils/utils"],function(a){var b=Backbone.View.extend({optionsDefault:{value:"",min:1,max:100,step:1},initialize:function(d){var c=this;this.options=a.merge(d,this.optionsDefault);this.setElement(this._template(this.options));this.$slider=this.$el.find("#slider");this.$text=this.$el.find("#text");this.$slider.slider(this.options);this.$text.on("change",function(){c.value($(this).val())});this.$text.on("keydown",function(f){var e=f.which;if(!(e==13||e==8||e==37||e==39||e==189||(e>=48&&e<=57)||(c.options.step!=1&&$(this).val().indexOf(".")==-1)&&e==190)){f.preventDefault()}});this.$slider.on("slide",function(e,f){c.value(f.value)})},value:function(c){if(c!==undefined){c=Math.max(Math.min(c,this.options.max),this.options.min);if(this.options.onchange){this.options.onchange(c)}this.$slider.slider("value",c);this.$text.val(c)}return this.$text.val()},_template:function(c){return'<div id="'+c.id+'" style=""><input id="text" type="text" class="ui-input" style="width: 50px; float: left"/><div id="slider" style="width: calc(100% - 60px); float: left; top: 8px; left: 10px;"/></div>'}});return{View:b}});
\ No newline at end of file
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 static/scripts/packed/mvc/ui/ui-tabs.js
--- a/static/scripts/packed/mvc/ui/ui-tabs.js
+++ b/static/scripts/packed/mvc/ui/ui-tabs.js
@@ -1,1 +1,1 @@
-define(["utils/utils"],function(a){var b=Backbone.View.extend({optionsDefault:{title_new:"",operations:null,onnew:null,min:null,max:null},initialize:function(e){this.visible=false;this.$nav=null;this.$content=null;this.first_tab=null;this.options=a.merge(e,this.optionsDefault);var c=$(this._template(this.options));this.$nav=c.find(".tab-navigation");this.$content=c.find(".tab-content");this.setElement(c);this.list={};var d=this;if(this.options.operations){$.each(this.options.operations,function(g,h){h.$el.prop("id",g);d.$nav.find(".operations").append(h.$el)})}if(this.options.onnew){var f=$(this._template_tab_new(this.options));this.$nav.append(f);f.tooltip({title:"Add a new tab",placement:"bottom",container:d.$el});f.on("click",function(g){f.tooltip("hide");d.options.onnew()})}},size:function(){return _.size(this.list)},add:function(f){var e=this;var h=f.id;var g=$(this._template_tab(f));var d=$(this._template_tab_content(f));this.list[h]=f.ondel?true:false;if(this.options.onnew){this.$nav.find("#new-tab").before(g)}else{this.$nav.append(g)}d.append(f.$el);this.$content.append(d);if(this.size()==1){g.addClass("active");d.addClass("active");this.first_tab=h}if(this.options.max&&this.size()>=this.options.max){this.$el.find("#new-tab").hide()}if(f.ondel){var c=g.find("#delete");c.tooltip({title:"Delete this tab",placement:"bottom",container:e.$el});c.on("click",function(){c.tooltip("destroy");e.$el.find(".tooltip").remove();f.ondel();return false})}g.on("click",function(i){i.preventDefault();if(f.onclick){f.onclick()}else{e.show(h)}})},del:function(c){this.$el.find("#tab-"+c).remove();this.$el.find("#"+c).remove();if(this.first_tab==c){this.first_tab=null}if(this.first_tab!=null){this.show(this.first_tab)}if(this.list[c]){delete this.list[c]}if(this.size()<this.options.max){this.$el.find("#new-tab").show()}},delRemovable:function(){for(var c in this.list){this.del(c)}},show:function(c){this.$el.fadeIn("fast");this.visible=true;if(c){this.$el.find(".tab-element").removeClass("active");this.$el.find(".tab-pane").removeClass("active");this.$el.find("#tab-"+c).addClass("active");this.$el.find("#"+c).addClass("active")}},hide:function(){this.$el.fadeOut("fast");this.visible=false},hideOperation:function(c){this.$nav.find("#"+c).hide()},showOperation:function(c){this.$nav.find("#"+c).show()},setOperation:function(e,d){var c=this.$nav.find("#"+e);c.off("click");c.on("click",d)},title:function(e,d){var c=this.$el.find("#tab-title-text-"+e);if(d){c.html(d)}return c.html()},retitle:function(d){var c=0;for(var e in this.list){this.title(e,++c+": "+d)}},_template:function(c){return'<div class="ui-tabs tabbable tabs-left"><ul id="tab-navigation" class="tab-navigation nav nav-tabs"><div class="operations" style="float: right; margin-bottom: 4px;"></div></ul><div id="tab-content" class="tab-content"/></div>'},_template_tab_new:function(c){return'<li id="new-tab"><a href="javascript:void(0);"><i class="ui-tabs-add fa fa-plus-circle"/>'+c.title_new+"</a></li>"},_template_tab:function(d){var c='<li id="tab-'+d.id+'" class="tab-element"><a id="tab-title-link-'+d.id+'" title="" href="#'+d.id+'" data-original-title=""><span id="tab-title-text-'+d.id+'" class="tab-title-text">'+d.title+"</span>";if(d.ondel){c+='<i id="delete" class="ui-tabs-delete fa fa-minus-circle"/>'}c+="</a></li>";return c},_template_tab_content:function(c){return'<div id="'+c.id+'" class="tab-pane"/>'}});return{View:b}});
\ No newline at end of file
+define(["utils/utils"],function(a){var b=Backbone.View.extend({optionsDefault:{title_new:"",operations:null,onnew:null,min:null,max:null},initialize:function(e){this.visible=false;this.$nav=null;this.$content=null;this.first_tab=null;this.options=a.merge(e,this.optionsDefault);var c=$(this._template(this.options));this.$nav=c.find(".tab-navigation");this.$content=c.find(".tab-content");this.setElement(c);this.list={};var d=this;if(this.options.operations){$.each(this.options.operations,function(g,h){h.$el.prop("id",g);d.$nav.find(".operations").append(h.$el)})}if(this.options.onnew){var f=$(this._template_tab_new(this.options));this.$nav.append(f);f.tooltip({title:"Add a new tab",placement:"bottom",container:d.$el});f.on("click",function(g){f.tooltip("hide");d.options.onnew()})}},size:function(){return _.size(this.list)},current:function(){return this.$el.find(".tab-pane.active").attr("id")},add:function(f){var e=this;var h=f.id;var g=$(this._template_tab(f));var d=$(this._template_tab_content(f));this.list[h]=f.ondel?true:false;if(this.options.onnew){this.$nav.find("#new-tab").before(g)}else{this.$nav.append(g)}d.append(f.$el);this.$content.append(d);if(this.size()==1){g.addClass("active");d.addClass("active");this.first_tab=h}if(this.options.max&&this.size()>=this.options.max){this.$el.find("#new-tab").hide()}if(f.ondel){var c=g.find("#delete");c.tooltip({title:"Delete this tab",placement:"bottom",container:e.$el});c.on("click",function(){c.tooltip("destroy");e.$el.find(".tooltip").remove();f.ondel();return false})}g.on("click",function(i){i.preventDefault();if(f.onclick){f.onclick()}else{e.show(h)}})},del:function(c){this.$el.find("#tab-"+c).remove();this.$el.find("#"+c).remove();if(this.first_tab==c){this.first_tab=null}if(this.first_tab!=null){this.show(this.first_tab)}if(this.list[c]){delete this.list[c]}if(this.size()<this.options.max){this.$el.find("#new-tab").show()}},delRemovable:function(){for(var c in this.list){this.del(c)}},show:function(c){this.$el.fadeIn("fast");this.visible=true;if(c){this.$el.find(".tab-element").removeClass("active");this.$el.find(".tab-pane").removeClass("active");this.$el.find("#tab-"+c).addClass("active");this.$el.find("#"+c).addClass("active")}},hide:function(){this.$el.fadeOut("fast");this.visible=false},hideOperation:function(c){this.$nav.find("#"+c).hide()},showOperation:function(c){this.$nav.find("#"+c).show()},setOperation:function(e,d){var c=this.$nav.find("#"+e);c.off("click");c.on("click",d)},title:function(e,d){var c=this.$el.find("#tab-title-text-"+e);if(d){c.html(d)}return c.html()},retitle:function(d){var c=0;for(var e in this.list){this.title(e,++c+": "+d)}},_template:function(c){return'<div class="ui-tabs tabbable tabs-left"><ul id="tab-navigation" class="tab-navigation nav nav-tabs"><div class="operations" style="float: right; margin-bottom: 4px;"></div></ul><div id="tab-content" class="tab-content"/></div>'},_template_tab_new:function(c){return'<li id="new-tab"><a href="javascript:void(0);"><i class="ui-tabs-add fa fa-plus-circle"/>'+c.title_new+"</a></li>"},_template_tab:function(d){var c='<li id="tab-'+d.id+'" class="tab-element"><a id="tab-title-link-'+d.id+'" title="" href="#'+d.id+'" data-original-title=""><span id="tab-title-text-'+d.id+'" class="tab-title-text">'+d.title+"</span>";if(d.ondel){c+='<i id="delete" class="ui-tabs-delete fa fa-minus-circle"/>'}c+="</a></li>";return c},_template_tab_content:function(c){return'<div id="'+c.id+'" class="tab-pane"/>'}});return{View:b}});
\ No newline at end of file
diff -r 0df64c45db90ca84e763c34919ebe9304d96f202 -r be2c86442b3e28407d7b845a90420c3feb3fe859 templates/webapps/galaxy/tool_form.api.mako
--- a/templates/webapps/galaxy/tool_form.api.mako
+++ b/templates/webapps/galaxy/tool_form.api.mako
@@ -24,9 +24,11 @@
# form configuration
self.form_config = {
- 'id' : tool.id,
- 'help' : tool_help,
- 'citations' : tool_citations
+ 'id' : tool.id,
+ 'help' : tool_help,
+ 'citations' : tool_citations,
+ 'biostar_url' : trans.app.config.biostar_url,
+ 'history_id' : trans.security.encode_id( trans.history.id )
}
%>
${h.js( "libs/bibtex", "libs/jquery/jquery-ui" )}
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d7c19ee54a91/
Changeset: d7c19ee54a91
User: jmchilton
Date: 2014-09-05 02:34:52
Summary: More fixes for library permissions backward compatibility.
Fixes a failing test in test/api/test_history_contents.py.
Affected #: 2 files
diff -r 6a718b389bf1caa5461c5ff7a7dd644f1279cd69 -r d7c19ee54a9167b97f99eb83c2600f35abdea312 lib/galaxy/webapps/galaxy/api/libraries.py
--- a/lib/galaxy/webapps/galaxy/api/libraries.py
+++ b/lib/galaxy/webapps/galaxy/api/libraries.py
@@ -284,6 +284,7 @@
action = kwd.get( 'action', None )
if action is None:
payload = kwd.get( 'payload', None )
+ del kwd[ 'payload' ]
if payload is not None:
return self.set_permissions_old( trans, library, payload, **kwd )
else:
@@ -368,6 +369,7 @@
POST /api/libraries/{encoded_library_id}/permissions
Updates the library permissions.
"""
+ import galaxy.util
params = galaxy.util.Params( payload )
permissions = {}
for k, v in trans.app.model.Library.permitted_actions.items():
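A brief, hypothetical sketch of the dispatch pattern fixed above: once 'payload' has been read out of the keyword arguments it must also be removed from them, otherwise the legacy handler receives the same value twice (once positionally, once via **kwd). The class and names below are illustrative only; kwd.pop() is used as a one-step equivalent of the diff's get() followed by del.

    class PermissionsDispatchSketch(object):

        def set_permissions(self, library, **kwd):
            action = kwd.get('action', None)
            if action is None:
                # Remove 'payload' from kwd so it is not forwarded again below.
                payload = kwd.pop('payload', None)
                if payload is not None:
                    return self.set_permissions_old(library, payload, **kwd)
            return 'new-style action: %s' % action

        def set_permissions_old(self, library, payload, **kwd):
            # Legacy handler; a duplicate 'payload' in kwd would raise TypeError.
            return 'legacy permissions for %s: %r' % (library, payload)

    print(PermissionsDispatchSketch().set_permissions('lib1', payload={'LIBRARY_ACCESS': ['role1']}))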
diff -r 6a718b389bf1caa5461c5ff7a7dd644f1279cd69 -r d7c19ee54a9167b97f99eb83c2600f35abdea312 lib/galaxy/webapps/galaxy/buildapp.py
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -299,11 +299,6 @@
path_prefix='/api/libraries/:library_id',
parent_resources=dict( member_name='library', collection_name='libraries' ) )
- webapp.mapper.resource( 'permission',
- 'permissions',
- path_prefix='/api/libraries/:library_id',
- parent_resources=dict( member_name='library', collection_name='libraries' ) )
-
_add_item_extended_metadata_controller( webapp,
name_prefix="library_dataset_",
path_prefix='/api/libraries/:library_id/contents/:library_content_id' )
https://bitbucket.org/galaxy/galaxy-central/commits/0df64c45db90/
Changeset: 0df64c45db90
User: jmchilton
Date: 2014-09-05 02:34:52
Summary: Remove abstraction around requests now that it is a required dependency.
Affected #: 7 files
diff -r d7c19ee54a9167b97f99eb83c2600f35abdea312 -r 0df64c45db90ca84e763c34919ebe9304d96f202 test/api/test_authenticate.py
--- a/test/api/test_authenticate.py
+++ b/test/api/test_authenticate.py
@@ -1,6 +1,6 @@
import base64
-from base.interactor import get_request
+from requests import get
from base import api
@@ -18,12 +18,12 @@
headers = {
"Authorization": authorization,
}
- auth_response = get_request( baseauth_url, headers=headers )
+ auth_response = get( baseauth_url, headers=headers )
self._assert_status_code_is( auth_response, 200 )
auth_dict = auth_response.json()
self._assert_has_keys( auth_dict, "api_key" )
# Verify key...
random_api_url = self._api_url( "users", use_key=False )
- random_api_response = get_request( random_api_url, params=dict( key=auth_dict[ "api_key" ] ) )
+ random_api_response = get( random_api_url, params=dict( key=auth_dict[ "api_key" ] ) )
self._assert_status_code_is( random_api_response, 200 )
diff -r d7c19ee54a9167b97f99eb83c2600f35abdea312 -r 0df64c45db90ca84e763c34919ebe9304d96f202 test/api/test_histories.py
--- a/test/api/test_histories.py
+++ b/test/api/test_histories.py
@@ -1,8 +1,7 @@
from base import api
-# requests.{post,put,get} or something like it if unavailable
-from base.interactor import post_request
-from base.interactor import put_request
-from base.interactor import get_request
+from requests import post
+from requests import put
+from requests import get
from .helpers import DatasetPopulator, wait_on
@@ -29,7 +28,7 @@
post_data = dict( name="CannotCreate" )
# Using lower-level _api_url will cause key to not be injected.
histories_url = self._api_url( "histories" )
- create_response = post_request( url=histories_url, data=post_data )
+ create_response = post( url=histories_url, data=post_data )
self._assert_status_code_is( create_response, 403 )
def test_import_export( self ):
@@ -38,7 +37,7 @@
self.dataset_populator.wait_for_history( history_id, assert_ok=True )
download_path = self._export( history_id )
full_download_url = "%s%s?key=%s" % ( self.url, download_path, self.galaxy_interactor.api_key )
- download_response = get_request( full_download_url )
+ download_response = get( full_download_url )
self._assert_status_code_is( download_response, 200 )
def history_names():
@@ -72,11 +71,11 @@
def _export(self, history_id):
export_url = self._api_url( "histories/%s/exports" % history_id, use_key=True )
- put_response = put_request( export_url )
+ put_response = put( export_url )
self._assert_status_code_is( put_response, 202 )
def export_ready_response():
- put_response = put_request( export_url )
+ put_response = put( export_url )
if put_response.status_code == 202:
return None
return put_response
diff -r d7c19ee54a9167b97f99eb83c2600f35abdea312 -r 0df64c45db90ca84e763c34919ebe9304d96f202 test/api/test_history_contents.py
--- a/test/api/test_history_contents.py
+++ b/test/api/test_history_contents.py
@@ -4,10 +4,8 @@
from .helpers import TestsDatasets
from .helpers import LibraryPopulator
from .helpers import DatasetCollectionPopulator
-from base.interactor import (
- put_request,
- delete_request,
-)
+from requests import delete
+from requests import put
# TODO: Test anonymous access.
@@ -72,7 +70,7 @@
update_url = self._api_url( "histories/%s/contents/%s" % ( self.history_id, hda1[ "id" ] ), use_key=True )
# Awkward json.dumps required here because of https://trello.com/c/CQwmCeG6
body = json.dumps( dict( deleted=True ) )
- update_response = put_request( update_url, data=body )
+ update_response = put( update_url, data=body )
self._assert_status_code_is( update_response, 200 )
show_response = self.__show( hda1 )
assert str( show_response.json()[ "deleted" ] ).lower() == "true"
@@ -82,7 +80,7 @@
self._wait_for_history( self.history_id )
assert str( self.__show( hda1 ).json()[ "deleted" ] ).lower() == "false"
url = self._api_url( "histories/%s/contents/%s" % ( self.history_id, hda1["id" ] ), use_key=True )
- delete_response = delete_request( url )
+ delete_response = delete( url )
assert delete_response.status_code < 300 # Something in the 200s :).
assert str( self.__show( hda1 ).json()[ "deleted" ] ).lower() == "true"
@@ -119,7 +117,7 @@
assert not dataset_collection[ "deleted" ]
- delete_response = delete_request( self._api_url( collection_url, use_key=True ) )
+ delete_response = delete( self._api_url( collection_url, use_key=True ) )
self._assert_status_code_is( delete_response, 200 )
show_response = self._get( collection_url )
@@ -137,7 +135,7 @@
update_url = self._api_url( "histories/%s/contents/dataset_collections/%s" % ( self.history_id, hdca[ "id" ] ), use_key=True )
# Awkward json.dumps required here because of https://trello.com/c/CQwmCeG6
body = json.dumps( dict( name="newnameforpair" ) )
- update_response = put_request( update_url, data=body )
+ update_response = put( update_url, data=body )
self._assert_status_code_is( update_response, 200 )
show_response = self.__show( hdca )
assert str( show_response.json()[ "name" ] ) == "newnameforpair"
diff -r d7c19ee54a9167b97f99eb83c2600f35abdea312 -r 0df64c45db90ca84e763c34919ebe9304d96f202 test/api/test_pages.py
--- a/test/api/test_pages.py
+++ b/test/api/test_pages.py
@@ -1,6 +1,6 @@
from galaxy.exceptions import error_codes
from base import api
-from base.interactor import delete_request
+from requests import delete
from operator import itemgetter
@@ -68,17 +68,17 @@
def test_delete( self ):
response_json = self._create_valid_page_with_slug( "testdelete" )
- delete_response = delete_request( self._api_url( "pages/%s" % response_json[ 'id' ], use_key=True ) )
+ delete_response = delete( self._api_url( "pages/%s" % response_json[ 'id' ], use_key=True ) )
self._assert_status_code_is( delete_response, 200 )
def test_404_on_delete_unknown_page( self ):
- delete_response = delete_request( self._api_url( "pages/%s" % self._random_key(), use_key=True ) )
+ delete_response = delete( self._api_url( "pages/%s" % self._random_key(), use_key=True ) )
self._assert_status_code_is( delete_response, 404 )
self._assert_error_code_is( delete_response, error_codes.USER_OBJECT_NOT_FOUND )
def test_403_on_delete_unowned_page( self ):
page_response = self._create_valid_page_as( "others_page(a)bx.psu.edu", "otherspage" )
- delete_response = delete_request( self._api_url( "pages/%s" % page_response[ "id" ], use_key=True ) )
+ delete_response = delete( self._api_url( "pages/%s" % page_response[ "id" ], use_key=True ) )
self._assert_status_code_is( delete_response, 403 )
self._assert_error_code_is( delete_response, error_codes.USER_DOES_NOT_OWN_ITEM )
diff -r d7c19ee54a9167b97f99eb83c2600f35abdea312 -r 0df64c45db90ca84e763c34919ebe9304d96f202 test/api/test_search.py
--- a/test/api/test_search.py
+++ b/test/api/test_search.py
@@ -1,5 +1,5 @@
from base import api
-from base.interactor import delete_request
+from requests import delete
from .helpers import WorkflowPopulator
@@ -14,7 +14,7 @@
# Deleted
delete_url = self._api_url( "workflows/%s" % workflow_id, use_key=True )
- delete_request( delete_url )
+ delete( delete_url )
search_response = self.__search( "select * from workflow where deleted = False" )
assert not self.__has_result_with_name( search_response, "test_for_search (imported from API)" ), search_response.json()
diff -r d7c19ee54a9167b97f99eb83c2600f35abdea312 -r 0df64c45db90ca84e763c34919ebe9304d96f202 test/api/test_workflows.py
--- a/test/api/test_workflows.py
+++ b/test/api/test_workflows.py
@@ -8,7 +8,8 @@
from .helpers import DatasetCollectionPopulator
from .helpers import skip_without_tool
-from base.interactor import delete_request # requests like delete
+from requests import delete
+
from galaxy.exceptions import error_codes
@@ -40,7 +41,7 @@
workflow_name = "test_delete (imported from API)"
self._assert_user_has_workflow_with_name( workflow_name )
workflow_url = self._api_url( "workflows/%s" % workflow_id, use_key=True )
- delete_response = delete_request( workflow_url )
+ delete_response = delete( workflow_url )
self._assert_status_code_is( delete_response, 200 )
# Make sure workflow is no longer in index by default.
assert workflow_name not in self.__workflow_names()
@@ -49,7 +50,7 @@
workflow_id = self.workflow_populator.simple_workflow( "test_other_delete" )
with self._different_user():
workflow_url = self._api_url( "workflows/%s" % workflow_id, use_key=True )
- delete_response = delete_request( workflow_url )
+ delete_response = delete( workflow_url )
self._assert_status_code_is( delete_response, 403 )
def test_index( self ):
diff -r d7c19ee54a9167b97f99eb83c2600f35abdea312 -r 0df64c45db90ca84e763c34919ebe9304d96f202 test/base/interactor.py
--- a/test/base/interactor.py
+++ b/test/base/interactor.py
@@ -10,7 +10,9 @@
import galaxy.model
from galaxy.model.orm import and_, desc
from functional import database_contexts
-from json import dumps, loads
+from requests import get
+from requests import post
+from json import dumps
from logging import getLogger
log = getLogger( __name__ )
@@ -366,7 +368,7 @@
key = self.api_key if not admin else self.master_api_key
data = data.copy()
data['key'] = key
- return post_request( "%s/%s" % (self.api_url, path), data=data, files=files )
+ return post( "%s/%s" % (self.api_url, path), data=data, files=files )
def _get( self, path, data={}, key=None, admin=False ):
if not key:
@@ -376,7 +378,7 @@
if path.startswith("/api"):
path = path[ len("/api"): ]
url = "%s/%s" % (self.api_url, path)
- return get_request( url, params=data )
+ return get( url, params=data )
class GalaxyInteractorTwill( object ):
@@ -496,9 +498,3 @@
'api': GalaxyInteractorApi,
'twill': GalaxyInteractorTwill,
}
-
-
-from requests import get as get_request
-from requests import post as post_request
-from requests import put as put_request
-from requests import delete as delete_request
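For reference, a minimal sketch (with a placeholder Galaxy URL and API key) of calling the API with requests directly, as the tests above now do in place of the removed base.interactor aliases:

    from requests import delete, get, post, put

    api_url = 'http://localhost:8080/api'   # assumption: local Galaxy instance
    api_key = 'YOUR_API_KEY'                # assumption: placeholder API key

    # GET with query parameters (previously get_request).
    index = get('%s/histories' % api_url, params=dict(key=api_key))

    # POST form data (previously post_request); assumes the key is valid so the
    # response body contains the new history's encoded id.
    created = post('%s/histories' % api_url, data=dict(key=api_key, name='example'))
    history_id = created.json()['id']

    # PUT to trigger a history export (previously put_request).
    exported = put('%s/histories/%s/exports?key=%s' % (api_url, history_id, api_key))

    # DELETE the history again (previously delete_request).
    removed = delete('%s/histories/%s?key=%s' % (api_url, history_id, api_key))

    for response in (index, created, exported, removed):
        print(response.request.method, response.status_code)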
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.