galaxy-commits
January 2014
- 1 participant
- 280 discussions
commit/galaxy-central: guerler: Charts: Adjust chart data volume
by commits-noreply@bitbucket.org 13 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/1b7293faeb8f/
Changeset: 1b7293faeb8f
User: guerler
Date: 2014-01-13 17:54:52
Summary: Charts: Adjust chart data volume
Affected #: 1 file
diff -r 129e16a1e9463663631823a7afb04f7aa4f3e051 -r 1b7293faeb8ffb0778726f9541e8247c6010d2d8 config/plugins/visualizations/charts/static/models/datasets.js
--- a/config/plugins/visualizations/charts/static/models/datasets.js
+++ b/config/plugins/visualizations/charts/static/models/datasets.js
@@ -8,7 +8,7 @@
optionsDefault : {
limit : 20,
pace : 1000,
- max : 10
+ max : 5
},
// list
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
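The two "Charts" commits in this thread and the next tune the optionsDefault block in datasets.js, which throttles how much tabular data the plugin requests at once: limit rows per request, pace milliseconds between requests, and at most max blocks in total. The real code is a Backbone.js model; the following is only a minimal Python sketch of that chunked-fetch idea, with the query callable and every name hypothetical.

# Hypothetical sketch of the limit/pace/max chunked-fetch pattern
# from datasets.js; not Galaxy code.
import time

def fetch_in_chunks(query, limit=20, pace=1.0, max_blocks=5):
    """Pull up to max_blocks blocks of `limit` rows, sleeping `pace`
    seconds between requests (the plugin's pace is 1000 ms)."""
    rows = []
    for block_index in range(max_blocks):
        block = query(offset=block_index * limit, limit=limit)
        rows.extend(block)
        if len(block) < limit:  # ran out of data early
            break
        time.sleep(pace)
    return rows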
commit/galaxy-central: guerler: Charts: Increase chart data volume limit
by commits-noreply@bitbucket.org 13 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/129e16a1e946/
Changeset: 129e16a1e946
User: guerler
Date: 2014-01-13 17:18:56
Summary: Charts: Increase chart data volume limit
Affected #: 1 file
diff -r 9bc3fa17c15d00a219fe8053af4072b4a4217507 -r 129e16a1e9463663631823a7afb04f7aa4f3e051 config/plugins/visualizations/charts/static/models/datasets.js
--- a/config/plugins/visualizations/charts/static/models/datasets.js
+++ b/config/plugins/visualizations/charts/static/models/datasets.js
@@ -8,7 +8,7 @@
optionsDefault : {
limit : 20,
pace : 1000,
- max : 2
+ max : 10
},
// list
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: dannon: Eliminate simplejson for real this time. Update WebHelpers to v 1.3 (no longer requires simplejson), fix deprecated methods.
by commits-noreply@bitbucket.org 13 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/9bc3fa17c15d/
Changeset: 9bc3fa17c15d
User: dannon
Date: 2014-01-13 16:54:20
Summary: Eliminate simplejson for real this time. Update WebHelpers to v 1.3 (no longer requires simplejson), fix deprecated methods.
Affected #: 2 files
diff -r 4eeac1dbfce3f399312e58e92d820e1153ba1a09 -r 9bc3fa17c15d00a219fe8053af4072b4a4217507 eggs.ini
--- a/eggs.ini
+++ b/eggs.ini
@@ -26,7 +26,6 @@
pysam = 0.4.2
pysqlite = 2.5.6
python_lzo = 1.08_2.03_static
-simplejson = 2.1.1
threadframe = 0.2
guppy = 0.1.8
SQLAlchemy = 0.7.9
@@ -61,7 +60,7 @@
Tempita = 0.5.1
twill = 0.9
WebError = 0.8a
-WebHelpers = 0.2
+WebHelpers = 1.3
WebOb = 0.8.5
wsgiref = 0.1.2
Babel = 0.9.4
diff -r 4eeac1dbfce3f399312e58e92d820e1153ba1a09 -r 9bc3fa17c15d00a219fe8053af4072b4a4217507 lib/galaxy/web/framework/helpers/__init__.py
--- a/lib/galaxy/web/framework/helpers/__init__.py
+++ b/lib/galaxy/web/framework/helpers/__init__.py
@@ -1,14 +1,19 @@
-import pkg_resources
+"""
+Galaxy web framework helpers
+"""
-pkg_resources.require( "WebHelpers" )
-from webhelpers import date, stylesheet_link_tag, javascript_include_tag, url_for
+import time
+from cgi import escape
+from datetime import datetime, timedelta
+from galaxy import eggs
+from galaxy.util import hash_util
+from galaxy.util.json import to_json_string
+eggs.require( "WebHelpers" )
+from webhelpers import date
+from webhelpers.html.tags import stylesheet_link, javascript_link
-from galaxy.util.json import to_json_string
-from galaxy.util import hash_util
-from datetime import datetime, timedelta
-import time
-
-from cgi import escape
+eggs.require( "Routes" )
+from routes import url_for
server_starttime = int(time.time())
@@ -25,6 +30,9 @@
return date.distance_of_time_in_words( x, datetime.utcnow() ).replace("about", "~") + " ago"
def iff( a, b, c ):
+ """
+ Ternary shortcut
+ """
if a:
return b
else:
@@ -48,7 +56,7 @@
Cache-bust with time that server started running on
"""
- return "\n".join( [ stylesheet_link_tag( "/static/style/" + name + ".css?v=%s" % server_starttime ) for name in args ] )
+ return "\n".join( [ stylesheet_link( "/static/style/" + name + ".css?v=%s" % server_starttime ) for name in args ] )
def js_helper( prefix, *args ):
"""
@@ -57,7 +65,7 @@
Cache-bust with time that server started running on
"""
- return "\n".join( [ javascript_include_tag( prefix + name + ".js?v=%s" % server_starttime ) for name in args ] )
+ return "\n".join( [ javascript_link( prefix + name + ".js?v=%s" % server_starttime ) for name in args ] )
def js( *args ):
"""
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
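The WebHelpers 1.3 migration above keeps Galaxy's cache-busting trick: every stylesheet and script URL gets a ?v= query parameter equal to the server start time, so browsers refetch static assets after each restart. A minimal sketch of that pattern, using a stand-in for webhelpers.html.tags.stylesheet_link rather than the real import:

# Sketch only: the real helpers live in lib/galaxy/web/framework/helpers.
import time

server_starttime = int(time.time())

def stylesheet_link(url):
    # stand-in for webhelpers.html.tags.stylesheet_link
    return '<link href="%s" rel="stylesheet" type="text/css" />' % url

def css(*args):
    # mirrors the css() helper in the diff: one <link> per stylesheet
    # name, cache-busted with the time the server started running
    return "\n".join(
        stylesheet_link("/static/style/%s.css?v=%s" % (name, server_starttime))
        for name in args
    )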
commit/galaxy-central: dan: Update create_applet_tag_peek of images.py (used for e.g. GMAJ) to display an error message if Java is not installed/enabled for the browser.
by commits-noreply@bitbucket.org 13 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/4eeac1dbfce3/
Changeset: 4eeac1dbfce3
User: dan
Date: 2014-01-13 16:35:44
Summary: Update create_applet_tag_peek of images.py (used for e.g. GMAJ) to display an error message if Java is not installed/enabled for the browser.
Affected #: 1 file
diff -r c250b1cfc60fe2b56ddcf18e2cf2543631d4c26f -r 4eeac1dbfce3f399312e58e92d820e1153ba1a09 lib/galaxy/datatypes/images.py
--- a/lib/galaxy/datatypes/images.py
+++ b/lib/galaxy/datatypes/images.py
@@ -213,7 +213,6 @@
def create_applet_tag_peek( class_name, archive, params ):
text = """
-<!--[if !IE]>--><object classid="java:%s"
type="application/x-java-applet"
height="30" width="200" align="center" >
@@ -221,17 +220,14 @@
for name, value in params.iteritems():
text += """<param name="%s" value="%s"/>""" % ( name, value )
text += """
-<!--<![endif]--><object classid="clsid:8AD9C840-044E-11D1-B3E9-00805F499D93"
height="30" width="200" ><param name="code" value="%s" /><param name="archive" value="%s"/>""" % ( class_name, archive )
for name, value in params.iteritems():
text += """<param name="%s" value="%s"/>""" % ( name, value )
- text += """</object>
-<!--[if !IE]>-->
+ text += """<div class="errormessage">You must install and enable Java in your browser in order to access this applet.<div></object></object>
-<!--<![endif]-->
"""
return """<div><p align="center">%s</p></div>""" % text
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
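For context on the change above: dropping the IE conditional comments nests the two <object> elements directly, so the error <div> becomes ordinary fallback content that browsers render only when neither the Java plugin object nor the ActiveX object can start the applet. A rough, hypothetical sketch of the markup the patched create_applet_tag_peek produces:

# Illustrative only; the real template strings are in
# lib/galaxy/datatypes/images.py.
def applet_tag_sketch(class_name, archive, params):
    param_tags = "".join('<param name="%s" value="%s"/>' % (k, v)
                         for k, v in params.items())
    return (
        '<object classid="java:%s" type="application/x-java-applet" '
        'height="30" width="200">' % class_name
        + param_tags
        + '<object classid="clsid:8AD9C840-044E-11D1-B3E9-00805F499D93" '
          'height="30" width="200">'
        + '<param name="code" value="%s"/><param name="archive" value="%s"/>'
          % (class_name, archive)
        + param_tags
        + '<div class="errormessage">You must install and enable Java in '
          'your browser in order to access this applet.</div>'
        + '</object></object>'
    )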
commit/galaxy-central: 3 new changesets
by commits-noreply@bitbucket.org 13 Jan '14
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/2588b7b42ecd/
Changeset: 2588b7b42ecd
User: jmchilton
Date: 2014-01-13 15:58:22
Summary: Non-exhaustive tools related PEP-8 fixes.
Fixes for api/tools.py, tools/__init__.py, and tools/parameters/{__init__.py,basic.py}.
Affected #: 4 files
diff -r e40e2965ac65f3a52778838e2a6879aadaf18add -r 2588b7b42ecdd9998c878f70420c75543bd5219d lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -20,7 +20,7 @@
from math import isinf
from galaxy import eggs
-eggs.require( "MarkupSafe" ) #MarkupSafe must load before mako
+eggs.require( "MarkupSafe" ) # MarkupSafe must load before mako
eggs.require( "Mako" )
eggs.require( "elementtree" )
eggs.require( "Paste" )
@@ -69,11 +69,13 @@
log = logging.getLogger( __name__ )
-WORKFLOW_PARAMETER_REGULAR_EXPRESSION = re.compile( '''\$\{.+?\}''' )
+WORKFLOW_PARAMETER_REGULAR_EXPRESSION = re.compile( '''\$\{.+?\}''' )
+
class ToolNotFoundException( Exception ):
pass
+
def to_dict_helper( obj, kwargs ):
""" Helper function that provides the appropriate kwargs to to_dict an object. """
@@ -83,6 +85,7 @@
return obj.to_dict( **kwargs )
+
class ToolBox( object, Dictifiable ):
"""Container for a collection of tools"""
@@ -421,6 +424,7 @@
return self.app.install_model.context.query( self.app.install_model.ToolVersion ) \
.filter( self.app.install_model.ToolVersion.table.c.tool_id == tool_id ) \
.first()
+
def __get_tool_shed_repository( self, tool_shed, name, owner, installed_changeset_revision ):
return self.app.install_model.context.query( self.app.install_model.ToolShedRepository ) \
.filter( and_( self.app.install_model.ToolShedRepository.table.c.tool_shed == tool_shed,
@@ -730,8 +734,8 @@
# Produce panel.
rval = []
kwargs = dict(
- trans = trans,
- link_details = True
+ trans=trans,
+ link_details=True
)
for elt in panel_elts:
rval.append( to_dict_helper( elt, kwargs ) )
@@ -798,7 +802,6 @@
return None
-
class ToolSection( object, Dictifiable ):
"""
A group of tools with similar type/purpose that will be displayed as a
@@ -828,8 +831,8 @@
section_dict = super( ToolSection, self ).to_dict()
section_elts = []
kwargs = dict(
- trans = trans,
- link_details = link_details
+ trans=trans,
+ link_details=link_details
)
for elt in self.elems.values():
section_elts.append( to_dict_helper( elt, kwargs ) )
@@ -837,6 +840,7 @@
return section_dict
+
class ToolSectionLabel( object, Dictifiable ):
"""
A label for a set of tools that can be displayed above groups of tools
@@ -850,6 +854,7 @@
self.id = elem.get( "id" )
self.version = elem.get( "version" ) or ''
+
class DefaultToolState( object ):
"""
Keeps track of the state of a users interaction with a tool between
@@ -860,6 +865,7 @@
self.page = 0
self.rerun_remap_job_id = None
self.inputs = None
+
def encode( self, tool, app, secure=True ):
"""
Convert the data to a string
@@ -877,6 +883,7 @@
return "%s:%s" % ( a, b )
else:
return value
+
def decode( self, value, tool, app, secure=True ):
"""
Restore the state from a string
@@ -896,6 +903,7 @@
self.rerun_remap_job_id = None
self.inputs = params_from_strings( tool.inputs, values, app, ignore_errors=True )
+
class ToolOutput( object, Dictifiable ):
"""
Represents an output datasets produced by a tool. For backward
@@ -907,7 +915,7 @@
dict_collection_visible_keys = ( 'name', 'format', 'label', 'hidden' )
def __init__( self, name, format=None, format_source=None, metadata_source=None,
- parent=None, label=None, filters = None, actions = None, hidden=False ):
+ parent=None, label=None, filters=None, actions=None, hidden=False ):
self.name = name
self.format = format
self.format_source = format_source
@@ -993,16 +1001,19 @@
# Parse XML element containing configuration
self.parse( root, guid=guid )
self.external_runJob_script = app.config.drmaa_external_runjob_script
+
@property
def sa_session( self ):
"""Returns a SQLAlchemy session"""
return self.app.model.context
+
@property
def tool_version( self ):
"""Return a ToolVersion if one exists for our id"""
return self.app.install_model.context.query( self.app.install_model.ToolVersion ) \
.filter( self.app.install_model.ToolVersion.table.c.tool_id == self.id ) \
.first()
+
@property
def tool_versions( self ):
# If we have versions, return them.
@@ -1010,6 +1021,7 @@
if tool_version:
return tool_version.get_versions( self.app )
return []
+
@property
def tool_version_ids( self ):
# If we have versions, return a list of their tool_ids.
@@ -1017,6 +1029,7 @@
if tool_version:
return tool_version.get_version_ids( self.app )
return []
+
@property
def tool_shed_repository( self ):
# If this tool is included in an installed tool shed repository, return it.
@@ -1075,7 +1088,7 @@
:returns: galaxy.jobs.JobDestination -- The destination definition and runner parameters.
"""
return self.app.job_config.get_destination(self.__get_job_tool_configuration(job_params=job_params).destination)
-
+
def get_panel_section( self ):
for key, item in self.app.toolbox.integrated_tool_panel.items():
if item:
@@ -1099,7 +1112,7 @@
# Get the (user visible) name of the tool
self.name = root.get( "name" )
if not self.name:
- raise Exception, "Missing tool 'name'"
+ raise Exception( "Missing tool 'name'" )
# Get the UNIQUE id for the tool
self.old_id = root.get( "id" )
if guid is None:
@@ -1107,7 +1120,7 @@
else:
self.id = guid
if not self.id:
- raise Exception, "Missing tool 'id'"
+ raise Exception( "Missing tool 'id'" )
self.version = root.get( "version" )
if not self.version:
# For backward compatibility, some tools may not have versions yet.
@@ -1127,7 +1140,7 @@
# Command line (template). Optional for tools that do not invoke a local program
command = root.find("command")
if command is not None and command.text is not None:
- self.command = command.text.lstrip() # get rid of leading whitespace
+ self.command = command.text.lstrip() # get rid of leading whitespace
# Must pre-pend this AFTER processing the cheetah command template
self.interpreter = command.get( "interpreter", None )
else:
@@ -1169,14 +1182,15 @@
self_ids = [ self.id.lower() ]
if self.old_id != self.id:
# Handle toolshed guids
- self_ids = [ self.id.lower(), self.id.lower().rsplit('/',1)[0], self.old_id.lower() ]
+ self_ids = [ self.id.lower(), self.id.lower().rsplit('/', 1)[0], self.old_id.lower() ]
self.all_ids = self_ids
# In the toolshed context, there is no job config.
if 'job_config' in dir(self.app):
self.job_tool_configurations = self.app.job_config.get_job_tool_configurations(self_ids)
# Is this a 'hidden' tool (hidden in tool menu)
self.hidden = xml_text(root, "hidden")
- if self.hidden: self.hidden = string_as_bool(self.hidden)
+ if self.hidden:
+ self.hidden = string_as_bool(self.hidden)
# Load any tool specific code (optional) Edit: INS 5/29/2007,
# allow code files to have access to the individual tool's
# "module" if it has one. Allows us to reuse code files, etc.
@@ -1186,7 +1200,7 @@
for hook_elem in code_elem.findall("hook"):
for key, value in hook_elem.items():
# map hook to function
- self.hook_map[key]=value
+ self.hook_map[key] = value
file_name = code_elem.get("file")
code_path = os.path.join( self.tool_dir, file_name )
execfile( code_path, self.code_namespace )
@@ -1308,7 +1322,7 @@
elif len( enctypes ) == 1:
self.enctype = enctypes.pop()
else:
- raise Exception, "Conflicting required enctypes: %s" % str( enctypes )
+ raise Exception( "Conflicting required enctypes: %s" % str( enctypes ) )
# Check if the tool either has no parameters or only hidden (and
# thus hardcoded) FIXME: hidden parameters aren't
# parameters at all really, and should be passed in a different
@@ -1318,6 +1332,7 @@
if not isinstance( param, ( HiddenToolParameter, BaseURLToolParameter ) ):
self.input_required = True
break
+
def parse_help( self, root ):
"""
Parse the help text for the tool. Formatted in reStructuredText, but
@@ -1365,6 +1380,7 @@
# Pad out help pages to match npages ... could this be done better?
while len( self.help_by_page ) < self.npages:
self.help_by_page.append( self.help )
+
def parse_outputs( self, root ):
"""
Parse <outputs> elements and fill in self.outputs (keyed by name)
@@ -1601,6 +1617,7 @@
else:
display = None
return display, inputs
+
def parse_input_elem( self, parent_elem, enctypes, context=None ):
"""
Parse a parent element whose children are inputs -- these could be
@@ -1653,7 +1670,7 @@
input_elem = elem.find( "param" )
assert input_elem is not None, "<conditional> must have a child <param>"
group.test_param = self.parse_param_elem( input_elem, enctypes, context )
- possible_cases = list( group.test_param.legal_values ) #store possible cases, undefined whens will have no inputs
+ possible_cases = list( group.test_param.legal_values ) # store possible cases, undefined whens will have no inputs
# Must refresh when test_param changes
group.test_param.refresh_on_change = True
# And a set of possible cases
@@ -1694,6 +1711,7 @@
param.ref_input = context[ param.data_ref ]
self.input_params.append( param )
return rval
+
def parse_param_elem( self, input_elem, enctypes, context ):
"""
Parse a single "<param>" element and return a ToolParameter instance.
@@ -1777,12 +1795,14 @@
raise Exception( "'get_param_html_map' only supported for simple paramters" )
rval[key] = param.get_html( trans, other_values=other_values )
return rval
+
def get_param( self, key ):
"""
Returns the parameter named `key` or None if there is no such
parameter.
"""
return self.inputs.get( key, None )
+
def get_hook(self, name):
"""
Returns an object from the code file referenced by `code_namespace`
@@ -1795,6 +1815,7 @@
elif name in self.code_namespace:
return self.code_namespace[name]
return None
+
def visit_inputs( self, value, callback ):
"""
Call the function `callback` on each parameter of this tool. Visits
@@ -1811,6 +1832,7 @@
callback( "", input, value[input.name] )
else:
input.visit_inputs( "", value[input.name], callback )
+
def handle_input( self, trans, incoming, history=None, old_errors=None, process_state='update', source='html' ):
"""
Process incoming parameters for this tool from the dict `incoming`,
@@ -1955,6 +1977,7 @@
[ self.find_fieldstorage( y ) for y in x.values() ]
elif type( x ) is types.ListType:
[ self.find_fieldstorage( y ) for y in x ]
+
def handle_interrupted( self, trans, inputs ):
"""
Upon handling inputs, if it appears that we have received an incomplete
@@ -2320,6 +2343,7 @@
errors[ input.name ] = error
state[ input.name ] = value
return errors
+
@property
def params_with_missing_data_table_entry( self ):
"""
@@ -2333,6 +2357,7 @@
if options and options.missing_tool_data_table_name and input_param not in params:
params.append( input_param )
return params
+
@property
def params_with_missing_index_file( self ):
"""
@@ -2346,6 +2371,7 @@
if options and options.missing_index_file and input_param not in params:
params.append( input_param )
return params
+
def get_static_param_values( self, trans ):
"""
Returns a map of parameter names and values if the tool does not
@@ -2361,6 +2387,7 @@
else:
raise Exception( "Unexpected parameter type" )
return args
+
def execute( self, trans, incoming={}, set_output_hid=True, history=None, **kwargs ):
"""
Execute the tool using parameter values in `incoming`. This just
@@ -2369,10 +2396,13 @@
when run will build the tool's outputs, e.g. `DefaultToolAction`.
"""
return self.tool_action.execute( self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs )
+
def params_to_strings( self, params, app ):
return params_to_strings( self.inputs, params, app )
+
def params_from_strings( self, params, app, ignore_errors=False ):
return params_from_strings( self.inputs, params, app, ignore_errors )
+
def check_and_update_param_values( self, values, trans, update_values=True, allow_workflow_parameters=False ):
"""
Check that all parameters have values, and fill in with default
@@ -2382,6 +2412,7 @@
messages = {}
self.check_and_update_param_values_helper( self.inputs, values, trans, messages, update_values=update_values, allow_workflow_parameters=allow_workflow_parameters )
return messages
+
def check_and_update_param_values_helper( self, inputs, values, trans, messages, context=None, prefix="", update_values=True, allow_workflow_parameters=False ):
"""
Recursive helper for `check_and_update_param_values_helper`
@@ -2438,6 +2469,7 @@
messages[ input.name ] = "Value no longer valid for '%s%s', replaced with default" % ( prefix, input.label )
if update_values:
values[ input.name ] = input.get_initial_value( trans, context )
+
def handle_unvalidated_param_values( self, input_values, app ):
"""
Find any instances of `UnvalidatedValue` within input_values and
@@ -2448,6 +2480,7 @@
if not self.check_values:
return
self.handle_unvalidated_param_values_helper( self.inputs, input_values, app )
+
def handle_unvalidated_param_values_helper( self, inputs, input_values, app, context=None, prefix="" ):
"""
Recursive helper for `handle_unvalidated_param_values`
@@ -2488,6 +2521,7 @@
% ( prefix, input.label, e )
raise LateValidationError( message )
input_values[ input.name ] = value
+
def handle_job_failure_exception( self, e ):
"""
Called by job.fail when an exception is generated to allow generation
@@ -2499,6 +2533,7 @@
if isinstance( e, LateValidationError ):
message = e.message
return message
+
def build_param_dict( self, incoming, input_datasets, output_datasets, output_paths, job_working_directory ):
"""
Build the dictionary of parameters for substituting into the command
@@ -2754,6 +2789,7 @@
# Remove newlines
redirect_url_params = redirect_url_params.replace( "\n", " " ).replace( "\r", " " )
return redirect_url_params
+
def parse_redirect_url( self, data, param_dict ):
"""
Parse the REDIRECT_URL tool param. Tools that send data to an external
@@ -2793,6 +2829,7 @@
USERNAME = 'Anonymous'
redirect_url += "&USERNAME=%s" % USERNAME
return redirect_url
+
def call_hook( self, hook_name, *args, **kwargs ):
"""
Call the custom code hook function identified by 'hook_name' if any,
@@ -2808,15 +2845,19 @@
original_message = e.args[0]
e.args = ( "Error in '%s' hook '%s', original message: %s" % ( self.name, hook_name, original_message ), )
raise
+
def exec_before_job( self, app, inp_data, out_data, param_dict={} ):
pass
- def exec_after_process( self, app, inp_data, out_data, param_dict, job = None ):
+
+ def exec_after_process( self, app, inp_data, out_data, param_dict, job=None ):
pass
- def job_failed( self, job_wrapper, message, exception = False ):
+
+ def job_failed( self, job_wrapper, message, exception=False ):
"""
Called when a job has failed
"""
pass
+
def collect_associated_files( self, output, job_working_directory ):
"""
Find extra files in the job working directory and move them into
@@ -2835,10 +2876,11 @@
for f in files:
self.app.object_store.update_from_file(hda.dataset,
extra_dir=extra_dir,
- alt_name = f,
- file_name = os.path.join(root, f),
- create = True,
- preserve_symlinks = True )
+ alt_name=f,
+ file_name=os.path.join(root, f),
+ create=True,
+ preserve_symlinks=True
+ )
# Clean up after being handled by object store.
# FIXME: If the object (e.g., S3) becomes async, this will
# cause issues so add it to the object store functionality?
@@ -2848,6 +2890,7 @@
except Exception, e:
log.debug( "Error in collect_associated_files: %s" % ( e ) )
continue
+
def collect_child_datasets( self, output, job_working_directory ):
"""
Look for child dataset files, create HDA and attach to parent.
@@ -2858,9 +2901,9 @@
for name, outdata in output.items():
filenames = []
if 'new_file_path' in self.app.config.collect_outputs_from:
- filenames.extend( glob.glob(os.path.join(self.app.config.new_file_path,"child_%i_*" % outdata.id) ) )
+ filenames.extend( glob.glob(os.path.join(self.app.config.new_file_path, "child_%i_*" % outdata.id) ) )
if 'job_working_directory' in self.app.config.collect_outputs_from:
- filenames.extend( glob.glob(os.path.join(job_working_directory,"child_%i_*" % outdata.id) ) )
+ filenames.extend( glob.glob(os.path.join(job_working_directory, "child_%i_*" % outdata.id) ) )
for filename in filenames:
if not name in children:
children[name] = {}
@@ -2909,12 +2952,14 @@
# Need to update all associated output hdas, i.e. history was
# shared with job running
for dataset in outdata.dataset.history_associations:
- if outdata == dataset: continue
+ if outdata == dataset:
+ continue
# Create new child dataset
- child_data = child_dataset.copy( parent_id = dataset.id )
+ child_data = child_dataset.copy( parent_id=dataset.id )
self.sa_session.add( child_data )
self.sa_session.flush()
return children
+
def collect_primary_datasets( self, output, job_working_directory ):
"""
Find any additional datasets generated by a tool and attach (for
@@ -2937,9 +2982,9 @@
for name, outdata in output.items():
filenames = []
if 'new_file_path' in self.app.config.collect_outputs_from:
- filenames.extend( glob.glob(os.path.join(self.app.config.new_file_path,"primary_%i_*" % outdata.id) ) )
+ filenames.extend( glob.glob(os.path.join(self.app.config.new_file_path, "primary_%i_*" % outdata.id) ) )
if 'job_working_directory' in self.app.config.collect_outputs_from:
- filenames.extend( glob.glob(os.path.join(job_working_directory,"primary_%i_*" % outdata.id) ) )
+ filenames.extend( glob.glob(os.path.join(job_working_directory, "primary_%i_*" % outdata.id) ) )
for filename in filenames:
if not name in primary_datasets:
primary_datasets[name] = {}
@@ -2948,8 +2993,10 @@
parent_id = int(fields.pop(0))
designation = fields.pop(0)
visible = fields.pop(0).lower()
- if visible == "visible": visible = True
- else: visible = False
+ if visible == "visible":
+ visible = True
+ else:
+ visible = False
ext = fields.pop(0).lower()
dbkey = outdata.dbkey
if fields:
@@ -2999,7 +3046,8 @@
# Need to update all associated output hdas, i.e. history was
# shared with job running
for dataset in outdata.dataset.history_associations:
- if outdata == dataset: continue
+ if outdata == dataset:
+ continue
new_data = primary_data.copy()
dataset.history.add( new_data )
self.sa_session.add( new_data )
@@ -3029,7 +3077,7 @@
if io_details:
tool_dict[ 'inputs' ] = [ input.to_dict( trans ) for input in self.inputs.values() ]
tool_dict[ 'outputs' ] = [ output.to_dict() for output in self.outputs.values() ]
-
+
tool_dict[ 'panel_section_id' ], tool_dict[ 'panel_section_name' ] = self.get_panel_section()
return tool_dict
@@ -3044,6 +3092,7 @@
JSONified within the contents of an output dataset
"""
tool_type = 'output_parameter_json'
+
def _prepare_json_list( self, param_list ):
rval = []
for value in param_list:
@@ -3054,6 +3103,7 @@
else:
rval.append( str( value ) )
return rval
+
def _prepare_json_param_dict( self, param_dict ):
rval = {}
for key, value in param_dict.iteritems():
@@ -3064,11 +3114,12 @@
else:
rval[ key ] = str( value )
return rval
+
def exec_before_job( self, app, inp_data, out_data, param_dict=None ):
if param_dict is None:
param_dict = {}
json_params = {}
- json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict ) #it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
+ json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict ) # it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
json_params[ 'output_data' ] = []
json_params[ 'job_config' ] = dict( GALAXY_DATATYPES_CONF_FILE=param_dict.get( 'GALAXY_DATATYPES_CONF_FILE' ), GALAXY_ROOT_DIR=param_dict.get( 'GALAXY_ROOT_DIR' ), TOOL_PROVIDED_JOB_METADATA_FILE=jobs.TOOL_PROVIDED_JOB_METADATA_FILE )
json_filename = None
@@ -3078,12 +3129,12 @@
#allow multiple files to be created
file_name = str( wrapped_data )
extra_files_path = str( wrapped_data.files_path )
- data_dict = dict( out_data_name = out_name,
- ext = data.ext,
- dataset_id = data.dataset.id,
- hda_id = data.id,
- file_name = file_name,
- extra_files_path = extra_files_path )
+ data_dict = dict( out_data_name=out_name,
+ ext=data.ext,
+ dataset_id=data.dataset.id,
+ hda_id=data.id,
+ file_name=file_name,
+ extra_files_path=extra_files_path )
json_params[ 'output_data' ].append( data_dict )
if json_filename is None:
json_filename = file_name
@@ -3091,6 +3142,7 @@
out.write( json.dumps( json_params ) )
out.close()
+
class DataSourceTool( OutputParameterJSONTool ):
"""
Alternate implementation of Tool for data_source tools -- those that
@@ -3101,11 +3153,13 @@
def _build_GALAXY_URL_parameter( self ):
return ToolParameter.build( self, ElementTree.XML( '<param name="GALAXY_URL" type="baseurl" value="/tool_runner?tool_id=%s" />' % self.id ) )
+
def parse_inputs( self, root ):
super( DataSourceTool, self ).parse_inputs( root )
if 'GALAXY_URL' not in self.inputs:
self.inputs[ 'GALAXY_URL' ] = self._build_GALAXY_URL_parameter()
self.inputs_by_page[0][ 'GALAXY_URL' ] = self.inputs[ 'GALAXY_URL' ]
+
def exec_before_job( self, app, inp_data, out_data, param_dict=None ):
if param_dict is None:
param_dict = {}
@@ -3115,7 +3169,7 @@
name = param_dict.get( 'name' )
json_params = {}
- json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict ) #it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
+ json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict ) # it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
json_params[ 'output_data' ] = []
json_params[ 'job_config' ] = dict( GALAXY_DATATYPES_CONF_FILE=param_dict.get( 'GALAXY_DATATYPES_CONF_FILE' ), GALAXY_ROOT_DIR=param_dict.get( 'GALAXY_ROOT_DIR' ), TOOL_PROVIDED_JOB_METADATA_FILE=jobs.TOOL_PROVIDED_JOB_METADATA_FILE )
json_filename = None
@@ -3138,12 +3192,12 @@
data.extension = cur_data_type
file_name = str( wrapped_data )
extra_files_path = str( wrapped_data.files_path )
- data_dict = dict( out_data_name = out_name,
- ext = data.ext,
- dataset_id = data.dataset.id,
- hda_id = data.id,
- file_name = file_name,
- extra_files_path = extra_files_path )
+ data_dict = dict( out_data_name=out_name,
+ ext=data.ext,
+ dataset_id=data.dataset.id,
+ hda_id=data.id,
+ file_name=file_name,
+ extra_files_path=extra_files_path )
json_params[ 'output_data' ].append( data_dict )
if json_filename is None:
json_filename = file_name
@@ -3151,15 +3205,18 @@
out.write( json.dumps( json_params ) )
out.close()
+
class AsyncDataSourceTool( DataSourceTool ):
tool_type = 'data_source_async'
def _build_GALAXY_URL_parameter( self ):
return ToolParameter.build( self, ElementTree.XML( '<param name="GALAXY_URL" type="baseurl" value="/async/%s" />' % self.id ) )
+
class DataDestinationTool( Tool ):
tool_type = 'data_destination'
+
class SetMetadataTool( Tool ):
"""
Tool implementation for special tool that sets metadata on an existing
@@ -3167,8 +3224,8 @@
"""
tool_type = 'set_metadata'
requires_setting_metadata = False
-
- def exec_after_process( self, app, inp_data, out_data, param_dict, job = None ):
+
+ def exec_after_process( self, app, inp_data, out_data, param_dict, job=None ):
for name, dataset in inp_data.iteritems():
external_metadata = JobExternalOutputMetadataWrapper( job )
if external_metadata.external_metadata_set_successfully( dataset, app.model.context ):
@@ -3191,23 +3248,28 @@
dataset.set_peek()
self.sa_session.add( dataset )
self.sa_session.flush()
- def job_failed( self, job_wrapper, message, exception = False ):
+
+ def job_failed( self, job_wrapper, message, exception=False ):
job = job_wrapper.sa_session.query( model.Job ).get( job_wrapper.job_id )
if job:
inp_data = {}
for dataset_assoc in job.input_datasets:
inp_data[dataset_assoc.name] = dataset_assoc.dataset
- return self.exec_after_process( job_wrapper.app, inp_data, {}, job_wrapper.get_param_dict(), job = job )
+ return self.exec_after_process( job_wrapper.app, inp_data, {}, job_wrapper.get_param_dict(), job=job )
+
class ExportHistoryTool( Tool ):
tool_type = 'export_history'
+
class ImportHistoryTool( Tool ):
tool_type = 'import_history'
+
class GenomeIndexTool( Tool ):
tool_type = 'index_genome'
+
class DataManagerTool( OutputParameterJSONTool ):
tool_type = 'manage_data'
default_tool_action = DataManagerToolAction
@@ -3218,9 +3280,9 @@
if self.data_manager_id is None:
self.data_manager_id = self.id
- def exec_after_process( self, app, inp_data, out_data, param_dict, job = None, **kwds ):
+ def exec_after_process( self, app, inp_data, out_data, param_dict, job=None, **kwds ):
#run original exec_after_process
- super( DataManagerTool, self ).exec_after_process( app, inp_data, out_data, param_dict, job = job, **kwds )
+ super( DataManagerTool, self ).exec_after_process( app, inp_data, out_data, param_dict, job=job, **kwds )
#process results of tool
if job and job.state == job.states.ERROR:
return
@@ -3267,8 +3329,8 @@
for tool_class in [ Tool, DataDestinationTool, SetMetadataTool, DataSourceTool, AsyncDataSourceTool, DataManagerTool ]:
tool_types[ tool_class.tool_type ] = tool_class
+
# ---- Utility classes to be factored out -----------------------------------
-
class TracksterConfig:
""" Trackster configuration encapsulation. """
@@ -3282,6 +3344,7 @@
actions.append( SetParamAction.parse( action_elt ) )
return TracksterConfig( actions )
+
class SetParamAction:
""" Set parameter action. """
@@ -3294,10 +3357,12 @@
""" Parse action from element. """
return SetParamAction( elt.get( "name" ), elt.get( "output_name" ) )
+
class BadValue( object ):
def __init__( self, value ):
self.value = value
+
class ToolStdioRegex( object ):
"""
This is a container for the <stdio> element's regex subelement.
@@ -3313,6 +3378,7 @@
self.error_level = "fatal"
self.desc = ""
+
class ToolStdioExitCode( object ):
"""
This is a container for the <stdio> element's <exit_code> subelement.
@@ -3532,6 +3598,6 @@
else:
return incoming.get( key, default )
+
class InterruptedUpload( Exception ):
pass
-
diff -r e40e2965ac65f3a52778838e2a6879aadaf18add -r 2588b7b42ecdd9998c878f70420c75543bd5219d lib/galaxy/tools/parameters/__init__.py
--- a/lib/galaxy/tools/parameters/__init__.py
+++ b/lib/galaxy/tools/parameters/__init__.py
@@ -6,6 +6,7 @@
from grouping import *
from galaxy.util.json import *
+
def visit_input_values( inputs, input_values, callback, name_prefix="", label_prefix="" ):
"""
Given a tools parameter definition (`inputs`) and a specific set of
@@ -35,11 +36,12 @@
else:
new_value = callback( input,
input_values[input.name],
- prefixed_name = name_prefix + input.name,
- prefixed_label = label_prefix + input.label )
+ prefixed_name=name_prefix + input.name,
+ prefixed_label=label_prefix + input.label )
if new_value:
input_values[input.name] = new_value
+
def check_param( trans, param, incoming_value, param_values, source='html' ):
"""
Check the value of a single parameter `param`. The value in
@@ -62,12 +64,13 @@
# Then do any further validation on the value
param.validate( filtered_value, trans.history )
elif value is None and isinstance( param, SelectToolParameter ):
- # An empty select list or column list
- param.validate( value, trans.history )
+ # An empty select list or column list
+ param.validate( value, trans.history )
except ValueError, e:
error = str( e )
return value, error
+
def params_to_strings( params, param_values, app ):
"""
Convert a dictionary of parameter values to a dictionary of strings
@@ -83,6 +86,7 @@
rval[ key ] = str( to_json_string( value ) )
return rval
+
def params_from_strings( params, param_values, app, ignore_errors=False ):
"""
Convert a dictionary of strings as produced by `params_to_strings`
@@ -98,6 +102,7 @@
rval[ key ] = value
return rval
+
def params_to_incoming( incoming, inputs, input_values, app, name_prefix="" ):
"""
Given a tool's parameter definition (`inputs`) and a specific set of
@@ -119,4 +124,3 @@
params_to_incoming( incoming, input.cases[current].inputs, values, app, new_name_prefix )
else:
incoming[ name_prefix + input.name ] = input.to_html_value( input_values.get( input.name ), app )
-
diff -r e40e2965ac65f3a52778838e2a6879aadaf18add -r 2588b7b42ecdd9998c878f70420c75543bd5219d lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1441,10 +1441,11 @@
for opt in option['options']:
recurse_option( option_list, opt )
rval = []
- recurse_option( rval, get_base_option( value, self.get_options( other_values = other_values ) ) )
+ recurse_option( rval, get_base_option( value, self.get_options( other_values=other_values ) ) )
return rval or [value]
- if value is None: return "None"
+ if value is None:
+ return "None"
rval = []
if self.hierarchy == "exact":
rval = value
@@ -1483,8 +1484,9 @@
if value == option['value']:
return option['name']
rval = get_option_display( value, option['options'] )
- if rval: return rval
- return None #not found
+ if rval:
+ return rval
+ return None # not found
if isinstance( value, UnvalidatedValue ):
suffix = "\n(value not yet validated)"
@@ -1524,12 +1526,12 @@
options = []
try:
options = self.get_options( trans, {} )
- except KeyError, key_err:
+ except KeyError:
# will sometimes error if self.is_dynamic and self.filtered
# bc we dont/cant fill out other_values above ({})
pass
- d[ 'options' ] = options;
+ d[ 'options' ] = options
return d
@@ -1762,7 +1764,7 @@
if v:
if v.deleted:
raise ValueError( "The previously selected dataset has been previously deleted" )
- if v.dataset.state in [galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED ]:
+ if v.dataset.state in [ galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED ]:
raise ValueError( "The previously selected dataset has entered an unusable state" )
return rval
@@ -1794,7 +1796,8 @@
return app.model.context.query( app.model.HistoryDatasetAssociation ).get( int( value ) )
def to_param_dict_string( self, value, other_values={} ):
- if value is None: return "None"
+ if value is None:
+ return "None"
return value.file_name
def value_to_display_text( self, value, app ):
@@ -1828,12 +1831,13 @@
if self.tool is None or self.tool.has_multiple_pages or not hasattr( trans, 'workflow_building_mode' ) or trans.workflow_building_mode:
return False
if other_values is None:
- return True # we don't know other values, so we can't check, assume ok
+ return True # we don't know other values, so we can't check, assume ok
converter_safe = [True]
- def visitor( prefix, input, value, parent = None ):
+
+ def visitor( prefix, input, value, parent=None ):
if isinstance( input, SelectToolParameter ) and self.name in input.get_dependencies():
if input.is_dynamic and ( input.dynamic_options or ( not input.dynamic_options and not input.options ) or not input.options.converter_safe ):
- converter_safe[0] = False #This option does not allow for conversion, i.e. uses contents of dataset file to generate options
+ converter_safe[0] = False # This option does not allow for conversion, i.e. uses contents of dataset file to generate options
self.tool.visit_inputs( other_values, visitor )
return False not in converter_safe
@@ -1960,21 +1964,24 @@
# self.html = form_builder.HiddenField( self.name, trans.history.id ).get_html()
# return self.html
-parameter_types = dict( text = TextToolParameter,
- integer = IntegerToolParameter,
- float = FloatToolParameter,
- boolean = BooleanToolParameter,
- genomebuild = GenomeBuildParameter,
- select = SelectToolParameter,
- data_column = ColumnListParameter,
- hidden = HiddenToolParameter,
- hidden_data = HiddenDataToolParameter,
- baseurl = BaseURLToolParameter,
- file = FileToolParameter,
- ftpfile = FTPFileToolParameter,
- data = DataToolParameter,
- library_data = LibraryDatasetToolParameter,
- drill_down = DrillDownSelectToolParameter )
+parameter_types = dict(
+ text=TextToolParameter,
+ integer=IntegerToolParameter,
+ float=FloatToolParameter,
+ boolean=BooleanToolParameter,
+ genomebuild=GenomeBuildParameter,
+ select=SelectToolParameter,
+ data_column=ColumnListParameter,
+ hidden=HiddenToolParameter,
+ hidden_data=HiddenDataToolParameter,
+ baseurl=BaseURLToolParameter,
+ file=FileToolParameter,
+ ftpfile=FTPFileToolParameter,
+ data=DataToolParameter,
+ library_data=LibraryDatasetToolParameter,
+ drill_down=DrillDownSelectToolParameter
+)
+
class UnvalidatedValue( object ):
"""
@@ -1993,4 +2000,3 @@
runtime.
"""
pass
-
diff -r e40e2965ac65f3a52778838e2a6879aadaf18add -r 2588b7b42ecdd9998c878f70420c75543bd5219d lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -1,9 +1,11 @@
import urllib
from galaxy import web, util
-from galaxy.web.base.controller import BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesVisualizationMixin, UsesHistoryMixin
+from galaxy.web.base.controller import BaseAPIController
+from galaxy.web.base.controller import UsesVisualizationMixin
+from galaxy.web.base.controller import UsesHistoryMixin
from galaxy.visualization.genomes import GenomeRegion
-from galaxy.util.json import to_json_string, from_json_string
+from galaxy.util.json import to_json_string
from galaxy.visualization.data_providers.genome import *
import logging
@@ -47,7 +49,7 @@
GET /api/tools/{tool_id}
Returns tool information, including parameters and inputs.
"""
- io_details = util.string_as_bool( kwd.get( 'io_details', False ) )
+ io_details = util.string_as_bool( kwd.get( 'io_details', False ) )
link_details = util.string_as_bool( kwd.get( 'link_details', False ) )
try:
id = urllib.unquote_plus( id )
@@ -96,7 +98,7 @@
for k, v in payload.iteritems():
if k.startswith("files_") or k.startswith("__files_"):
inputs[k] = v
-
+
#for inputs that are coming from the Library, copy them into the history
input_patch = {}
for k, v in inputs.iteritems():
@@ -112,7 +114,7 @@
inputs['runtool_btn'] = 'Execute'
# TODO: encode data ids and decode ids.
# TODO: handle dbkeys
- params = util.Params( inputs, sanitize = False )
+ params = util.Params( inputs, sanitize=False )
# process_state will be 'populate' or 'update'. When no tool
# state is specified in input - it will be 'populate', and
# tool will fully expand repeat and conditionals when building
@@ -140,8 +142,8 @@
for output_name, output in output_datasets:
output_dict = output.to_dict()
#add the output name back into the output data structure
- #so it's possible to figure out which newly created elements
- #correspond with which tool file outputs
+ #so it's possible to figure out which newly created elements
+ #correspond with which tool file outputs
output_dict['output_name'] = output_name
outputs.append( trans.security.encode_dict_ids( output_dict ) )
return rval
@@ -185,14 +187,13 @@
# HACK: add run button so that tool.handle_input will run tool.
kwargs['runtool_btn'] = 'Execute'
- params = util.Params( kwargs, sanitize = False )
+ params = util.Params( kwargs, sanitize=False )
template, vars = tool.handle_input( trans, params.__dict__, history=target_history )
# TODO: check for errors and ensure that output dataset is available.
output_datasets = vars[ 'out_data' ].values()
return self.add_track_async( trans, output_datasets[0].id )
-
def _rerun_tool( self, trans, payload, **kwargs ):
"""
Rerun a tool to produce a new output dataset that corresponds to a
@@ -363,7 +364,7 @@
continue
input_dataset = jida.dataset
- if input_dataset is None: #optional dataset and dataset wasn't selected
+ if input_dataset is None: # optional dataset and dataset wasn't selected
tool_params[ jida.name ] = None
elif run_on_regions and 'data' in input_dataset.datatype.data_sources:
# Dataset is indexed and hence a subset can be extracted and used
@@ -407,7 +408,7 @@
# Set metadata.
# TODO: set meta internally if dataset is small enough?
trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool,
- trans, incoming = { 'input1':new_dataset },
+ trans, incoming={ 'input1': new_dataset },
overwrite=False, job_params={ "source" : "trackster" } )
# Add HDA subset association.
subset_association = trans.app.model.HistoryDatasetAssociationSubset( hda=input_dataset, subset=new_dataset, location=regions_str )
@@ -450,5 +451,5 @@
dataset_dict = output_dataset.to_dict()
dataset_dict[ 'id' ] = trans.security.encode_id( dataset_dict[ 'id' ] )
- dataset_dict[ 'track_config' ] = self.get_new_track_config( trans, output_dataset );
+ dataset_dict[ 'track_config' ] = self.get_new_track_config( trans, output_dataset )
return dataset_dict
https://bitbucket.org/galaxy/galaxy-central/commits/14ab802d0209/
Changeset: 14ab802d0209
User: jmchilton
Date: 2014-01-13 15:58:22
Summary: Initial work on tool execution unit tests.
Going to be doing some more work on tool state stuff so it will be good to have a way to test that. This also brings in test/unit/tools_support.py from Pull Request #287 (would be overkill for just these tests, but it is useful for future tests coming down the pipe.)
Affected #: 3 files
diff -r 2588b7b42ecdd9998c878f70420c75543bd5219d -r 14ab802d020997e1acfdce191d912b702624b96f test/unit/tools/test_execution.py
--- /dev/null
+++ b/test/unit/tools/test_execution.py
@@ -0,0 +1,104 @@
+""" Test Tool execution and state handling logic.
+"""
+import os
+
+from unittest import TestCase
+
+import galaxy.model
+from galaxy.tools import Tool
+from galaxy.tools import DefaultToolState
+from galaxy.util import parse_xml
+from galaxy.util import string_to_object
+from galaxy.util import object_to_string
+from galaxy.util.odict import odict
+from tools_support import UsesApp
+
+EXAMPLE_TOOL_CONTENTS = '''<tool id="test_tool" name="Test Tool">
+ <command>echo "$text" < $out1</command>
+ <inputs>
+ <param type="text" name="param1" value="" />
+ </inputs>
+ <outputs>
+ <output name="out1" format="data" />
+ </outputs>
+</tool>'''
+
+
+class ToolExecutionTestCase( TestCase, UsesApp ):
+
+ def setUp(self):
+ self.setup_app()
+ self.app.job_config["get_job_tool_configurations"] = lambda ids: None
+ self.app.config.drmaa_external_runjob_script = ""
+ self.app.config.tool_secret = "testsecret"
+ self.trans = MockTrans( self.app )
+ self.tool_action = MockAction( self.trans )
+ self.tool_file = os.path.join( self.test_directory, "tool.xml" )
+
+ def tearDown(self):
+ self.tear_down_app()
+
+ def test_state_new( self ):
+ self.__write_tool( EXAMPLE_TOOL_CONTENTS )
+ self.__setup_tool( )
+ template, template_vars = self.tool.handle_input(
+ trans=self.trans,
+ incoming=dict( param1="moo" )
+ # no runtool_btn, just rerenders the form mako with tool
+ # state populated.
+ )
+ assert template == "tool_form.mako"
+ assert not template_vars[ "errors" ]
+ state = template_vars[ "tool_state" ]
+ assert state.inputs[ "param1" ] == "moo"
+
+ def test_execute( self ):
+ self.__write_tool( EXAMPLE_TOOL_CONTENTS )
+ self.__setup_tool( )
+ template, template_vars = self.tool.handle_input(
+ trans=self.trans,
+ incoming=dict( param1="moo", runtool_btn="dummy" )
+ )
+ assert template == "tool_executed.mako"
+
+ def __setup_tool( self ):
+ tree = parse_xml( self.tool_file )
+ self.tool = Tool( self.tool_file, tree.getroot(), self.app )
+ self.tool.tool_action = self.tool_action
+
+ def __write_tool( self, contents ):
+ open( self.tool_file, "w" ).write( contents )
+
+ def __string_to_state( self, state_string ):
+ encoded_state = string_to_object( state_string )
+ state = DefaultToolState()
+ state.decode( encoded_state, self.tool, self.app )
+ return state
+
+ def __inputs_to_state( self, inputs ):
+ tool_state = DefaultToolState()
+ tool_state.inputs = inputs
+ return tool_state
+
+ def __inputs_to_state_string( self, inputs ):
+ tool_state = self.__inputs_to_state( inputs )
+ return object_to_string( tool_state.encode( self.tool, self.app ) )
+
+
+class MockAction( object ):
+
+ def __init__( self, expected_trans ):
+ self.expected_trans = expected_trans
+ self.execution_call_args = []
+
+ def execute( self, tool, trans, **kwds ):
+ assert self.expected_trans == trans
+ self.execution_call_args.append( kwds )
+ return None, odict(dict(out1="1"))
+
+
+class MockTrans( object ):
+
+ def __init__( self, app ):
+ self.app = app
+ self.history = galaxy.model.History()
diff -r 2588b7b42ecdd9998c878f70420c75543bd5219d -r 14ab802d020997e1acfdce191d912b702624b96f test/unit/tools_support.py
--- /dev/null
+++ b/test/unit/tools_support.py
@@ -0,0 +1,51 @@
+""" Module contains test fixtures meant to aide in the testing of jobs and
+tool evaluation. Such extensive "fixtures" are something of an anti-pattern
+so use of this should be limitted to tests of very 'extensive' classes.
+"""
+
+import os.path
+import tempfile
+import shutil
+
+from galaxy.util.bunch import Bunch
+from galaxy.model import mapping
+
+
+class UsesApp( object ):
+
+ def setup_app( self ):
+ # The following line is needed in order to create
+ # HistoryDatasetAssociations - ideally the model classes would be
+ # usable without the ORM infrastructure in place.
+ mapping.init( "/tmp", "sqlite:///:memory:", create_tables=True )
+ self.test_directory = tempfile.mkdtemp()
+ self.app = MockApp(self.test_directory)
+
+ def tear_down_app( self ):
+ shutil.rmtree( self.test_directory )
+
+
+class MockApp( object ):
+
+ def __init__( self, test_directory ):
+
+ self.datatypes_registry = Bunch(
+ integrated_datatypes_configs='/galaxy/integrated_datatypes_configs.xml',
+ get_datatype_by_extension=lambda ext: Bunch(),
+ )
+
+ self.config = Bunch(
+ outputs_to_working_directory=False,
+ new_file_path=os.path.join(test_directory, "new_files"),
+ tool_data_path=os.path.join(test_directory, "tools"),
+ root=os.path.join(test_directory, "galaxy"),
+            admin_users="mary@example.com",
+ )
+
+ # Setup some attributes for downstream extension by specific tests.
+ self.job_config = Bunch()
+ self.model = Bunch()
+ self.toolbox = None
+ self.object_store = None
+
+__all__ = [ UsesApp ]
https://bitbucket.org/galaxy/galaxy-central/commits/c250b1cfc60f/
Changeset: c250b1cfc60f
User: jmchilton
Date: 2014-01-13 15:58:22
Summary: Initial work on tools API functional tests.
Just tests some simple indexing and tool execution. Tons more one can and should test.
Affected #: 1 file
diff -r 14ab802d020997e1acfdce191d912b702624b96f -r c250b1cfc60fe2b56ddcf18e2cf2543631d4c26f test/functional/api/test_tools.py
--- /dev/null
+++ b/test/functional/api/test_tools.py
@@ -0,0 +1,102 @@
+# Test tools API.
+from itertools import chain
+from json import dumps
+import time
+
+from base import api
+from operator import itemgetter
+
+
+class ToolsTestCase( api.ApiTestCase ):
+
+ def test_index( self ):
+ index = self._get( "tools" )
+ tools_index = index.json()
+ # In panels by default, so flatten out sections...
+ tools = list( chain( *map( itemgetter( "elems" ), tools_index ) ) )
+ tool_ids = map( itemgetter( "id" ), tools )
+ assert "upload1" in tool_ids
+ assert "cat1" in tool_ids
+
+ def test_no_panel_index( self ):
+ index = self._get( "tools", data=dict(in_panel="false") )
+ tools_index = index.json()
+ # No need to flatten out sections, with in_panel=False, only tools are
+ # returned.
+ tool_ids = map( itemgetter( "id" ), tools_index )
+ assert "upload1" in tool_ids
+ assert "cat1" in tool_ids
+
+ def test_upload1_paste( self ):
+ history_id = self._new_history()
+ payload = self._upload_payload( history_id, 'Hello World' )
+ create_response = self._post( "tools", data=payload )
+ self._assert_has_keys( create_response.json(), 'outputs' )
+
+ def test_run_cat1( self ):
+ history_id = self._new_history()
+ new_dataset = self._new_dataset( history_id )
+ dataset_id = new_dataset[ 'id' ]
+ payload = self._run_tool_payload(
+ tool_id='cat1',
+ inputs=dict(
+ input1=dict(
+ src='hda',
+ id=dataset_id
+ ),
+ ),
+ history_id=history_id,
+ )
+ create_response = self._post( "tools", data=payload )
+ self._assert_status_code_is( create_response, 200 )
+ self._assert_has_keys( create_response.json(), 'outputs' )
+ self._wait_for_history( history_id, assert_ok=True )
+
+ def _new_dataset( self, history_id, content='TestData123', **kwds ):
+ payload = self._upload_payload( history_id, content, **kwds )
+ run_response = self._post( "tools", data=payload )
+ self._assert_status_code_is( run_response, 200 )
+ return run_response.json()["outputs"][0]
+
+ def _wait_for_history( self, history_id, assert_ok=False ):
+ while True:
+ history_details_response = self._get( "histories/%s" % history_id )
+ self._assert_status_code_is( history_details_response, 200 )
+ history_state = history_details_response.json()[ "state" ]
+ if history_state not in [ "running", "queued" ]:
+ break
+ time.sleep( .1 )
+ if assert_ok:
+ self.assertEquals( history_state, 'ok' )
+
+ def _new_history( self, **kwds ):
+ name = kwds.get( "name", "API Test History" )
+ create_history_response = self._post( "histories", data=dict( name=name ) )
+ self._assert_status_code_is( create_history_response, 200 )
+ history_id = create_history_response.json()[ "id" ]
+ return history_id
+
+ def _upload_payload( self, history_id, content, **kwds ):
+ name = kwds.get( "name", "Test Dataset" )
+ dbkey = kwds.get( "dbkey", "?" )
+ file_type = kwds.get( "file_type", 'txt' )
+ upload_params = {
+ 'files_0|NAME': name,
+ 'files_0|url_paste': content,
+ 'dbkey': dbkey,
+ 'file_type': file_type,
+ }
+ return self._run_tool_payload(
+ tool_id='upload1',
+ inputs=upload_params,
+ history_id=history_id,
+ upload_type='upload_dataset'
+ )
+
+ def _run_tool_payload( self, tool_id, inputs, history_id, **kwds ):
+ return dict(
+ tool_id=tool_id,
+ inputs=dumps(inputs),
+ history_id=history_id,
+ **kwds
+ )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
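The functional tests in the last changeset document the tools API payload shape: POST /api/tools with a tool_id, a JSON-encoded inputs dict, and a history_id. A hedged sketch of driving that endpoint from outside the test framework, assuming a requests client and an API key parameter (both assumptions; the tests use their own base.api client):

import json
import requests  # assumed; not part of the test framework above

def run_cat1(base_url, api_key, history_id, dataset_id):
    # payload shape mirrors _run_tool_payload/_upload_payload
    # in test/functional/api/test_tools.py
    payload = {
        "tool_id": "cat1",
        "inputs": json.dumps({"input1": {"src": "hda", "id": dataset_id}}),
        "history_id": history_id,
        "key": api_key,  # API-key handling here is an assumption
    }
    response = requests.post("%s/api/tools" % base_url, data=payload)
    response.raise_for_status()
    return response.json()["outputs"]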
commit/galaxy-central: guerler: Update sample data for visualization plugin
by commits-noreply@bitbucket.org 13 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e40e2965ac65/
Changeset: e40e2965ac65
User: guerler
Date: 2014-01-13 12:17:24
Summary: Update sample data for visualization plugin
Affected #: 1 file
diff -r bc7445cda44439e787dcc0ead93f0a512fa6e0c3 -r e40e2965ac65f3a52778838e2a6879aadaf18add config/plugins/visualizations/charts/sample.txt
--- a/config/plugins/visualizations/charts/sample.txt
+++ b/config/plugins/visualizations/charts/sample.txt
@@ -1,1 +1,9 @@
-43 55 63 99 28 85 20 31 47 14
1 63 94 83 94 74 7 73 51 7
8 34 2 94 87 21 72 51 3 84
5 50 40 30 59 43 19 77 30 80
22 55 69 43 77 72 12 54 53 56
36 34 81 28 59 64 54 29 57 69
57 90 93 32 45 98 40 16 52 83
6 17 39 86 10 11 38 69 19 50
67 51 47 22 58 27 65 49 92 96
\ No newline at end of file
+43 55 63 99 28 85 20 31 47 14
+1 63 94 83 94 74 7 73 51 7
+8 34 2 94 87 21 72 51 3 84
+5 50 40 30 59 43 19 77 30 80
+22 55 69 43 77 72 12 54 53 56
+36 34 81 28 59 64 54 29 57 69
+57 90 93 32 45 98 40 16 52 83
+6 17 39 86 10 11 38 69 19 50
+67 51 47 22 58 27 65 49 92 96
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
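The sample file rewritten above is plain whitespace-separated integers, one row per line, which is the layout tabular consumers expect. A tiny illustrative parser (not Galaxy code):

def read_table(path):
    # one row of whitespace-separated integers per line
    with open(path) as handle:
        return [[int(field) for field in line.split()]
                for line in handle if line.strip()]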
commit/galaxy-central: guerler: Sample data for visualization plugin
by commits-noreply@bitbucket.org 13 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/bc7445cda444/
Changeset: bc7445cda444
User: guerler
Date: 2014-01-13 12:14:39
Summary: Sample data for visualization plugin
Affected #: 1 file
diff -r 6f56754324a6cd91d9e4afccd1d268ca22861093 -r bc7445cda44439e787dcc0ead93f0a512fa6e0c3 config/plugins/visualizations/charts/sample.txt
--- /dev/null
+++ b/config/plugins/visualizations/charts/sample.txt
@@ -0,0 +1,1 @@
+43 55 63 99 28 85 20 31 47 14
1 63 94 83 94 74 7 73 51 7
8 34 2 94 87 21 72 51 3 84
5 50 40 30 59 43 19 77 30 80
22 55 69 43 77 72 12 54 53 56
36 34 81 28 59 64 54 29 57 69
57 90 93 32 45 98 40 16 52 83
6 17 39 86 10 11 38 69 19 50
67 51 47 22 58 27 65 49 92 96
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: guerler: Visualization plugin: Wrapper for nvd3/d3
by commits-noreply@bitbucket.org 13 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/6f56754324a6/
Changeset: 6f56754324a6
User: guerler
Date: 2014-01-13 11:53:01
Summary: Visualization plugin: Wrapper for nvd3/d3
Affected #: 28 files
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/config/charts.xml
--- /dev/null
+++ b/config/plugins/visualizations/charts/config/charts.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE visualization SYSTEM "../../visualization.dtd">
+<visualization name="charts">
+ <data_sources>
+ <data_source>
+ <model_class>HistoryDatasetAssociation</model_class>
+ <test type="isinstance" test_attr="datatype" result_type="datatype">tabular.Tabular</test>
+ <to_param param_attr="id">dataset_id</to_param>
+ </data_source>
+ </data_sources>
+ <params>
+ <param type="dataset" var_name_in_template="hda" required="true">dataset_id</param>
+ </params>
+ <template>charts.mako</template>
+</visualization>
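(In brief, and only as far as the XML itself indicates: this registers the plugin for history datasets whose datatype is tabular.Tabular, passes the selected dataset's id to the template as the dataset_id parameter, and renders the plugin through charts.mako.)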
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/app.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/app.js
@@ -0,0 +1,67 @@
+// dependencies
+define(['library/portlet', 'library/ui', 'library/utils',
+ 'main', 'viewport', 'create', 'config',
+ 'models/datasets', 'models/chart', 'models/charts', 'models/types'],
+ function( Portlet, Ui, Utils,
+ Main, Viewport, Create, Config,
+ Datasets, Chart, Charts, Types
+ ) {
+
+// widget
+return Backbone.View.extend(
+{
+ // initialize
+ initialize: function(options)
+ {
+ // link options
+ this.options = options;
+
+ // link galaxy
+ this.modal = parent.Galaxy.modal;
+
+ // create chart objects
+ this.types = new Types();
+ this.chart = new Chart();
+ this.charts = new Charts(this);
+
+ // create dataset handler
+ this.datasets = new Datasets(this);
+
+ // create views
+ this.main = new Main(this);
+ this.viewport = new Viewport(this);
+ this.config = new Config(this);
+ this.create = new Create(this);
+
+ // portlet
+ this.portlet = new Portlet({icon : 'fa-bar-chart-o', label : 'Charts'});
+ this.portlet.append(this.main.$el);
+ this.portlet.append(this.config.$el);
+ this.portlet.append(this.create.$el);
+
+ // append main
+ this.main.append(this.viewport.$el);
+
+ // create
+ this.config.$el.hide();
+ this.create.$el.hide();
+
+ // set elements
+ this.setElement(this.portlet.$el);
+ },
+
+ // execute command
+ execute: function(options) {
+ },
+
+ // unload
+ onunload: function() {
+ },
+
+ // log
+ log: function(location, message) {
+ console.log(location + ' ' + message);
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/charts/bardiagram.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/charts/bardiagram.js
@@ -0,0 +1,34 @@
+// dependencies
+define(['library/utils'], function(Utils) {
+
+// widget
+return Backbone.View.extend(
+{
+ // initialize
+ initialize: function(app, options) {
+ this.app = app;
+ this.options = options;
+ this.chart = options.chart;
+ },
+
+ // render
+ refresh : function(data)
+ {
+ // add graph to screen
+ var self = this;
+ nv.addGraph(function() {
+ self.d3_chart = nv.models.multiBarChart();
+
+ self.d3_chart.xAxis.tickFormat(d3.format('.2f'))
+ self.d3_chart.yAxis.tickFormat(d3.format('.1f'))
+
+ d3.select(self.options.svg_id)
+ .datum(data)
+ .call(self.d3_chart);
+
+ nv.utils.windowResize(self.d3_chart.update);
+ });
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/charts/horizontal.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/charts/horizontal.js
@@ -0,0 +1,33 @@
+// dependencies
+define(['library/utils'], function(Utils) {
+
+// widget
+return Backbone.View.extend(
+{
+ // initialize
+ initialize: function(app, options) {
+ this.app = app;
+ this.options = options;
+ this.chart = options.chart;
+ },
+
+ // render
+ refresh : function(data)
+ {
+ // add graph to screen
+ var self = this;
+ nv.addGraph(function() {
+ self.d3_chart = nv.models.multiBarHorizontalChart();
+
+ self.d3_chart.xAxis.tickFormat(function() { return ''; });
+
+ d3.select(self.options.svg_id)
+ .datum(data)
+ .call(self.d3_chart);
+
+ nv.utils.windowResize(self.d3_chart.update);
+ });
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/charts/line.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/charts/line.js
@@ -0,0 +1,39 @@
+// dependencies
+define(['library/utils'], function(Utils) {
+
+// widget
+return Backbone.View.extend(
+{
+ // initialize
+ initialize: function(app, options) {
+ this.app = app;
+ this.options = options;
+ this.chart = options.chart;
+ },
+
+ // render
+ refresh : function(data)
+ {
+ // add graph to screen
+ var self = this;
+ nv.addGraph(function() {
+ self.chart_3d = nv.models.lineChart();
+
+ self.chart_3d.xAxis
+ .tickFormat(d3.format(',f'));
+
+ self.chart_3d.yAxis
+ .tickFormat(d3.format(',.2f'));
+
+ d3.select(self.options.svg_id)
+ .datum(data)
+ .call(self.chart_3d);
+
+ nv.utils.windowResize(self.chart_3d.update);
+
+ return self.chart_3d;
+ });
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/charts/linewithfocus.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/charts/linewithfocus.js
@@ -0,0 +1,37 @@
+// dependencies
+define(['library/utils'], function(Utils) {
+
+// widget
+return Backbone.View.extend(
+{
+ // initialize
+ initialize: function(app, options) {
+ this.app = app;
+ this.options = options;
+ this.chart = options.chart;
+ },
+
+ // render
+ refresh : function(data)
+ {
+ // add graph to screen
+ var self = this;
+ nv.addGraph(function() {
+ self.chart_3d = nv.models.lineWithFocusChart();
+
+ self.chart_3d.xAxis
+ .tickFormat(d3.format(',f'));
+
+ self.chart_3d.yAxis
+ .tickFormat(d3.format(',.2f'));
+
+ d3.select(self.options.svg_id)
+ .datum(data)
+ .call(self.chart_3d);
+
+ nv.utils.windowResize(self.chart_3d.update);
+ });
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/charts/piechart.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/charts/piechart.js
@@ -0,0 +1,59 @@
+// dependencies
+define(['library/utils'], function(Utils) {
+
+// widget
+return Backbone.View.extend(
+{
+ // initialize
+ initialize: function(app, options) {
+ this.app = app;
+ this.options = options;
+ this.chart = options.chart;
+ },
+
+ // render
+ refresh : function()
+ {
+ // add graph to screen
+ var self = this;
+ nv.addGraph(function() {
+ self.chart_3d = nv.models.pieChart()
+ .x(function(d) { return d.key })
+ .y(function(d) { return d.y })
+ .color(d3.scale.category10().range())
+ .height(250)
+ .width(250);
+
+ d3.select(self.options.svg_id)
+ .datum(self._data())
+ .transition().duration(1200)
+ .attr('height', 250)
+ .attr('width', 250)
+ .call(self.chart_3d);
+
+ nv.utils.windowResize(self.chart_3d.update);
+ });
+ },
+
+ _data : function() {
+ return [
+ {
+ key: "Cumulative Return",
+ values: [
+ {
+ key : "CDS / Options" ,
+ y : 29.765957771107
+ },
+ {
+ key : "Options" ,
+ y : 19.765957771107
+ },
+ {
+ key : "Other" ,
+ y : 12.765957771107
+ }]
+ }];
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/charts/scatterplot.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/charts/scatterplot.js
@@ -0,0 +1,37 @@
+// dependencies
+define(['library/utils'], function(Utils) {
+
+// widget
+return Backbone.View.extend(
+{
+ // initialize
+ initialize: function(app, options) {
+ this.app = app;
+ this.options = options;
+ this.chart = options.chart;
+ },
+
+ // render
+ refresh : function(data)
+ {
+ // add graph to screen
+ var self = this;
+ nv.addGraph(function() {
+ self.d3_chart = nv.models.scatterChart()
+ .showDistX(true)
+ .showDistY(true)
+ .color(d3.scale.category10().range());
+
+ self.d3_chart.xAxis.tickFormat(d3.format('.02f'))
+ self.d3_chart.yAxis.tickFormat(d3.format('.02f'))
+
+ d3.select(self.options.svg_id)
+ .datum(data)
+ .call(self.d3_chart);
+
+ nv.utils.windowResize(self.d3_chart.update);
+ });
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/charts/stackedarea.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/charts/stackedarea.js
@@ -0,0 +1,59 @@
+// dependencies
+define(['library/utils'], function(Utils) {
+
+// widget
+return Backbone.View.extend(
+{
+ // initialize
+ initialize: function(app, options) {
+ this.app = app;
+ this.options = options;
+ this.chart = options.chart;
+ },
+
+ // render
+ refresh : function(data)
+ {
+ // add graph to screen
+ var self = this;
+ nv.addGraph(function() {
+ // check data
+ var valid = true;
+ var length = 0;
+ for (var key in data) {
+ // evaluate length
+ if (length == 0) {
+ length = data[key].values.length;
+ } else {
+ if (length != data[key].values.length) {
+ valid = false;
+ break;
+ }
+ }
+ }
+ if (!valid) {
+ return;
+ }
+
+ // make plot
+ self.d3_chart = nv.models.stackedAreaChart()
+ .x(function(d) {
+ return d.x
+ })
+ .y(function(d) {
+ return d.y
+ })
+ .clipEdge(true);
+
+ self.d3_chart.xAxis.tickFormat(function() { return ''; });
+
+ d3.select(self.options.svg_id)
+ .datum(data)
+ .call(self.d3_chart);
+
+ nv.utils.windowResize(self.d3_chart.update);
+ });
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/config.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/config.js
@@ -0,0 +1,219 @@
+// dependencies
+define(['library/portlet', 'library/table', 'library/ui', 'library/utils', 'models/group'],
+ function(Portlet, Table, Ui, Utils, Group) {
+
+// chart config
+return Backbone.View.extend(
+{
+ // model
+ group: new Group(),
+
+ // columns
+ columns: [],
+
+ // initialize
+ initialize: function(app, options) {
+ // link app
+ this.app = app;
+
+ // get current chart object
+ this.chart = this.app.chart;
+
+ // ui elements
+ this.message = new Ui.Message();
+ this.label = new Ui.Input({placeholder: 'Group label'});
+ this.table = new Table({content: 'No data column.'});
+
+ // add table to portlet
+ var self = this;
+ this.portlet = new Portlet({
+ icon : 'fa-edit',
+ label : 'Define group properties:',
+ operations : {
+ 'save' : new Ui.ButtonIcon({
+ icon : 'fa-save',
+ tooltip : 'Save',
+ onclick : function() {
+ // save/add group
+ self._saveGroup();
+ }
+ }),
+ 'back' : new Ui.ButtonIcon({
+ icon : 'fa-caret-left',
+ tooltip : 'Return',
+ onclick : function() {
+ self.$el.hide();
+ self.app.create.$el.show();
+ }
+ })
+ }
+ });
+ this.portlet.append(this.message.$el);
+ this.portlet.append(this.label.$el);
+ this.portlet.append(this.table.$el);
+
+ // add element
+ this.setElement(this.portlet.$el);
+
+ // change
+ var self = this;
+ this.chart.on('change:dataset_id', function() {
+ self._refreshDataset();
+ });
+ this.chart.on('change:type', function() {
+ self._refreshType();
+ });
+ this.group.on('change:label', function() {
+ self._refreshLabel();
+ });
+ this.group.on('change', function() {
+ self._refreshGroup();
+ });
+ },
+
+ // show
+ show: function() {
+ this.$el.show();
+ },
+
+ // reset
+ reset: function() {
+ this.group.reset();
+ this.group.set('id', Utils.uuid());
+ this.group.set('label', 'Group label');
+ },
+
+ // update dataset
+ _refreshDataset: function() {
+ // identify datasets
+ var dataset_id = this.chart.get('dataset_id');
+
+ // check if dataset is available
+ if (!dataset_id) {
+ return;
+ }
+
+ // get dataset
+ var dataset = this.app.datasets.get({id : dataset_id});
+
+ // check
+ if (!dataset) {
+ this.app.log('Config::render()', 'Failed to retrieve dataset.');
+ return;
+ }
+
+ // configure columns
+ this.columns = [];
+ var meta = dataset.metadata_column_types;
+ for (var key in meta){
+ this.columns.push({
+ 'label' : key + ' [' + meta[key] + ']',
+ 'value' : key
+ });
+ }
+
+ // update select fields
+ for (var key in this.list) {
+ this.list[key].update(this.columns);
+ }
+ },
+
+ // update
+ _refreshType: function() {
+ // configure chart type
+ var self = this;
+ var chart_type = this.chart.get('type');
+ if (chart_type) {
+ var chart_settings = this.app.types.get(chart_type);
+
+ // table
+ this.table.removeAll();
+ this.list = {};
+ for (var id in chart_settings.data) {
+ // create select field
+ var data_def = chart_settings.data[id];
+ var select = new Ui.Select({
+ id : 'select_' + id,
+ gid : id,
+ data : this.columns,
+ onchange : function(value) {
+ self.group.set(this.gid, value);
+ }
+ });
+
+ // add row to table
+ this.table.add(data_def.title);
+ this.table.add(select.$el);
+ this.table.append(id);
+
+ // add select field to list
+ this.list[id] = select;
+ }
+ }
+ },
+
+ // update
+ _refreshGroup: function() {
+ // update select fields
+ for (var id in this.list) {
+ var col = this.group.get(id);
+ if (col === undefined) {
+ col = 0;
+ }
+ this.list[id].value(col);
+ }
+ },
+
+ // update label
+ _refreshLabel: function() {
+ var label_text = this.group.get('label');
+ if (label_text === undefined) {
+ label_text = '';
+ }
+ this.label.value(label_text);
+ },
+
+ // create group
+ _saveGroup: function() {
+ // get current chart
+ var chart = this.chart;
+
+ // update group object
+ var group = this.group;
+ for (var key in this.list) {
+ group.set(key, this.list[key].value());
+ }
+
+ // add label
+ group.set({
+ dataset_id : this.chart.get('dataset_id'),
+ label : this.label.value(),
+ date : Utils.time()
+ });
+
+ // validate
+ if (!group.get('label')) {
+ this.message.update({message : 'Please enter a label for your group.'});
+ return;
+ }
+
+ // get groups of current chart
+ var groups = this.chart.groups;
+
+ // create/update group
+ var group_update = groups.get(group.id);
+ if (group_update) {
+ group_update.set(group.attributes);
+ } else {
+ groups.add(group.clone());
+ }
+
+ // hide
+ this.$el.hide();
+
+ // update main
+ this.app.create.$el.show();
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/create.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/create.js
@@ -0,0 +1,189 @@
+// dependencies
+define(['library/portlet', 'library/table', 'library/ui', 'library/utils', 'models/chart', 'groups'],
+ function(Portlet, Table, Ui, Utils, Chart, Groups) {
+
+// widget
+return Backbone.View.extend(
+{
+ // defaults options
+ optionsDefault: {
+ header : true,
+ content : 'No content available.'
+ },
+
+ // current chart
+ chart : null,
+
+ // initialize
+ initialize: function(app, options)
+ {
+ // link application
+ this.app = app;
+
+ // get current chart object
+ this.chart = this.app.chart;
+
+ // configure options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // main elements
+ this.message = new Ui.Message();
+ this.title = new Ui.Input({placeholder: 'Chart title'});
+ this.dataset = new Ui.Input({value : app.options.dataset.id, disabled: true});
+
+ // configure dataset
+ this.groups = new Groups(this.app);
+
+ // table
+ var self = this;
+ this.table = new Table({
+ header : false,
+ onconfirm : function(type) {
+ if (self.chart.groups.length > 0) {
+ // show modal
+ self.app.modal.show({
+ title : 'Switching the chart type?',
+ body : 'You configured data sources. If you switch chart types your configurations will be removed.',
+ buttons : {
+ 'Cancel' : function() {
+ // hide modal
+ self.app.modal.hide();
+ },
+ 'Continue' : function() {
+ // hide modal
+ self.app.modal.hide();
+
+ // confirm
+ self.table.confirm(type);
+ }
+ }
+ });
+ } else {
+ // confirm
+ self.table.confirm(type);
+ }
+ },
+ onchange : function(type) {
+ // update chart type
+ self.chart.set({type: type});
+
+ // reset groups
+ self.chart.groups.reset();
+ },
+ content: 'No chart types available'
+ });
+
+ // add types
+ var types_n = 0;
+ var types = app.types.attributes;
+ for (var id in types){
+ var chart_type = types[id];
+ this.table.add (++types_n + '.');
+ this.table.add (chart_type.title);
+ this.table.append(id);
+ }
+
+ // add table to portlet
+ var self = this;
+ this.portlet = new Portlet({
+ icon : 'fa-edit',
+ label : 'Create a new chart:',
+ operations : {
+ 'save' : new Ui.ButtonIcon({
+ icon : 'fa-save',
+ tooltip : 'Save',
+ onclick : function() {
+ self._saveChart();
+ }
+ }),
+ 'back' : new Ui.ButtonIcon({
+ icon : 'fa-caret-left',
+ tooltip : 'Return',
+ onclick : function() {
+ self.$el.hide();
+ self.app.main.$el.show();
+ }
+ })
+ }
+ });
+ this.portlet.append(this.message.$el);
+ this.portlet.append((new Ui.Label({ label : 'Provide a chart title:'})).$el);
+ this.portlet.append(this.title.$el);
+ this.portlet.append((new Ui.Label({ label : 'Select a dataset:'})).$el);
+ this.portlet.append(this.dataset.$el);
+ this.portlet.append((new Ui.Label({ label : 'Select a chart type:'})).$el);
+ this.portlet.append(this.table.$el);
+ this.portlet.append(this.groups.$el);
+
+ // elements
+ this.setElement(this.portlet.$el);
+
+ // events
+ var self = this;
+ this.chart.on('change:title', function(chart) {
+ self.title.value(chart.get('title'));
+ });
+ this.chart.on('change:type', function(chart) {
+ self.table.value(chart.get('type'));
+ });
+ },
+
+ // reset
+ reset: function() {
+ this.chart.reset();
+ this.chart.set('id', Utils.uuid());
+ this.chart.set('dataset_id', this.app.options.dataset.id);
+ this.chart.set('type', 'bardiagram');
+ this.chart.set('title', 'Chart title');
+ },
+
+ // set chart
+ setChart: function(new_chart) {
+ this.chart.copy(new_chart);
+ },
+
+ // create chart
+ _saveChart: function() {
+ // update chart data
+ this.chart.set({
+ type : this.table.value(),
+ title : this.title.value(),
+ dataset_id : this.dataset.value(),
+ date : Utils.time()
+ });
+
+ // validate
+ if (!this.chart.get('title')) {
+ this.message.update({message : 'Please enter a title for your chart.'});
+ return;
+ }
+
+ if (!this.chart.get('type')) {
+ this.message.update({message : 'Please select a chart type.'});
+ return;
+ }
+
+ if (this.chart.groups.length == 0) {
+ this.message.update({message : 'Please configure at least one data source.'});
+ return;
+ }
+
+ // create/get chart
+ var current = this.app.charts.get(this.chart.id);
+ if (!current) {
+ current = this.chart.clone();
+ this.app.charts.add(current);
+ }
+
+ // update chart model
+ current.copy(this.chart);
+
+ // hide
+ this.$el.hide();
+
+ // update main
+ this.app.main.$el.show();
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/groups.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/groups.js
@@ -0,0 +1,154 @@
+// dependencies
+define(['library/portlet', 'library/table', 'library/ui', 'library/utils'], function(Portlet, Table, Ui, Utils) {
+
+// chart config
+return Backbone.View.extend(
+{
+ // initialize
+ initialize: function(app, options) {
+ // link app
+ this.app = app;
+
+ // get current chart object
+ this.chart = this.app.chart;
+
+ // table
+ this.table = new Table({
+ content : 'Add data sources to this table.',
+ ondblclick : function(group_id) {
+ // get group
+ var group = self.app.chart.groups.get(group_id);
+
+ // show edit
+ self.app.create.$el.hide();
+ self.app.config.show();
+ self.app.config.group.set(group.attributes);
+ }
+ });
+
+ // add table to portlet
+ var self = this;
+ this.portlet = new Portlet({
+ icon : '',
+ label : 'Configure data sources:',
+ height : 100,
+ operations : {
+ 'new' : new Ui.ButtonIcon({
+ icon : 'fa-plus',
+ tooltip: 'Create',
+ onclick: function() {
+ self.app.create.$el.hide();
+ self.app.config.show();
+ self.app.config.reset();
+ }
+ }),
+ 'edit' : new Ui.ButtonIcon({
+ icon : 'fa-pencil',
+ tooltip: 'Edit',
+ onclick: function() {
+ // check if element has been selected
+ var group_id = self.table.value();
+ if (!group_id) {
+ return;
+ }
+
+ // get group
+ var group = self.app.chart.groups.get(group_id);
+
+ // show edit
+ self.app.create.$el.hide();
+ self.app.config.show();
+ self.app.config.group.set(group.attributes);
+ }
+ }),
+ 'delete' : new Ui.ButtonIcon({
+ icon : 'fa-minus',
+ tooltip: 'Delete',
+ onclick: function() {
+ // check if element has been selected
+ var id = self.table.value();
+ if (!id) {
+ return;
+ }
+ // remove group from chart
+ self.chart.groups.remove(id);
+ }
+ })
+ }
+ });
+ this.portlet.append(this.table.$el);
+
+ // add element
+ this.setElement(this.portlet.$el);
+
+ // change
+ var self = this;
+ this.chart.on('change', function() {
+ self._refresh();
+ });
+ this.chart.on('reset', function() {
+ self._removeGroupAll();
+ });
+ this.chart.on('change:type', function() {
+ self.app.config.trigger('change');
+ });
+ this.chart.groups.on('add', function(group) {
+ self._addGroup(group);
+ });
+ this.chart.groups.on('remove', function(group) {
+ self._removeGroup(group);
+ });
+ this.chart.groups.on('reset', function(group) {
+ self._removeGroupAll();
+ });
+ this.chart.groups.on('change', function(group) {
+ self._removeGroup(group);
+ self._addGroup(group);
+ });
+ },
+
+ // refresh
+ _refresh: function() {
+ this._removeGroupAll();
+ var self = this;
+ var groups = this.chart.groups;
+ if (groups) {
+ groups.each(function(group) { self._addGroup(group); });
+ }
+ },
+
+ // add
+ _addGroup: function(group) {
+ // make custom info string
+ var info = '[';
+ var chart_type = this.chart.get('type');
+ if (chart_type) {
+ var chart_settings = this.app.types.get(chart_type);
+ for (var key in chart_settings.data) {
+ info += key + '=' + group.get(key) + ', '
+ }
+ }
+ info = info.substring(0, info.length - 2) + ']';
+
+ // add to table
+ this.table.add(group.get('label'));
+ this.table.add(info);
+ this.table.add('Last changed: ' + group.get('date'));
+ this.table.prepend(group.id);
+ this.table.value(group.id);
+ },
+
+ // remove
+ _removeGroup: function(group) {
+ // remove from table
+ this.table.remove(group.id);
+ },
+
+ // data config reset
+ _removeGroupAll: function() {
+ // reset options table
+ this.table.removeAll();
+ }
+});
+
+});
\ No newline at end of file
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/library/portlet.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/library/portlet.js
@@ -0,0 +1,142 @@
+// dependencies
+define(['library/utils'], function(Utils) {
+
+// return
+return Backbone.View.extend(
+{
+ // visibility
+ visible: false,
+
+ // defaults options
+ optionsDefault: {
+ label : '',
+ icon : 'fa-tasks',
+ buttons : null,
+ body : null,
+ height : null,
+ operations : null,
+ placement : 'bottom',
+ overflow : 'auto'
+ },
+
+ // content
+ $content : null,
+
+ // initialize
+ initialize : function(options) {
+ // configure options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // create new element
+ this.setElement(this.template(this.options));
+
+ // link content
+ this.$content = this.$el.find('#content');
+
+ // set content height
+ if (this.options.height) {
+ this.$el.find('#body').css('height', this.options.height);
+ this.$el.find('#content').css('overflow', this.options.overflow);
+ }
+
+ // append buttons
+ this.$buttons = $(this.el).find('#buttons');
+ if (this.options.buttons) {
+ // link functions
+ var self = this;
+ $.each(this.options.buttons, function(name, item) {
+ self.$buttons.append(item.$el);
+ });
+ } else {
+ this.$buttons.remove();
+ }
+
+ // append operations
+ this.$operations = $(this.el).find('#operations');
+ if (this.options.operations) {
+ // link functions
+ var self = this;
+ $.each(this.options.operations, function(name, item) {
+ self.$operations.append(item.$el);
+ });
+ }
+
+ // add body
+ if(this.options.body) {
+ this.append(this.options.body);
+ }
+ },
+
+ // append
+ append: function($el) {
+ this.$content.append(Utils.wrap($el));
+ },
+
+ // content
+ content: function() {
+ return this.$content;
+ },
+
+ // hide modal
+ show: function(){
+ // fade in
+ this.$el.fadeIn('fast');
+
+ // set flag
+ this.visible = true;
+ },
+
+ // hide modal
+ hide: function(){
+ // fade out
+ this.$el.fadeOut('fast');
+
+ // set flag
+ this.visible = false;
+ },
+
+ // enable buttons
+ enableButton: function(name) {
+ this.$buttons.find('#' + String(name).toLowerCase()).prop('disabled', false);
+ },
+
+ // disable buttons
+ disableButton: function(name) {
+ this.$buttons.find('#' + String(name).toLowerCase()).prop('disabled', true);
+ },
+
+ // fill regular modal template
+ template: function(options) {
+ var tmpl = '<div class="toolForm">';
+
+ if (options.label) {
+ tmpl += '<div id="title" class="toolFormTitle" style="padding-bottom: 7px;">' +
+ '<div id="operations" style="float: right;"></div>' +
+ '<div>';
+
+ if (options.icon)
+ tmpl += '<i style="font-size: 1.2em" class="icon fa ' + options.icon + '"> </i>';
+
+ tmpl += options.label +
+ '</div>' +
+ '</div>';
+ }
+ tmpl += '<div id="body" class="toolFormBody">';
+
+ if (options.placement == 'top') {
+ tmpl += '<div id="buttons" class="buttons" style="height: 50px; padding: 10px;"></div>';
+ }
+
+ tmpl += '<div id="content" class="content" style="height: inherit; padding: 10px;"></div>';
+
+ if (options.placement == 'bottom') {
+ tmpl += '<div id="buttons" class="buttons" style="height: 50px; padding: 10px;"></div>';
+ }
+
+ tmpl += '</div>' +
+ '</div>';
+ return tmpl;
+ }
+});
+
+});
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/library/table.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/library/table.js
@@ -0,0 +1,188 @@
+// dependencies
+define(['library/utils'], function(Utils) {
+
+// return
+return Backbone.View.extend(
+{
+ // current row
+ row: null,
+
+ // count rows
+ row_count: 0,
+
+ // defaults options
+ optionsDefault: {
+ content : 'No content available.',
+ onchange : null,
+ ondblclick : null,
+ onconfirm : null
+ },
+
+ // events
+ events : {
+ 'click' : 'onclick',
+ 'dblclick' : 'ondblclick'
+ },
+
+ // first
+ first: true,
+
+ // initialize
+ initialize : function(options) {
+ // configure options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // create new element
+ this.setElement(this.template(options));
+
+ // initialize row
+ this.row = $('<tr></tr>');
+ },
+
+ // add header cell
+ addHeader: function($el) {
+ var wrapper = $('<th></th>');
+ wrapper.append($el);
+ this.row.append(wrapper);
+ },
+
+ // header
+ appendHeader: function() {
+ // append header row
+ $(this.el).find('thead').append(this.row);
+
+ // row
+ this.row = $('<tr></tr>');
+ },
+
+ // add row cell
+ add: function($el) {
+ var wrapper = $('<td></td>');
+ wrapper.append($el);
+ this.row.append(wrapper);
+ },
+
+ // append
+ append: function(id) {
+ this.commit(id);
+ },
+
+ // prepend
+ prepend: function(id) {
+ this.commit(id, true);
+ },
+
+ // commit
+ commit: function(id, prepend) {
+ // add
+ this.row.attr('id', id);
+
+ // add row
+ if (prepend) {
+ $(this.el).find('tbody').prepend(this.row);
+ } else {
+ $(this.el).find('tbody').append(this.row);
+ }
+
+ // row
+ this.row = $('<tr></tr>');
+
+ // row count
+ this.row_count++;
+ this.refresh();
+ },
+
+ // remove
+ remove: function(id) {
+ $(this.el).find('#' + id).remove();
+ this.row_count--;
+ this.refresh();
+ },
+
+ // remove
+ removeAll: function() {
+ $(this.el).find('tbody').html('');
+ this.row_count = 0;
+ this.refresh();
+ },
+
+ // value
+ value: function(new_value) {
+ // get current id/value
+ this.before = this.$el.find('.current').attr('id');
+
+ // check if new_value is defined
+ if (new_value !== undefined) {
+ this.$el.find('tr').removeClass('current');
+ if (new_value) {
+ this.$el.find('#' + new_value).addClass('current');
+ }
+ }
+
+ // get current id/value
+ var after = this.$el.find('.current').attr('id');
+ if(after === undefined) {
+ return null;
+ } else {
+ // fire onchange
+ if (after != this.before && this.options.onchange) {
+ this.options.onchange(new_value);
+ }
+
+ // return current value
+ return after;
+ }
+ },
+
+ // cancel last change
+ confirm: function(new_value) {
+ this.value(new_value);
+ },
+
+ // onclick
+ onclick: function(e) {
+ // get values
+ var old_value = this.value();
+ var new_value = $(e.target).closest('tr').attr('id');
+
+ // check equality
+ if (new_value && old_value != new_value) {
+ if (this.options.onconfirm) {
+ this.options.onconfirm(new_value);
+ } else {
+ this.confirm(new_value);
+ }
+ }
+ },
+
+ // onclick
+ ondblclick: function(e) {
+ var value = this.value();
+ if (value && this.options.ondblclick) {
+ this.options.ondblclick(value);
+ }
+ },
+
+ // refresh
+ refresh: function() {
+ if (this.row_count == 0) {
+ this.$el.find('tmessage').show();
+ } else {
+ this.$el.find('tmessage').hide();
+ }
+ },
+
+ // load html template
+ template: function(options)
+ {
+ return '<div>' +
+ '<table class="grid">' +
+ '<thead></thead>' +
+ '<tbody style="cursor: pointer;"></tbody>' +
+ '</table>' +
+ '<tmessage>' + options.content + '</tmessage>' +
+ '</div>';
+ }
+});
+
+});
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/library/ui.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/library/ui.js
@@ -0,0 +1,472 @@
+// dependencies
+define(['library/utils', 'library/ui.select'], function(Utils, Select) {
+
+// plugin
+var Label = Backbone.View.extend(
+{
+ // options
+ optionsDefault: {
+ label : ''
+ },
+
+ // initialize
+ initialize : function(options) {
+ // get options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // create new element
+ this.setElement(this.template(this.options));
+ },
+
+ // template
+ template: function(options) {
+ return '<label><b>' + options.label + '</b></label>';
+ },
+
+ // value
+ value: function() {
+ return this.options.label;
+ }
+});
+
+// plugin
+var Button = Backbone.View.extend(
+{
+ // options
+ optionsDefault: {
+ label : '',
+ float : 'right',
+ cls : 'btn-default',
+ type : 'submit',
+ icon : ''
+ },
+
+ // initialize
+ initialize : function(options) {
+ // get options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // create new element
+ this.setElement(this.template(this.options));
+
+ // add event
+ $(this.el).on('click', options.onclick);
+ },
+
+ // element
+ template: function(options) {
+ var str = '<button type="' + options.type + '" style="margin-right: 5px; float: ' + options.float + ';" class="btn ' + options.cls + '">';
+ if (options.icon) {
+ str += '<i class="icon fa ' + options.icon + '"></i> ' ;
+ }
+ str += options.label +
+ '</button>';
+ return str;
+ }
+});
+
+// plugin
+var Anchor = Backbone.View.extend(
+{
+ // options
+ optionsDefault: {
+ label : ''
+ },
+
+ // initialize
+ initialize : function(options) {
+ // get options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // create new element
+ this.setElement(this.template(this.options));
+
+ // add event
+ $(this.el).on('click', options.onclick);
+ },
+
+ // element
+ template: function(options) {
+ return '<div><a href="javascript:void(0)">' + options.label + '</a></div>';
+ }
+});
+
+// plugin
+var Message = Backbone.View.extend(
+{
+ // options
+ optionsDefault: {
+ message : '',
+ status : 'info',
+ persistent : false
+ },
+
+ // initialize
+ initialize : function(options) {
+ // configure options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // create new element
+ this.setElement('<div></div>');
+ },
+
+ // update
+ update : function(options) {
+ // get options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // show message
+ if (options.message != '') {
+ this.$el.html(this.template(this.options));
+ this.$el.fadeIn();
+
+ // check if message is persistent
+ if (!options.persistent) {
+ // set timer
+ var self = this;
+ window.setTimeout(function() {
+ if (self.$el.is(':visible')) {
+ self.$el.fadeOut();
+ } else {
+ self.$el.hide();
+ }
+ }, 3000);
+ }
+ } else {
+ this.$el.fadeOut();
+ }
+ },
+
+ // element
+ template: function(options) {
+ return '<div class="alert alert-' + options.status + '" style="padding: 2px 2px 2px 10px;">' + options.message + '</div>';
+ }
+});
+
+// plugin
+var Searchbox = Backbone.View.extend(
+{
+ // options
+ optionsDefault: {
+ onclick : null,
+ searchword : ''
+ },
+
+ // initialize
+ initialize : function(options) {
+ // configure options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // create new element
+ this.setElement(this.template(this.options));
+
+ // add click event
+ var self = this;
+ if (this.options.onclick) {
+ this.$el.on('submit', function(e) {
+ var search_field = self.$el.find('#search');
+ self.options.onclick(search_field.val());
+ });
+ }
+ },
+
+ // element
+ template: function(options) {
+ return '<div class="search">' +
+ '<form onsubmit="return false;">' +
+ '<input id="search" class="form-control input-sm" type="text" name="search" placeholder="Search..." value="' + options.searchword + '">' +
+ '<button type="submit" class="btn search-btn">' +
+ '<i class="fa fa-search"></i>' +
+ '</button>' +
+ '</form>' +
+ '</div>';
+ }
+});
+
+// plugin
+var Title = Backbone.View.extend(
+{
+ // options
+ optionsDefault: {
+ label : 'Unlabeled',
+ body : null
+ },
+
+ // initialize
+ initialize : function(options) {
+ // configure options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // create new element
+ this.setElement(this.template(this.options));
+
+ if (this.options.body) {
+ this.$el.find('.body').append(this.options.body);
+ }
+ },
+
+ // element
+ template: function(options) {
+ return '<div id="title" class="title">' +
+ options.label + ':' +
+ '</div>';
+ }
+});
+
+// tab
+var ButtonMenu = Backbone.View.extend(
+{
+ // main options
+ options:
+ {
+ id : '',
+ title : '',
+ target : '',
+ href : '',
+ onunload : null,
+ onclick : null,
+ visible : true,
+ icon : null,
+ tag : ''
+ },
+
+ // optional sub menu
+ $menu: null,
+
+ // initialize
+ initialize: function (options)
+ {
+ // read in defaults
+ if (options)
+ this.options = _.defaults(options, this.options);
+
+ // add template for tab
+ this.setElement($(this._template(this.options)));
+
+ // find root
+ var $root = $(this.el).find('.root');
+
+ // link head
+ var self = this;
+ $root.on('click', function(e)
+ {
+ // prevent default
+ e.preventDefault();
+
+ // add click event
+ if(self.options.onclick) {
+ self.options.onclick();
+ }
+ });
+
+ // visibility
+ if (!this.options.visible)
+ this.hide();
+ },
+
+ // show
+ show: function()
+ {
+ $(this.el).show();
+ },
+
+ // hide
+ hide: function()
+ {
+ $(this.el).hide();
+ },
+
+ // add menu item
+ addMenu: function (options)
+ {
+ // menu option defaults
+ var menuOptions = {
+ title : '',
+ target : '',
+ href : '',
+ onclick : null,
+ divider : false,
+ icon : null
+ }
+
+ // read in defaults
+ if (options)
+ menuOptions = _.defaults(options, menuOptions);
+
+ // check if submenu element is available
+ if (!this.$menu)
+ {
+ // insert submenu element into root
+ $(this.el).append(this._templateMenu());
+
+ // update element link
+ this.$menu = $(this.el).find('.menu');
+ }
+
+ // create
+ var $item = $(this._templateMenuItem(menuOptions));
+
+ // add events
+ $item.on('click', function(e)
+ {
+ // prevent default
+ e.preventDefault();
+
+ // add click event
+ if(menuOptions.onclick) {
+ menuOptions.onclick();
+ }
+ });
+
+ // append menu
+ this.$menu.append($item);
+
+ // append divider
+ if (menuOptions.divider)
+ this.$menu.append($(this._templateDivider()));
+ },
+
+ // fill template header
+ _templateMenuItem: function (options)
+ {
+ var tmpl = '<li>' +
+ '<a href="' + options.href + '" target="' + options.target + '">';
+
+ if (options.icon)
+ tmpl += '<i class="fa ' + options.icon + '"></i>';
+
+ tmpl += ' ' + options.title +
+ '</a>' +
+ '</li>';
+ return tmpl;
+ },
+
+ // fill template header
+ _templateMenu: function ()
+ {
+ return '<ul class="menu dropdown-menu pull-right" role="menu"></ul>';
+ },
+
+ _templateDivider: function()
+ {
+ return '<li class="divider"></li>';
+ },
+
+ // fill template
+ _template: function (options)
+ {
+ // start template
+ var tmpl = '<div id="' + options.id + '" class="button-menu btn-group">' +
+ '<button type="button" class="root btn btn-default dropdown-toggle" data-toggle="dropdown">';
+
+ if (options.icon)
+ tmpl += '<i class="fa ' + options.icon + '"></i>';
+
+ tmpl += '</button>' +
+ '</div>';
+
+ // return template
+ return tmpl;
+ }
+});
+
+// plugin
+var ButtonIcon = Backbone.View.extend(
+{
+ // options
+ optionsDefault: {
+ label : '',
+ float : 'right',
+ cls : 'icon-btn',
+ type : 'submit',
+ icon : '',
+ tooltip : ''
+ },
+
+ // initialize
+ initialize : function(options) {
+ // get options
+ if (options)
+ this.options = _.defaults(options, this.optionsDefault);
+ else
+ this.options = this.optionsDefault;
+
+ // create new element
+ this.setElement(this.template(this.options));
+
+ // add event
+ $(this.el).on('click', options.onclick);
+
+ // add tooltip
+ $(this.el).tooltip({title: options.tooltip, placement: 'bottom'});
+ },
+
+ // element
+ template: function(options) {
+ var str = '<a style="margin-right: 5px; float: ' + options.float + ';" class="' + options.cls + '">';
+ if (options.icon) {
+ str += '<i class="icon fa ' + options.icon + '"></i>';
+ }
+ str += options.label +
+ '</a>';
+ return str;
+ }
+});
+
+
+// plugin
+var Input = Backbone.View.extend(
+{
+ // options
+ optionsDefault: {
+ id : '',
+ value : '',
+ type : 'text',
+ placeholder : '',
+ disabled : false
+ },
+
+ // initialize
+ initialize : function(options) {
+ // get options
+ if (options)
+ this.options = _.defaults(options, this.optionsDefault);
+ else
+ this.options = this.optionsDefault;
+
+ // create new element
+ this.setElement(this.template(this.options));
+
+ // disable input field
+ if (this.options.disabled) {
+ this.$el.prop('disabled', true);
+ }
+ },
+
+ // value
+ value : function (new_val) {
+ if (new_val !== undefined) {
+ this.$el.val(new_val);
+ }
+ return this.$el.val();
+ },
+
+ // element
+ template: function(options) {
+ return '<input id="' + options.id + '" type="' + options.type + '" value="' + options.value + '" placeholder="' + options.placeholder + '" class="form-control">';
+ }
+});
+
+// return
+return {
+ Label : Label,
+ Button : Button,
+ ButtonIcon : ButtonIcon,
+ Input : Input,
+ Anchor : Anchor,
+ Message : Message,
+ Searchbox : Searchbox,
+ Title : Title,
+ Select : Select,
+ ButtonMenu : ButtonMenu
+}
+});
diff -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a -r 6f56754324a6cd91d9e4afccd1d268ca22861093 config/plugins/visualizations/charts/static/library/ui.select.js
--- /dev/null
+++ b/config/plugins/visualizations/charts/static/library/ui.select.js
@@ -0,0 +1,117 @@
+// dependencies
+define(['library/utils'], function(Utils) {
+
+
+// plugin
+return Backbone.View.extend(
+{
+ // options
+ optionsDefault: {
+ id : '',
+ cls : ''
+ },
+
+ // initialize
+ initialize : function(options) {
+ // configure options
+ this.options = Utils.merge(options, this.optionsDefault);
+
+ // create new element
+ this.setElement(this.template(this.options));
+
+ // add change event
+ var self = this;
+ if (this.options.onchange) {
+ this.$el.on('change', function() { self.options.onchange(self.value()) });
+ }
+ },
+
+ // value
+ value : function (new_val) {
+ if (new_val !== undefined) {
+ this.$el.val(new_val);
+ }
+ return this.$el.val();
+ },
+
+ // label
+ label : function () {
+ return this.$el.find('option:selected').text();
+ },
+
+ // disabled
+ disabled: function() {
+ return this.$el.is(':disabled');
+ },
+
+ // render
+ update: function(options) {
+ // selected
+ var selected = this.$el.val();
+
+ // remove all options
+ $(this.el).find('option').remove();
+
+ // add new options
+ for (var key in options.data) {
+ $(this.el).append(this.templateOption(options.data[key]));
+ }
+
+ // check if selected value exists
+ var exists = 0 != $(this.el).find('option[value=' + selected + ']').length;
+
+ // add selected value
+ if (exists)
+ $(this.el).val(selected);
+ },
+
+ // update from url
+ updateUrl : function(options, callback) {
+ // get json
+ var self = this;
+ Utils.get(options.url, function(json) {
+ // write data into array
+ var data = [];
+ for (var key in json) {
+ data.push({label: json[key].name, value: json[key].id});
+ }
+
+ // check if disabled. do not update disabled select elements.
+ if (!self.disabled()) {
+ self.update({data: data});
+ }
+
+ // callback
+ if (callback) {
+ callback();
+ }
+ });
+ },
+
+ // option
+ templateOption: function(options) {
+ return '<option value="' + options.value + '">' + options.label + '</option>';
+ },
+
+ // element
+ template: function(options) {
+ var tmpl = '<select id="' + options.id + '" class="select ' + options.cls + ' ' + options.id + '">';
+ for (var key in options.data) {
+ // options
+ var item = options.data[key];
+
+ // identify selected value
+ var tag = '';
+ if (item.value == options.selected || item.value == '') {
+ tag = 'selected';
+ }
+
+ // add template string
+ tmpl += '<option value="' + item.value + '" ' + tag + '>' + item.label + '</option>';
+ }
+ tmpl += '</select>';
+ return tmpl;
+ }
+});
+
+});
This diff is so big that we needed to truncate the remainder.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/494b51bda9c1/
Changeset: 494b51bda9c1
User: jmchilton
Date: 2014-01-12 05:40:18
Summary: Rework workflow functional testing so it isn't tested by default...
Meant to be used with a specific workflow, but nose is picking it up by default when no workflow has been specified.
Affected #: 4 files
diff -r c8c00d106c7a6883feaacca809b42fd7c107fe2a -r 494b51bda9c1a319672e92cce4ec17386cd65390 run_functional_tests.sh
--- a/run_functional_tests.sh
+++ b/run_functional_tests.sh
@@ -51,7 +51,7 @@
python ./test/tool_shed/functional_tests.py -v --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html $2
fi
elif [ $1 = '-workflow' ]; then
- python ./scripts/functional_tests.py -v functional.test_workflow:WorkflowTestCase --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html -workflow $2
+ python ./scripts/functional_tests.py -v functional.workflow:WorkflowTestCase --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html -workflow $2
elif [ $1 = '-data_managers' ]; then
if [ ! $2 ]; then
python ./scripts/functional_tests.py -v functional.test_data_managers --with-nosehtml --html-report-file run_functional_tests.html -data_managers
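(For reference, the invocation pattern is unchanged by this rename; a hypothetical run would still look like: sh run_functional_tests.sh -workflow my_workflow_test.xml, with the file name illustrative only.)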
diff -r c8c00d106c7a6883feaacca809b42fd7c107fe2a -r 494b51bda9c1a319672e92cce4ec17386cd65390 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -475,10 +475,10 @@
def _run_functional_test( testing_shed_tools=None ):
workflow_test = __check_arg( '-workflow', param=True )
if workflow_test:
- import functional.test_workflow
- functional.test_workflow.WorkflowTestCase.workflow_test_file = workflow_test
- functional.test_workflow.WorkflowTestCase.master_api_key = master_api_key
- functional.test_workflow.WorkflowTestCase.user_api_key = get_user_api_key()
+ import functional.workflow
+ functional.workflow.WorkflowTestCase.workflow_test_file = workflow_test
+ functional.workflow.WorkflowTestCase.master_api_key = master_api_key
+ functional.workflow.WorkflowTestCase.user_api_key = get_user_api_key()
data_manager_test = __check_arg( '-data_managers', param=False )
if data_manager_test:
import functional.test_data_managers
diff -r c8c00d106c7a6883feaacca809b42fd7c107fe2a -r 494b51bda9c1a319672e92cce4ec17386cd65390 test/functional/test_workflow.py
--- a/test/functional/test_workflow.py
+++ /dev/null
@@ -1,185 +0,0 @@
-import os
-import sys
-from base.twilltestcase import TwillTestCase
-from base.interactor import GalaxyInteractorApi, stage_data_in_history
-
-from galaxy.util import parse_xml
-from galaxy.tools.test import parse_param_elem, require_file, test_data_iter, parse_output_elems
-from json import load, dumps
-
-from logging import getLogger
-log = getLogger( __name__ )
-
-
-class WorkflowTestCase( TwillTestCase ):
- """
- Kind of a shell of a test case for running workflow tests. Probably
- needs to look more like test_toolbox.
- """
- workflow_test_file = None
- user_api_key = None
- master_api_key = None
-
- def test_workflow( self, workflow_test_file=None ):
- maxseconds = 120
- workflow_test_file = workflow_test_file or WorkflowTestCase.workflow_test_file
- assert workflow_test_file
- workflow_test = parse_test_file( workflow_test_file )
- galaxy_interactor = GalaxyWorkflowInteractor( self )
-
- # Calling workflow https://github.com/jmchilton/blend4j/blob/master/src/test/java/com/github/j…
-
- # Import workflow
- workflow_id, step_id_map, output_defs = self.__import_workflow( galaxy_interactor, workflow_test.workflow )
-
- # Stage data and history for workflow
- test_history = galaxy_interactor.new_history()
- stage_data_in_history( galaxy_interactor, workflow_test.test_data(), test_history )
-
- # Build workflow parameters
- uploads = galaxy_interactor.uploads
- ds_map = {}
- for step_index, input_dataset_label in workflow_test.input_datasets():
- # Upload is {"src": "hda", "id": hid}
- try:
- upload = uploads[ workflow_test.upload_name( input_dataset_label ) ]
- except KeyError:
- raise AssertionError( "Failed to find upload with label %s in uploaded datasets %s" % ( input_dataset_label, uploads ) )
-
- ds_map[ step_id_map[ step_index ] ] = upload
-
- payload = {
- "history": "hist_id=%s" % test_history,
- "ds_map": dumps( ds_map ),
- "workflow_id": workflow_id,
- }
- run_response = galaxy_interactor.run_workflow( payload ).json()
-
- outputs = run_response[ 'outputs' ]
- if not len( outputs ) == len( output_defs ):
- msg_template = "Number of outputs [%d] created by workflow execution does not equal expected number from input file [%d]."
- msg = msg_template % ( len( outputs ), len( output_defs ) )
- raise AssertionError( msg )
-
- galaxy_interactor.wait_for_ids( test_history, outputs )
-
- for expected_output_def in workflow_test.outputs:
- # Get the correct hid
- name, outfile, attributes = expected_output_def
-
- output_data = outputs[ int( name ) ]
- try:
- galaxy_interactor.verify_output( test_history, output_data, outfile, attributes=attributes, shed_tool_id=None, maxseconds=maxseconds )
- except Exception:
- for stream in ['stdout', 'stderr']:
- stream_output = galaxy_interactor.get_job_stream( test_history, output_data, stream=stream )
- print >>sys.stderr, self._format_stream( stream_output, stream=stream, format=True )
- raise
-
- def __import_workflow( self, galaxy_interactor, workflow ):
- """
- Import workflow into Galaxy and return id and mapping of step ids.
- """
- workflow_info = galaxy_interactor.import_workflow( workflow ).json()
- try:
- workflow_id = workflow_info[ 'id' ]
- except KeyError:
- raise AssertionError( "Failed to find id for workflow import response %s" % workflow_info )
-
- # Well ideally the local copy of the workflow would have the same step ids
- # as the one imported through the API, but API workflow imports are 1-indexed
- # and GUI exports 0-indexed as of mid-november 2013.
-
- imported_workflow = galaxy_interactor.read_workflow( workflow_id )
- #log.info("local %s\nimported%s" % (workflow, imported_workflow))
- step_id_map = {}
- local_steps_ids = sorted( [ int( step_id ) for step_id in workflow[ 'steps' ].keys() ] )
- imported_steps_ids = sorted( [ int( step_id ) for step_id in imported_workflow[ 'steps' ].keys() ] )
- for local_step_id, imported_step_id in zip( local_steps_ids, imported_steps_ids ):
- step_id_map[ local_step_id ] = imported_step_id
-
- output_defs = []
- for local_step_id in local_steps_ids:
- step_def = workflow['steps'][ str( local_step_id ) ]
- output_defs.extend( step_def.get( "outputs", [] ) )
-
- return workflow_id, step_id_map, output_defs
-
-
-def parse_test_file( workflow_test_file ):
- tree = parse_xml( workflow_test_file )
- root = tree.getroot()
- input_elems = root.findall( "input" )
- required_files = []
- dataset_dict = {}
- for input_elem in input_elems:
- name, value, attrib = parse_param_elem( input_elem )
- require_file( name, value, attrib, required_files )
- dataset_dict[ name ] = value
-
- outputs = parse_output_elems( root )
-
- workflow_file_rel_path = root.get( 'file' )
- if not workflow_file_rel_path:
- raise Exception( "Workflow test XML must declare file attribute pointing to workflow under test." )
-
- # TODO: Normalize this path, prevent it from accessing arbitrary files on system.
- worfklow_file_abs_path = os.path.join( os.path.dirname( workflow_test_file ), workflow_file_rel_path )
-
- return WorkflowTest(
- dataset_dict,
- required_files,
- worfklow_file_abs_path,
- outputs=outputs,
- )
-
-
-class WorkflowTest( object ):
-
- def __init__( self, dataset_dict, required_files, workflow_file, outputs ):
- self.dataset_dict = dataset_dict
- self.required_files = required_files
- self.workflow = load( open( workflow_file, "r" ) )
- self.outputs = outputs
-
- def test_data( self ):
- return test_data_iter( self.required_files )
-
- def upload_name( self, input_dataset_label ):
- return self.dataset_dict[ input_dataset_label ]
-
- def input_datasets( self ):
- steps = self.workflow[ "steps" ]
- log.info("in input_datasets with steps %s" % steps)
- for step_index, step_dict in steps.iteritems():
- if step_dict.get( "name", None ) == "Input dataset":
- yield int( step_index ), step_dict[ "inputs" ][0][ "name" ]
-
-
-class GalaxyWorkflowInteractor(GalaxyInteractorApi):
-
- def __init__( self, twill_test_case ):
- super(GalaxyWorkflowInteractor, self).__init__( twill_test_case )
-
- def import_workflow( self, workflow_rep ):
- payload = { "workflow": dumps( workflow_rep ) }
- return self._post( "workflows/upload", data=payload )
-
- def run_workflow( self, data ):
- return self._post( "workflows", data=data )
-
- def read_workflow( self, id ):
- return self._get( "workflows/%s" % id ).json()
-
- def wait_for_ids( self, history_id, ids ):
- self.twill_test_case.wait_for( lambda: not all( [ self.__dataset_ready( history_id, id ) for id in ids ] ), maxseconds=120 )
-
- def __dataset_ready( self, history_id, id ):
- contents = self._get( 'histories/%s/contents' % history_id ).json()
- for content in contents:
-
- if content["id"] == id:
- state = content[ 'state' ]
- state_ready = self._state_ready( state, error_msg="Dataset creation failed for dataset with name %s." % content[ 'name' ] )
- return state_ready
- return False
diff -r c8c00d106c7a6883feaacca809b42fd7c107fe2a -r 494b51bda9c1a319672e92cce4ec17386cd65390 test/functional/workflow.py
--- /dev/null
+++ b/test/functional/workflow.py
@@ -0,0 +1,185 @@
+import os
+import sys
+from base.twilltestcase import TwillTestCase
+from base.interactor import GalaxyInteractorApi, stage_data_in_history
+
+from galaxy.util import parse_xml
+from galaxy.tools.test import parse_param_elem, require_file, test_data_iter, parse_output_elems
+from json import load, dumps
+
+from logging import getLogger
+log = getLogger( __name__ )
+
+
+class WorkflowTestCase( TwillTestCase ):
+ """
+ Kind of a shell of a test case for running workflow tests. Probably
+ needs to look more like test_toolbox.
+ """
+ workflow_test_file = None
+ user_api_key = None
+ master_api_key = None
+
+ def test_workflow( self, workflow_test_file=None ):
+ maxseconds = 120
+ workflow_test_file = workflow_test_file or WorkflowTestCase.workflow_test_file
+ assert workflow_test_file
+ workflow_test = parse_test_file( workflow_test_file )
+ galaxy_interactor = GalaxyWorkflowInteractor( self )
+
+ # Calling workflow https://github.com/jmchilton/blend4j/blob/master/src/test/java/com/github/j…
+
+ # Import workflow
+ workflow_id, step_id_map, output_defs = self.__import_workflow( galaxy_interactor, workflow_test.workflow )
+
+ # Stage data and history for workflow
+ test_history = galaxy_interactor.new_history()
+ stage_data_in_history( galaxy_interactor, workflow_test.test_data(), test_history )
+
+ # Build workflow parameters
+ uploads = galaxy_interactor.uploads
+ ds_map = {}
+ for step_index, input_dataset_label in workflow_test.input_datasets():
+ # Upload is {"src": "hda", "id": hid}
+ try:
+ upload = uploads[ workflow_test.upload_name( input_dataset_label ) ]
+ except KeyError:
+ raise AssertionError( "Failed to find upload with label %s in uploaded datasets %s" % ( input_dataset_label, uploads ) )
+
+ ds_map[ step_id_map[ step_index ] ] = upload
+
+ payload = {
+ "history": "hist_id=%s" % test_history,
+ "ds_map": dumps( ds_map ),
+ "workflow_id": workflow_id,
+ }
+ run_response = galaxy_interactor.run_workflow( payload ).json()
+
+ outputs = run_response[ 'outputs' ]
+ if not len( outputs ) == len( output_defs ):
+ msg_template = "Number of outputs [%d] created by workflow execution does not equal expected number from input file [%d]."
+ msg = msg_template % ( len( outputs ), len( output_defs ) )
+ raise AssertionError( msg )
+
+ galaxy_interactor.wait_for_ids( test_history, outputs )
+
+ for expected_output_def in workflow_test.outputs:
+ # Get the correct hid
+ name, outfile, attributes = expected_output_def
+
+ output_data = outputs[ int( name ) ]
+ try:
+ galaxy_interactor.verify_output( test_history, output_data, outfile, attributes=attributes, shed_tool_id=None, maxseconds=maxseconds )
+ except Exception:
+ for stream in ['stdout', 'stderr']:
+ stream_output = galaxy_interactor.get_job_stream( test_history, output_data, stream=stream )
+ print >>sys.stderr, self._format_stream( stream_output, stream=stream, format=True )
+ raise
+
+ def __import_workflow( self, galaxy_interactor, workflow ):
+ """
+ Import workflow into Galaxy and return id and mapping of step ids.
+ """
+ workflow_info = galaxy_interactor.import_workflow( workflow ).json()
+ try:
+ workflow_id = workflow_info[ 'id' ]
+ except KeyError:
+ raise AssertionError( "Failed to find id for workflow import response %s" % workflow_info )
+
+ # Well ideally the local copy of the workflow would have the same step ids
+ # as the one imported through the API, but API workflow imports are 1-indexed
+ # and GUI exports 0-indexed as of mid-November 2013.
+
+ imported_workflow = galaxy_interactor.read_workflow( workflow_id )
+ #log.info("local %s\nimported%s" % (workflow, imported_workflow))
+ step_id_map = {}
+ local_steps_ids = sorted( [ int( step_id ) for step_id in workflow[ 'steps' ].keys() ] )
+ imported_steps_ids = sorted( [ int( step_id ) for step_id in imported_workflow[ 'steps' ].keys() ] )
+ for local_step_id, imported_step_id in zip( local_steps_ids, imported_steps_ids ):
+ step_id_map[ local_step_id ] = imported_step_id
+
+ output_defs = []
+ for local_step_id in local_steps_ids:
+ step_def = workflow['steps'][ str( local_step_id ) ]
+ output_defs.extend( step_def.get( "outputs", [] ) )
+
+ return workflow_id, step_id_map, output_defs
+
+
+def parse_test_file( workflow_test_file ):
+ tree = parse_xml( workflow_test_file )
+ root = tree.getroot()
+ input_elems = root.findall( "input" )
+ required_files = []
+ dataset_dict = {}
+ for input_elem in input_elems:
+ name, value, attrib = parse_param_elem( input_elem )
+ require_file( name, value, attrib, required_files )
+ dataset_dict[ name ] = value
+
+ outputs = parse_output_elems( root )
+
+ workflow_file_rel_path = root.get( 'file' )
+ if not workflow_file_rel_path:
+ raise Exception( "Workflow test XML must declare file attribute pointing to workflow under test." )
+
+ # TODO: Normalize this path, prevent it from accessing arbitrary files on system.
+ workflow_file_abs_path = os.path.join( os.path.dirname( workflow_test_file ), workflow_file_rel_path )
+
+ return WorkflowTest(
+ dataset_dict,
+ required_files,
+ workflow_file_abs_path,
+ outputs=outputs,
+ )
+
+
+class WorkflowTest( object ):
+
+ def __init__( self, dataset_dict, required_files, workflow_file, outputs ):
+ self.dataset_dict = dataset_dict
+ self.required_files = required_files
+ self.workflow = load( open( workflow_file, "r" ) )
+ self.outputs = outputs
+
+ def test_data( self ):
+ return test_data_iter( self.required_files )
+
+ def upload_name( self, input_dataset_label ):
+ return self.dataset_dict[ input_dataset_label ]
+
+ def input_datasets( self ):
+ steps = self.workflow[ "steps" ]
+ log.info("in input_datasets with steps %s" % steps)
+ for step_index, step_dict in steps.iteritems():
+ if step_dict.get( "name", None ) == "Input dataset":
+ yield int( step_index ), step_dict[ "inputs" ][0][ "name" ]
+
+
+class GalaxyWorkflowInteractor(GalaxyInteractorApi):
+
+ def __init__( self, twill_test_case ):
+ super(GalaxyWorkflowInteractor, self).__init__( twill_test_case )
+
+ def import_workflow( self, workflow_rep ):
+ payload = { "workflow": dumps( workflow_rep ) }
+ return self._post( "workflows/upload", data=payload )
+
+ def run_workflow( self, data ):
+ return self._post( "workflows", data=data )
+
+ def read_workflow( self, id ):
+ return self._get( "workflows/%s" % id ).json()
+
+ def wait_for_ids( self, history_id, ids ):
+ self.twill_test_case.wait_for( lambda: not all( [ self.__dataset_ready( history_id, id ) for id in ids ] ), maxseconds=120 )
+
+ def __dataset_ready( self, history_id, id ):
+ contents = self._get( 'histories/%s/contents' % history_id ).json()
+ for content in contents:
+
+ if content["id"] == id:
+ state = content[ 'state' ]
+ state_ready = self._state_ready( state, error_msg="Dataset creation failed for dataset with name %s." % content[ 'name' ] )
+ return state_ready
+ return False
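For reference, the run request assembled in the test above reduces to a small JSON payload. Below is a minimal sketch of its shape; the ids are hypothetical placeholders, while the {"src": "hda", "id": ...} upload convention, the hist_id= prefix, and the JSON-encoded ds_map all come from the code itself.

from json import dumps

# Hypothetical ids; real values come from the Galaxy API at run time.
imported_step_id = 3                                 # step_id_map[ local_step_id ]
upload = { "src": "hda", "id": "2891970512fa2d5a" }  # a staged input dataset

ds_map = { imported_step_id: upload }

payload = {
    "history": "hist_id=%s" % "f2db41e1fa331b3e",    # target history
    "ds_map": dumps( ds_map ),                       # step id -> dataset mapping
    "workflow_id": "ebfb8f50c6abde6d",               # id returned by workflows/upload
}
# GalaxyWorkflowInteractor.run_workflow( payload ) POSTs this to the
# "workflows" API endpoint, as the test case above does.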
https://bitbucket.org/galaxy/galaxy-central/commits/c7986c31fd74/
Changeset: c7986c31fd74
User: jmchilton
Date: 2014-01-12 05:40:18
Summary: More work on eliminating references to test_db_util.sa_session.
... tweaked styling of several functional tests (the non-toolbox, datamanager, workflow, api functional tests) along the way - mostly PEP-8 fixes and eliminating * imports. Did not modify tests which do not pass - but did note in the source where they fail inside a TODO.
Affected #: 14 files
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/base/test_db_util.py
--- a/test/base/test_db_util.py
+++ b/test/base/test_db_util.py
@@ -1,153 +1,223 @@
import galaxy.model
from galaxy.model.orm import *
-from functional.database_contexts import galaxy_context as sa_session
+from functional import database_contexts
+# Deprecated - import database_contexts and use galaxy_context
+sa_session = database_contexts.galaxy_context
from base.twilltestcase import *
-import sys
+
+
+def gx_context():
+ return database_contexts.galaxy_context
+
def delete_obj( obj ):
- sa_session.delete( obj )
- sa_session.flush()
+ gx_context().delete( obj )
+ gx_context().flush()
+
+
def delete_request_type_permissions( id ):
- rtps = sa_session.query( galaxy.model.RequestTypePermissions ) \
- .filter( and_( galaxy.model.RequestTypePermissions.table.c.request_type_id==id ) ) \
+ rtps = gx_context().query( galaxy.model.RequestTypePermissions ) \
+ .filter( and_( galaxy.model.RequestTypePermissions.table.c.request_type_id == id ) ) \
.order_by( desc( galaxy.model.RequestTypePermissions.table.c.create_time ) )
for rtp in rtps:
- sa_session.delete( rtp )
- sa_session.flush()
+ gx_context().delete( rtp )
+ gx_context().flush()
+
+
def delete_user_roles( user ):
for ura in user.roles:
- sa_session.delete( ura )
- sa_session.flush()
+ gx_context().delete( ura )
+ gx_context().flush()
+
+
def flush( obj ):
- sa_session.add( obj )
- sa_session.flush()
+ gx_context().add( obj )
+ gx_context().flush()
+
+
def get_all_histories_for_user( user ):
- return sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.user_id==user.id,
- galaxy.model.History.table.c.deleted==False ) ) \
+ return gx_context().query( galaxy.model.History ) \
+ .filter( and_( galaxy.model.History.table.c.user_id == user.id,
+ galaxy.model.History.table.c.deleted == False ) ) \
.all()
+
+
def get_dataset_permissions_by_dataset( dataset ):
- return sa_session.query( galaxy.model.DatasetPermissions ) \
- .filter( galaxy.model.DatasetPermissions.table.c.dataset_id==dataset.id ) \
+ return gx_context().query( galaxy.model.DatasetPermissions ) \
+ .filter( galaxy.model.DatasetPermissions.table.c.dataset_id == dataset.id ) \
.all()
+
+
def get_dataset_permissions_by_role( role ):
- return sa_session.query( galaxy.model.DatasetPermissions ) \
+ return gx_context().query( galaxy.model.DatasetPermissions ) \
.filter( galaxy.model.DatasetPermissions.table.c.role_id == role.id ) \
.first()
+
+
def get_default_history_permissions_by_history( history ):
- return sa_session.query( galaxy.model.DefaultHistoryPermissions ) \
- .filter( galaxy.model.DefaultHistoryPermissions.table.c.history_id==history.id ) \
+ return gx_context().query( galaxy.model.DefaultHistoryPermissions ) \
+ .filter( galaxy.model.DefaultHistoryPermissions.table.c.history_id == history.id ) \
.all()
+
+
def get_default_history_permissions_by_role( role ):
- return sa_session.query( galaxy.model.DefaultHistoryPermissions ) \
+ return gx_context().query( galaxy.model.DefaultHistoryPermissions ) \
.filter( galaxy.model.DefaultHistoryPermissions.table.c.role_id == role.id ) \
.all()
+
+
def get_default_user_permissions_by_role( role ):
- return sa_session.query( galaxy.model.DefaultUserPermissions ) \
+ return gx_context().query( galaxy.model.DefaultUserPermissions ) \
.filter( galaxy.model.DefaultUserPermissions.table.c.role_id == role.id ) \
.all()
+
+
def get_default_user_permissions_by_user( user ):
- return sa_session.query( galaxy.model.DefaultUserPermissions ) \
- .filter( galaxy.model.DefaultUserPermissions.table.c.user_id==user.id ) \
+ return gx_context().query( galaxy.model.DefaultUserPermissions ) \
+ .filter( galaxy.model.DefaultUserPermissions.table.c.user_id == user.id ) \
.all()
+
+
def get_form( name ):
- fdc_list = sa_session.query( galaxy.model.FormDefinitionCurrent ) \
+ fdc_list = gx_context().query( galaxy.model.FormDefinitionCurrent ) \
.filter( galaxy.model.FormDefinitionCurrent.table.c.deleted == False ) \
.order_by( galaxy.model.FormDefinitionCurrent.table.c.create_time.desc() )
for fdc in fdc_list:
- sa_session.refresh( fdc )
- sa_session.refresh( fdc.latest_form )
+ gx_context().refresh( fdc )
+ gx_context().refresh( fdc.latest_form )
if fdc.latest_form.name == name:
return fdc.latest_form
return None
+
+
def get_folder( parent_id, name, description ):
- return sa_session.query( galaxy.model.LibraryFolder ) \
- .filter( and_( galaxy.model.LibraryFolder.table.c.parent_id==parent_id,
- galaxy.model.LibraryFolder.table.c.name==name,
- galaxy.model.LibraryFolder.table.c.description==description ) ) \
+ return gx_context().query( galaxy.model.LibraryFolder ) \
+ .filter( and_( galaxy.model.LibraryFolder.table.c.parent_id == parent_id,
+ galaxy.model.LibraryFolder.table.c.name == name,
+ galaxy.model.LibraryFolder.table.c.description == description ) ) \
.first()
+
+
def get_group_by_name( name ):
- return sa_session.query( galaxy.model.Group ).filter( galaxy.model.Group.table.c.name==name ).first()
+ return gx_context().query( galaxy.model.Group ).filter( galaxy.model.Group.table.c.name == name ).first()
+
+
def get_group_role_associations_by_group( group ):
- return sa_session.query( galaxy.model.GroupRoleAssociation ) \
+ return gx_context().query( galaxy.model.GroupRoleAssociation ) \
.filter( galaxy.model.GroupRoleAssociation.table.c.group_id == group.id ) \
.all()
+
+
def get_group_role_associations_by_role( role ):
- return sa_session.query( galaxy.model.GroupRoleAssociation ) \
+ return gx_context().query( galaxy.model.GroupRoleAssociation ) \
.filter( galaxy.model.GroupRoleAssociation.table.c.role_id == role.id ) \
.all()
+
+
def get_latest_dataset():
- return sa_session.query( galaxy.model.Dataset ) \
+ return gx_context().query( galaxy.model.Dataset ) \
.order_by( desc( galaxy.model.Dataset.table.c.create_time ) ) \
.first()
+
+
def get_latest_hda():
- return sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
+ return gx_context().query( galaxy.model.HistoryDatasetAssociation ) \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ) \
.first()
+
+
def get_latest_history_for_user( user ):
- return sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==user.id ) ) \
+ return gx_context().query( galaxy.model.History ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
+
+
def get_latest_ldda_by_name( name ):
- return sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
- .filter( and_( galaxy.model.LibraryDatasetDatasetAssociation.table.c.name==name,
+ return gx_context().query( galaxy.model.LibraryDatasetDatasetAssociation ) \
+ .filter( and_( galaxy.model.LibraryDatasetDatasetAssociation.table.c.name == name,
galaxy.model.LibraryDatasetDatasetAssociation.table.c.deleted == False ) ) \
.order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
.first()
+
+
def get_latest_lddas( limit ):
- return sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
+ return gx_context().query( galaxy.model.LibraryDatasetDatasetAssociation ) \
.order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.update_time ) ) \
.limit( limit )
+
+
def get_library( name, description, synopsis ):
- return sa_session.query( galaxy.model.Library ) \
- .filter( and_( galaxy.model.Library.table.c.name==name,
- galaxy.model.Library.table.c.description==description,
- galaxy.model.Library.table.c.synopsis==synopsis,
- galaxy.model.Library.table.c.deleted==False ) ) \
+ return gx_context().query( galaxy.model.Library ) \
+ .filter( and_( galaxy.model.Library.table.c.name == name,
+ galaxy.model.Library.table.c.description == description,
+ galaxy.model.Library.table.c.synopsis == synopsis,
+ galaxy.model.Library.table.c.deleted == False ) ) \
.first()
+
+
def get_private_role( user ):
for role in user.all_roles():
if role.name == user.email and role.description == 'Private Role for %s' % user.email:
return role
raise AssertionError( "Private role not found for user '%s'" % user.email )
+
+
def get_request_by_name( name ):
- return sa_session.query( galaxy.model.Request ) \
- .filter( and_( galaxy.model.Request.table.c.name==name,
- galaxy.model.Request.table.c.deleted==False ) ) \
- .first()
+ return gx_context().query( galaxy.model.Request ) \
+ .filter( and_( galaxy.model.Request.table.c.name == name,
+ galaxy.model.Request.table.c.deleted == False ) ) \
+ .first()
+
+
def get_request_type_by_name( name ):
- return sa_session.query( galaxy.model.RequestType ) \
- .filter( and_( galaxy.model.RequestType.table.c.name==name ) ) \
+ return gx_context().query( galaxy.model.RequestType ) \
+ .filter( and_( galaxy.model.RequestType.table.c.name == name ) ) \
.order_by( desc( galaxy.model.RequestType.table.c.create_time ) ) \
.first()
+
+
def get_role_by_name( name ):
- return sa_session.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name==name ).first()
+ return gx_context().query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name == name ).first()
+
+
def get_user( email ):
- return sa_session.query( galaxy.model.User ) \
- .filter( galaxy.model.User.table.c.email==email ) \
+ return gx_context().query( galaxy.model.User ) \
+ .filter( galaxy.model.User.table.c.email == email ) \
.first()
+
+
def get_user_address( user, short_desc ):
- return sa_session.query( galaxy.model.UserAddress ) \
- .filter( and_( galaxy.model.UserAddress.table.c.user_id==user.id,
- galaxy.model.UserAddress.table.c.desc==short_desc,
- galaxy.model.UserAddress.table.c.deleted==False ) ) \
+ return gx_context().query( galaxy.model.UserAddress ) \
+ .filter( and_( galaxy.model.UserAddress.table.c.user_id == user.id,
+ galaxy.model.UserAddress.table.c.desc == short_desc,
+ galaxy.model.UserAddress.table.c.deleted == False ) ) \
.order_by( desc( galaxy.model.UserAddress.table.c.create_time ) ) \
- .first()
+ .first()
+
+
def get_user_group_associations_by_group( group ):
- return sa_session.query( galaxy.model.UserGroupAssociation ) \
+ return gx_context().query( galaxy.model.UserGroupAssociation ) \
.filter( galaxy.model.UserGroupAssociation.table.c.group_id == group.id ) \
.all()
+
+
def get_user_info_form_definition():
return galaxy.model.FormDefinition.types.USER_INFO
+
+
def get_user_role_associations_by_role( role ):
- return sa_session.query( galaxy.model.UserRoleAssociation ) \
+ return gx_context().query( galaxy.model.UserRoleAssociation ) \
.filter( galaxy.model.UserRoleAssociation.table.c.role_id == role.id ) \
.all()
+
+
def mark_obj_deleted( obj ):
obj.deleted = True
- sa_session.add( obj )
- sa_session.flush()
+ gx_context().add( obj )
+ gx_context().flush()
+
+
def refresh( obj ):
- sa_session.refresh( obj )
+ gx_context().refresh( obj )
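The refactoring above replaces a module-level sa_session alias with a gx_context() accessor. The point is import order: functional.database_contexts.galaxy_context is assigned by the test framework only after this module has been imported, so a name bound at import time captures the stale pre-setup value. A self-contained sketch of the difference, with a plain class standing in for the database_contexts module:

class _Contexts( object ):
    # Stand-in for functional.database_contexts; galaxy_context is
    # populated by the framework after imports complete.
    galaxy_context = None

database_contexts = _Contexts()

# Eager alias, like the deprecated module-level sa_session:
sa_session = database_contexts.galaxy_context

def gx_context():
    # Late lookup, like the accessor introduced in the diff above.
    return database_contexts.galaxy_context

database_contexts.galaxy_context = "live session"  # framework setup runs

assert sa_session is None                # the alias captured the old value
assert gx_context() == "live session"    # the accessor sees the live binding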
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_DNAse_flanked_genes.py
--- a/test/functional/test_DNAse_flanked_genes.py
+++ b/test/functional/test_DNAse_flanked_genes.py
@@ -1,21 +1,24 @@
import galaxy.model
from galaxy.model.orm import *
-from base.test_db_util import sa_session
+import database_contexts
from base.twilltestcase import TwillTestCase
""" A sample analysis"""
+
class AnalysisDNAseHSSFlankedGenes( TwillTestCase ):
+
def test_get_DNAseHSS_flanked_genes( self ):
+ sa_session = database_contexts.galaxy_context
self.logout()
self.login( email='test(a)bx.psu.edu' )
admin_user = sa_session.query( galaxy.model.User ) \
- .filter( galaxy.model.User.table.c.email=='test(a)bx.psu.edu' ) \
+ .filter( galaxy.model.User.table.c.email == 'test(a)bx.psu.edu' ) \
.one()
self.new_history( name='DNAseHSS_flanked_genes' )
history1 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
track_params = dict(
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_admin_features.py
--- a/test/functional/test_admin_features.py
+++ b/test/functional/test_admin_features.py
@@ -1,5 +1,32 @@
-from base.twilltestcase import *
-from base.test_db_util import *
+from base.twilltestcase import TwillTestCase
+from functional import database_contexts
+import galaxy.model
+from base.test_db_util import (
+ get_user,
+ get_private_role,
+ get_all_histories_for_user,
+ get_latest_history_for_user,
+ get_default_history_permissions_by_history,
+ get_latest_dataset,
+ refresh,
+ flush,
+ get_group_by_name,
+ get_role_by_name,
+ get_user_group_associations_by_group,
+ get_default_history_permissions_by_role,
+ get_default_user_permissions_by_role,
+ get_user_role_associations_by_role,
+ get_group_role_associations_by_group,
+ get_dataset_permissions_by_role,
+ get_group_role_associations_by_role,
+)
+
+
+# Globals setup by these tests.
+regular_user1 = regular_user2 = regular_user3 = admin_user = None
+role_one = role_two = role_three = None
+group_zero = group_one = group_two = None
+
class TestDataSecurity( TwillTestCase ):
def test_000_initiate_users( self ):
@@ -19,6 +46,7 @@
global admin_user
admin_user = get_user( 'test(a)bx.psu.edu' )
assert admin_user is not None, 'Problem retrieving user with email "test(a)bx.psu.edu" from the database'
+
def test_005_create_new_user_account_as_admin( self ):
"""Testing creating a new user account as admin"""
# Logged in as admin_user
@@ -31,8 +59,9 @@
username='admin-user',
redirect='' )
if not username_taken:
- raise AssertionError, "The public name (%s) is already being used by another user, but no error was displayed" \
- % 'admin-user'
+ error_msg = "The public name (%s) is already being used by another user, but no error was displayed" % 'admin-user'
+ raise AssertionError( error_msg )
+
# Test setting the user name to an invalid one. Note that the account must not exist in order
# for this test to work as desired, so the email we're passing is important...
previously_created, username_taken, invalid_username = self.create_new_account_as_admin( email='diff(a)you.com',
@@ -40,7 +69,7 @@
username='h',
redirect='' )
if not invalid_username:
- raise AssertionError, "The public name (%s) is is invalid, but no error was displayed" % username
+ raise AssertionError( "The public name (%s) is is invalid, but no error was displayed" % 'diff(a)you.com' )
previously_created, username_taken, invalid_username = self.create_new_account_as_admin( email=email,
password=password,
username='regular-user3',
@@ -61,7 +90,7 @@
raise AssertionError( '%d UserRoleAssociations were created for user %s when the admin created the account ( should have been 1 )' \
% ( len( regular_user3.roles ), regular_user3.email ) )
for ura in regular_user3.roles:
- role = sa_session.query( galaxy.model.Role ).get( ura.role_id )
+ role = database_contexts.galaxy_context.query( galaxy.model.Role ).get( ura.role_id )
if not previously_created and role.type != 'private':
raise AssertionError( 'Role created for user %s when the admin created the account is not private, type is' \
% str( role.type ) )
@@ -73,9 +102,11 @@
# Make sure the user was not associated with any groups
if regular_user3.groups:
raise AssertionError( 'Groups were incorrectly associated with user %s when the admin created the account' % email )
+
def test_010_reset_password_as_admin( self ):
"""Testing reseting a user password as admin"""
self.reset_password_as_admin( user_id=self.security.encode_id( regular_user3.id ), password='testreset' )
+
def test_015_login_after_password_reset( self ):
"""Testing logging in after an admin reset a password - tests DefaultHistoryPermissions for accounts created by an admin"""
# logged in as admin_user
@@ -84,7 +115,7 @@
# Make sure a History and HistoryDefaultPermissions exist for the user
latest_history = get_latest_history_for_user( regular_user3 )
if not latest_history.user_id == regular_user3.id:
- raise AssertionError( 'A history was not created for user %s when he logged in' % email )
+ raise AssertionError( 'A history was not created for user %s when he logged in' % regular_user3.email )
if not latest_history.default_permissions:
raise AssertionError( 'No DefaultHistoryPermissions were created for history id %d when it was created' % latest_history.id )
dhps = get_default_history_permissions_by_history( latest_history )
@@ -102,22 +133,25 @@
if dp.action != galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
raise AssertionError( 'The DatasetPermissions for dataset id %d is %s ( should have been %s )' \
% ( latest_dataset.id,
- latest_dataset.actions.action,
+ latest_dataset.actions.action,
galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action ) )
self.logout()
# Reset the password to the default for later tests
self.login( email='test(a)bx.psu.edu' )
self.reset_password_as_admin( user_id=self.security.encode_id( regular_user3.id ), password='testuser' )
+
def test_020_mark_user_deleted( self ):
"""Testing marking a user account as deleted"""
# Logged in as admin_user
self.mark_user_deleted( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
if not regular_user3.active_histories:
raise AssertionError( 'HistoryDatasetAssociations for regular_user3 were incorrectly deleted when the user was marked deleted' )
+
def test_025_undelete_user( self ):
"""Testing undeleting a user account"""
# Logged in as admin_user
self.undelete_user( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
+
def test_030_create_role( self ):
"""Testing creating new role with 3 members ( and a new group named the same ), then renaming the role"""
# Logged in as admin_user
@@ -137,7 +171,7 @@
"One of the groups associated with this role is the newly created group with the same name." ] )
# Get the role object for later tests
global role_one
- role_one = sa_session.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name==name ).first()
+ role_one = database_contexts.galaxy_context.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name == name ).first()
assert role_one is not None, 'Problem retrieving role named "Role One" from the database'
# Make sure UserRoleAssociations are correct
if len( role_one.users ) != len( in_user_ids ):
@@ -156,13 +190,14 @@
group_zero = get_group_by_name( name )
# Rename the role
rename = "Role One's been Renamed"
- new_description="This is Role One's Re-described"
+ new_description = "This is Role One's Re-described"
self.rename_role( self.security.encode_id( role_one.id ), name=rename, description=new_description )
self.visit_url( '%s/admin/roles' % self.url )
self.check_page_for_string( rename )
self.check_page_for_string( new_description )
# Reset the role back to the original name and description
self.rename_role( self.security.encode_id( role_one.id ), name=name, description=description )
+
def test_035_create_group( self ):
"""Testing creating new group with 3 members and 2 associated roles, then renaming it"""
# Logged in as admin_user
@@ -202,6 +237,7 @@
self.check_page_for_string( rename )
# Reset the group back to the original name
self.rename_group( self.security.encode_id( group_one.id ), name=name )
+
def test_040_add_members_and_role_to_group( self ):
"""Testing editing user membership and role associations of an existing group"""
# Logged in as admin_user
@@ -224,6 +260,7 @@
group_two.name,
user_ids=user_ids,
role_ids=role_ids )
+
def test_045_create_role_with_user_and_group_associations( self ):
"""Testing creating a role with user and group associations"""
# Logged in as admin_user
@@ -233,9 +270,9 @@
# associate_users_and_groups_with_role() method.
name = 'Role Two'
description = 'This is Role Two'
- user_ids=[ str( admin_user.id ) ]
- group_ids=[ str( group_two.id ) ]
- private_role=admin_user.email
+ user_ids = [ str( admin_user.id ) ]
+ group_ids = [ str( group_two.id ) ]
+ private_role = admin_user.email
# Create the role
self.create_role( name=name,
description=description,
@@ -263,15 +300,16 @@
refresh( group_two )
if len( group_two.roles ) != 2:
raise AssertionError( '%d GroupRoleAssociations are associated with group id %d ( should be 2 )' % ( len( group_two.roles ), group_two.id ) )
+
def test_050_change_user_role_associations( self ):
"""Testing changing roles associated with a user"""
# Logged in as admin_user
# Create a new role with no associations
name = 'Role Three'
description = 'This is Role Three'
- user_ids=[]
- group_ids=[]
- private_role=admin_user.email
+ user_ids = []
+ group_ids = []
+ private_role = admin_user.email
self.create_role( name=name,
description=description,
in_user_ids=user_ids,
@@ -301,6 +339,7 @@
if len( admin_user.roles ) != 4:
raise AssertionError( '%d UserRoleAssociations are associated with %s ( should be 4 )' % \
( len( admin_user.roles ), admin_user.email ) )
+
def test_055_mark_group_deleted( self ):
"""Testing marking a group as deleted"""
# Logged in as admin_user
@@ -314,6 +353,7 @@
raise AssertionError( '%s incorrectly lost all members when it was marked as deleted.' % group_two.name )
if not group_two.roles:
raise AssertionError( '%s incorrectly lost all role associations when it was marked as deleted.' % group_two.name )
+
def test_060_undelete_group( self ):
"""Testing undeleting a deleted group"""
# Logged in as admin_user
@@ -321,6 +361,7 @@
refresh( group_two )
if group_two.deleted:
raise AssertionError( '%s was not correctly marked as not deleted.' % group_two.name )
+
def test_065_mark_role_deleted( self ):
"""Testing marking a role as deleted"""
# Logged in as admin_user
@@ -335,10 +376,12 @@
raise AssertionError( '%s incorrectly lost all user associations when it was marked as deleted.' % role_two.name )
if not role_two.groups:
raise AssertionError( '%s incorrectly lost all group associations when it was marked as deleted.' % role_two.name )
+
def test_070_undelete_role( self ):
"""Testing undeleting a deleted role"""
# Logged in as admin_user
self.undelete_role( self.security.encode_id( role_two.id ), role_two.name )
+
def test_075_purge_user( self ):
"""Testing purging a user account"""
# Logged in as admin_user
@@ -352,26 +395,27 @@
if len( regular_user3.default_permissions ) != 1:
raise AssertionError( 'DefaultUserPermissions for user %s were not deleted.' % regular_user3.email )
for dup in regular_user3.default_permissions:
- role = sa_session.query( galaxy.model.Role ).get( dup.role_id )
+ role = database_contexts.galaxy_context.query( galaxy.model.Role ).get( dup.role_id )
if role.type != 'private':
raise AssertionError( 'DefaultUserPermissions for user %s are not related with the private role.' % regular_user3.email )
# Make sure History deleted
for history in regular_user3.histories:
refresh( history )
if not history.deleted:
- raise AssertionError( 'User %s has active history id %d after their account was marked as purged.' % ( regular_user3.email, hda.id ) )
+ raise AssertionError( 'User %s has active history id %d after their account was marked as purged.' % ( regular_user3.email, history.id ) )
# NOTE: Not all hdas / datasets will be deleted at the time a history is deleted - the cleanup_datasets.py script
# is responsible for this.
# Make sure UserGroupAssociations deleted
if regular_user3.groups:
- raise AssertionError( 'User %s has active group id %d after their account was marked as purged.' % ( regular_user3.email, uga.id ) )
+ raise AssertionError( 'User %s has active group after their account was marked as purged.' % ( regular_user3.email ) )
# Make sure UserRoleAssociations deleted EXCEPT FOR THE PRIVATE ROLE
if len( regular_user3.roles ) != 1:
raise AssertionError( 'UserRoleAssociations for user %s were not deleted.' % regular_user3.email )
for ura in regular_user3.roles:
- role = sa_session.query( galaxy.model.Role ).get( ura.role_id )
+ role = database_contexts.galaxy_context.query( galaxy.model.Role ).get( ura.role_id )
if role.type != 'private':
raise AssertionError( 'UserRoleAssociations for user %s are not related with the private role.' % regular_user3.email )
+
def test_080_manually_unpurge_user( self ):
"""Testing manually un-purging a user account"""
# Logged in as admin_user
@@ -381,6 +425,7 @@
regular_user3.purged = False
regular_user3.deleted = False
flush( regular_user3 )
+
def test_085_purge_group( self ):
"""Testing purging a group"""
# Logged in as admin_user
@@ -394,6 +439,7 @@
raise AssertionError( "Purging the group did not delete the GroupRoleAssociations for group_id '%s'" % group_two.id )
# Undelete the group for later test runs
self.undelete_group( self.security.encode_id( group_two.id ), group_two.name )
+
def test_090_purge_role( self ):
"""Testing purging a role"""
# Logged in as admin_user
@@ -414,6 +460,7 @@
# Make sure there are no DatasetPermissionss
if get_dataset_permissions_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the DatasetPermissionss for role_id '%s'" % role_two.id )
+
def test_095_manually_unpurge_role( self ):
"""Testing manually un-purging a role"""
# Logged in as admin_user
@@ -422,6 +469,7 @@
role_two.purged = False
flush( role_two )
self.undelete_role( self.security.encode_id( role_two.id ), role_two.name )
+
def test_999_reset_data_for_later_test_runs( self ):
"""Reseting data to enable later test runs to pass"""
# Logged in as admin_user
@@ -433,8 +481,8 @@
self.purge_role( self.security.encode_id( role.id ), role.name )
# Manually delete the role from the database
refresh( role )
- sa_session.delete( role )
- sa_session.flush()
+ database_contexts.galaxy_context.delete( role )
+ database_contexts.galaxy_context.flush()
##################
# Eliminate all groups
##################
@@ -443,8 +491,8 @@
self.purge_group( self.security.encode_id( group.id ), group.name )
# Manually delete the group from the database
refresh( group )
- sa_session.delete( group )
- sa_session.flush()
+ database_contexts.galaxy_context.delete( group )
+ database_contexts.galaxy_context.flush()
##################
# Make sure all users are associated only with their private roles
##################
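Many hunks in this file also convert the statement form of raise to the call form. The two spellings are equivalent in Python 2, but only the call form parses under Python 3, which is presumably why the tests were normalized alongside the PEP-8 fixes. A minimal illustration:

# Legacy Python 2 statement form (a syntax error under Python 3):
#     raise AssertionError, "message %s" % value
# Call form used after this commit, valid in both:
try:
    raise AssertionError( "message %s" % 42 )
except AssertionError as e:
    print( e )  # -> message 42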
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_data_security.py
--- a/test/functional/test_data_security.py
+++ b/test/functional/test_data_security.py
@@ -1,7 +1,24 @@
-from base.twilltestcase import *
-from base.test_db_util import *
+import galaxy.model
+from base.twilltestcase import TwillTestCase
+from base.test_db_util import (
+ get_user,
+ get_private_role,
+ get_latest_history_for_user,
+ get_default_history_permissions_by_history,
+ get_latest_dataset,
+ refresh,
+ get_default_user_permissions_by_user,
+ get_dataset_permissions_by_dataset,
+)
+
+regular_user1 = regular_user2 = regular_user3 = None
+admin_user = None
+admin_user_private_role = regular_user1_private_role = None
+regular_user2_private_role = None
+
class TestDataSecurity( TwillTestCase ):
+
def test_000_initiate_users( self ):
"""Ensuring all required user accounts exist"""
self.logout()
@@ -32,6 +49,7 @@
assert admin_user is not None, 'Problem retrieving user with email "test(a)bx.psu.edu" from the database'
global admin_user_private_role
admin_user_private_role = get_private_role( admin_user )
+
def test_005_default_permissions( self ):
"""Testing initial settings for DefaultUserPermissions and DefaultHistoryPermissions"""
# Logged in as admin_user
@@ -59,7 +77,8 @@
# Try deleting the admin_user's private role
self.manage_roles_and_groups_for_user( self.security.encode_id( admin_user.id ),
out_role_ids=str( admin_user_private_role.id ),
- strings_displayed = [ "You cannot eliminate a user's private role association." ] )
+ strings_displayed=[ "You cannot eliminate a user's private role association." ] )
+
def test_010_private_role_creation_and_default_history_permissions( self ):
"""Testing private role creation and changing DefaultHistoryPermissions for new histories"""
# Logged in as admin_user
@@ -127,6 +146,7 @@
if dps != dhps:
raise AssertionError( 'DatasetPermissions "%s" for dataset id %d differ from DefaultHistoryPermissions "%s" for history id %d' \
% ( str( dps ), latest_dataset.id, str( dhps ), latest_history.id ) )
+
def test_015_change_default_permissions_for_current_history( self ):
"""Testing changing DefaultHistoryPermissions for the current history"""
# logged in a regular_user1
@@ -139,7 +159,6 @@
# Make sure these are in sorted order for later comparison
actions_in = [ 'manage permissions' ]
permissions_out = [ 'DATASET_ACCESS' ]
- actions_out = [ 'access' ]
# Change DefaultHistoryPermissions for the current history
self.history_set_default_permissions( permissions_out=permissions_out, permissions_in=permissions_in, role_id=str( regular_user2_private_role.id ) )
if len( latest_history.default_permissions ) != len( actions_in ):
@@ -168,6 +187,7 @@
if dps != dhps:
raise AssertionError( 'DatasetPermissionss "%s" for dataset id %d differ from DefaultHistoryPermissions "%s"' \
% ( str( dps ), latest_dataset.id, str( dhps ) ) )
+
def test_999_reset_data_for_later_test_runs( self ):
"""Reseting data to enable later test runs to pass"""
# Logged in as regular_user2
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_dataset_features.py
--- a/test/functional/test_dataset_features.py
+++ b/test/functional/test_dataset_features.py
@@ -3,7 +3,7 @@
class TestDatasetFeatures( TwillTestCase ):
-
+
def test_0000_initiate_users( self ):
"""Ensuring all required user accounts exist"""
self.logout()
@@ -18,18 +18,18 @@
self.new_history()
latest_history = test_db_util.get_latest_history_for_user( admin_user )
assert latest_history is not None, "Problem retrieving latest_history from database"
-
+
def test_0005_initiate_data( self ):
'''Ensure that data exists for this test suite.'''
self.upload_file( '1.bed' )
-
+
def test_0010_view_dataset_params( self ):
'''Test viewing a dataset's parameters.'''
hda = self.find_hda_by_dataset_name( '1.bed' )
assert hda is not None, 'Could not retrieve latest hda from history API.'
self.visit_url( '/datasets/%s/show_params' % hda[ 'id'] )
self.check_for_strings( strings_displayed=[ '1.bed', 'uploaded' ] )
-
+
def test_0015_report_dataset_error( self ):
'''Load and submit the report error form. This should show an error message, as the functional test instance should not be configured for email.'''
hda = test_db_util.get_latest_hda()
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_get_data.py
--- a/test/functional/test_get_data.py
+++ b/test/functional/test_get_data.py
@@ -1,7 +1,12 @@
-import galaxy.model
-from galaxy.model.orm import *
from base.twilltestcase import TwillTestCase
-from base.test_db_util import *
+from base.test_db_util import (
+ get_user,
+ get_latest_history_for_user,
+ get_latest_hda,
+)
+
+admin_user = None
+
class UploadData( TwillTestCase ):
@@ -25,7 +30,7 @@
self.is_history_empty()
return get_latest_history_for_user( user )
- def test_0005_upload_file( self ):
+ def test_0005_upload_file( self ):
"""
Test uploading 1.bed, NOT setting the file format
"""
@@ -39,13 +44,13 @@
self.delete_history( id=self.security.encode_id( history.id ) )
- def test_0006_upload_file( self ):
+ def test_0006_upload_file( self ):
"""
Test uploading 1.bed.spaces, with space to tab selected, NOT setting the file format
"""
history = self.create_fresh_history( admin_user )
- self.upload_file( '1.bed.spaces', space_to_tab = True )
+ self.upload_file( '1.bed.spaces', space_to_tab=True )
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
self.verify_dataset_correctness( '1.bed', hid=str( hda.hid ) )
@@ -209,13 +214,13 @@
history = self.create_fresh_history( admin_user )
# lped data types include a ped_file and a map_file ( which is binary )
- self.upload_file( None, ftype='lped', metadata = [ { 'name':'base_name', 'value':'rgenetics' } ], composite_data = [ { 'name':'ped_file', 'value':'tinywga.ped' }, { 'name':'map_file', 'value':'tinywga.map'} ] )
+ self.upload_file( None, ftype='lped', metadata=[ { 'name':'base_name', 'value':'rgenetics' } ], composite_data=[ { 'name':'ped_file', 'value':'tinywga.ped' }, { 'name':'map_file', 'value':'tinywga.map'} ] )
# Get the latest hid for testing
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
# We'll test against the resulting ped file and map file for correctness
- self.verify_composite_datatype_file_content( 'tinywga.ped', str( hda.id ), base_name = 'rgenetics.ped' )
- self.verify_composite_datatype_file_content( 'tinywga.map', str( hda.id ), base_name = 'rgenetics.map' )
+ self.verify_composite_datatype_file_content( 'tinywga.ped', str( hda.id ), base_name='rgenetics.ped' )
+ self.verify_composite_datatype_file_content( 'tinywga.map', str( hda.id ), base_name='rgenetics.map' )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"metadata_base_name", "rgenetics", use_string_contains=True )
@@ -229,13 +234,13 @@
history = self.create_fresh_history( admin_user )
# lped data types include a ped_file and a map_file ( which is binary )
- self.upload_file( None, ftype='lped', metadata = [ { 'name':'base_name', 'value':'rgenetics' } ], composite_data = [ { 'name':'ped_file', 'value':'tinywga.ped', 'space_to_tab':True }, { 'name':'map_file', 'value':'tinywga.map'} ] )
+ self.upload_file( None, ftype='lped', metadata=[ { 'name':'base_name', 'value':'rgenetics' } ], composite_data=[ { 'name':'ped_file', 'value':'tinywga.ped', 'space_to_tab':True }, { 'name':'map_file', 'value':'tinywga.map'} ] )
# Get the latest hid for testing
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
# We'll test against the resulting ped file and map file for correctness
- self.verify_composite_datatype_file_content( 'tinywga.ped.space_to_tab', str( hda.id ), base_name = 'rgenetics.ped' )
- self.verify_composite_datatype_file_content( 'tinywga.map', str( hda.id ), base_name = 'rgenetics.map' )
+ self.verify_composite_datatype_file_content( 'tinywga.ped.space_to_tab', str( hda.id ), base_name='rgenetics.ped' )
+ self.verify_composite_datatype_file_content( 'tinywga.map', str( hda.id ), base_name='rgenetics.map' )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"metadata_base_name", "rgenetics", use_string_contains=True )
@@ -250,8 +255,8 @@
# pbed data types include a bim_file, a bed_file and a fam_file
self.upload_file( None, ftype='pbed',
- metadata = [ { 'name':'base_name', 'value':'rgenetics' } ],
- composite_data = [
+ metadata=[ { 'name':'base_name', 'value':'rgenetics' } ],
+ composite_data=[
{ 'name':'bim_file', 'value':'tinywga.bim' },
{ 'name':'bed_file', 'value':'tinywga.bed' },
{ 'name':'fam_file', 'value':'tinywga.fam' } ])
@@ -259,9 +264,9 @@
hda = get_latest_hda()
assert hda is not None, "Problem retrieving hda from database"
# We'll test against the resulting ped file and map file for correctness
- self.verify_composite_datatype_file_content( 'tinywga.bim', str( hda.id ), base_name = 'rgenetics.bim' )
- self.verify_composite_datatype_file_content( 'tinywga.bed', str( hda.id ), base_name = 'rgenetics.bed' )
- self.verify_composite_datatype_file_content( 'tinywga.fam', str( hda.id ), base_name = 'rgenetics.fam' )
+ self.verify_composite_datatype_file_content( 'tinywga.bim', str( hda.id ), base_name='rgenetics.bim' )
+ self.verify_composite_datatype_file_content( 'tinywga.bed', str( hda.id ), base_name='rgenetics.bed' )
+ self.verify_composite_datatype_file_content( 'tinywga.fam', str( hda.id ), base_name='rgenetics.fam' )
self.check_hda_json_for_key_value( self.security.encode_id( hda.id ),
"metadata_base_name", "rgenetics", use_string_contains=True )
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_history_functions.py
--- a/test/functional/test_history_functions.py
+++ b/test/functional/test_history_functions.py
@@ -4,6 +4,7 @@
from base.test_db_util import sa_session
from base.twilltestcase import *
+
class TestHistory( TwillTestCase ):
def test_000_history_behavior_between_logout_login( self ):
@@ -16,7 +17,7 @@
global anonymous_history
anonymous_history = (
sa_session.query( galaxy.model.History )
- .filter( and_( galaxy.model.History.table.c.deleted==False, galaxy.model.History.table.c.name==name ) )
+ .filter( and_( galaxy.model.History.table.c.deleted == False, galaxy.model.History.table.c.name == name ) )
.order_by( desc( galaxy.model.History.table.c.create_time ) )
.first()
)
@@ -26,7 +27,7 @@
self.login( email='test1(a)bx.psu.edu', username='regular-user1' )
global regular_user1
regular_user1 = sa_session.query( galaxy.model.User ) \
- .filter( galaxy.model.User.table.c.email=='test1(a)bx.psu.edu' ) \
+ .filter( galaxy.model.User.table.c.email == 'test1(a)bx.psu.edu' ) \
.first()
assert regular_user1 is not None, 'Problem retrieving user with email "test1(a)bx.psu.edu" from the database'
# Current history should be anonymous_history
@@ -39,21 +40,21 @@
self.login( email='test2(a)bx.psu.edu', username='regular-user2' )
global regular_user2
regular_user2 = sa_session.query( galaxy.model.User ) \
- .filter( galaxy.model.User.table.c.email=='test2(a)bx.psu.edu' ) \
+ .filter( galaxy.model.User.table.c.email == 'test2(a)bx.psu.edu' ) \
.first()
assert regular_user2 is not None, 'Problem retrieving user with email "test2(a)bx.psu.edu" from the database'
self.logout()
self.login( email='test3(a)bx.psu.edu', username='regular-user3' )
global regular_user3
regular_user3 = sa_session.query( galaxy.model.User ) \
- .filter( galaxy.model.User.table.c.email=='test3(a)bx.psu.edu' ) \
+ .filter( galaxy.model.User.table.c.email == 'test3(a)bx.psu.edu' ) \
.first()
assert regular_user3 is not None, 'Problem retrieving user with email "test3(a)bx.psu.edu" from the database'
self.logout()
self.login( email='test(a)bx.psu.edu', username='admin-user' )
global admin_user
admin_user = sa_session.query( galaxy.model.User ) \
- .filter( galaxy.model.User.table.c.email=='test(a)bx.psu.edu' ) \
+ .filter( galaxy.model.User.table.c.email == 'test(a)bx.psu.edu' ) \
.one()
assert admin_user is not None, 'Problem retrieving user with email "test(a)bx.psu.edu" from the database'
# Get the admin_user private role for later use
@@ -66,8 +67,8 @@
if not admin_user_private_role:
raise AssertionError( "Private role not found for user '%s'" % admin_user.email )
historyA = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert historyA is not None, "Problem retrieving historyA from database"
@@ -76,8 +77,8 @@
self.logout()
self.login( email=admin_user.email )
historyB = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert historyB is not None, "Problem retrieving historyB from database"
@@ -87,23 +88,23 @@
"""Testing deleting histories"""
# Logged in as admin_user
historyB = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert historyB is not None, "Problem retrieving historyB from database"
self.delete_history( self.security.encode_id( historyB.id ) )
sa_session.refresh( historyB )
if not historyB.deleted:
- raise AssertionError, "Problem deleting history id %d" % historyB.id
+ raise AssertionError( "Problem deleting history id %d" % historyB.id )
# Since we deleted the current history, make sure the history frame was refreshed
self.check_history_for_string( 'Your history is empty.' )
# We'll now test deleting a list of histories
# After deleting the current history, a new one should have been created
global history1
history1 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history1 is not None, "Problem retrieving history1 from database"
@@ -111,8 +112,8 @@
self.new_history( name=urllib.quote( 'history2' ) )
global history2
history2 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history2 is not None, "Problem retrieving history2 from database"
@@ -123,26 +124,26 @@
self.check_history_for_string( 'Your history is empty.' )
try:
self.view_stored_active_histories( strings_displayed=[ history1.name ] )
- raise AssertionError, "History %s is displayed in the active history list after it was deleted" % history1.name
+ raise AssertionError( "History %s is displayed in the active history list after it was deleted" % history1.name )
except:
pass
self.view_stored_deleted_histories( strings_displayed=[ history1.name ] )
try:
self.view_stored_active_histories( strings_displayed=[ history2.name ] )
- raise AssertionError, "History %s is displayed in the active history list after it was deleted" % history2.name
+ raise AssertionError( "History %s is displayed in the active history list after it was deleted" % history2.name )
except:
pass
self.view_stored_deleted_histories( strings_displayed=[ history2.name ] )
sa_session.refresh( history1 )
if not history1.deleted:
- raise AssertionError, "Problem deleting history id %d" % history1.id
+ raise AssertionError( "Problem deleting history id %d" % history1.id )
if not history1.default_permissions:
- raise AssertionError, "Default permissions were incorrectly deleted from the db for history id %d when it was deleted" % history1.id
+ raise AssertionError( "Default permissions were incorrectly deleted from the db for history id %d when it was deleted" % history1.id )
sa_session.refresh( history2 )
if not history2.deleted:
- raise AssertionError, "Problem deleting history id %d" % history2.id
+ raise AssertionError( "Problem deleting history id %d" % history2.id )
if not history2.default_permissions:
- raise AssertionError, "Default permissions were incorrectly deleted from the db for history id %d when it was deleted" % history2.id
+ raise AssertionError( "Default permissions were incorrectly deleted from the db for history id %d when it was deleted" % history2.id )
# Current history is empty
self.history_options( user=True )
@@ -151,12 +152,12 @@
# Logged in as admin_user
global history3
history3 = sa_session.query( galaxy.model.History ) \
- .filter( galaxy.model.History.table.c.deleted==False ) \
+ .filter( galaxy.model.History.table.c.deleted == False ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history3 is not None, "Problem retrieving history3 from database"
if history3.deleted:
- raise AssertionError, "History id %d deleted when it should not be" % latest_history.id
+ raise AssertionError( "History id %d deleted when it should not be" % latest_history.id )
self.rename_history( self.security.encode_id( history3.id ), history3.name, new_name=urllib.quote( 'history 3' ) )
sa_session.refresh( history3 )
@@ -192,7 +193,7 @@
# Make sure history3 is now accessible.
sa_session.refresh( history3 )
if not history3.importable:
- raise AssertionError, "History 3 is not marked as importable after make_accessible_via_link"
+ raise AssertionError( "History 3 is not marked as importable after make_accessible_via_link" )
# Try importing history3
#Importing your own history was enabled in 5248:dc9efb540f61.
#self.import_history_via_url( self.security.encode_id( history3.id ),
@@ -235,13 +236,13 @@
strings_displayed_after_submit=[ 'has been created.' ] )
global history3_clone1
history3_clone1 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==regular_user1.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == regular_user1.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history3_clone1 is not None, "Problem retrieving history3_clone1 from database"
# Check list of histories to make sure shared history3 was cloned
- strings_displayed=[ "Copy of '%s' shared by '%s'" % ( history3.name, admin_user.email ) ]
+ strings_displayed = [ "Copy of '%s' shared by '%s'" % ( history3.name, admin_user.email ) ]
self.view_stored_active_histories( strings_displayed=strings_displayed )
def test_035_clone_current_history( self ):
@@ -255,8 +256,8 @@
self.upload_file( '2.bed', dbkey='hg18' )
hda_2_bed = (
sa_session.query( galaxy.model.HistoryDatasetAssociation )
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history3.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id == history3.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name == '2.bed' ) )
.first() )
assert hda_2_bed is not None, "Problem retrieving hda_2_bed from database"
self.delete_history_item( str( hda_2_bed.id ) )
@@ -264,8 +265,8 @@
self.upload_file( '3.bed', dbkey='hg18' )
hda_3_bed = (
sa_session.query( galaxy.model.HistoryDatasetAssociation )
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history3.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='3.bed' ) )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id == history3.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name == '3.bed' ) )
.first() )
assert hda_3_bed is not None, "Problem retrieving hda_3_bed from database"
self.delete_history_item( str( hda_3_bed.id ) )
@@ -276,8 +277,8 @@
strings_displayed_after_submit=['has been created.' ] )
global history3_clone2
history3_clone2 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history3_clone2 is not None, "Problem retrieving history3_clone2 from database"
@@ -288,14 +289,14 @@
self.switch_history( id=self.security.encode_id( history3_clone2.id ), name=history3_clone2.name )
hda_2_bed = (
sa_session.query( galaxy.model.HistoryDatasetAssociation )
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history3_clone2.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id == history3_clone2.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name == '2.bed' ) )
.first() )
assert hda_2_bed is not None, "Problem retrieving hda_2_bed from database"
hda_3_bed = (
sa_session.query( galaxy.model.HistoryDatasetAssociation )
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history3_clone2.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='3.bed' ) )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id == history3_clone2.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name == '3.bed' ) )
.first() )
assert hda_3_bed is not None, "Problem retrieving hda_3_bed from database"
@@ -315,15 +316,15 @@
global history3_clone3
history3_clone3 = (
sa_session.query( galaxy.model.History )
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) )
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) )
.order_by( desc( galaxy.model.History.table.c.create_time ) )
.first()
)
assert history3_clone3 is not None, "Problem retrieving history3_clone3 from database"
# Check list of histories to make sure shared history3 was cloned
- self.view_stored_active_histories( strings_displayed = ["Copy of '%s'" % history3.name ] )
+ self.view_stored_active_histories( strings_displayed=[ "Copy of '%s'" % history3.name ] )
# Switch to the cloned history to make sure ONLY activatable datasets were cloned
self.switch_history( id=self.security.encode_id( history3_clone3.id ) )
@@ -332,7 +333,7 @@
try:
self.check_history_for_exact_string( '"deleted": true', show_deleted=True )
#self.check_history_for_string( 'This dataset has been deleted.', show_deleted=True )
- raise AssertionError, "Deleted datasets incorrectly included in cloned history history3_clone3"
+ raise AssertionError( "Deleted datasets incorrectly included in cloned history history3_clone3" )
except:
pass
@@ -342,14 +343,14 @@
self.new_history()
global history4
history4 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history4 is not None, "Problem retrieving history4 from database"
self.rename_history( self.security.encode_id( history4.id ), history4.name, new_name=urllib.quote( 'history 4' ) )
sa_session.refresh( history4 )
- # Galaxy's new history sharing code does not yet support sharing multiple histories; when support for sharing multiple histories is added,
+ # Galaxy's new history sharing code does not yet support sharing multiple histories; when support for sharing multiple histories is added,
# this test will be uncommented and updated.
"""
self.upload_file( '2.bed', dbkey='hg18' )
@@ -377,8 +378,8 @@
self.new_history()
global history5
history5 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history5 is not None, "Problem retrieving history5 from database"
@@ -405,14 +406,14 @@
history5_dataset1 = hda.dataset
break
assert history5_dataset1 is not None, "Problem retrieving history5_dataset1 from the database"
- # The permissions on the dataset should be restricted from sharing with anyone due to the
+ # The permissions on the dataset should be restricted from sharing with anyone due to the
# inherited history permissions
dataset_permissions = [ a.action for a in history5_dataset1.actions ]
dataset_permissions.sort()
if dataset_permissions != history5_default_permissions:
err_msg = "Dataset permissions for history5_dataset1 (%s) were not correctly inherited from history permissions (%s)" \
% ( str( dataset_permissions ), str( history5_default_permissions ) )
- raise AssertionError, err_msg
+ raise AssertionError( err_msg )
# Make sure when we logout and login, the history default permissions are preserved
self.logout()
self.login( email=admin_user.email )
@@ -420,7 +421,7 @@
current_history_permissions = [ dhp.action for dhp in history5.default_permissions ]
current_history_permissions.sort()
if current_history_permissions != history5_default_permissions:
- raise AssertionError, "With logout and login, the history default permissions are not preserved"
+ raise AssertionError( "With logout and login, the history default permissions are not preserved" )
def test_050_sharing_restricted_history_by_making_datasets_public( self ):
"""Testing sharing a restricted history by making the datasets public"""
@@ -440,8 +441,8 @@
strings_displayed_after_submit=[ 'has been created.' ] )
global history5_clone1
history5_clone1 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==regular_user1.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == regular_user1.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history5_clone1 is not None, "Problem retrieving history5_clone1 from database"
@@ -462,21 +463,21 @@
'The following datasets can be shared with %s by updating their permissions' % regular_user2.email ]
self.share_current_history( regular_user2.email,
strings_displayed_after_submit=strings_displayed_after_submit,
- action='private' )
+ action='private' )
# We should now have a new sharing role
global sharing_role
role_name = 'Sharing role for: %s, %s' % ( admin_user.email, regular_user2.email )
- sharing_role = sa_session.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name==role_name ).first()
+ sharing_role = sa_session.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name == role_name ).first()
if not sharing_role:
# May have created a sharing role in a previous functional test suite from the opposite direction.
role_name = 'Sharing role for: %s, %s' % ( regular_user2.email, admin_user.email )
sharing_role = sa_session.query( galaxy.model.Role ) \
- .filter( and_( galaxy.model.Role.table.c.type==role_type,
- galaxy.model.Role.table.c.name==role_name ) ) \
+ .filter( and_( galaxy.model.Role.table.c.type == role_type,
+ galaxy.model.Role.table.c.name == role_name ) ) \
.first()
if not sharing_role:
raise AssertionError( "Privately sharing a dataset did not properly create a sharing role" )
- # The DATASET_ACCESS permission on 2.bed was originally associated with admin_user's private role.
+ # The DATASET_ACCESS permission on 2.bed was originally associated with admin_user's private role.
# Since we created a new sharing role for 2.bed, the original permission should have been eliminated,
# replaced with the sharing role.
history5_dataset2 = None
@@ -498,8 +499,8 @@
strings_displayed_after_submit=[ 'has been created.' ] )
global history5_clone2
history5_clone2 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==regular_user2.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == regular_user2.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history5_clone2 is not None, "Problem retrieving history5_clone2 from database"
@@ -512,13 +513,13 @@
self.check_history_for_string( '2.bed' )
# Get both new hdas from the db that were created for the shared history
hda_1_bed = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone2.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='1.bed' ) ) \
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id == history5_clone2.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name == '1.bed' ) ) \
.first()
assert hda_1_bed is not None, "Problem retrieving hda_1_bed from database"
hda_2_bed = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone2.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) ) \
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id == history5_clone2.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name == '2.bed' ) ) \
.first()
assert hda_2_bed is not None, "Problem retrieving hda_2_bed from database"
# Make sure 1.bed is accessible since it is public
@@ -549,7 +550,7 @@
self.visit_page( "root/history_options" )
try:
self.check_page_for_string( 'List</a> histories shared with you by others' )
- raise AssertionError, "history5 still shared with regular_user2 after unsharing it with that user."
+ raise AssertionError( "history5 still shared with regular_user2 after unsharing it with that user." )
except:
pass
@@ -558,7 +559,8 @@
email = '%s,%s' % ( regular_user2.email, regular_user3.email )
strings_displayed_after_submit = [
'The following datasets can be shared with %s with no changes' % email,
- 'The following datasets can be shared with %s by updating their permissions' % email ]
+ 'The following datasets can be shared with %s by updating their permissions' % email
+ ]
# history5 will be shared with regular_user1, regular_user2 and regular_user3
self.share_current_history( email,
@@ -576,8 +578,8 @@
global history5_clone3
history5_clone3 = (
sa_session.query( galaxy.model.History )
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==regular_user2.id ) )
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == regular_user2.id ) )
.order_by( desc( galaxy.model.History.table.c.create_time ) )
.first() )
assert history5_clone3 is not None, "Problem retrieving history5_clone3 from database"
@@ -592,14 +594,14 @@
# Get both new hdas from the db that were created for the shared history
hda_1_bed = (
sa_session.query( galaxy.model.HistoryDatasetAssociation )
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone3.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='1.bed' ) )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id == history5_clone3.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name == '1.bed' ) )
.first() )
assert hda_1_bed is not None, "Problem retrieving hda_1_bed from database"
hda_2_bed = (
sa_session.query( galaxy.model.HistoryDatasetAssociation )
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone3.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id == history5_clone3.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name == '2.bed' ) )
.first() )
assert hda_2_bed is not None, "Problem retrieving hda_2_bed from database"
@@ -622,8 +624,8 @@
global history5_clone4
history5_clone4 = (
sa_session.query( galaxy.model.History )
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==regular_user3.id ) )
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == regular_user3.id ) )
.order_by( desc( galaxy.model.History.table.c.create_time ) )
.first() )
assert history5_clone4 is not None, "Problem retrieving history5_clone4 from database"
@@ -638,14 +640,14 @@
# Get both new hdas from the db that were created for the shared history
hda_1_bed = (
sa_session.query( galaxy.model.HistoryDatasetAssociation )
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone4.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='1.bed' ) )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id == history5_clone4.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name == '1.bed' ) )
.first() )
assert hda_1_bed is not None, "Problem retrieving hda_1_bed from database"
hda_2_bed = (
sa_session.query( galaxy.model.HistoryDatasetAssociation )
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone4.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id == history5_clone4.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name == '2.bed' ) )
.first() )
assert hda_2_bed is not None, "Problem retrieving hda_2_bed from database"
# Make sure 1.bed is accessible since it is public
@@ -653,7 +655,7 @@
# Make sure 2.bed is not accessible since it is protected
try:
self.display_history_item( str( hda_2_bed.id ), strings_displayed=[ 'chr1' ] )
- raise AssertionError, "History item 2.bed is accessible by user %s when is should not be" % regular_user3.email
+ raise AssertionError( "History item 2.bed is accessible by user %s when is should not be" % regular_user3.email )
except:
pass
@@ -696,7 +698,7 @@
self.visit_page( "root/history_options" )
try:
self.check_page_for_string( 'List</a> histories shared with you by others' )
- raise AssertionError, "history5 still shared with regular_user2 after unshaing it with that user."
+ raise AssertionError( "history5 still shared with regular_user2 after unshaing it with that user." )
except:
pass
@@ -705,7 +707,7 @@
self.visit_page( "root/history_options" )
try:
self.check_page_for_string( 'List</a> histories shared with you by others' )
- raise AssertionError, "history5 still shared with regular_user3 after unshaing it with that user."
+ raise AssertionError( "history5 still shared with regular_user3 after unshaing it with that user." )
except:
pass
self.logout()
@@ -725,8 +727,8 @@
self.new_history( name=urllib.quote( 'show hide deleted datasets' ) )
latest_history = (
sa_session.query( galaxy.model.History )
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) )
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) )
.order_by( desc( galaxy.model.History.table.c.create_time ) )
.first() )
assert latest_history is not None, "Problem retrieving latest_history from database"
@@ -769,8 +771,8 @@
# Deleting the current history in the last method created a new history
latest_history = (
sa_session.query( galaxy.model.History )
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) )
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) )
.order_by( desc( galaxy.model.History.table.c.create_time ) )
.first() )
assert latest_history is not None, "Problem retrieving latest_history from database"
@@ -786,14 +788,14 @@
self.visit_url( "%s/history/?show_deleted=False" % self.url )
self.check_page_for_string( '1.bed' )
self.check_page_for_string( 'hg15' )
- self.assertEqual ( len( self.get_history_as_data_list() ), 1 )
+ self.assertEqual( len( self.get_history_as_data_list() ), 1 )
# Delete the history item
self.delete_history_item( str( latest_hda.id ), strings_displayed=[ "Your history is empty" ] )
- self.assertEqual ( len( self.get_history_as_data_list() ), 0 )
+ self.assertEqual( len( self.get_history_as_data_list() ), 0 )
# Try deleting an invalid hid
try:
self.delete_history_item( 'XXX' )
- raise AssertionError, "Inproperly able to delete hda_id 'XXX' which is not an integer"
+ raise AssertionError( "Inproperly able to delete hda_id 'XXX' which is not an integer" )
except:
pass
# Undelete the history item
@@ -809,8 +811,8 @@
# logged in as admin_user
self.new_history( name=urllib.quote( 'copy history items' ) )
history6 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history6 is not None, "Problem retrieving history6 from database"
@@ -820,7 +822,7 @@
.first()
assert hda1 is not None, "Problem retrieving hda1 from database"
# We'll just test copying 1 hda
- source_dataset_ids=self.security.encode_id( hda1.id )
+ source_dataset_ids = self.security.encode_id( hda1.id )
# The valid list of target histories is only the user's active histories
all_target_history_ids = [ self.security.encode_id( hda.id ) for hda in admin_user.active_histories ]
# Since history1 and history2 have been deleted, they should not be displayed in the list of target histories
@@ -834,12 +836,12 @@
deleted_history_ids=deleted_history_ids )
sa_session.refresh( history6 )
if len( history6.datasets ) != 2:
- raise AssertionError, "Copying hda1 to the current history failed, history 6 has %d datasets, but should have 2" % len( history6.datasets )
+ raise AssertionError( "Copying hda1 to the current history failed, history 6 has %d datasets, but should have 2" % len( history6.datasets ) )
# Test copying 1 hda to another history
self.new_history( name=urllib.quote( 'copy history items - 2' ) )
history7 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
assert history7 is not None, "Problem retrieving history7 from database"
@@ -848,7 +850,7 @@
target_history_id = self.security.encode_id( history7.id )
all_target_history_ids = [ self.security.encode_id( hda.id ) for hda in admin_user.active_histories ]
# Test copying to a history that is not the current history
- target_history_ids=[ self.security.encode_id( history7.id ) ]
+ self.security.encode_id( history7.id )
self.copy_history_item( source_dataset_id=source_dataset_ids,
target_history_id=target_history_id,
all_target_history_ids=all_target_history_ids,
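The hunks above apply the same two mechanical cleanups throughout the test module: the Python 2-only statement form "raise AssertionError, msg" becomes the call form that Python 3 also accepts, and comparison operators gain PEP 8 spacing. A minimal sketch of the pattern; the check function and its message are illustrative, not part of the commit:

    # Old, Python 2-only syntax (a SyntaxError on Python 3):
    #     raise AssertionError, "wrong dataset count"
    # New syntax, valid on both: construct the exception explicitly.
    def check_dataset_count( expected, actual ):
        # PEP 8 spacing: 'actual != expected', not 'actual!=expected'.
        if actual != expected:
            raise AssertionError( "expected %d datasets, got %d" % ( expected, actual ) )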
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_library_features.py
--- a/test/functional/test_library_features.py
+++ b/test/functional/test_library_features.py
@@ -1,6 +1,8 @@
from base.twilltestcase import *
from base.test_db_util import *
+
+# TODO: Functional tests start failing at 070, fix or eliminate rest of tests.
class TestLibraryFeatures( TwillTestCase ):
def test_000_initiate_users( self ):
"""Ensuring all required user accounts exist"""
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_library_security.py
--- a/test/functional/test_library_security.py
+++ b/test/functional/test_library_security.py
@@ -1,6 +1,8 @@
from base.twilltestcase import *
from base.test_db_util import *
+
+# TODO: Functional tests start failing at 050, fix or eliminate rest of tests.
class TestLibrarySecurity( TwillTestCase ):
def test_000_initiate_users( self ):
"""Ensuring all required user accounts exist"""
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_library_templates.py
--- a/test/functional/test_library_templates.py
+++ b/test/functional/test_library_templates.py
@@ -1,7 +1,35 @@
-from base.twilltestcase import *
-from base.test_db_util import *
+from base.twilltestcase import TwillTestCase
+from base.test_db_util import (
+ get_user,
+ get_private_role,
+ get_form,
+ get_library,
+ get_folder,
+ get_user_address,
+ get_latest_ldda_by_name,
+ get_latest_hda,
+ mark_obj_deleted,
+ refresh
+)
+import galaxy.model
+
+AddressField_form = None
+CheckboxField_form = None
+SelectField_form = None
+TextArea_form = None
+TextField_form = None
+WorkflowField_form = None
+address_field_name = checkbox_field_name = select_field_name = None
+workflow_field_name = textfield_name = textarea_name = None
+user_address1 = user_address2 = None
+ldda1 = library1 = library2 = library3 = library4 = library5 = library6 = None
+folder1 = folder2 = folder3 = folder4 = folder5 = folder6 = None
+admin_user = None
+regular_user1 = regular_user2 = regular_user3 = None
+
class TestLibraryFeatures( TwillTestCase ):
+
def test_000_initiate_users( self ):
"""Ensuring all required user accounts exist"""
self.logout()
@@ -32,6 +60,7 @@
assert admin_user is not None, 'Problem retrieving user with email "test@bx.psu.edu" from the database'
global admin_user_private_role
admin_user_private_role = get_private_role( admin_user )
+
def test_005_create_library_templates( self ):
"""Testing creating several LibraryInformationTemplate form definitions"""
# Logged in as admin_user
@@ -109,6 +138,7 @@
library5 = get_library( 'library5', 'library5 description', 'library5 synopsis' )
global library6
library6 = get_library( 'library6', 'library6 description', 'library6 synopsis' )
+
def test_015_add_template_to_library1( self ):
"""Testing add an inheritable template containing an AddressField to library1"""
# Logged in as admin_user
@@ -119,6 +149,7 @@
form_id=self.security.encode_id( AddressField_form.id ),
form_name=AddressField_form.name,
library_id=self.security.encode_id( library1.id ) )
+
def test_020_add_folder_to_library1( self ):
"""Testing adding a folder to library1"""
# Logged in as admin_user
@@ -133,6 +164,7 @@
description=description )
global folder1
folder1 = get_folder( folder.id, name, description )
+
def test_025_check_library1( self ):
"""Checking library1 and its root folder"""
# Logged in as admin_user
@@ -146,6 +178,7 @@
template_refresh_field_name=address_field_name,
strings_displayed=[ AddressField_form.name,
'This is an inherited template and is not required to be used with this folder' ] )
+
def test_030_add_dataset_to_folder1( self ):
"""Testing adding a ldda1 to folder1, and adding a new UserAddress on the upload form."""
# Logged in as admin_user
@@ -189,6 +222,7 @@
self.security.encode_id( ldda1.id ),
ldda1.name,
strings_displayed=[ 'Dick' ] )
+
def test_035_edit_contents_of_ldda1_template( self ):
"""Testing editing the contents of ldda1 AddressField template by adding a new user_address"""
short_desc = 'Home'
@@ -212,6 +246,7 @@
global user_address2
user_address2 = get_user_address( admin_user, short_desc )
assert user_address2 is not None, 'Problem retrieving user_address2 from the database'
+
def test_040_edit_contents_of_folder1_template( self ):
"""Testing editing the contents of folder1 AddressField template"""
# Make sure the template and contents were inherited to folder1
@@ -223,6 +258,7 @@
strings_displayed=[ AddressField_form.name,
'This is an inherited template and is not required to be used with this folder' ],
strings_displayed_after_submit=[ 'Richard' ] )
+
def test_045_add_dataset_to_folder1( self ):
"""Testing adding another ldda to folder1"""
# The upload form should now inherit user_address2 on the upload form
@@ -245,6 +281,7 @@
ldda_name=ldda1.name,
template_refresh_field_name=address_field_name,
strings_displayed=[ user_address2.desc ] )
+
def test_050_add_template_to_library2( self ):
""" Testing add an inheritable template containing an CheckboxField to library2"""
# Add a template containing an CheckboxField to library1
@@ -257,7 +294,8 @@
# Check the CheckboxField to make sure the template contents are inherited
self.library_info( 'library_admin',
self.security.encode_id( library2.id ),
- template_fields = [ ( checkbox_field_name, '1' ) ] )
+ template_fields=[ ( checkbox_field_name, '1' ) ] )
+
def test_055_add_folder2_to_library2( self ):
"""Testing adding a folder to library2"""
# Logged in as admin_user
@@ -272,12 +310,14 @@
description=description )
global folder2
folder2 = get_folder( folder.id, name, description )
+
def test_060_check_library2( self ):
"""Checking library2 and its root folder"""
# Logged in as admin_user
self.browse_library( cntrller='library_admin',
library_id=self.security.encode_id( library2.id ),
strings_displayed=[ folder2.name, folder2.description ] )
+
def test_065_save_folder2_inherited_template( self ):
"""Saving the inherited template for folder2"""
# Logged in as admin_user
@@ -288,6 +328,7 @@
template_fields=[ ( checkbox_field_name, '1' ) ],
strings_displayed=[ CheckboxField_form.name,
'This is an inherited template and is not required to be used with this folder' ] )
+
def test_070_add_ldda_to_folder2( self ):
"""
Testing adding a new library dataset to library2's folder, making sure the CheckboxField is
@@ -316,6 +357,7 @@
self.security.encode_id( ldda.id ),
ldda.name,
strings_displayed=[ 'CheckboxField', 'checked' ] )
+
def test_080_add_template_to_library3( self ):
""" Testing add an inheritable template containing an SelectField to library3"""
# Logged in as admin_user
@@ -330,6 +372,7 @@
self.library_info( 'library_admin',
self.security.encode_id( library3.id ),
template_fields=[ ( select_field_name, 'Option1' ) ] )
+
def test_085_add_folder3_to_library3( self ):
"""Testing adding a folder to library3"""
# Logged in as admin_user
@@ -344,12 +387,14 @@
description=description )
global folder3
folder3 = get_folder( folder.id, name, description )
+
def test_090_check_library3( self ):
"""Checking library3 and its root folder"""
# Logged in as admin_user
self.browse_library( cntrller='library_admin',
library_id=self.security.encode_id( library3.id ),
strings_displayed=[ folder3.name, folder3.description ] )
+
def test_095_save_folder3_inherited_template( self ):
"""Saving the inherited template for folder3"""
# Logged in as admin_user
@@ -361,6 +406,7 @@
strings_displayed=[ SelectField_form.name,
'This is an inherited template and is not required to be used with this folder',
'Option1' ] )
+
def test_100_add_ldda_to_folder3( self ):
"""
Testing adding a new library dataset to library3's folder, making sure the SelectField setting is correct on the upload form.
@@ -411,6 +457,7 @@
self.security.encode_id( ldda.id ),
ldda.name,
strings_displayed=[ 'SelectField', 'Option1' ] )
+
def test_105_add_template_to_library4( self ):
""" Testing add an inheritable template containing an TextArea to library4"""
# Logged in as admin_user
@@ -425,6 +472,7 @@
self.library_info( 'library_admin',
self.security.encode_id( library4.id ),
template_fields=[ ( textarea_name, 'This text should be inherited' ) ] )
+
def test_110_add_folder4_to_library4( self ):
"""Testing adding a folder to library4"""
# Logged in as admin_user
@@ -439,6 +487,7 @@
description=description )
global folder4
folder4 = get_folder( folder.id, name, description )
+
def test_115_save_folder4_inherited_template( self ):
"""Saving the inherited template for folder4"""
# Logged in as admin_user
@@ -450,6 +499,7 @@
strings_displayed=[ TextArea_form.name,
'This is an inherited template and is not required to be used with this folder',
'This text should be inherited' ] )
+
def test_120_add_ldda_to_folder4( self ):
"""
Testing adding a new library dataset to library4's folder, making sure the TextArea setting is correct on the upload form.
@@ -477,6 +527,7 @@
self.security.encode_id( ldda.id ),
ldda.name,
strings_displayed=[ 'TextArea', 'This text should be inherited' ] )
+
def test_125_add_template_to_library5( self ):
""" Testing add an inheritable template containing an TextField to library5"""
# Add an inheritable template to library5
@@ -490,6 +541,7 @@
self.library_info( 'library_admin',
self.security.encode_id( library5.id ),
template_fields=[ ( textfield_name, 'This text should be inherited' ) ] )
+
def test_130_add_folder5_to_library5( self ):
"""Testing adding a folder to library5"""
# Logged in as admin_user
@@ -504,6 +556,7 @@
description=description )
global folder5
folder5 = get_folder( folder.id, name, description )
+
def test_135_save_folder5_inherited_template( self ):
"""Saving the inherited template for folder5"""
# Logged in as admin_user
@@ -515,6 +568,7 @@
strings_displayed=[ TextField_form.name,
'This is an inherited template and is not required to be used with this folder',
'This text should be inherited' ] )
+
def test_140_add_ldda_to_folder5( self ):
"""
Testing adding a new library dataset to library5's folder, making sure the TextField setting is correct on the upload form.
@@ -542,6 +596,7 @@
self.security.encode_id( ldda.id ),
ldda.name,
strings_displayed=[ 'TextField', 'This text should be inherited' ] )
+
def test_145_edit_library5_template_layout( self ):
"""Test editing the layout of library5's template"""
# Currently there is only a TextField, and we'll add a TextArea.
@@ -553,6 +608,7 @@
field_label_1=TextArea_form.name,
field_helptext_1='%s help' % TextArea_form.name,
field_default_1='%s default' % TextArea_form.name )
+
def test_150_add_ldda_to_library5( self ):
"""
Testing adding a new library dataset to library5's folder, making sure the TextField and new TextArea settings are correct on the upload form.
@@ -584,6 +640,7 @@
strings_displayed=[ 'TextField',
'This text should be inherited',
'TextArea' ] )
+
def test_155_add_template_to_library6( self ):
""" Testing add an inheritable template containing an WorkflowField to library6"""
# Add an inheritable template to library6
@@ -594,6 +651,7 @@
form_id=self.security.encode_id( WorkflowField_form.id ),
form_name=WorkflowField_form.name,
library_id=self.security.encode_id( library6.id ) )
+
def test_160_add_folder6_to_library6( self ):
"""Testing adding a folder to library6"""
# Logged in as admin_user
@@ -608,6 +666,7 @@
description=description )
global folder6
folder6 = get_folder( folder.id, name, description )
+
def test_165_save_folder6_inherited_template( self ):
"""Saving the inherited template for folder6"""
# Logged in as admin_user
@@ -619,6 +678,7 @@
strings_displayed=[ WorkflowField_form.name,
'This is an inherited template and is not required to be used with this folder',
'none' ] )
+
def test_170_add_ldda_to_folder6( self ):
"""
Testing adding a new library dataset to library6's folder, making sure the WorkflowField setting is correct on the upload form.
@@ -646,6 +706,7 @@
self.security.encode_id( ldda.id ),
ldda.name,
strings_displayed=[ 'WorkflowField', 'none' ] )
+
def test_999_reset_data_for_later_test_runs( self ):
"""Reseting data to enable later test runs to pass"""
# Logged in as admin_user
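The test_library_templates.py hunk above replaces the star imports with explicit names and pre-declares, at module level, every variable the test methods later assign through "global". A short sketch of why the pre-declaration helps; the names here are illustrative, not from the commit:

    library1 = None  # defined at import time, assigned by an earlier test

    def test_005_create():
        global library1
        library1 = 'library1'  # stands in for get_library( ... )

    def test_010_use():
        # If test_005_create never ran, this fails with a readable
        # assertion instead of a NameError on an undefined global.
        assert library1 is not None, 'test_005_create must run first'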
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_metadata_editing.py
--- a/test/functional/test_metadata_editing.py
+++ b/test/functional/test_metadata_editing.py
@@ -1,21 +1,26 @@
+from base.twilltestcase import TwillTestCase
+from functional import database_contexts
import galaxy.model
-from galaxy.model.orm import *
-from base.test_db_util import sa_session
-from base.twilltestcase import TwillTestCase
+from galaxy.model.orm import (
+ and_,
+ desc,
+)
+
class TestMetadataEdit( TwillTestCase ):
def test_00_metadata_edit( self ):
"""test_metadata_edit: Testing metadata editing"""
+ sa_session = database_contexts.galaxy_context
self.logout()
self.login( email='test@bx.psu.edu', username='admin-user' )
admin_user = sa_session.query( galaxy.model.User ) \
- .filter( galaxy.model.User.table.c.email=='test@bx.psu.edu' ) \
+ .filter( galaxy.model.User.table.c.email == 'test@bx.psu.edu' ) \
.one()
self.new_history( name='Test Metadata Edit' )
history1 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
+ .filter( and_( galaxy.model.History.table.c.deleted == False,
+ galaxy.model.History.table.c.user_id == admin_user.id ) ) \
.order_by( desc( galaxy.model.History.table.c.create_time ) ) \
.first()
self.upload_file( '1.bed' )
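The test_metadata_editing.py hunk resolves the SQLAlchemy session from functional.database_contexts inside the test method rather than importing sa_session at module load, so the module can be imported before the framework has configured the database. A hedged sketch of that lookup pattern; the stub class below stands in for the real database_contexts module:

    class _DatabaseContexts( object ):
        galaxy_context = None  # assigned by the test framework at startup

    database_contexts = _DatabaseContexts()

    def current_session():
        # Late binding: read the context when the test runs, not at import.
        sa_session = database_contexts.galaxy_context
        assert sa_session is not None, "database context not initialized yet"
        return sa_session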
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_sample_tracking.py
--- a/test/functional/test_sample_tracking.py
+++ b/test/functional/test_sample_tracking.py
@@ -3,6 +3,8 @@
from base.twilltestcase import *
from base.test_db_util import *
+
+# TODO: Functional tests start failing at 025, fix or eliminate rest of tests.
class TestFormsAndSampleTracking( TwillTestCase ):
# ====== Setup Users, Groups & Roles required for this test suite =========
def test_000_initiate_users( self ):
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_tags.py
--- a/test/functional/test_tags.py
+++ b/test/functional/test_tags.py
@@ -1,7 +1,17 @@
-from base.twilltestcase import *
-from base.test_db_util import *
+from base.twilltestcase import TwillTestCase
+from base.test_db_util import (
+ get_user,
+ get_private_role,
+ get_latest_history_for_user,
+ get_latest_hda,
+)
+
+history1 = None
+admin_user = None
+
class TestTags( TwillTestCase ):
+
# TODO: Add more functional test coverage for tags
def test_000_initiate_users( self ):
"""Ensuring all required user accounts exist"""
diff -r 494b51bda9c1a319672e92cce4ec17386cd65390 -r c7986c31fd7447f7d1e2a85bdfa43be6bc4b3b8a test/functional/test_user_info.py
--- a/test/functional/test_user_info.py
+++ b/test/functional/test_user_info.py
@@ -1,6 +1,8 @@
from base.twilltestcase import *
from base.test_db_util import *
+
+# TODO: Functional tests start failing at 020, fix or eliminate rest of tests.
class TestUserInfo( TwillTestCase ):
def test_000_initiate_users( self ):
"""Ensuring all required user accounts exist"""
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jmchilton: Code duplication reduction in Tool.
by commits-noreply@bitbucket.org 10 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c8c00d106c7a/
Changeset: c8c00d106c7a
User: jmchilton
Date: 2014-01-11 04:23:07
Summary: Code duplication reduction in Tool.
Affected #: 1 file
diff -r 9f55b4b37d35b8366f42ab430ac4213554d83311 -r c8c00d106c7a6883feaacca809b42fd7c107fe2a lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1834,7 +1834,7 @@
# external data source tools).
if "runtool_btn" not in incoming and "URL" not in incoming:
if not self.display_interface:
- return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+ return self.__no_display_interface_response()
if len(incoming):
self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {}, source=source )
return "tool_form.mako", dict( errors={}, tool_state=state, param_values={}, incoming={} )
@@ -1894,7 +1894,7 @@
pass
# Just a refresh, render the form with updated state and errors.
if not self.display_interface:
- return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+ return self.__no_display_interface_response()
return 'tool_form.mako', dict( errors=errors, tool_state=state )
def __handle_page_advance( self, trans, state, errors ):
@@ -1902,9 +1902,12 @@
# Fill in the default values for the next page
self.fill_in_new_state( trans, self.inputs_by_page[ state.page ], state.inputs )
if not self.display_interface:
- return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+ return self.__no_display_interface_response()
return 'tool_form.mako', dict( errors=errors, tool_state=state )
+ def __no_display_interface_response( self ):
+ return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+
def __fetch_state( self, trans, incoming, history, all_pages ):
# Get the state or create if not found
if "tool_state" in incoming:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.