galaxy-commits
March 2013
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/1da2068fa58c/
changeset: 1da2068fa58c
user: dannon
date: 2013-03-05 22:15:54
summary: Fix workflow modules imports.
affected #: 1 file
diff -r f7904535003fdf8e56588fb78ebcdf9af913cc55 -r 1da2068fa58c5f1ac9d4bee510644a1dce20e814 lib/galaxy/workflow/modules.py
--- a/lib/galaxy/workflow/modules.py
+++ b/lib/galaxy/workflow/modules.py
@@ -1,13 +1,20 @@
+"""
+Modules used in building workflows
+"""
+
+import logging
import re
+
from elementtree.ElementTree import Element
+
+import galaxy.tools
from galaxy import web
-from galaxy.tools.parameters import DataToolParameter, DummyDataset, RuntimeValue, check_param, visit_input_values
-import galaxy.tools
+from galaxy.jobs.actions.post import ActionBox
+from galaxy.model import PostJobAction
+from galaxy.tools.parameters import check_param, DataToolParameter, DummyDataset, RuntimeValue, visit_input_values
from galaxy.util.bunch import Bunch
from galaxy.util.json import from_json_string, to_json_string
-from galaxy.jobs.actions.post import ActionBox
-from galaxy.model import PostJobAction
-import logging
+
log = logging.getLogger( __name__ )
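The reordering in this commit follows the standard Python convention for import sections: standard-library modules first, third-party packages second, local application imports last, with each group alphabetized and separated by a blank line. A minimal sketch of the resulting layout, using names from the diff above:

# Standard library
import logging
import re

# Third party
from elementtree.ElementTree import Element

# Local application
import galaxy.tools
from galaxy import web
from galaxy.jobs.actions.post import ActionBox

log = logging.getLogger( __name__ )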
https://bitbucket.org/galaxy/galaxy-central/commits/26006e793a32/
changeset: 26006e793a32
user: dannon
date: 2013-03-05 22:18:16
summary: Workflow modules: Adjust spacing to new guidelines.
affected #: 1 file
diff -r 1da2068fa58c5f1ac9d4bee510644a1dce20e814 -r 26006e793a32a35328d941f6cea573b0108faab7 lib/galaxy/workflow/modules.py
--- a/lib/galaxy/workflow/modules.py
+++ b/lib/galaxy/workflow/modules.py
@@ -15,8 +15,8 @@
from galaxy.util.bunch import Bunch
from galaxy.util.json import from_json_string, to_json_string
+log = logging.getLogger( __name__ )
-log = logging.getLogger( __name__ )
class WorkflowModule( object ):
@@ -31,6 +31,7 @@
Create a new instance of the module with default state
"""
return Class( trans )
+
@classmethod
def from_dict( Class, trans, d ):
"""
@@ -38,6 +39,7 @@
dictionary `d`.
"""
return Class( trans )
+
@classmethod
def from_workflow_step( Class, trans, step ):
return Class( trans )
@@ -51,10 +53,13 @@
def get_type( self ):
return self.type
+
def get_name( self ):
return self.name
+
def get_tool_id( self ):
return None
+
def get_tooltip( self, static_path='' ):
return None
@@ -62,14 +67,19 @@
def get_state( self ):
return None
+
def get_errors( self ):
return None
+
def get_data_inputs( self ):
return []
+
def get_data_outputs( self ):
return []
+
def update_state( self ):
pass
+
def get_config_form( self ):
raise TypeError( "Abstract method" )
@@ -84,18 +94,23 @@
def get_runtime_inputs( self ):
raise TypeError( "Abstract method" )
+
def get_runtime_state( self ):
raise TypeError( "Abstract method" )
+
def encode_runtime_state( self, trans, state ):
raise TypeError( "Abstract method" )
+
def decode_runtime_state( self, trans, string ):
raise TypeError( "Abstract method" )
+
def update_runtime_state( self, trans, state, values ):
raise TypeError( "Abstract method" )
def execute( self, trans, state ):
raise TypeError( "Abstract method" )
+
class InputDataModule( WorkflowModule ):
type = "data_input"
name = "Input dataset"
@@ -105,12 +120,14 @@
module = Class( trans )
module.state = dict( name="Input Dataset" )
return module
+
@classmethod
def from_dict( Class, trans, d, secure=True ):
module = Class( trans )
state = from_json_string( d["tool_state"] )
module.state = dict( name=state.get( "name", "Input Dataset" ) )
return module
+
@classmethod
def from_workflow_step( Class, trans, step ):
module = Class( trans )
@@ -172,6 +189,7 @@
def execute( self, trans, state ):
return None, dict( output=state.inputs['input'])
+
class ToolModule( WorkflowModule ):
type = "tool"
@@ -286,6 +304,7 @@
def get_data_inputs( self ):
data_inputs = []
+
def callback( input, value, prefixed_name, prefixed_label ):
if isinstance( input, DataToolParameter ):
data_inputs.append( dict(
@@ -293,6 +312,7 @@
label=prefixed_label,
multiple=input.multiple,
extensions=input.extensions ) )
+
visit_input_values( self.tool.inputs, self.state.inputs, callback )
return data_inputs
@@ -338,6 +358,7 @@
make_runtime_key = incoming.get( 'make_runtime', None )
make_buildtime_key = incoming.get( 'make_buildtime', None )
+
def item_callback( trans, key, input, value, error, old_value, context ):
# Dummy value for Data parameters
if isinstance( input, DataToolParameter ):
@@ -354,6 +375,7 @@
return value, None
else:
return value, error
+
# Update state using incoming values
errors = self.tool.update_state( self.trans, self.tool.inputs, self.state.inputs, incoming, item_callback=item_callback )
self.errors = errors or None
@@ -370,6 +392,7 @@
input_connections_by_name = {}
# Any connected input needs to have value DummyDataset (these
# are not persisted so we need to do it every time)
+
def callback( input, value, prefixed_name, prefixed_label ):
replacement = None
if isinstance( input, DataToolParameter ):
@@ -379,9 +402,12 @@
else:
replacement = DummyDataset()
return replacement
+
visit_input_values( self.tool.inputs, self.state.inputs, callback )
+
class WorkflowModuleFactory( object ):
+
def __init__( self, module_types ):
self.module_types = module_types
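The spacing guidelines this commit applies are the usual Python convention: one blank line between methods inside a class, two blank lines between top-level definitions. A minimal sketch of the convention (class and method names illustrative):

class ExampleModule( object ):

    def get_type( self ):
        return self.type

    def get_name( self ):
        return self.name


class AnotherModule( object ):
    pass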
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: dannon: Remove import *, fix a docstring in actions/admin
by commits-noreply@bitbucket.org 05 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/f7904535003f/
changeset: f7904535003f
user: dannon
date: 2013-03-05 22:10:45
summary: Remove import *, fix a docstring in actions/admin
affected #: 1 file
diff -r dde0e4592530c3be40e4464b4347817b25a6aa62 -r f7904535003fdf8e56588fb78ebcdf9af913cc55 lib/galaxy/actions/admin.py
--- a/lib/galaxy/actions/admin.py
+++ b/lib/galaxy/actions/admin.py
@@ -1,9 +1,10 @@
"""
Contains administrative functions
"""
+
import logging
from galaxy import util
-from galaxy.exceptions import *
+from galaxy.exceptions import MessageException
log = logging.getLogger( __name__ )
@@ -167,12 +168,14 @@
self.sa_session.flush()
message += ', '.join( names )
return message
-
+
def _purge_quota( self, quota, params ):
- # This method should only be called for a Quota that has previously been deleted.
- # Purging a deleted Quota deletes all of the following from the database:
- # - UserQuotaAssociations where quota_id == Quota.id
- # - GroupQuotaAssociations where quota_id == Quota.id
+ """
+ This method should only be called for a Quota that has previously been deleted.
+ Purging a deleted Quota deletes all of the following from the database:
+ - UserQuotaAssociations where quota_id == Quota.id
+ - GroupQuotaAssociations where quota_id == Quota.id
+ """
quotas = util.listify( quota )
names = []
for q in quotas:
@@ -194,3 +197,4 @@
self.sa_session.flush()
message += ', '.join( names )
return message
+
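Dropping the wildcard import makes the module's dependencies explicit: the only name actually used, MessageException, is now visibly imported, nothing else from galaxy.exceptions leaks into the namespace, and static checkers can flag undefined names. A minimal before/after sketch taken from the diff:

# Before: every public name in galaxy.exceptions enters this
# module, and the origin of MessageException is invisible.
# from galaxy.exceptions import *

# After: only the one name the module uses.
from galaxy.exceptions import MessageException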
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: dannon: Strip dead test code (that actually threw an exception when enabled due to undefined variables).
by commits-noreply@bitbucket.org 05 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/dde0e4592530/
changeset: dde0e4592530
user: dannon
date: 2013-03-05 22:07:35
summary: Strip dead test code (that actually threw an exception when enabled due to undefined variables).
affected #: 1 file
diff -r a7814c5352318f5e81d77315bc9fff413673d483 -r dde0e4592530c3be40e4464b4347817b25a6aa62 lib/galaxy/security/__init__.py
--- a/lib/galaxy/security/__init__.py
+++ b/lib/galaxy/security/__init__.py
@@ -903,26 +903,10 @@
.filter( and_( trans.app.model.DatasetPermissions.dataset_id.in_( dataset_ids ),
trans.app.model.DatasetPermissions.action == self.permitted_actions.DATASET_ACCESS.action ) ) \
.all()
-
# Every dataset returned has "access" privileges associated with it,
# so it's not public.
for permission in access_data_perms:
datasets_public[ permission.dataset_id ] = False
-
- # Test code: Check if the results match up with the original:
- test_code = False
- if test_code:
- log.debug( "datasets_are_public test: check datasets_are_public matches dataset_is_public:" )
- test_success = True
- for dataset in datasets:
- orig_is_public = self.dataset_is_public( dataset )
- if orig_is_public == datasets_public[ dataset.id ]:
- log.debug( "\tMatch for dataset %d" % dataset.id )
- else:
- success = False
- log.error( "\tERROR: Did not match: single is_public: %s; multiple is_public: %s"
- % ( single_is_public, datasets_public[ dataset.id ] ) )
- log.debug( "datasets_are_public: test succeeded? %s" % test_success )
return datasets_public
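The deleted block was guarded by a hard-coded test_code = False, so it never ran; enabling it would have raised a NameError because single_is_public was never defined (and it assigned success where test_success was intended). A minimal standalone illustration of why constant-flag guards hide breakage:

test_code = False
if test_code:
    # Python resolves names at run time, so this body can refer
    # to a variable that exists nowhere ...
    print( undefined_variable )
# ... and nothing fails until someone flips the flag.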
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c7fe14570d9a/
changeset: c7fe14570d9a
user: dannon
date: 2013-03-05 21:58:13
summary: Tools module import cleanup, removal of dead code.
affected #: 1 file
diff -r 6bc53fd15c629310c425f15e691f23dec4892293 -r c7fe14570d9a3f1823895978efd8e2734489a9a9 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1,47 +1,59 @@
"""
Classes encapsulating galaxy tools and tool configuration.
"""
-import pkg_resources
-pkg_resources.require( "simplejson" )
-pkg_resources.require( "MarkupSafe" ) #MarkupSafe must load before mako
-pkg_resources.require( "Mako" )
+import binascii
+import glob
+import logging
+import os
+import pipes
+import re
+import shutil
+import sys
+import tempfile
+import traceback
+import types
+import urllib
-import logging, os, string, sys, tempfile, glob, shutil, types, urllib, subprocess, random, math, traceback, re, pipes
+from galaxy import eggs
+eggs.require( "simplejson" )
+eggs.require( "MarkupSafe" ) #MarkupSafe must load before mako
+eggs.require( "Mako" )
+
import simplejson
-import binascii
+from cgi import FieldStorage
+from elementtree import ElementTree
from mako.template import Template
-from UserDict import DictMixin
-from galaxy.util.odict import odict
-from galaxy.util.bunch import Bunch
-from galaxy.util.template import fill_template
-from galaxy import util, jobs, model
+from paste import httpexceptions
+from sqlalchemy import and_
+from copy import deepcopy
+
+from galaxy import jobs, model
+from galaxy.datatypes.metadata import JobExternalOutputMetadataWrapper
from galaxy.jobs import ParallelismInfo
-from copy import deepcopy
-from elementtree import ElementTree
-from parameters import *
-from parameters.grouping import *
-from parameters.output import ToolOutputActionGroup
-from parameters.validation import LateValidationError
-from parameters.input_translation import ToolInputTranslator
-from galaxy.util.expressions import ExpressionContext
-from galaxy.tools.test import ToolTestBuilder
from galaxy.tools.actions import DefaultToolAction
from galaxy.tools.actions.data_manager import DataManagerToolAction
from galaxy.tools.deps import DependencyManager
-from galaxy.model import directory_hash_id
-from galaxy.model.orm import *
+from galaxy.tools.parameters import check_param, params_from_strings, params_to_strings
+from galaxy.tools.parameters.basic import (BaseURLToolParameter,
+ DataToolParameter, HiddenToolParameter, LibraryDatasetToolParameter,
+ SelectToolParameter, ToolParameter, UnvalidatedValue,
+ IntegerToolParameter, FloatToolParameter)
+from galaxy.tools.parameters.grouping import Conditional, ConditionalWhen, Repeat, UploadDataset
+from galaxy.tools.parameters.input_translation import ToolInputTranslator
+from galaxy.tools.parameters.output import ToolOutputActionGroup
+from galaxy.tools.parameters.validation import LateValidationError
+from galaxy.tools.test import ToolTestBuilder
+from galaxy.util import isinf, listify, parse_xml, rst_to_html, string_as_bool, string_to_object, xml_text, xml_to_string
+from galaxy.util.bunch import Bunch
+from galaxy.util.expressions import ExpressionContext
+from galaxy.util.hash_util import hmac_new
from galaxy.util.none_like import NoneDataset
-from galaxy.datatypes import sniff
-from cgi import FieldStorage
-from galaxy.util.hash_util import *
-from galaxy.util import listify
-import tool_shed.util.shed_util_common
+from galaxy.util.odict import odict
+from galaxy.util.template import fill_template
+from galaxy.visualization.genome.visual_analytics import TracksterConfig
from galaxy.web import url_for
-
-from paste import httpexceptions
-
-from galaxy.visualization.genome.visual_analytics import TracksterConfig
+from tool_shed.util import shed_util_common
log = logging.getLogger( __name__ )
@@ -147,7 +159,7 @@
self.sa_session.query( self.app.model.ToolTagAssociation ).delete()
self.sa_session.flush()
log.info( "Parsing the tool configuration %s" % config_filename )
- tree = util.parse_xml( config_filename )
+ tree = parse_xml( config_filename )
root = tree.getroot()
tool_path = root.get( 'tool_path' )
if tool_path:
@@ -234,7 +246,7 @@
elif key.startswith( 'label_' ):
self.tool_panel[ key ] = val
elif key.startswith( 'section_' ):
- elem = Element( 'section' )
+ elem = ElementTree.Element( 'section' )
elem.attrib[ 'id' ] = val.id or ''
elem.attrib[ 'name' ] = val.name or ''
elem.attrib[ 'version' ] = val.version or ''
@@ -262,7 +274,7 @@
be reset when the various tool panel config files are parsed, at which time the tools and workflows are
loaded.
"""
- tree = util.parse_xml( self.integrated_tool_panel_config )
+ tree = parse_xml( self.integrated_tool_panel_config )
root = tree.getroot()
for elem in root:
if elem.tag == 'tool':
@@ -291,7 +303,7 @@
def write_integrated_tool_panel_config_file( self ):
"""
Write the current in-memory version of the integrated_tool_panel.xml file to disk. Since Galaxy administrators
- use this file to manage the tool panel, we'll not use util.xml_to_string() since it doesn't write XML quite right.
+ use this file to manage the tool panel, we'll not use xml_to_string() since it doesn't write XML quite right.
"""
fd, filename = tempfile.mkstemp()
os.write( fd, '<?xml version="1.0"?>\n' )
@@ -457,7 +469,7 @@
# the tool if it's it was not found in self.tools_by_id, but performing that check did not enable this scenario.
self.tools_by_id[ tool.id ] = tool
if load_panel_dict:
- self.__add_tool_to_tool_panel( tool.id, panel_dict, section=isinstance( panel_dict, galaxy.tools.ToolSection ) )
+ self.__add_tool_to_tool_panel( tool.id, panel_dict, section=isinstance( panel_dict, ToolSection ) )
# Always load the tool into the integrated_panel_dict, or it will not be included in the integrated_tool_panel.xml file.
if key in integrated_panel_dict or index is None:
integrated_panel_dict[ key ] = tool
@@ -707,7 +719,7 @@
return rval
def _load_and_preprocess_tool_xml(self, config_file):
- tree = util.parse_xml(config_file)
+ tree = parse_xml(config_file)
root = tree.getroot()
macros_el = root.find('macros')
if not macros_el:
@@ -779,7 +791,7 @@
return macros
def _load_macro_file(self, path, tool_dir):
- tree = util.parse_xml(path)
+ tree = parse_xml(path)
root = tree.getroot()
return self._load_macros(root, tool_dir)
@@ -848,7 +860,7 @@
"""
Keeps track of the state of a users interaction with a tool between
requests. The default tool state keeps track of the current page (for
- multipage "wizard" tools) and the values of all parameters.
+ multipage "wizard" tools) and the values of all
"""
def __init__( self ):
self.page = 0
@@ -968,7 +980,7 @@
self.input_required = False
self.display_interface = True
self.require_login = False
- # Define a place to keep track of all input parameters. These
+ # Define a place to keep track of all input These
# differ from the inputs dictionary in that inputs can be page
# elements like conditionals, but input_params are basic form
# parameters like SelectField objects. This enables us to more
@@ -1018,7 +1030,7 @@
def tool_shed_repository( self ):
# If this tool is included in an installed tool shed repository, return it.
if self.tool_shed:
- return tool_shed.util.shed_util_common.get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( self.app,
+ return shed_util_common.get_tool_shed_repository_by_shed_name_owner_installed_changeset_revision( self.app,
self.tool_shed,
self.repository_name,
self.repository_owner,
@@ -1094,13 +1106,13 @@
# For backward compatibility, some tools may not have versions yet.
self.version = "1.0.0"
# Support multi-byte tools
- self.is_multi_byte = util.string_as_bool( root.get( "is_multi_byte", False ) )
+ self.is_multi_byte = string_as_bool( root.get( "is_multi_byte", False ) )
# Force history to fully refresh after job execution for this tool.
# Useful i.e. when an indeterminate number of outputs are created by
# a tool.
- self.force_history_refresh = util.string_as_bool( root.get( 'force_history_refresh', 'False' ) )
- self.display_interface = util.string_as_bool( root.get( 'display_interface', str( self.display_interface ) ) )
- self.require_login = util.string_as_bool( root.get( 'require_login', str( self.require_login ) ) )
+ self.force_history_refresh = string_as_bool( root.get( 'force_history_refresh', 'False' ) )
+ self.display_interface = string_as_bool( root.get( 'display_interface', str( self.display_interface ) ) )
+ self.require_login = string_as_bool( root.get( 'require_login', str( self.require_login ) ) )
# Load input translator, used by datasource tools to change names/values of incoming parameters
self.input_translator = root.find( "request_param_translation" )
if self.input_translator:
@@ -1125,7 +1137,7 @@
else:
self.redirect_url_params = ''
# Short description of the tool
- self.description = util.xml_text(root, "description")
+ self.description = xml_text(root, "description")
# Versioning for tools
self.version_string_cmd = None
version_cmd = root.find("version_command")
@@ -1155,8 +1167,8 @@
if 'job_config' in dir(self.app):
self.job_tool_configurations = self.app.job_config.get_job_tool_configurations(self_ids)
# Is this a 'hidden' tool (hidden in tool menu)
- self.hidden = util.xml_text(root, "hidden")
- if self.hidden: self.hidden = util.string_as_bool(self.hidden)
+ self.hidden = xml_text(root, "hidden")
+ if self.hidden: self.hidden = string_as_bool(self.hidden)
# Load any tool specific code (optional) Edit: INS 5/29/2007,
# allow code files to have access to the individual tool's
# "module" if it has one. Allows us to reuse code files, etc.
@@ -1175,7 +1187,7 @@
for option_elem in root.findall("options"):
for option, value in self.options.copy().items():
if isinstance(value, type(False)):
- self.options[option] = util.string_as_bool(option_elem.get(option, str(value)))
+ self.options[option] = string_as_bool(option_elem.get(option, str(value)))
else:
self.options[option] = option_elem.get(option, str(value))
self.options = Bunch(** self.options)
@@ -1243,8 +1255,8 @@
enctypes = set()
if input_elem:
# Handle properties of the input form
- self.check_values = util.string_as_bool( input_elem.get("check_values", self.check_values ) )
- self.nginx_upload = util.string_as_bool( input_elem.get( "nginx_upload", self.nginx_upload ) )
+ self.check_values = string_as_bool( input_elem.get("check_values", self.check_values ) )
+ self.nginx_upload = string_as_bool( input_elem.get( "nginx_upload", self.nginx_upload ) )
self.action = input_elem.get( 'action', self.action )
# If we have an nginx upload, save the action as a tuple instead of
# a string. The actual action needs to get url_for run to add any
@@ -1283,7 +1295,7 @@
else:
raise Exception, "Conflicting required enctypes: %s" % str( enctypes )
# Check if the tool either has no parameters or only hidden (and
- # thus hardcoded) parameters. FIXME: hidden parameters aren't
+ # thus hardcoded) FIXME: hidden parameters aren't
# parameters at all really, and should be passed in a different
# way, making this check easier.
for param in self.inputs.values():
@@ -1305,7 +1317,7 @@
help_pages = self.help.findall( "page" )
help_header = self.help.text
try:
- self.help = Template( util.rst_to_html(self.help.text), input_encoding='utf-8',
+ self.help = Template( rst_to_html(self.help.text), input_encoding='utf-8',
output_encoding='utf-8', default_filters=[ 'decode.utf8' ],
encoding_errors='replace' )
except:
@@ -1317,7 +1329,7 @@
help_footer = help_footer + help_page.tail
# Each page has to rendered all-together because of backreferences allowed by rst
try:
- self.help_by_page = [ Template( util.rst_to_html( help_header + x + help_footer,
+ self.help_by_page = [ Template( rst_to_html( help_header + x + help_footer,
input_encoding='utf-8', output_encoding='utf-8',
default_filters=[ 'decode.utf8' ],
encoding_errors='replace' ) )
@@ -1342,11 +1354,11 @@
output.format_source = data_elem.get("format_source", None)
output.metadata_source = data_elem.get("metadata_source", "")
output.parent = data_elem.get("parent", None)
- output.label = util.xml_text( data_elem, "label" )
+ output.label = xml_text( data_elem, "label" )
output.count = int( data_elem.get("count", 1) )
output.filters = data_elem.findall( 'filter' )
output.from_work_dir = data_elem.get("from_work_dir", None)
- output.hidden = util.string_as_bool( data_elem.get("hidden", "") )
+ output.hidden = string_as_bool( data_elem.get("hidden", "") )
output.tool = self
output.actions = ToolOutputActionGroup( output, data_elem.find( 'actions' ) )
self.outputs[ output.name ] = output
@@ -1370,7 +1382,7 @@
for stdio_elem in ( root.findall( 'stdio' ) ):
self.parse_stdio_exit_codes( stdio_elem )
self.parse_stdio_regexes( stdio_elem )
- except Exception, e:
+ except Exception:
log.error( "Exception in parse_stdio! " + str(sys.exc_info()) )
def parse_stdio_exit_codes( self, stdio_elem ):
@@ -1397,7 +1409,7 @@
self.parse_error_level( exit_code_elem.get( "level" )))
code_range = exit_code_elem.get( "range", "" )
if None == code_range:
- code_range = code_elem.get( "value", "" )
+ code_range = exit_code_elem.get( "value", "" )
if None == code_range:
log.warning( "Tool stdio exit codes must have "
+ "a range or value" )
@@ -1441,12 +1453,12 @@
# isn't bogus. If we have two infinite values, then
# the start must be -inf and the end must be +inf.
# So at least warn about this situation:
- if ( util.isinf( exit_code.range_start ) and
- util.isinf( exit_code.range_end ) ):
+ if ( isinf( exit_code.range_start ) and
+ isinf( exit_code.range_end ) ):
log.warning( "Tool exit_code range %s will match on "
+ "all exit codes" % code_range )
self.stdio_exit_codes.append( exit_code )
- except Exception, e:
+ except Exception:
log.error( "Exception in parse_stdio_exit_codes! "
+ str(sys.exc_info()) )
trace = sys.exc_info()[2]
@@ -1514,7 +1526,7 @@
regex.stdout_match = True
regex.stderr_match = True
self.stdio_regexes.append( regex )
- except Exception, e:
+ except Exception:
log.error( "Exception in parse_stdio_exit_codes! "
+ str(sys.exc_info()) )
trace = sys.exc_info()[2]
@@ -1539,7 +1551,7 @@
return_level = StdioErrorLevel.FATAL
else:
log.debug( "Error level %s did not match warning/fatal" % err_level )
- except Exception, e:
+ except Exception:
log.error( "Exception in parse_error_level "
+ str(sys.exc_info() ) )
trace = sys.exc_info()[2]
@@ -1636,7 +1648,7 @@
attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) )
# Allow a file size to vary if sim_size compare
attributes['delta'] = int( attrib.pop( 'delta', '10000' ) )
- attributes['sort'] = util.string_as_bool( attrib.pop( 'sort', False ) )
+ attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) )
attributes['extra_files'] = []
attributes['assert_list'] = assert_list
if 'ftype' in attrib:
@@ -1654,7 +1666,7 @@
extra_attributes['compare'] = extra.get( 'compare', 'diff' ).lower()
extra_attributes['delta'] = extra.get( 'delta', '0' )
extra_attributes['lines_diff'] = int( extra.get( 'lines_diff', '0' ) )
- extra_attributes['sort'] = util.string_as_bool( extra.get( 'sort', False ) )
+ extra_attributes['sort'] = string_as_bool( extra.get( 'sort', False ) )
attributes['extra_files'].append( ( extra_type, extra_value, extra_name, extra_attributes ) )
test.add_output( name, file, attributes )
except Exception, e:
@@ -1671,7 +1683,7 @@
# Display
display_elem = input_elem.find("display")
if display_elem is not None:
- display = util.xml_to_string(display_elem)
+ display = xml_to_string(display_elem)
else:
display = None
return display, inputs
@@ -1704,7 +1716,7 @@
group = Conditional()
group.name = elem.get( "name" )
group.value_ref = elem.get( 'value_ref', None )
- group.value_ref_in_group = util.string_as_bool( elem.get( 'value_ref_in_group', 'True' ) )
+ group.value_ref_in_group = string_as_bool( elem.get( 'value_ref_in_group', 'True' ) )
value_from = elem.get( "value_from" )
if value_from:
value_from = value_from.split( ':' )
@@ -1789,7 +1801,7 @@
self.requirements
"""
for requirement_elem in requirements_elem.findall( 'requirement' ):
- name = util.xml_text( requirement_elem )
+ name = xml_text( requirement_elem )
type = requirement_elem.get( "type", "package" )
version = requirement_elem.get( "version", None )
requirement = ToolRequirement( name=name, type=type, version=version )
@@ -1807,7 +1819,7 @@
# right now
if self.tool_type.startswith( 'data_source' ):
return False
- if not util.string_as_bool( root.get( "workflow_compatible", "True" ) ):
+ if not string_as_bool( root.get( "workflow_compatible", "True" ) ):
return False
# TODO: Anyway to capture tools that dynamically change their own
# outputs?
@@ -1873,7 +1885,7 @@
"""
Call the function `callback` on each parameter of this tool. Visits
grouping parameters recursively and constructs unique prefixes for
- each nested set of parameters. The callback method is then called as:
+ each nested set of The callback method is then called as:
`callback( level_prefix, parameter, parameter_value )`
"""
@@ -1894,7 +1906,7 @@
"""
# Get the state or create if not found
if "tool_state" in incoming:
- encoded_state = util.string_to_object( incoming["tool_state"] )
+ encoded_state = string_to_object( incoming["tool_state"] )
state = DefaultToolState()
state.decode( encoded_state, self, trans.app )
else:
@@ -1911,7 +1923,7 @@
# Process incoming data
if not( self.check_values ):
# If `self.check_values` is false we don't do any checking or
- # processing on input parameters. This is used to pass raw values
+ # processing on input This is used to pass raw values
# through to/from external sites. FIXME: This should be handled
# more cleanly, there is no reason why external sites need to
# post back to the same URL that the tool interface uses.
@@ -2225,7 +2237,7 @@
"""
params = []
for input_param in self.input_params:
- if isinstance( input_param, basic.SelectToolParameter ) and input_param.is_dynamic:
+ if isinstance( input_param, SelectToolParameter ) and input_param.is_dynamic:
options = input_param.options
if options and options.missing_tool_data_table_name and input_param not in params:
params.append( input_param )
@@ -2238,7 +2250,7 @@
"""
params = []
for input_param in self.input_params:
- if isinstance( input_param, basic.SelectToolParameter ) and input_param.is_dynamic:
+ if isinstance( input_param, SelectToolParameter ) and input_param.is_dynamic:
options = input_param.options
if options and options.missing_index_file and input_param not in params:
params.append( input_param )
@@ -2324,12 +2336,12 @@
else:
# Regular tool parameter, no recursion needed
try:
- check_param = True
+ ck_param = True
if allow_workflow_parameters and isinstance( values[ input.name ], basestring ):
if WORKFLOW_PARAMETER_REGULAR_EXPRESSION.search( values[ input.name ] ):
- check_param = False
+ ck_param = False
#this will fail when a parameter's type has changed to a non-compatible one: e.g. conditional group changed to dataset input
- if check_param:
+ if ck_param:
input.value_from_basic( input.value_to_basic( values[ input.name ], trans.app ), trans.app, ignore_errors=False )
except:
messages[ input.name ] = "Value no longer valid for '%s%s', replaced with default" % ( prefix, input.label )
@@ -2381,7 +2393,6 @@
except Exception, e:
# Wrap an re-raise any generated error so we can
# generate a more informative message
- v = input.value_to_display_text( value, self.app )
message = "Failed runtime validation of %s%s (%s)" \
% ( prefix, input.label, e )
raise LateValidationError( message )
@@ -2619,7 +2630,7 @@
command_line = fill_template( self.command, context=param_dict )
# Remove newlines from command line, and any leading/trailing white space
command_line = command_line.replace( "\n", " " ).replace( "\r", " " ).strip()
- except Exception, e:
+ except Exception:
# Modify exception message to be more clear
#e.args = ( 'Error substituting into command line. Params: %r, Command: %s' % ( param_dict, self.command ), )
raise
@@ -2822,6 +2833,7 @@
if outdata == dataset: continue
# Create new child dataset
child_data = child_dataset.copy( parent_id = dataset.id )
+ #DBTODO should this be child_data, and not child_dataset here?
self.sa_session.add( child_dataset )
self.sa_session.flush()
return children
@@ -2837,7 +2849,7 @@
line = simplejson.loads( line )
if line.get( 'type' ) == 'new_primary_dataset':
new_primary_datasets[ os.path.split( line.get( 'filename' ) )[-1] ] = line
- except Exception, e:
+ except Exception:
# This should not be considered an error or warning condition, this file is optional
pass
# Loop through output file names, looking for generated primary
@@ -3117,7 +3129,7 @@
tool_type = 'set_metadata'
def exec_after_process( self, app, inp_data, out_data, param_dict, job = None ):
for name, dataset in inp_data.iteritems():
- external_metadata = galaxy.datatypes.metadata.JobExternalOutputMetadataWrapper( job )
+ external_metadata = JobExternalOutputMetadataWrapper( job )
if external_metadata.external_metadata_set_successfully( dataset, app.model.context ):
dataset.metadata.from_JSON_dict( external_metadata.get_output_filenames_by_dataset( dataset, app.model.context ).filename_out )
else:
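Alongside the import cleanup, this diff fixes two small bugs: an exit-code value read from the wrong element (code_elem instead of exit_code_elem) and a local flag that shadowed the imported check_param function, renamed to ck_param. A minimal standalone sketch of the shadowing hazard, with a stand-in helper:

def check_param( value ):
    # Stand-in for the imported helper of the same name.
    return value is not None

def update_state( values ):
    check_param = True  # local flag shadows the helper above
    # For the rest of this function the name is bound to a bool,
    # so a call like check_param( values[0] ) would raise a
    # TypeError; renaming the flag removes the shadowing.
    return check_param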
https://bitbucket.org/galaxy/galaxy-central/commits/c95132dc4813/
changeset: c95132dc4813
user: dannon
date: 2013-03-05 21:59:06
summary: Fix incorrect addition of child_dataset to sa_session.
affected #: 1 file
diff -r c7fe14570d9a3f1823895978efd8e2734489a9a9 -r c95132dc4813d1b1bbfd94fcbc2dc3441947d1dd lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2833,8 +2833,7 @@
if outdata == dataset: continue
# Create new child dataset
child_data = child_dataset.copy( parent_id = dataset.id )
- #DBTODO should this be child_data, and not child_dataset here?
- self.sa_session.add( child_dataset )
+ self.sa_session.add( child_data )
self.sa_session.flush()
return children
def collect_primary_datasets( self, output, job_working_directory ):
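The one-line fix resolves the copy-then-persist slip flagged by the DBTODO comment in the previous commit: the code built child_data as a copy but added the original child_dataset to the session, leaving the new object untracked. A self-contained toy illustration (these classes are stand-ins, not Galaxy's API):

class Dataset( object ):
    def __init__( self, parent_id=None ):
        self.parent_id = parent_id
    def copy( self, parent_id=None ):
        return Dataset( parent_id=parent_id )

class Session( object ):
    def __init__( self ):
        self.pending = []
    def add( self, obj ):
        self.pending.append( obj )

sa_session = Session()
child_dataset = Dataset()
child_data = child_dataset.copy( parent_id=42 )
# Persist the copy that was just created, not the template
# it was copied from.
sa_session.add( child_data )
assert sa_session.pending == [ child_data ]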
https://bitbucket.org/galaxy/galaxy-central/commits/a7814c535231/
changeset: a7814c535231
user: dannon
date: 2013-03-05 21:59:30
summary: Strip whitespace.
affected #: 1 file
diff -r c95132dc4813d1b1bbfd94fcbc2dc3441947d1dd -r a7814c5352318f5e81d77315bc9fff413673d483 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -61,7 +61,7 @@
# These determine stdio-based error levels from matching on regular expressions
# and exit codes. They are meant to be used comparatively, such as showing
-# that warning < fatal. This is really meant to just be an enum.
+# that warning < fatal. This is really meant to just be an enum.
class StdioErrorLevel( object ):
NO_ERROR = 0
LOG = 1
@@ -69,9 +69,9 @@
FATAL = 3
MAX = 3
descs = {
- NO_ERROR : 'No error',
- LOG: 'Log',
- WARNING : 'Warning',
+ NO_ERROR : 'No error',
+ LOG: 'Log',
+ WARNING : 'Warning',
FATAL : 'Fatal error'
}
@staticmethod
@@ -163,10 +163,10 @@
root = tree.getroot()
tool_path = root.get( 'tool_path' )
if tool_path:
- # We're parsing a shed_tool_conf file since we have a tool_path attribute.
+ # We're parsing a shed_tool_conf file since we have a tool_path attribute.
parsing_shed_tool_conf = True
# Keep an in-memory list of xml elements to enable persistence of the changing tool config.
- config_elems = []
+ config_elems = []
else:
parsing_shed_tool_conf = False
# Default to backward compatible config setting.
@@ -302,7 +302,7 @@
self.integrated_tool_panel[ key ] = None
def write_integrated_tool_panel_config_file( self ):
"""
- Write the current in-memory version of the integrated_tool_panel.xml file to disk. Since Galaxy administrators
+ Write the current in-memory version of the integrated_tool_panel.xml file to disk. Since Galaxy administrators
use this file to manage the tool panel, we'll not use xml_to_string() since it doesn't write XML quite right.
"""
fd, filename = tempfile.mkstemp()
@@ -370,7 +370,7 @@
if tool.version == tool_version:
return tool
#No tool matches by version, simply return the first available tool found
- return rval[0]
+ return rval[0]
return None
def get_loaded_tools_by_lineage( self, tool_id ):
"""Get all loaded tools associated by lineage to the tool whose id is tool_id."""
@@ -614,7 +614,7 @@
return message, status
def load_workflow( self, workflow_id ):
"""
- Return an instance of 'Workflow' identified by `id`,
+ Return an instance of 'Workflow' identified by `id`,
which is encoded in the tool panel.
"""
id = self.app.security.decode_id( workflow_id )
@@ -631,9 +631,9 @@
Returns a SQLAlchemy session
"""
return self.app.model.context
-
+
def to_dict( self, trans, in_panel=True, trackster=False ):
-
+
def filter_for_panel( item, filters ):
"""
Filters tool panel elements so that only those that are compatible
@@ -648,12 +648,12 @@
if _apply_filter( item, filters[ 'tool' ] ):
return item
elif isinstance( item, ToolSectionLabel ):
- if _apply_filter( item, filters[ 'label' ] ):
+ if _apply_filter( item, filters[ 'label' ] ):
return item
elif isinstance( item, ToolSection ):
- # Filter section item-by-item. Only show a label if there are
+ # Filter section item-by-item. Only show a label if there are
# non-filtered tools below it.
-
+
if _apply_filter( item, filters[ 'section' ] ):
cur_label_key = None
tools_under_label = False
@@ -666,35 +666,35 @@
else:
del filtered_elems[ key ]
elif isinstance( section_item, ToolSectionLabel ):
- # If there is a label and it does not have tools,
+ # If there is a label and it does not have tools,
# remove it.
if ( cur_label_key and not tools_under_label ) or not _apply_filter( section_item, filters[ 'label' ] ):
del filtered_elems[ cur_label_key ]
-
+
# Reset attributes for new label.
cur_label_key = key
tools_under_label = False
-
-
+
+
# Handle last label.
if cur_label_key and not tools_under_label:
del filtered_elems[ cur_label_key ]
-
+
# Only return section if there are elements.
if len( filtered_elems ) != 0:
copy = item.copy()
copy.elems = filtered_elems
return copy
-
+
return None
-
- #
+
+ #
# Dictify toolbox.
- #
-
+ #
+
if in_panel:
panel_elts = [ val for val in self.tool_panel.itervalues() ]
-
+
# Filter if necessary.
filters = dict( tool=[ lambda x: not x._is_hidden_for_user( trans.user ) ], section=[], label=[] ) #hidden tools filter
if trackster:
@@ -705,7 +705,7 @@
if elt:
filtered_panel_elts.append( elt )
panel_elts = filtered_panel_elts
-
+
# Produce panel.
rval = []
for elt in panel_elts:
@@ -826,7 +826,7 @@
self.id = f( elem, 'id' )
self.version = f( elem, 'version' )
self.elems = odict()
-
+
def copy( self ):
copy = ToolSection()
copy.name = self.name
@@ -834,7 +834,7 @@
copy.version = self.version
copy.elems = self.elems.copy()
return copy
-
+
def to_dict( self, trans, for_link=False ):
""" Return a dict that includes section's attributes. """
section_elts = []
@@ -851,16 +851,16 @@
self.text = elem.get( "text" )
self.id = elem.get( "id" )
self.version = elem.get( "version" ) or ''
-
+
def to_dict( self, trans, **kwargs ):
""" Return a dict that includes label's attributes. """
return { 'type': 'label', 'id': self.id, 'name': self.text, 'version': self.version }
class DefaultToolState( object ):
"""
- Keeps track of the state of a users interaction with a tool between
- requests. The default tool state keeps track of the current page (for
- multipage "wizard" tools) and the values of all
+ Keeps track of the state of a users interaction with a tool between
+ requests. The default tool state keeps track of the current page (for
+ multipage "wizard" tools) and the values of all
"""
def __init__( self ):
self.page = 0
@@ -901,10 +901,10 @@
Represents an output datasets produced by a tool. For backward
compatibility this behaves as if it were the tuple::
- (format, metadata_source, parent)
+ (format, metadata_source, parent)
"""
- def __init__( self, name, format=None, format_source=None, metadata_source=None,
+ def __init__( self, name, format=None, format_source=None, metadata_source=None,
parent=None, label=None, filters = None, actions = None, hidden=False ):
self.name = name
self.format = format
@@ -918,11 +918,11 @@
# Tuple emulation
- def __len__( self ):
+ def __len__( self ):
return 3
def __getitem__( self, index ):
- if index == 0:
+ if index == 0:
return self.format
elif index == 1:
return self.metadata_source
@@ -933,7 +933,7 @@
def __iter__( self ):
return iter( ( self.format, self.metadata_source, self.parent ) )
-
+
def to_dict( self ):
return {
'name': self.name,
@@ -954,12 +954,12 @@
class Tool( object ):
"""
- Represents a computational tool that can be executed through Galaxy.
+ Represents a computational tool that can be executed through Galaxy.
"""
-
+
tool_type = 'default'
default_tool_action = DefaultToolAction
-
+
def __init__( self, config_file, root, app, guid=None ):
"""Load a tool from the config named by `config_file`"""
# Determine the full path of the directory where the tool config is
@@ -1091,15 +1091,15 @@
"""
# Get the (user visible) name of the tool
self.name = root.get( "name" )
- if not self.name:
+ if not self.name:
raise Exception, "Missing tool 'name'"
- # Get the UNIQUE id for the tool
+ # Get the UNIQUE id for the tool
self.old_id = root.get( "id" )
if guid is None:
self.id = self.old_id
else:
self.id = guid
- if not self.id:
+ if not self.id:
raise Exception, "Missing tool 'id'"
self.version = root.get( "version" )
if not self.version:
@@ -1107,8 +1107,8 @@
self.version = "1.0.0"
# Support multi-byte tools
self.is_multi_byte = string_as_bool( root.get( "is_multi_byte", False ) )
- # Force history to fully refresh after job execution for this tool.
- # Useful i.e. when an indeterminate number of outputs are created by
+ # Force history to fully refresh after job execution for this tool.
+ # Useful i.e. when an indeterminate number of outputs are created by
# a tool.
self.force_history_refresh = string_as_bool( root.get( 'force_history_refresh', 'False' ) )
self.display_interface = string_as_bool( root.get( 'display_interface', str( self.display_interface ) ) )
@@ -1117,7 +1117,7 @@
self.input_translator = root.find( "request_param_translation" )
if self.input_translator:
self.input_translator = ToolInputTranslator.from_element( self.input_translator )
- # Command line (template). Optional for tools that do not invoke a local program
+ # Command line (template). Optional for tools that do not invoke a local program
command = root.find("command")
if command is not None and command.text is not None:
self.command = command.text.lstrip() # get rid of leading whitespace
@@ -1138,7 +1138,7 @@
self.redirect_url_params = ''
# Short description of the tool
self.description = xml_text(root, "description")
- # Versioning for tools
+ # Versioning for tools
self.version_string_cmd = None
version_cmd = root.find("version_command")
if version_cmd is not None:
@@ -1304,8 +1304,8 @@
break
def parse_help( self, root ):
"""
- Parse the help text for the tool. Formatted in reStructuredText, but
- stored as Mako to allow for dynamic image paths.
+ Parse the help text for the tool. Formatted in reStructuredText, but
+ stored as Mako to allow for dynamic image paths.
This implementation supports multiple pages.
"""
# TODO: Allow raw HTML or an external link.
@@ -1329,15 +1329,15 @@
help_footer = help_footer + help_page.tail
# Each page has to rendered all-together because of backreferences allowed by rst
try:
- self.help_by_page = [ Template( rst_to_html( help_header + x + help_footer,
- input_encoding='utf-8', output_encoding='utf-8',
+ self.help_by_page = [ Template( rst_to_html( help_header + x + help_footer,
+ input_encoding='utf-8', output_encoding='utf-8',
default_filters=[ 'decode.utf8' ],
encoding_errors='replace' ) )
for x in self.help_by_page ]
except:
log.exception( "error in multi-page help for tool %s" % self.name )
# Pad out help pages to match npages ... could this be done better?
- while len( self.help_by_page ) < self.npages:
+ while len( self.help_by_page ) < self.npages:
self.help_by_page.append( self.help )
def parse_outputs( self, root ):
"""
@@ -1366,18 +1366,18 @@
# TODO: Include the tool's name in any parsing warnings.
def parse_stdio( self, root ):
"""
- Parse <stdio> element(s) and fill in self.return_codes,
- self.stderr_rules, and self.stdout_rules. Return codes have a range
- and an error type (fault or warning). Stderr and stdout rules have
+ Parse <stdio> element(s) and fill in self.return_codes,
+ self.stderr_rules, and self.stdout_rules. Return codes have a range
+ and an error type (fault or warning). Stderr and stdout rules have
a regular expression and an error level (fault or warning).
"""
try:
self.stdio_exit_codes = list()
self.stdio_regexes = list()
- # We should have a single <stdio> element, but handle the case for
- # multiples.
- # For every stdio element, add all of the exit_code and regex
+ # We should have a single <stdio> element, but handle the case for
+ # multiples.
+ # For every stdio element, add all of the exit_code and regex
# subelements that we find:
for stdio_elem in ( root.findall( 'stdio' ) ):
self.parse_stdio_exit_codes( stdio_elem )
@@ -1388,12 +1388,12 @@
def parse_stdio_exit_codes( self, stdio_elem ):
"""
Parse the tool's <stdio> element's <exit_code> subelements.
- This will add all of those elements, if any, to self.stdio_exit_codes.
+ This will add all of those elements, if any, to self.stdio_exit_codes.
"""
try:
- # Look for all <exit_code> elements. Each exit_code element must
+ # Look for all <exit_code> elements. Each exit_code element must
# have a range/value.
- # Exit-code ranges have precedence over a single exit code.
+ # Exit-code ranges have precedence over a single exit code.
# So if there are value and range attributes, we use the range
# attribute. If there is neither a range nor a value, then print
# a warning and skip to the next.
@@ -1404,7 +1404,7 @@
exit_code.desc = exit_code_elem.get( "desc" )
if None == exit_code.desc:
exit_code.desc = exit_code_elem.get( "description" )
- # Parse the error level:
+ # Parse the error level:
exit_code.error_level = (
self.parse_error_level( exit_code_elem.get( "level" )))
code_range = exit_code_elem.get( "range", "" )
@@ -1417,20 +1417,20 @@
# Parse the range. We look for:
# :Y
# X:
- # X:Y - Split on the colon. We do not allow a colon
- # without a beginning or end, though we could.
+ # X:Y - Split on the colon. We do not allow a colon
+ # without a beginning or end, though we could.
# Also note that whitespace is eliminated.
- # TODO: Turn this into a single match - it should be
+ # TODO: Turn this into a single match - it should be
# more efficient.
code_range = re.sub( "\s", "", code_range )
code_ranges = re.split( ":", code_range )
if ( len( code_ranges ) == 2 ):
if ( None == code_ranges[0] or '' == code_ranges[0] ):
- exit_code.range_start = float( "-inf" )
+ exit_code.range_start = float( "-inf" )
else:
exit_code.range_start = int( code_ranges[0] )
if ( None == code_ranges[1] or '' == code_ranges[1] ):
- exit_code.range_end = float( "inf" )
+ exit_code.range_end = float( "inf" )
else:
exit_code.range_end = int( code_ranges[1] )
# If we got more than one colon, then ignore the exit code.
@@ -1448,48 +1448,48 @@
log.warning( "Invalid range start for tool's exit_code %s: exit_code ignored" % code_range )
continue
exit_code.range_end = exit_code.range_start
- # TODO: Check if we got ">", ">=", "<", or "<=":
- # Check that the range, regardless of how we got it,
- # isn't bogus. If we have two infinite values, then
- # the start must be -inf and the end must be +inf.
+ # TODO: Check if we got ">", ">=", "<", or "<=":
+ # Check that the range, regardless of how we got it,
+ # isn't bogus. If we have two infinite values, then
+ # the start must be -inf and the end must be +inf.
# So at least warn about this situation:
- if ( isinf( exit_code.range_start ) and
+ if ( isinf( exit_code.range_start ) and
isinf( exit_code.range_end ) ):
log.warning( "Tool exit_code range %s will match on "
+ "all exit codes" % code_range )
self.stdio_exit_codes.append( exit_code )
except Exception:
- log.error( "Exception in parse_stdio_exit_codes! "
+ log.error( "Exception in parse_stdio_exit_codes! "
+ str(sys.exc_info()) )
trace = sys.exc_info()[2]
if ( None != trace ):
trace_msg = repr( traceback.format_tb( trace ) )
- log.error( "Traceback: %s" % trace_msg )
+ log.error( "Traceback: %s" % trace_msg )
def parse_stdio_regexes( self, stdio_elem ):
- """
+ """
Look in the tool's <stdio> elem for all <regex> subelements
- that define how to look for warnings and fatal errors in
+ that define how to look for warnings and fatal errors in
stdout and stderr. This will add all such regex elements
to the Tols's stdio_regexes list.
"""
try:
# Look for every <regex> subelement. The regular expression
- # will have "match" and "source" (or "src") attributes.
+ # will have "match" and "source" (or "src") attributes.
for regex_elem in ( stdio_elem.findall( "regex" ) ):
# TODO: Fill in ToolStdioRegex
- regex = ToolStdioRegex()
+ regex = ToolStdioRegex()
# Each regex has an optional description that can be
# part of the "desc" or "description" attributes:
regex.desc = regex_elem.get( "desc" )
if None == regex.desc:
regex.desc = regex_elem.get( "description" )
- # Parse the error level
- regex.error_level = (
+ # Parse the error level
+ regex.error_level = (
self.parse_error_level( regex_elem.get( "level" ) ) )
regex.match = regex_elem.get( "match", "" )
if None == regex.match:
- # TODO: Convert the offending XML element to a string
+ # TODO: Convert the offending XML element to a string
log.warning( "Ignoring tool's stdio regex element %s - "
"the 'match' attribute must exist" )
continue
@@ -1506,7 +1506,7 @@
if None == output_srcs:
output_srcs = "output,error"
output_srcs = re.sub( "\s", "", output_srcs )
- src_list = re.split( ",", output_srcs )
+ src_list = re.split( ",", output_srcs )
# Just put together anything to do with "out", including
# "stdout", "output", etc. Repeat for "stderr", "error",
# and anything to do with "err". If neither stdout nor
@@ -1527,12 +1527,12 @@
regex.stderr_match = True
self.stdio_regexes.append( regex )
except Exception:
- log.error( "Exception in parse_stdio_exit_codes! "
+ log.error( "Exception in parse_stdio_exit_codes! "
+ str(sys.exc_info()) )
trace = sys.exc_info()[2]
if ( None != trace ):
trace_msg = repr( traceback.format_tb( trace ) )
- log.error( "Traceback: %s" % trace_msg )
+ log.error( "Traceback: %s" % trace_msg )
# TODO: This method doesn't have to be part of the Tool class.
def parse_error_level( self, err_level ):
@@ -1540,24 +1540,24 @@
Parses error level and returns error level enumeration. If
unparsable, returns 'fatal'
"""
- return_level = StdioErrorLevel.FATAL
+ return_level = StdioErrorLevel.FATAL
try:
if err_level:
if ( re.search( "log", err_level, re.IGNORECASE ) ):
return_level = StdioErrorLevel.LOG
elif ( re.search( "warning", err_level, re.IGNORECASE ) ):
- return_level = StdioErrorLevel.WARNING
+ return_level = StdioErrorLevel.WARNING
elif ( re.search( "fatal", err_level, re.IGNORECASE ) ):
return_level = StdioErrorLevel.FATAL
else:
log.debug( "Error level %s did not match warning/fatal" % err_level )
except Exception:
- log.error( "Exception in parse_error_level "
+ log.error( "Exception in parse_error_level "
+ str(sys.exc_info() ) )
trace = sys.exc_info()[2]
if ( None != trace ):
trace_msg = repr( traceback.format_tb( trace ) )
- log.error( "Traceback: %s" % trace_msg )
+ log.error( "Traceback: %s" % trace_msg )
return return_level
def parse_tests( self, tests_elem ):
@@ -1566,9 +1566,9 @@
store in `self.tests`.
"""
self.tests = []
- # Composite datasets need a unique name: each test occurs in a fresh
+ # Composite datasets need a unique name: each test occurs in a fresh
# history, but we'll keep it unique per set of tests
- composite_data_names_counter = 0
+ composite_data_names_counter = 0
for i, test_elem in enumerate( tests_elem.findall( 'test' ) ):
name = test_elem.get( 'name', 'Test-%d' % (i+1) )
maxseconds = int( test_elem.get( 'maxseconds', '120' ) )
@@ -1584,20 +1584,20 @@
value = None
attrib['children'] = list( param_elem.getchildren() )
if attrib['children']:
- # At this time, we can assume having children only
- # occurs on DataToolParameter test items but this could
- # change and would cause the below parsing to change
+ # At this time, we can assume having children only
+ # occurs on DataToolParameter test items but this could
+ # change and would cause the below parsing to change
# based upon differences in children items
attrib['metadata'] = []
attrib['composite_data'] = []
attrib['edit_attributes'] = []
# Composite datasets need to be renamed uniquely
- composite_data_name = None
+ composite_data_name = None
for child in attrib['children']:
if child.tag == 'composite_data':
attrib['composite_data'].append( child )
if composite_data_name is None:
- # Generate a unique name; each test uses a
+ # Generate a unique name; each test uses a
# fresh history
composite_data_name = '_COMPOSITE_RENAMED_%i_' \
% ( composite_data_names_counter )
@@ -1609,10 +1609,10 @@
elif child.tag == 'edit_attributes':
attrib['edit_attributes'].append( child )
if composite_data_name:
- # Composite datasets need implicit renaming;
- # inserted at front of list so explicit declarations
+ # Composite datasets need implicit renaming;
+ # inserted at front of list so explicit declarations
# take precedence
- attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } )
+ attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } )
test.add_param( attrib.pop( 'name' ), value, attrib )
for output_elem in test_elem.findall( "output" ):
attrib = dict( output_elem.attrib )
@@ -1621,8 +1621,8 @@
raise Exception( "Test output does not have a 'name'" )
assert_elem = output_elem.find("assert_contents")
assert_list = None
- # Trying to keep testing patch as localized as
- # possible, this function should be relocated
+ # Trying to keep testing patch as localized as
+ # possible, this function should be relocated
# somewhere more conventional.
def convert_elem(elem):
""" Converts and XML element to a dictionary format, used by assertion checking code. """
@@ -1638,34 +1638,34 @@
for assert_child in list(assert_elem):
assert_list.append(convert_elem(assert_child))
file = attrib.pop( 'file', None )
- # File no longer required if an list of assertions was present.
+ # File no longer required if an list of assertions was present.
if assert_list is None and file is None:
raise Exception( "Test output does not have a 'file'")
attributes = {}
# Method of comparison
- attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower()
- # Number of lines to allow to vary in logs (for dates, etc)
- attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) )
+ attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower()
+ # Number of lines to allow to vary in logs (for dates, etc)
+ attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) )
# Allow a file size to vary if sim_size compare
- attributes['delta'] = int( attrib.pop( 'delta', '10000' ) )
+ attributes['delta'] = int( attrib.pop( 'delta', '10000' ) )
attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) )
attributes['extra_files'] = []
attributes['assert_list'] = assert_list
if 'ftype' in attrib:
attributes['ftype'] = attrib['ftype']
for extra in output_elem.findall( 'extra_files' ):
- # File or directory, when directory, compare basename
+ # File or directory, when directory, compare basename
# by basename
- extra_type = extra.get( 'type', 'file' )
+ extra_type = extra.get( 'type', 'file' )
extra_name = extra.get( 'name', None )
assert extra_type == 'directory' or extra_name is not None, \
'extra_files type (%s) requires a name attribute' % extra_type
extra_value = extra.get( 'value', None )
assert extra_value is not None, 'extra_files requires a value attribute'
extra_attributes = {}
- extra_attributes['compare'] = extra.get( 'compare', 'diff' ).lower()
- extra_attributes['delta'] = extra.get( 'delta', '0' )
- extra_attributes['lines_diff'] = int( extra.get( 'lines_diff', '0' ) )
+ extra_attributes['compare'] = extra.get( 'compare', 'diff' ).lower()
+ extra_attributes['delta'] = extra.get( 'delta', '0' )
+ extra_attributes['lines_diff'] = int( extra.get( 'lines_diff', '0' ) )
extra_attributes['sort'] = string_as_bool( extra.get( 'sort', False ) )
attributes['extra_files'].append( ( extra_type, extra_value, extra_name, extra_attributes ) )
test.add_output( name, file, attributes )
@@ -1689,7 +1689,7 @@
return display, inputs
def parse_input_elem( self, parent_elem, enctypes, context=None ):
"""
- Parse a parent element whose children are inputs -- these could be
+ Parse a parent element whose children are inputs -- these could be
groups (repeat, conditional) or param elements. Groups will be parsed
recursively.
"""
@@ -1706,11 +1706,11 @@
group.default = int( elem.get( "default", 0 ) )
group.min = int( elem.get( "min", 0 ) )
# Use float instead of int so that 'inf' can be used for no max
- group.max = float( elem.get( "max", "inf" ) )
+ group.max = float( elem.get( "max", "inf" ) )
assert group.min <= group.max, \
ValueError( "Min repeat count must be less-than-or-equal to the max." )
# Force default to be within min-max range
- group.default = min( max( group.default, group.min ), group.max )
+ group.default = min( max( group.default, group.min ), group.max )
rval[group.name] = group
elif elem.tag == "conditional":
group = Conditional()
@@ -1729,7 +1729,7 @@
case = ConditionalWhen()
case.value = case_value
if case_inputs:
- case.inputs = self.parse_input_elem(
+ case.inputs = self.parse_input_elem(
ElementTree.XML( "<when>%s</when>" % case_inputs ), enctypes, context )
else:
case.inputs = odict()
@@ -1751,10 +1751,10 @@
try:
possible_cases.remove( case.value )
except:
- log.warning( "Tool %s: a when tag has been defined for '%s (%s) --> %s', but does not appear to be selectable." %
+ log.warning( "Tool %s: a when tag has been defined for '%s (%s) --> %s', but does not appear to be selectable." %
( self.id, group.name, group.test_param.name, case.value ) )
for unspecified_case in possible_cases:
- log.warning( "Tool %s: a when tag has not been defined for '%s (%s) --> %s', assuming empty inputs." %
+ log.warning( "Tool %s: a when tag has not been defined for '%s (%s) --> %s', assuming empty inputs." %
( self.id, group.name, group.test_param.name, unspecified_case ) )
case = ConditionalWhen()
case.value = unspecified_case
@@ -1764,7 +1764,7 @@
elif elem.tag == "upload_dataset":
group = UploadDataset()
group.name = elem.get( "name" )
- group.title = elem.get( "title" )
+ group.title = elem.get( "title" )
group.file_type_name = elem.get( 'file_type_name', group.file_type_name )
group.default_file_type = elem.get( 'default_file_type', group.default_file_type )
group.metadata_ref = elem.get( 'metadata_ref', group.metadata_ref )
@@ -1782,7 +1782,7 @@
return rval
def parse_param_elem( self, input_elem, enctypes, context ):
"""
- Parse a single "<param>" element and return a ToolParameter instance.
+ Parse a single "<param>" element and return a ToolParameter instance.
Also, if the parameter has a 'required_enctype' add it to the set
enctypes.
"""
@@ -1827,8 +1827,8 @@
def new_state( self, trans, all_pages=False ):
"""
Create a new `DefaultToolState` for this tool. It will be initialized
- with default values for inputs.
-
+ with default values for inputs.
+
Only inputs on the first page will be initialized unless `all_pages` is
True, in which case all inputs regardless of page are initialized.
"""
@@ -1843,17 +1843,17 @@
def fill_in_new_state( self, trans, inputs, state, context=None ):
"""
Fill in a tool state dictionary with default values for all parameters
- in the dictionary `inputs`. Grouping elements are filled in recursively.
+ in the dictionary `inputs`. Grouping elements are filled in recursively.
"""
context = ExpressionContext( state, context )
for input in inputs.itervalues():
state[ input.name ] = input.get_initial_value( trans, context )
def get_param_html_map( self, trans, page=0, other_values={} ):
"""
- Return a dictionary containing the HTML representation of each
- parameter. This is used for rendering display elements. It is
+ Return a dictionary containing the HTML representation of each
+ parameter. This is used for rendering display elements. It is
currently not compatible with grouping constructs.
-
+
NOTE: This should be considered deprecated, it is only used for tools
with `display` elements. These should be eliminated.
"""
@@ -1865,7 +1865,7 @@
return rval
def get_param( self, key ):
"""
- Returns the parameter named `key` or None if there is no such
+ Returns the parameter named `key` or None if there is no such
parameter.
"""
return self.inputs.get( key, None )
@@ -1886,7 +1886,7 @@
Call the function `callback` on each parameter of this tool. Visits
grouping parameters recursively and constructs unique prefixes for
each nested set of parameters. The callback method is then called as:
-
+
`callback( level_prefix, parameter, parameter_value )`
"""
# HACK: Yet another hack around check_values -- WHY HERE?
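# Editor's sketch (hedged, not from the commit): what a visitor callback
# for the traversal described above might look like, following the
# documented callback( level_prefix, parameter, parameter_value )
# signature. The commented call site is hypothetical.
def dump_param(level_prefix, parameter, parameter_value):
    print('%s%s = %r' % (level_prefix, parameter.name, parameter_value))

# tool.visit_inputs( state.inputs, dump_param )  # hypothetical usage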
@@ -1915,7 +1915,7 @@
# of inputs even when there is no state in the incoming dictionary
# by providing either 'runtool_btn' (the name of the submit button
# on the standard run form) or "URL" (a parameter provided by
- # external data source tools).
+ # external data source tools).
if "runtool_btn" not in incoming and "URL" not in incoming:
if not self.display_interface:
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
@@ -1933,7 +1933,7 @@
# Update state for all inputs on the current page taking new
# values from `incoming`.
errors = self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
- # If the tool provides a `validate_input` hook, call it.
+ # If the tool provides a `validate_input` hook, call it.
validate_input = self.get_hook( 'validate_input' )
if validate_input:
validate_input( trans, errors, state.inputs, self.inputs_by_page[state.page] )
@@ -1941,10 +1941,10 @@
# Did the user actually click next / execute or is this just
# a refresh?
if 'runtool_btn' in incoming or 'URL' in incoming or 'ajax_upload' in incoming:
- # If there were errors, we stay on the same page and display
+ # If there were errors, we stay on the same page and display
# error messages
if errors:
- error_message = "One or more errors were found in the input you provided. The specific errors are marked below."
+ error_message = "One or more errors were found in the input you provided. The specific errors are marked below."
return "tool_form.mako", dict( errors=errors, tool_state=state, incoming=incoming, error_message=error_message )
# If we've completed the last page we can execute the tool
elif state.page == self.last_page:
@@ -2010,10 +2010,10 @@
log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id )
continue
if trans.user is None and trans.galaxy_session.current_history != data.history:
- log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)'
- % ( data.id, trans.galaxy_session.id ) )
+ log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)'
+ % ( data.id, trans.galaxy_session.id ) )
elif data.history.user != trans.user:
- log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)'
+ log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)'
% ( data.id, trans.user.id ) )
else:
data.state = data.states.ERROR
@@ -2021,22 +2021,22 @@
self.sa_session.add( data )
self.sa_session.flush()
# It's unlikely the user will ever see this.
-                return 'message.mako', dict( status='error',
-                                             message='Your upload was interrupted. If this was unintentional, please retry it.',
+                return 'message.mako', dict( status='error',
+                                             message='Your upload was interrupted. If this was unintentional, please retry it.',
refresh_frames=[], cont=None )
def update_state( self, trans, inputs, state, incoming, prefix="", context=None,
update_only=False, old_errors={}, item_callback=None ):
"""
- Update the tool state in `state` using the user input in `incoming`.
+ Update the tool state in `state` using the user input in `incoming`.
This is designed to be called recursively: `inputs` contains the
set of inputs being processed, and `prefix` specifies a prefix to
        add to the name of each input to extract its value from `incoming`.
-
+
If `update_only` is True, values that are not in `incoming` will
not be modified. In this case `old_errors` can be provided, and any
errors for parameters which were *not* updated will be preserved.
"""
- errors = dict()
+ errors = dict()
# Push this level onto the context stack
context = ExpressionContext( state, context )
# Iterate inputs and update (recursively)
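# Editor's sketch (hedged, not from the commit): how the recursive
# prefixes flatten nested inputs into incoming-dict keys. The '|'
# separator follows the group_prefix code below; the '_%d' repeat
# numbering and the parameter names are assumptions.
prefix = ''
group_prefix = '%s|' % (prefix + 'options')        # conditional 'options'
rep_prefix = '%squeries_%d|' % (group_prefix, 0)   # first 'queries' repeat
print(rep_prefix + 'input2')                       # options|queries_0|input2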
@@ -2045,7 +2045,7 @@
if isinstance( input, Repeat ):
group_state = state[input.name]
# Create list of empty errors for each previously existing state
- group_errors = [ {} for i in range( len( group_state ) ) ]
+ group_errors = [ {} for i in range( len( group_state ) ) ]
group_old_errors = old_errors.get( input.name, None )
any_group_errors = False
# Check any removals before updating state -- only one
@@ -2062,7 +2062,7 @@
else:
group_errors[i] = { '__index__': 'Cannot remove repeat (min size=%i).' % input.min }
any_group_errors = True
- # Only need to find one that can't be removed due to size, since only
+ # Only need to find one that can't be removed due to size, since only
                        # one removal is processed at a time anyway
break
elif group_old_errors and group_old_errors[i]:
@@ -2079,9 +2079,9 @@
else:
rep_old_errors = {}
rep_errors = self.update_state( trans,
- input.inputs,
- rep_state,
- incoming,
+ input.inputs,
+ rep_state,
+ incoming,
prefix=rep_prefix,
context=context,
update_only=update_only,
@@ -2110,8 +2110,8 @@
old_current_case = group_state['__current_case__']
group_prefix = "%s|" % ( key )
# Deal with the 'test' element and see if it's value changed
-            if input.value_ref and not input.value_ref_in_group:
-                # We are referencing an existing parameter, which is not
+            if input.value_ref and not input.value_ref_in_group:
+                # We are referencing an existing parameter, which is not
# part of this group
test_param_key = prefix + input.test_param.name
else:
@@ -2141,10 +2141,10 @@
group_old_errors = dict()
else:
# Current case has not changed, update children
- group_errors = self.update_state( trans,
- input.cases[current_case].inputs,
+ group_errors = self.update_state( trans,
+ input.cases[current_case].inputs,
group_state,
- incoming,
+ incoming,
prefix=group_prefix,
context=context,
update_only=update_only,
@@ -2183,9 +2183,9 @@
else:
rep_old_errors = {}
rep_errors = self.update_state( trans,
- input.inputs,
- rep_state,
- incoming,
+ input.inputs,
+ rep_state,
+ incoming,
prefix=rep_prefix,
context=context,
update_only=update_only,
@@ -2224,7 +2224,7 @@
# If a callback was provided, allow it to process the value
if item_callback:
old_value = state.get( input.name, None )
- value, error = item_callback( trans, key, input, value, error, old_value, context )
+ value, error = item_callback( trans, key, input, value, error, old_value, context )
if error:
errors[ input.name ] = error
state[ input.name ] = value
@@ -2245,7 +2245,7 @@
@property
def params_with_missing_index_file( self ):
"""
- Return all parameters that are dynamically generated
+ Return all parameters that are dynamically generated
select lists whose options refer to a missing .loc file.
"""
params = []
@@ -2257,7 +2257,7 @@
return params
def get_static_param_values( self, trans ):
"""
- Returns a map of parameter names and values if the tool does not
+ Returns a map of parameter names and values if the tool does not
require any user input. Will raise an exception if any parameter
does require input.
"""
@@ -2273,8 +2273,8 @@
def execute( self, trans, incoming={}, set_output_hid=True, history=None, **kwargs ):
"""
Execute the tool using parameter values in `incoming`. This just
- dispatches to the `ToolAction` instance specified by
- `self.tool_action`. In general this will create a `Job` that
+ dispatches to the `ToolAction` instance specified by
+ `self.tool_action`. In general this will create a `Job` that
when run will build the tool's outputs, e.g. `DefaultToolAction`.
"""
return self.tool_action.execute( self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs )
@@ -2286,7 +2286,7 @@
"""
Check that all parameters have values, and fill in with default
values where necessary. This could be called after loading values
- from a database in case new parameters have been added.
+ from a database in case new parameters have been added.
"""
messages = {}
self.check_and_update_param_values_helper( self.inputs, values, trans, messages, update_values=update_values, allow_workflow_parameters=allow_workflow_parameters )
@@ -2329,9 +2329,9 @@
messages[ input.test_param.name ] = "No value found for '%s%s', used default" % ( prefix, input.test_param.label )
current_case = group_values['__current_case__']
for child_input in input.cases[current_case].inputs.itervalues():
- messages[ child_input.name ] = "Value no longer valid for '%s%s', replaced with default" % ( prefix, child_input.label )
+ messages[ child_input.name ] = "Value no longer valid for '%s%s', replaced with default" % ( prefix, child_input.label )
else:
- current = group_values["__current_case__"]
+ current = group_values["__current_case__"]
self.check_and_update_param_values_helper( input.cases[current].inputs, group_values, trans, messages, context, prefix )
else:
# Regular tool parameter, no recursion needed
@@ -2350,7 +2350,7 @@
def handle_unvalidated_param_values( self, input_values, app ):
"""
Find any instances of `UnvalidatedValue` within input_values and
- validate them (by calling `ToolParameter.from_html` and
+ validate them (by calling `ToolParameter.from_html` and
`ToolParameter.validate`).
"""
# No validation is done when check_values is False
@@ -2363,7 +2363,7 @@
"""
context = ExpressionContext( input_values, context )
for input in inputs.itervalues():
- if isinstance( input, Repeat ):
+ if isinstance( input, Repeat ):
for i, d in enumerate( input_values[ input.name ] ):
rep_prefix = prefix + "%s %d > " % ( input.title, i + 1 )
self.handle_unvalidated_param_values_helper( input.inputs, d, app, context, rep_prefix )
@@ -2412,8 +2412,8 @@
"""
Build the dictionary of parameters for substituting into the command
line. Each value is wrapped in a `InputValueWrapper`, which allows
- all the attributes of the value to be used in the template, *but*
- when the __str__ method is called it actually calls the
+ all the attributes of the value to be used in the template, *but*
+ when the __str__ method is called it actually calls the
`to_param_dict_string` method of the associated input.
"""
param_dict = dict()
@@ -2426,7 +2426,7 @@
            Wraps parameters as necessary.
"""
for input in inputs.itervalues():
- if isinstance( input, Repeat ):
+ if isinstance( input, Repeat ):
for d in input_values[ input.name ]:
wrap_values( input.inputs, d )
elif isinstance( input, Conditional ):
@@ -2438,37 +2438,37 @@
DatasetListWrapper( input_values[ input.name ],
datatypes_registry = self.app.datatypes_registry,
tool = self,
- name = input.name )
+ name = input.name )
elif isinstance( input, DataToolParameter ):
- ## FIXME: We're populating param_dict with conversions when
- ## wrapping values, this should happen as a separate
- ## step before wrapping (or call this wrapping step
- ## something more generic) (but iterating this same
+ ## FIXME: We're populating param_dict with conversions when
+ ## wrapping values, this should happen as a separate
+ ## step before wrapping (or call this wrapping step
+ ## something more generic) (but iterating this same
## list twice would be wasteful)
# Add explicit conversions by name to current parent
for conversion_name, conversion_extensions, conversion_datatypes in input.conversions:
- # If we are at building cmdline step, then converters
+ # If we are at building cmdline step, then converters
# have already executed
conv_ext, converted_dataset = input_values[ input.name ].find_conversion_destination( conversion_datatypes )
- # When dealing with optional inputs, we'll provide a
+ # When dealing with optional inputs, we'll provide a
# valid extension to be used for None converted dataset
if not conv_ext:
conv_ext = conversion_extensions[0]
- # input_values[ input.name ] is None when optional
- # dataset, 'conversion' of optional dataset should
+ # input_values[ input.name ] is None when optional
+ # dataset, 'conversion' of optional dataset should
# create wrapper around NoneDataset for converter output
- if input_values[ input.name ] and not converted_dataset:
- # Input that converter is based from has a value,
+ if input_values[ input.name ] and not converted_dataset:
+ # Input that converter is based from has a value,
# but converted dataset does not exist
- raise Exception( 'A path for explicit datatype conversion has not been found: %s --/--> %s'
+ raise Exception( 'A path for explicit datatype conversion has not been found: %s --/--> %s'
% ( input_values[ input.name ].extension, conversion_extensions ) )
else:
- # Trick wrapper into using target conv ext (when
+ # Trick wrapper into using target conv ext (when
# None) without actually being a tool parameter
input_values[ conversion_name ] = \
DatasetFilenameWrapper( converted_dataset,
datatypes_registry = self.app.datatypes_registry,
- tool = Bunch( conversion_name = Bunch( extensions = conv_ext ) ),
+ tool = Bunch( conversion_name = Bunch( extensions = conv_ext ) ),
name = conversion_name )
# Wrap actual input dataset
input_values[ input.name ] = \
@@ -2477,15 +2477,15 @@
tool = self,
name = input.name )
elif isinstance( input, SelectToolParameter ):
- input_values[ input.name ] = SelectToolParameterWrapper(
+ input_values[ input.name ] = SelectToolParameterWrapper(
input, input_values[ input.name ], self.app, other_values = param_dict )
-
+
elif isinstance( input, LibraryDatasetToolParameter ):
- input_values[ input.name ] = LibraryDatasetValueWrapper(
+ input_values[ input.name ] = LibraryDatasetValueWrapper(
input, input_values[ input.name ], param_dict )
-
+
else:
- input_values[ input.name ] = InputValueWrapper(
+ input_values[ input.name ] = InputValueWrapper(
input, input_values[ input.name ], param_dict )
# HACK: only wrap if check_values is not false, this deals with external
@@ -2494,15 +2494,15 @@
if self.check_values:
wrap_values( self.inputs, param_dict )
- ## FIXME: when self.check_values==True, input datasets are being wrapped
- ## twice (above and below, creating 2 separate
- ## DatasetFilenameWrapper objects - first is overwritten by
- ## second), is this necessary? - if we get rid of this way to
- ## access children, can we stop this redundancy, or is there
+ ## FIXME: when self.check_values==True, input datasets are being wrapped
+ ## twice (above and below, creating 2 separate
+ ## DatasetFilenameWrapper objects - first is overwritten by
+ ## second), is this necessary? - if we get rid of this way to
+ ## access children, can we stop this redundancy, or is there
## another reason for this?
-    ## - Only necessary when self.check_values is False (==external dataset
-    ##   tool?: can this be abstracted out as part of being a datasource tool?)
-    ## - But we still want (ALWAYS) to wrap input datasets (this should be
+    ## - Only necessary when self.check_values is False (==external dataset
+    ##   tool?: can this be abstracted out as part of being a datasource tool?)
+    ## - But we still want (ALWAYS) to wrap input datasets (this should be
## checked to prevent overhead of creating a new object?)
# Additionally, datasets go in the param dict. We wrap them such that
# if the bare variable name is used it returns the filename (for
@@ -2523,7 +2523,7 @@
for child in data.children:
param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child )
for name, hda in output_datasets.items():
- # Write outputs to the working directory (for security purposes)
+ # Write outputs to the working directory (for security purposes)
# if desired.
if self.app.config.outputs_to_working_directory:
try:
@@ -2542,8 +2542,8 @@
param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child )
for out_name, output in self.outputs.iteritems():
if out_name not in param_dict and output.filters:
-                # Assume the reason we lack this output is because a filter
-                # failed to pass; for tool writing convenience, provide a
+                # Assume the reason we lack this output is because a filter
+                # failed to pass; for tool writing convenience, provide a
# NoneDataset
param_dict[ out_name ] = NoneDataset( datatypes_registry = self.app.datatypes_registry, ext = output.format )
@@ -2557,20 +2557,20 @@
if table_name in self.app.tool_data_tables:
return self.app.tool_data_tables[ table_name ].get_entry( query_attr, query_val, return_attr )
-
+
param_dict['__get_data_table_entry__'] = get_data_table_entry
# We add access to app here, this allows access to app.config, etc
param_dict['__app__'] = RawObjectWrapper( self.app )
-        # More convenient access to app.config.new_file_path; we don't need to
-        # wrap a string, but this method of generating additional datasets
+        # More convenient access to app.config.new_file_path; we don't need to
+        # wrap a string, but this method of generating additional datasets
# should be considered DEPRECATED
# TODO: path munging for cluster/dataset server relocatability
param_dict['__new_file_path__'] = os.path.abspath(self.app.config.new_file_path)
- # The following points to location (xxx.loc) files which are pointers
+ # The following points to location (xxx.loc) files which are pointers
# to locally cached data
param_dict['__tool_data_path__'] = param_dict['GALAXY_DATA_INDEX_DIR'] = self.app.config.tool_data_path
- # For the upload tool, we need to know the root directory and the
+ # For the upload tool, we need to know the root directory and the
# datatypes conf path, so we can load the datatypes registry
param_dict['__root_dir__'] = param_dict['GALAXY_ROOT_DIR'] = os.path.abspath( self.app.config.root )
param_dict['__datatypes_config__'] = param_dict['GALAXY_DATATYPES_CONF_FILE'] = self.app.datatypes_registry.integrated_datatypes_configs
@@ -2590,7 +2590,7 @@
if type(value) != type([]):
value = [ value ]
for elem in value:
- f.write( '%s=%s\n' % (key, elem) )
+ f.write( '%s=%s\n' % (key, elem) )
f.close()
param_dict['param_file'] = param_filename
return param_filename
@@ -2625,7 +2625,7 @@
command_line = None
if not self.command:
return
- try:
+ try:
# Substituting parameters into the command
command_line = fill_template( self.command, context=param_dict )
# Remove newlines from command line, and any leading/trailing white space
@@ -2671,7 +2671,7 @@
"""
if not self.redirect_url_params:
return
- redirect_url_params = None
+ redirect_url_params = None
# Substituting parameter values into the url params
redirect_url_params = fill_template( self.redirect_url_params, context=param_dict )
# Remove newlines
@@ -2679,21 +2679,21 @@
return redirect_url_params
def parse_redirect_url( self, data, param_dict ):
"""
- Parse the REDIRECT_URL tool param. Tools that send data to an external
+ Parse the REDIRECT_URL tool param. Tools that send data to an external
application via a redirect must include the following 3 tool params:
-
+
1) REDIRECT_URL - the url to which the data is being sent
-
- 2) DATA_URL - the url to which the receiving application will send an
+
+ 2) DATA_URL - the url to which the receiving application will send an
http post to retrieve the Galaxy data
-
+
3) GALAXY_URL - the url to which the external application may post
data as a response
"""
redirect_url = param_dict.get( 'REDIRECT_URL' )
redirect_url_params = self.build_redirect_url_params( param_dict )
- # Add the parameters to the redirect url. We're splitting the param
- # string on '**^**' because the self.parse() method replaced white
+ # Add the parameters to the redirect url. We're splitting the param
+ # string on '**^**' because the self.parse() method replaced white
# space with that separator.
params = redirect_url_params.split( '**^**' )
rup_dict = {}
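# Editor's sketch (hedged, not from the commit): splitting the redirect
# params on the '**^**' separator that parse() substituted for white
# space, as described above. The parameter string is a hypothetical
# example.
redirect_url_params = 'DATA_URL=http://galaxy/datasets**^**GALAXY_URL=http://galaxy/async/mytool'
rup_dict = {}
for param in redirect_url_params.split('**^**'):
    name, _, value = param.partition('=')   # keep '=' inside the value intact
    rup_dict[name] = value
print(rup_dict)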
@@ -2762,8 +2762,8 @@
file_name = os.path.join(root, f),
create = True,
preserve_symlinks = True )
- # Clean up after being handled by object store.
- # FIXME: If the object (e.g., S3) becomes async, this will
+ # Clean up after being handled by object store.
+ # FIXME: If the object (e.g., S3) becomes async, this will
# cause issues so add it to the object store functionality?
if extra_dir is not None:
# there was an extra_files_path dir, attempt to remove it
@@ -2776,7 +2776,7 @@
Look for child dataset files, create HDA and attach to parent.
"""
children = {}
- # Loop through output file names, looking for generated children in
+ # Loop through output file names, looking for generated children in
# form of 'child_parentId_designation_visibility_extension'
for name, outdata in output.items():
filenames = []
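# Editor's sketch (hedged, not from the commit): unpacking the
# 'child_parentId_designation_visibility_extension' filename convention
# named above. The filename itself is a hypothetical example.
fname = 'child_42_reverse_visible_bed'
_, parent_id, designation, visibility, extension = fname.split('_', 4)
print('%s %s %s %s' % (parent_id, designation, visibility, extension))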
@@ -2825,11 +2825,11 @@
child_dataset.state = outdata.state
self.sa_session.add( child_dataset )
self.sa_session.flush()
- # Add child to return dict
+ # Add child to return dict
children[name][designation] = child_dataset
- # Need to update all associated output hdas, i.e. history was
+ # Need to update all associated output hdas, i.e. history was
# shared with job running
- for dataset in outdata.dataset.history_associations:
+ for dataset in outdata.dataset.history_associations:
if outdata == dataset: continue
# Create new child dataset
child_data = child_dataset.copy( parent_id = dataset.id )
@@ -2838,7 +2838,7 @@
return children
def collect_primary_datasets( self, output, job_working_directory ):
"""
- Find any additional datasets generated by a tool and attach (for
+ Find any additional datasets generated by a tool and attach (for
cases where number of outputs is not known in advance).
"""
new_primary_datasets = {}
@@ -2851,7 +2851,7 @@
except Exception:
# This should not be considered an error or warning condition, this file is optional
pass
- # Loop through output file names, looking for generated primary
+ # Loop through output file names, looking for generated primary
# datasets in form of:
# 'primary_associatedWithDatasetID_designation_visibility_extension(_DBKEY)'
primary_datasets = {}
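# Editor's sketch (hedged, not from the commit): the primary-dataset
# filename convention quoted above, with its optional trailing dbkey.
# The filename and the simple field handling are hypothetical.
def parse_primary_name(fname):
    fields = fname.split('_')
    assert fields[0] == 'primary'
    dataset_id, designation, visibility, extension = fields[1:5]
    dbkey = fields[5] if len(fields) > 5 else None
    return dataset_id, designation, visibility == 'visible', extension, dbkey

print(parse_primary_name('primary_7_output2_visible_bed_hg19'))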
@@ -2909,48 +2909,48 @@
dataset_att_by_name = dict( ext='extension' )
for att_set in [ 'name', 'info', 'ext', 'dbkey' ]:
dataset_att_name = dataset_att_by_name.get( att_set, att_set )
- setattr( primary_data, dataset_att_name, new_primary_datasets_attributes.get( att_set, getattr( primary_data, dataset_att_name ) ) )
+ setattr( primary_data, dataset_att_name, new_primary_datasets_attributes.get( att_set, getattr( primary_data, dataset_att_name ) ) )
primary_data.set_meta()
primary_data.set_peek()
self.sa_session.add( primary_data )
self.sa_session.flush()
outdata.history.add_dataset( primary_data )
- # Add dataset to return dict
+ # Add dataset to return dict
primary_datasets[name][designation] = primary_data
- # Need to update all associated output hdas, i.e. history was
+ # Need to update all associated output hdas, i.e. history was
# shared with job running
- for dataset in outdata.dataset.history_associations:
+ for dataset in outdata.dataset.history_associations:
if outdata == dataset: continue
new_data = primary_data.copy()
dataset.history.add( new_data )
self.sa_session.add( new_data )
self.sa_session.flush()
return primary_datasets
-
+
def _is_hidden_for_user( self, user ):
if self.hidden or ( not user and self.require_login ):
return True
return False
-
+
def to_dict( self, trans, for_link=False, for_display=False ):
""" Returns dict of tool. """
-
+
# Basic information
tool_dict = { 'id': self.id, 'name': self.name,
'version': self.version, 'description': self.description }
-
+
if for_link:
# Create tool link.
if not self.tool_type.startswith( 'data_source' ):
link = url_for( '/tool_runner', tool_id=self.id )
else:
link = url_for( self.action, **self.get_static_param_values( trans ) )
-
+
# Basic information
- tool_dict.update( { 'type': 'tool', 'link': link,
+ tool_dict.update( { 'type': 'tool', 'link': link,
'min_width': self.uihints.get( 'minwidth', -1 ),
'target': self.target } )
-
+
if for_display:
# Dictify inputs.
inputs = []
@@ -2968,7 +2968,7 @@
value = option[1]
# Pack input.
- param_dict.update( { 'type' : 'select',
+ param_dict.update( { 'type' : 'select',
'html' : urllib.quote( input.get_html( trans ) ),
'options': options,
'value': value
@@ -2987,12 +2987,12 @@
param_dict.update( { 'type' : '??', 'init_value' : input.value, \
'html' : urllib.quote( input.get_html( trans ) ) } )
inputs.append( param_dict )
-
+
tool_dict[ 'inputs' ] = inputs
-
+
# Dictify outputs.
pass
-
+
return tool_dict
def get_default_history_by_trans( self, trans, create=False ):
@@ -3001,7 +3001,7 @@
class OutputParameterJSONTool( Tool ):
"""
- Alternate implementation of Tool that provides parameters and other values
+ Alternate implementation of Tool that provides parameters and other values
JSONified within the contents of an output dataset
"""
tool_type = 'output_parameter_json'
@@ -3027,14 +3027,14 @@
return rval
def exec_before_job( self, app, inp_data, out_data, param_dict=None ):
if param_dict is None:
- param_dict = {}
+ param_dict = {}
json_params = {}
json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict ) #it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
json_params[ 'output_data' ] = []
json_params[ 'job_config' ] = dict( GALAXY_DATATYPES_CONF_FILE=param_dict.get( 'GALAXY_DATATYPES_CONF_FILE' ), GALAXY_ROOT_DIR=param_dict.get( 'GALAXY_ROOT_DIR' ), TOOL_PROVIDED_JOB_METADATA_FILE=jobs.TOOL_PROVIDED_JOB_METADATA_FILE )
json_filename = None
for i, ( out_name, data ) in enumerate( out_data.iteritems() ):
- #use wrapped dataset to access certain values
+ #use wrapped dataset to access certain values
wrapped_data = param_dict.get( out_name )
#allow multiple files to be created
file_name = str( wrapped_data )
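# Editor's sketch (hedged, not from the commit): the rough shape of the
# JSON written for the tool, following the keys built above. The entry
# fields under 'output_data' and all values are illustrative assumptions.
import json
json_params = {
    'param_dict': {'input1': '5'},
    'output_data': [{'out_data_name': 'output1',
                     'file_name': '/tmp/out.dat'}],
    'job_config': {'GALAXY_ROOT_DIR': '/galaxy',
                   'GALAXY_DATATYPES_CONF_FILE': 'datatypes_conf.xml',
                   'TOOL_PROVIDED_JOB_METADATA_FILE': 'galaxy.json'},
}
print(json.dumps(json_params, indent=2))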
@@ -3054,11 +3054,11 @@
class DataSourceTool( OutputParameterJSONTool ):
"""
- Alternate implementation of Tool for data_source tools -- those that
+ Alternate implementation of Tool for data_source tools -- those that
allow the user to query and extract data from another web site.
"""
tool_type = 'data_source'
-
+
def _build_GALAXY_URL_parameter( self ):
return ToolParameter.build( self, ElementTree.XML( '<param name="GALAXY_URL" type="baseurl" value="/tool_runner?tool_id=%s" />' % self.id ) )
def parse_inputs( self, root ):
@@ -3073,17 +3073,17 @@
info = param_dict.get( 'info' )
data_type = param_dict.get( 'data_type' )
name = param_dict.get( 'name' )
-
+
json_params = {}
json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict ) #it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
json_params[ 'output_data' ] = []
json_params[ 'job_config' ] = dict( GALAXY_DATATYPES_CONF_FILE=param_dict.get( 'GALAXY_DATATYPES_CONF_FILE' ), GALAXY_ROOT_DIR=param_dict.get( 'GALAXY_ROOT_DIR' ), TOOL_PROVIDED_JOB_METADATA_FILE=jobs.TOOL_PROVIDED_JOB_METADATA_FILE )
json_filename = None
for i, ( out_name, data ) in enumerate( out_data.iteritems() ):
- #use wrapped dataset to access certain values
+ #use wrapped dataset to access certain values
wrapped_data = param_dict.get( out_name )
#allow multiple files to be created
- cur_base_param_name = 'GALAXY|%s|' % out_name
+ cur_base_param_name = 'GALAXY|%s|' % out_name
cur_name = param_dict.get( cur_base_param_name + 'name', name )
cur_dbkey = param_dict.get( cur_base_param_name + 'dkey', dbkey )
cur_info = param_dict.get( cur_base_param_name + 'info', info )
@@ -3113,7 +3113,7 @@
class AsyncDataSourceTool( DataSourceTool ):
tool_type = 'data_source_async'
-
+
def _build_GALAXY_URL_parameter( self ):
return ToolParameter.build( self, ElementTree.XML( '<param name="GALAXY_URL" type="baseurl" value="/async/%s" />' % self.id ) )
@@ -3130,23 +3130,23 @@
for name, dataset in inp_data.iteritems():
external_metadata = JobExternalOutputMetadataWrapper( job )
if external_metadata.external_metadata_set_successfully( dataset, app.model.context ):
- dataset.metadata.from_JSON_dict( external_metadata.get_output_filenames_by_dataset( dataset, app.model.context ).filename_out )
+ dataset.metadata.from_JSON_dict( external_metadata.get_output_filenames_by_dataset( dataset, app.model.context ).filename_out )
else:
dataset._state = model.Dataset.states.FAILED_METADATA
self.sa_session.add( dataset )
self.sa_session.flush()
return
- # If setting external metadata has failed, how can we inform the
- # user? For now, we'll leave the default metadata and set the state
+ # If setting external metadata has failed, how can we inform the
+ # user? For now, we'll leave the default metadata and set the state
# back to its original.
dataset.datatype.after_setting_metadata( dataset )
if job and job.tool_id == '1.0.0':
dataset.state = param_dict.get( '__ORIGINAL_DATASET_STATE__' )
else:
# Revert dataset.state to fall back to dataset.dataset.state
- dataset._state = None
+ dataset._state = None
# Need to reset the peek, which may rely on metadata
- dataset.set_peek()
+ dataset.set_peek()
self.sa_session.add( dataset )
self.sa_session.flush()
def job_failed( self, job_wrapper, message, exception = False ):
@@ -3156,10 +3156,10 @@
for dataset_assoc in job.input_datasets:
inp_data[dataset_assoc.name] = dataset_assoc.dataset
return self.exec_after_process( job_wrapper.app, inp_data, {}, job_wrapper.get_param_dict(), job = job )
-
+
class ExportHistoryTool( Tool ):
tool_type = 'export_history'
-
+
class ImportHistoryTool( Tool ):
tool_type = 'import_history'
@@ -3169,13 +3169,13 @@
class DataManagerTool( OutputParameterJSONTool ):
tool_type = 'manage_data'
default_tool_action = DataManagerToolAction
-
+
def __init__( self, config_file, root, app, guid=None, data_manager_id=None, **kwds ):
self.data_manager_id = data_manager_id
super( DataManagerTool, self ).__init__( config_file, root, app, guid=guid, **kwds )
if self.data_manager_id is None:
self.data_manager_id = self.id
-
+
def exec_after_process( self, app, inp_data, out_data, param_dict, job = None, **kwds ):
#run original exec_after_process
super( DataManagerTool, self ).exec_after_process( app, inp_data, out_data, param_dict, job = job, **kwds )
@@ -3186,7 +3186,7 @@
data_manager = self.app.data_managers.get_manager( data_manager_id, None )
assert data_manager is not None, "Invalid data manager (%s) requested. It may have been removed before the job completed." % ( data_manager_id )
data_manager.process_result( out_data )
-
+
def get_default_history_by_trans( self, trans, create=False ):
def _create_data_manager_history( user ):
history = trans.app.model.History( name='Data Manager History (automatically created)', user=user )
@@ -3222,7 +3222,7 @@
tool_types[ tool_class.tool_type ] = tool_class
# ---- Utility classes to be factored out -----------------------------------
-
+
class BadValue( object ):
def __init__( self, value ):
self.value = value
@@ -3239,7 +3239,7 @@
self.stdout_match = False
self.stderr_match = False
# TODO: Define a common class or constant for error level:
- self.error_level = "fatal"
+ self.error_level = "fatal"
self.desc = ""
class ToolStdioExitCode( object ):
@@ -3294,7 +3294,7 @@
return self.value[self.counter-1]
def __getattr__( self, key ):
return getattr( self.value, key )
-
+
class InputValueWrapper( ToolParameterValueWrapper ):
"""
Wraps an input so that __str__ gives the "param_dict" representation.
@@ -3313,7 +3313,7 @@
    Wraps a SelectToolParameter so that __str__ returns the selected value, but all other
attributes are accessible.
"""
-
+
class SelectToolParameterFieldWrapper:
"""
Provide access to any field by name or index for this particular value.
@@ -3328,7 +3328,7 @@
if name not in self._fields:
self._fields[ name ] = self._input.options.get_field_by_name_for_value( name, self._value, None, self._other_values )
return self._input.separator.join( map( str, self._fields[ name ] ) )
-
+
def __init__( self, input, value, app, other_values={} ):
self.input = input
self.value = value
@@ -3345,11 +3345,11 @@
Wraps a dataset so that __str__ returns the filename, but all other
attributes are accessible.
"""
-
+
class MetadataWrapper:
"""
- Wraps a Metadata Collection to return MetadataParameters wrapped
- according to the metadata spec. Methods implemented to match behavior
+ Wraps a Metadata Collection to return MetadataParameters wrapped
+ according to the metadata spec. Methods implemented to match behavior
of a Metadata Collection.
"""
def __init__( self, metadata ):
@@ -3360,9 +3360,9 @@
if rval is None:
rval = self.metadata.spec[name].no_value
rval = self.metadata.spec[name].param.to_string( rval )
- # Store this value, so we don't need to recalculate if needed
+ # Store this value, so we don't need to recalculate if needed
# again
- setattr( self, name, rval )
+ setattr( self, name, rval )
return rval
def __nonzero__( self ):
return self.metadata.__nonzero__()
@@ -3375,7 +3375,7 @@
return default
def items( self ):
return iter( [ ( k, self.get( k ) ) for k, v in self.metadata.items() ] )
-
+
def __init__( self, dataset, datatypes_registry = None, tool = None, name = None, false_path = None ):
if not dataset:
try:
@@ -3420,7 +3420,7 @@
return val.encode( "utf8" )
else:
return val
-
+
def get_incoming_value( incoming, key, default ):
if "__" + key + "__is_composite" in incoming:
composite_keys = incoming["__" + key + "__keys"].split()
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
5 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/340d626da27a/
changeset: 340d626da27a
user: dannon
date: 2013-03-05 21:39:22
summary: Base controller import cleanup, remove unused variables.
affected #: 1 file
diff -r 0a6a4752493eacfac2fb7a537d30be0333a58977 -r 340d626da27af28fab27115c0329ea1a2069048b lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -1,28 +1,29 @@
"""
Contains functionality needed in every web interface
"""
-import os, time, logging, re, string, sys, glob, shutil, tempfile, subprocess, operator
-from datetime import date, datetime, timedelta
-from time import strftime
-from galaxy import config, tools, web, util
-from galaxy.util import inflector
-from galaxy.util.hash_util import *
+import logging
+import operator
+import os
+import re
+import pkg_resources
+pkg_resources.require("SQLAlchemy >= 0.4")
+
+from sqlalchemy import func, and_, select
+from paste.httpexceptions import HTTPBadRequest, HTTPInternalServerError, HTTPNotImplemented, HTTPRequestRangeNotSatisfiable
+
+from galaxy import util, web
+from galaxy.datatypes.interval import ChromatinInteractions
+from galaxy.exceptions import ItemAccessibilityException, ItemDeletionException, ItemOwnershipException, MessageException
+from galaxy.security.validate_user_input import validate_publicname
from galaxy.util.sanitize_html import sanitize_html
-from galaxy.web import error, form, url_for
-from galaxy.model.orm import *
-from galaxy.workflow.modules import *
-from galaxy.web.framework import simplejson
+from galaxy.visualization.genome.visual_analytics import get_tool_def
+from galaxy.web import error, url_for
from galaxy.web.form_builder import AddressField, CheckboxField, SelectField, TextArea, TextField
-from galaxy.web.form_builder import WorkflowField, WorkflowMappingField, HistoryField, PasswordField, build_select_field
-from galaxy.visualization.genome.visual_analytics import get_tool_def
-from galaxy.security.validate_user_input import validate_publicname
-from paste.httpexceptions import *
-from galaxy.exceptions import *
-from galaxy.model import NoConverterException, ConverterDependencyException
-from galaxy.datatypes.interval import ChromatinInteractions
+from galaxy.web.form_builder import build_select_field, HistoryField, PasswordField, WorkflowField, WorkflowMappingField
+from galaxy.workflow.modules import module_factory
+from galaxy.model.orm import eagerload, eagerload_all
from galaxy.datatypes.data import Text
-from Cheetah.Template import Template
log = logging.getLogger( __name__ )
@@ -100,6 +101,7 @@
log.exception( "Invalid %s id ( %s ) specified" % ( class_name, id ) )
raise MessageException( "Invalid %s id ( %s ) specified" % ( class_name, id ), type="error" )
if check_ownership or check_accessible:
+ #DBTODO bug: encoded_id is id
self.security_check( trans, item, check_ownership, check_accessible, encoded_id )
if deleted == True and not item.deleted:
raise ItemDeletionException( '%s "%s" is not deleted' % ( class_name, getattr( item, 'name', id ) ), type="warning" )
@@ -134,7 +136,7 @@
def get_object( self, trans, id, class_name, check_ownership=False, check_accessible=False, deleted=None ):
try:
return BaseController.get_object( self, trans, id, class_name, check_ownership=False, check_accessible=False, deleted=None )
- except MessageException, e:
+ except MessageException:
raise # handled in the caller
except:
            log.exception( "Exception in get_object check for %s %s:" % ( class_name, str( id ) ) )
@@ -228,7 +230,7 @@
# encoded id?
dataset_id = trans.security.decode_id( dataset_id )
- except ( AttributeError, TypeError ), err:
+ except ( AttributeError, TypeError ):
# unencoded id
dataset_id = int( dataset_id )
@@ -684,6 +686,7 @@
step.state = None
# Error dict
if step.tool_errors:
+ #DBTODO BUG: errors doesn't exist in this scope, intent?
errors[step.id] = step.tool_errors
else:
## Non-tool specific stuff?
@@ -943,9 +946,7 @@
def edit_template( self, trans, cntrller, item_type, form_type, **kwd ):
# Edit the template itself, keeping existing field contents, if any.
params = util.Params( kwd )
- form_id = params.get( 'form_id', 'none' )
message = util.restore_text( params.get( 'message', '' ) )
- status = params.get( 'status', 'done' )
edited = util.string_as_bool( params.get( 'edited', False ) )
action = ''
# form_type must be one of: RUN_DETAILS_TEMPLATE, LIBRARY_INFO_TEMPLATE
@@ -995,8 +996,6 @@
rtra = item.run_details
info_association = rtra.run
template = info_association.template
- info = info_association.info
- form_values = trans.sa_session.query( trans.app.model.FormValues ).get( info.id )
if edited:
# The form on which the template is based has been edited, so we need to update the
# info_association with the current form
@@ -1052,7 +1051,6 @@
sample_id = params.get( 'sample_id', None )
sample = trans.sa_session.query( trans.model.Sample ).get( trans.security.decode_id( sample_id ) )
message = util.restore_text( params.get( 'message', '' ) )
- status = params.get( 'status', 'done' )
try:
if in_library:
item, item_desc, action, id = self.get_item_and_stuff( trans,
@@ -1250,7 +1248,6 @@
sample_id = params.get( 'sample_id', None )
#id = params.get( 'id', None )
message = util.restore_text( params.get( 'message', '' ) )
- status = params.get( 'status', 'done' )
try:
if in_library:
item, item_desc, action, id = self.get_item_and_stuff( trans,
@@ -1282,7 +1279,6 @@
info_association = item.run_details
if not info_association:
message = "There is no template for this %s" % item_type
- status = 'error'
else:
if in_library:
info_association.deleted = True
@@ -1292,7 +1288,6 @@
trans.sa_session.delete( info_association )
trans.sa_session.flush()
message = 'The template for this %s has been deleted.' % item_type
- status = 'done'
new_kwd = dict( action=action,
cntrller=cntrller,
id=id,
@@ -1449,9 +1444,6 @@
def get_item_and_stuff( self, trans, item_type, **kwd ):
# Return an item, description, action and an id based on the item_type. Valid item_types are
# library, folder, ldda, request_type, sample.
- is_admin = kwd.get( 'is_admin', False )
- #message = None
- current_user_roles = trans.get_current_user_roles()
if item_type == 'library':
library_id = kwd.get( 'library_id', None )
id = library_id
https://bitbucket.org/galaxy/galaxy-central/commits/1daf0cefe5be/
changeset: 1daf0cefe5be
user: dannon
date: 2013-03-05 21:41:22
summary: Fix bug introduced in 6028 -- encoded_id should be just 'id'
affected #: 1 file
diff -r 340d626da27af28fab27115c0329ea1a2069048b -r 1daf0cefe5bed7f972f3e8aeec0d2577811d005c lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -101,8 +101,7 @@
log.exception( "Invalid %s id ( %s ) specified" % ( class_name, id ) )
raise MessageException( "Invalid %s id ( %s ) specified" % ( class_name, id ), type="error" )
if check_ownership or check_accessible:
- #DBTODO bug: encoded_id is id
- self.security_check( trans, item, check_ownership, check_accessible, encoded_id )
+ self.security_check( trans, item, check_ownership, check_accessible, id )
if deleted == True and not item.deleted:
raise ItemDeletionException( '%s "%s" is not deleted' % ( class_name, getattr( item, 'name', id ) ), type="warning" )
elif deleted == False and item.deleted:
https://bitbucket.org/galaxy/galaxy-central/commits/94272d25279e/
changeset: 94272d25279e
user: dannon
date: 2013-03-05 21:43:40
summary: Kill unused step error dict - this is handled elsewhere.
affected #: 1 file
diff -r 1daf0cefe5bed7f972f3e8aeec0d2577811d005c -r 94272d25279e96b866be313c222e9e3a9af57b9d lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -683,10 +683,6 @@
step.upgrade_messages = "Unknown Tool ID"
step.module = None
step.state = None
- # Error dict
- if step.tool_errors:
- #DBTODO BUG: errors doesn't exist in this scope, intent?
- errors[step.id] = step.tool_errors
else:
## Non-tool specific stuff?
step.module = module_factory.from_workflow_step( trans, step )
https://bitbucket.org/galaxy/galaxy-central/commits/3b4596096549/
changeset: 3b4596096549
user: dannon
date: 2013-03-05 21:44:03
summary: Trim whitespace.
affected #: 1 file
diff -r 94272d25279e96b866be313c222e9e3a9af57b9d -r 3b459609654976c8908a2c3104a080db8f550b61 lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -194,7 +194,7 @@
self.mimetype = mimetype
self.display_in_upload = display_in_upload
-#
+#
# -- Mixins for working with Galaxy objects. --
#
@@ -221,7 +221,7 @@
class UsesHistoryDatasetAssociationMixin:
""" Mixin for controllers that use HistoryDatasetAssociation objects. """
-
+
def get_dataset( self, trans, dataset_id, check_ownership=True, check_accessible=False, check_state=True ):
""" Get an HDA object by id. """
# DEPRECATION: We still support unencoded ids for backward compatibility
@@ -256,13 +256,13 @@
return trans.show_error_message( "Please wait until this dataset finishes uploading "
+ "before attempting to view it." )
return data
-
+
def get_history_dataset_association( self, trans, history, dataset_id,
check_ownership=True, check_accessible=False, check_state=False ):
"""Get a HistoryDatasetAssociation from the database by id, verifying ownership."""
self.security_check( trans, history, check_ownership=check_ownership, check_accessible=check_accessible )
hda = self.get_object( trans, dataset_id, 'HistoryDatasetAssociation', check_ownership=False, check_accessible=False, deleted=False )
-
+
if check_accessible:
if not trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), hda.dataset ):
error( "You are not allowed to access this dataset" )
@@ -270,7 +270,7 @@
if check_state and hda.state == trans.model.Dataset.states.UPLOAD:
error( "Please wait until this dataset finishes uploading before attempting to view it." )
return hda
-
+
def get_data( self, dataset, preview=True ):
""" Gets a dataset's data. """
@@ -290,7 +290,7 @@
# For now, cannot get data from non-text datasets.
dataset_data = None
return truncated, dataset_data
-
+
def check_dataset_state( self, trans, dataset ):
"""
Returns a message if dataset is not ready to be used in visualization.
@@ -302,7 +302,7 @@
if dataset.state != trans.app.model.Job.states.OK:
return dataset.conversion_messages.PENDING
return None
-
+
class UsesLibraryMixin:
def get_library( self, trans, id, check_ownership=False, check_accessible=True ):
@@ -319,10 +319,10 @@
def get_library_dataset( self, trans, id, check_ownership=False, check_accessible=True ):
return self.get_object( trans, id, 'LibraryDataset', check_ownership=False, check_accessible=check_accessible )
-class UsesVisualizationMixin( UsesHistoryDatasetAssociationMixin,
+class UsesVisualizationMixin( UsesHistoryDatasetAssociationMixin,
UsesLibraryMixinItems ):
""" Mixin for controllers that use Visualization objects. """
-
+
viz_types = [ "trackster" ]
def create_visualization( self, trans, type, title="Untitled Genome Vis", slug=None, dbkey=None, annotation=None, config={}, save=True ):
@@ -339,26 +339,26 @@
session.flush()
return visualization
-
+
def save_visualization( self, trans, config, type, id=None, title=None, dbkey=None, slug=None, annotation=None ):
session = trans.sa_session
-
- # Create/get visualization.
+
+ # Create/get visualization.
if not id:
# Create new visualization.
vis = self._create_visualization( trans, title, type, dbkey, slug, annotation )
else:
decoded_id = trans.security.decode_id( id )
vis = session.query( trans.model.Visualization ).get( decoded_id )
-
+
# Create new VisualizationRevision that will be attached to the viz
vis_rev = trans.model.VisualizationRevision()
vis_rev.visualization = vis
vis_rev.title = vis.title
vis_rev.dbkey = dbkey
-
+
# -- Validate config. --
-
+
if vis.type == 'trackster':
def unpack_track( track_json ):
""" Unpack a track from its json. """
@@ -436,12 +436,12 @@
# Unpack Trackster config.
latest_revision = visualization.latest_revision
bookmarks = latest_revision.config.get( 'bookmarks', [] )
-
+
def pack_track( track_dict ):
dataset_id = track_dict['dataset_id']
hda_ldda = track_dict.get('hda_ldda', 'hda')
if hda_ldda == 'ldda':
- # HACK: need to encode library dataset ID because get_hda_or_ldda
+ # HACK: need to encode library dataset ID because get_hda_or_ldda
# only works for encoded datasets.
dataset_id = trans.security.encode_id( dataset_id )
dataset = self.get_hda_or_ldda( trans, hda_ldda, dataset_id )
@@ -452,10 +452,10 @@
prefs = {}
track_type, _ = dataset.datatype.get_track_type()
- track_data_provider = trans.app.data_provider_registry.get_data_provider( trans,
- original_dataset=dataset,
+ track_data_provider = trans.app.data_provider_registry.get_data_provider( trans,
+ original_dataset=dataset,
source='data' )
-
+
return {
"track_type": track_type,
"name": track_dict['name'],
@@ -467,7 +467,7 @@
"tool": get_tool_def( trans, dataset ),
"tool_state": track_dict.get( 'tool_state', {} )
}
-
+
def pack_collection( collection_dict ):
drawables = []
for drawable_dict in collection_dict[ 'drawables' ]:
@@ -482,10 +482,10 @@
'prefs': collection_dict.get( 'prefs', [] ),
'filters': collection_dict.get( 'filters', {} )
}
-
+
def encode_dbkey( dbkey ):
- """
- Encodes dbkey as needed. For now, prepends user's public name
+ """
+ Encodes dbkey as needed. For now, prepends user's public name
to custom dbkey keys.
"""
encoded_dbkey = dbkey
@@ -493,7 +493,7 @@
if 'dbkeys' in user.preferences and dbkey in user.preferences[ 'dbkeys' ]:
encoded_dbkey = "%s:%s" % ( user.username, dbkey )
return encoded_dbkey
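# Editor's sketch (hedged, not from the commit): the dbkey encoding rule
# described above as a standalone function. The username and preference
# data are hypothetical stand-ins for user.username / user.preferences.
def encode_dbkey_sketch(dbkey, username, custom_dbkeys):
    if dbkey in custom_dbkeys:
        return '%s:%s' % (username, dbkey)
    return dbkey

print(encode_dbkey_sketch('myGenome', 'alice', {'myGenome'}))  # alice:myGenome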
-
+
# Set tracks.
tracks = []
if 'tracks' in latest_revision.config:
@@ -506,12 +506,12 @@
tracks.append( pack_track( drawable_dict ) )
else:
tracks.append( pack_collection( drawable_dict ) )
-
- config = { "title": visualization.title,
+
+ config = { "title": visualization.title,
"vis_id": trans.security.encode_id( visualization.id ),
- "tracks": tracks,
- "bookmarks": bookmarks,
- "chrom": "",
+ "tracks": tracks,
+ "bookmarks": bookmarks,
+ "chrom": "",
"dbkey": encode_dbkey( visualization.dbkey ) }
if 'viewport' in latest_revision.config:
@@ -522,7 +522,7 @@
config = latest_revision.config
return config
-
+
def get_new_track_config( self, trans, dataset ):
"""
Returns track configuration dict for a dataset.
@@ -530,13 +530,13 @@
# Get data provider.
track_type, _ = dataset.datatype.get_track_type()
track_data_provider = trans.app.data_provider_registry.get_data_provider( trans, original_dataset=dataset )
-
-
+
+
if isinstance( dataset, trans.app.model.HistoryDatasetAssociation ):
hda_ldda = "hda"
elif isinstance( dataset, trans.app.model.LibraryDatasetDatasetAssociation ):
hda_ldda = "ldda"
-
+
# Get track definition.
return {
"track_type": track_type,
@@ -548,16 +548,16 @@
"tool": get_tool_def( trans, dataset ),
"tool_state": {}
}
-
+
def get_hda_or_ldda( self, trans, hda_ldda, dataset_id ):
""" Returns either HDA or LDDA for hda/ldda and id combination. """
if hda_ldda == "hda":
return self.get_dataset( trans, dataset_id, check_ownership=False, check_accessible=True )
else:
return self.get_library_dataset_dataset_association( trans, dataset_id )
-
+
# -- Helper functions --
-
+
def _create_visualization( self, trans, title, type, dbkey=None, slug=None, annotation=None, save=True ):
""" Create visualization but not first revision. Returns Visualization object. """
user = trans.get_user()
@@ -573,7 +573,7 @@
if title_err or slug_err:
return { 'title_err': title_err, 'slug_err': slug_err }
-
+
# Create visualization
visualization = trans.model.Visualization( user=user, title=title, dbkey=dbkey, type=type )
@@ -616,13 +616,13 @@
if isinstance( dataset.datatype, ChromatinInteractions ):
source = 'data'
- data_provider = trans.app.data_provider_registry.get_data_provider( trans,
- original_dataset=dataset,
+ data_provider = trans.app.data_provider_registry.get_data_provider( trans,
+ original_dataset=dataset,
source=source )
- # HACK: pass in additional params which are used for only some
- # types of data providers; level, cutoffs used for summary tree,
+ # HACK: pass in additional params which are used for only some
+ # types of data providers; level, cutoffs used for summary tree,
# num_samples for BBI, and interchromosomal used for chromatin interactions.
- rval = data_provider.get_genome_data( chroms_info,
+ rval = data_provider.get_genome_data( chroms_info,
level=4, detail_cutoff=0, draw_cutoff=0,
num_samples=150,
interchromosomal=True )
@@ -635,7 +635,7 @@
Returns highest priority message from a list of messages.
"""
return_message = None
-
+
# For now, priority is: job error (dict), no converter, pending.
for message in message_list:
if message is not None:
@@ -714,14 +714,14 @@
def get_hda_state_counts( self, trans, history, include_deleted=False, include_hidden=False ):
"""
- Returns a dictionary with state counts for history's HDAs. Key is a
+ Returns a dictionary with state counts for history's HDAs. Key is a
        dataset state, value is the number of datasets in that state.
"""
# Build query to get (state, count) pairs.
- cols_to_select = [ trans.app.model.Dataset.table.c.state, func.count( '*' ) ]
+ cols_to_select = [ trans.app.model.Dataset.table.c.state, func.count( '*' ) ]
from_obj = trans.app.model.HistoryDatasetAssociation.table.join( trans.app.model.Dataset.table )
-
+
conditions = [ trans.app.model.HistoryDatasetAssociation.table.c.history_id == history.id ]
if not include_deleted:
# Only count datasets that have not been deleted.
@@ -729,7 +729,7 @@
if not include_hidden:
# Only count datasets that are visible.
conditions.append( trans.app.model.HistoryDatasetAssociation.table.c.visible == True )
-
+
group_by = trans.app.model.Dataset.table.c.state
query = select( columns=cols_to_select,
from_obj=from_obj,
@@ -740,7 +740,7 @@
state_count_dict = {}
for k, state in trans.app.model.Dataset.states.items():
state_count_dict[ state ] = 0
-
+
# Process query results, adding to count dict.
for row in trans.sa_session.execute( query ):
state, count = row
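# Editor's sketch (hedged, not from the commit): the zero-initialized
# count dict being filled from (state, count) query rows, with
# hypothetical data standing in for the SQL result.
state_count_dict = {'ok': 0, 'error': 0, 'queued': 0}   # zero for every known state
for state, count in [('ok', 2), ('error', 1)]:          # hypothetical query rows
    state_count_dict[state] = count
print(state_count_dict)  # {'ok': 2, 'error': 1, 'queued': 0}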
@@ -1501,13 +1501,13 @@
class SharableMixin:
""" Mixin for a controller that manages an item that can be shared. """
-
+
# -- Implemented methods. --
def _is_valid_slug( self, slug ):
""" Returns true if slug is valid. """
return _is_valid_slug( slug )
-
+
@web.expose
@web.require_login( "share Galaxy items" )
def set_public_username( self, trans, id, username, **kwargs ):
@@ -1526,7 +1526,7 @@
item = self.get_item( trans, id )
if item:
# Only update slug if slug is not already in use.
- if trans.sa_session.query( item.__class__ ).filter_by( user=item.user, slug=new_slug, importable=True ).count() == 0:
+ if trans.sa_session.query( item.__class__ ).filter_by( user=item.user, slug=new_slug, importable=True ).count() == 0:
item.slug = new_slug
trans.sa_session.flush()
@@ -1534,15 +1534,15 @@
def _make_item_accessible( self, sa_session, item ):
""" Makes item accessible--viewable and importable--and sets item's slug.
- Does not flush/commit changes, however. Item must have name, user,
+ Does not flush/commit changes, however. Item must have name, user,
importable, and slug attributes. """
item.importable = True
self.create_item_slug( sa_session, item )
-
+
def create_item_slug( self, sa_session, item ):
- """ Create/set item slug. Slug is unique among user's importable items
- for item's class. Returns true if item's slug was set/changed; false
- otherwise.
+ """ Create/set item slug. Slug is unique among user's importable items
+ for item's class. Returns true if item's slug was set/changed; false
+ otherwise.
"""
cur_slug = item.slug
@@ -1563,7 +1563,7 @@
else:
slug_base = cur_slug
- # Using slug base, find a slug that is not taken. If slug is taken,
+ # Using slug base, find a slug that is not taken. If slug is taken,
# add integer to end.
new_slug = slug_base
count = 1
@@ -1572,13 +1572,13 @@
# handle numerous items with the same name gracefully.
new_slug = '%s-%i' % ( slug_base, count )
count += 1
-
+
# Set slug and return.
item.slug = new_slug
return item.slug == cur_slug
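# Editor's sketch (hedged, not from the commit): the slug uniqueness loop
# above, reduced to a standalone function with an in-memory set standing
# in for the per-user database query.
def unique_slug(slug_base, taken):
    new_slug, count = slug_base, 1
    while new_slug in taken:
        new_slug = '%s-%i' % (slug_base, count)
        count += 1
    return new_slug

print(unique_slug('my-history', {'my-history', 'my-history-1'}))  # my-history-2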
-
- # -- Abstract methods. --
-
+
+ # -- Abstract methods. --
+
@web.expose
@web.require_login( "share Galaxy items" )
def sharing( self, trans, id, **kwargs ):
@@ -1595,19 +1595,19 @@
def display_by_username_and_slug( self, trans, username, slug ):
""" Display item by username and slug. """
raise "Unimplemented Method"
-
+
@web.json
@web.require_login( "get item name and link" )
def get_name_and_link_async( self, trans, id=None ):
""" Returns item's name and link. """
raise "Unimplemented Method"
-
+
@web.expose
@web.require_login("get item content asynchronously")
def get_item_content_async( self, trans, id ):
""" Returns item content in HTML format. """
raise "Unimplemented Method"
-
+
def get_item( self, trans, id ):
""" Return item based on id. """
raise "Unimplemented Method"
https://bitbucket.org/galaxy/galaxy-central/commits/6bc53fd15c62/
changeset: 6bc53fd15c62
user: dannon
date: 2013-03-05 21:52:09
summary: Fix spacing between methods in base controller.
affected #: 1 file
Diff not available.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: carlfeberhard: Browser testing: add tests (and conv. fns) for working with the history options, history panel
by commits-noreply@bitbucket.org 05 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/0a6a4752493e/
changeset: 0a6a4752493e
user: carlfeberhard
date: 2013-03-05 21:35:59
summary: Browser testing: add tests (and conv. fns) for working with the history options, history panel
affected #: 9 files
diff -r 592af505e4e673aa891e9163bd237dbbc728ba62 -r 0a6a4752493eacfac2fb7a537d30be0333a58977 test/casperjs/anon-history-tests.js
--- a/test/casperjs/anon-history-tests.js
+++ b/test/casperjs/anon-history-tests.js
@@ -36,6 +36,7 @@
if( spaceghost.fixtureData.testUser ){
email = spaceghost.fixtureData.testUser.email;
password = spaceghost.fixtureData.testUser.password;
+ spaceghost.info( 'Will use fixtureData.testUser: ' + email );
}
var galaxyCookieName = 'galaxysession',
@@ -174,6 +175,17 @@
});
});
+spaceghost.user.logout();
+spaceghost.thenOpen( spaceghost.baseUrl, function(){
+ this.test.comment( 'logging out should create a new, anonymous history' );
+
+ this.withFrame( this.selectors.frames.history, function(){
+ this.test.assertSelectorHasText( nameSelector, unnamedName, 'History name is ' + unnamedName );
+ this.test.assertSelectorHasText( emptyMsgSelector, emptyMsgStr,
+ 'Message contains "' + emptyMsgStr + '"' );
+ });
+});
+
// ===================================================================
spaceghost.run( function(){
diff -r 592af505e4e673aa891e9163bd237dbbc728ba62 -r 0a6a4752493eacfac2fb7a537d30be0333a58977 test/casperjs/casperjs_runner.py
--- a/test/casperjs/casperjs_runner.py
+++ b/test/casperjs/casperjs_runner.py
@@ -9,7 +9,7 @@
* sh run_functional_tests.sh test/casperjs/test_runner
* sh run_functional_tests.sh
-Note: that you can enable (lots) of debugging info using cli options:
+Note: that you can enable (lots of) debugging info using cli options:
* casperjs usertests.js --url='http://localhost:8080' --verbose=true --logLevel=debug
(see casperjs.org for more information)
@@ -94,11 +94,12 @@
while process.poll() == None:
stderr_msg = process.stderr.readline()
stderr_msg = self.strip_escape_codes( stderr_msg.strip() )
- log.debug( '(%s): %s', rel_script_path, stderr_msg )
- # HACK: this is the last string displayed using the debug settings - afterwards it hangs
- # so: bail on this string
- if stderr_msg.startswith( self.casper_done_str ):
- break
+ if stderr_msg:
+ log.debug( '(%s): %s', rel_script_path, stderr_msg )
+ # HACK: this is the last string displayed using the debug settings - afterwards it hangs
+ # so: bail on this string
+ if stderr_msg.startswith( self.casper_done_str ):
+ break
# stdout is assumed to have the json test data/results
( stdout_output, stderr_output ) = process.communicate()
@@ -301,8 +302,12 @@
"""User registration tests: register new user, logout, attempt bad registrations.
"""
# all keywords will be compiled into a single JSON obj and passed to the server
- self.run_js_script( 'registration-tests.js',
- testuser=test_user )
+ #self.run_js_script( 'registration-tests.js',
+ # # this causes a time out in history-panel-tests: why?
+ # # also: I can't seem to bump the timeout to an error (using a handler) - causes script to hang
+ # # removing for the sake of bbot
+ # testUser=test_user )
+ self.run_js_script( 'registration-tests.js' )
#TODO:?? could theoretically do db cleanup, checks here with SQLALX
#TODO: have run_js_script return other persistant fixture data (uploaded files, etc.)
@@ -310,8 +315,7 @@
def test_20_login( self ):
"""User log in tests.
"""
- self.run_js_script( 'login-tests.js',
- testuser=test_user )
+ self.run_js_script( 'login-tests.js' )
class Test_02_Tools( CasperJSTestCase ):
@@ -321,8 +325,7 @@
def test_10_upload( self ):
"""Tests uploading files
"""
- self.run_js_script( 'upload-tests.js',
- testuser=test_user )
+ self.run_js_script( 'upload-tests.js' )
class Test_03_HistoryPanel( CasperJSTestCase ):
@@ -332,14 +335,12 @@
def test_00_history_panel( self ):
"""Test history panel basics (controls, structure, refresh, history options menu, etc.).
"""
- self.run_js_script( 'history-panel-tests.js',
- testuser=test_user )
+ self.run_js_script( 'history-panel-tests.js' )
def test_10_anonymous_histories( self ):
"""Test history panel basics with an anonymous user.
"""
- self.run_js_script( 'anon-history-tests.js',
- testuser=test_user )
+ self.run_js_script( 'anon-history-tests.js' )
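The stderr-handling change in casperjs_runner.py above (log only non-empty lines, and bail once the casper done-string appears, since the process hangs afterwards) reads more clearly in isolation. A minimal sketch of the polling loop, assuming a generic command and sentinel rather than the runner's real configuration:

import subprocess

def run_until_done( cmd, done_str ):
    """Run cmd, echoing its stderr until done_str is seen; return its stdout."""
    process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                universal_newlines=True )
    while process.poll() is None:
        stderr_msg = process.stderr.readline().strip()
        # skip blank lines instead of logging them
        if stderr_msg:
            print( '(stderr): %s' % stderr_msg )
            # the done string is the last output before the process hangs: bail on it
            if stderr_msg.startswith( done_str ):
                break
    # stdout is assumed to hold the json test data/results
    stdout_output, stderr_output = process.communicate()
    return stdout_output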
diff -r 592af505e4e673aa891e9163bd237dbbc728ba62 -r 0a6a4752493eacfac2fb7a537d30be0333a58977 test/casperjs/history-panel-tests.js
--- a/test/casperjs/history-panel-tests.js
+++ b/test/casperjs/history-panel-tests.js
@@ -27,6 +27,7 @@
// ===================================================================
/* TODO:
+ possibly break this file up
*/
// =================================================================== globals and helpers
var email = spaceghost.user.getRandomEmail(),
@@ -34,8 +35,8 @@
if( spaceghost.fixtureData.testUser ){
email = spaceghost.fixtureData.testUser.email;
password = spaceghost.fixtureData.testUser.password;
+ spaceghost.info( 'Will use fixtureData.testUser: ' + email );
}
-var newHistoryName = "Test History";
var nameSelector = 'div#history-name',
unnamedName = 'Unnamed history',
@@ -43,7 +44,6 @@
initialSizeStr = '0 bytes',
tagIconSelector = '#history-tag.icon-button',
annoIconSelector = '#history-annotate.icon-button',
- //emptyMsgSelector = '#emptyHistoryMessage';
emptyMsgSelector = '.infomessagesmall',
emptyMsgStr = "Your history is empty. Click 'Get Data' on the left pane to start",
@@ -51,14 +51,33 @@
nameTooltip = 'Click to rename history',
editableTextClass = 'editable-text',
- editableTextInputSelector = 'input#renaming-active';
+ editableTextInputSelector = 'input#renaming-active',
-var historyFrameInfo = {},
+ wrapperOkClassName = 'historyItem-ok',
+
+ tagAreaSelector = '#history-tag-area',
+ annoAreaSelector = '#history-annotation-area',
+ refreshButtonSelector = 'a#history-refresh-button',
+ refreshButtonIconSelector = 'span.fa-icon-refresh',
+ refreshButtonHref = '/history',
+
+ //historyOptionsButtonSelector = '#history-options-button',
+ //historyOptionsButtonIconSelector = 'span.fa-icon-cog',
+ includeDeletedOptionsLabel = spaceghost.historyoptions.data.labels.options.includeDeleted;
+
+function historyOptionXpathByLabel( label ){
+ return xpath( '//ul[@id="history-options-button-menu"]/li/a[text()[contains(.,"' + label + '")]]' );
+}
+
+var newHistoryName = "Test History",
+ filepathToUpload = '../../test-data/1.txt',
+ historyFrameInfo = {},
testUploadInfo = {};
// =================================================================== TESTS
-// ------------------------------------------------------------------- start a new user
+// ------------------------------------------------------------------- set up
+// start a new user
spaceghost.user.loginOrRegisterUser( email, password );
//??: why is a reload needed here? If we don't, loggedInAs === '' ...
spaceghost.thenOpen( spaceghost.baseUrl, function(){
@@ -66,13 +85,13 @@
this.test.assert( loggedInAs === email, 'loggedInAs() matches email: "' + loggedInAs + '"' );
});
-// ------------------------------------------------------------------- check structure of empty history
// grab the history frame bounds for mouse later tests
spaceghost.then( function(){
historyFrameInfo = this.getElementInfo( 'iframe[name="galaxy_history"]' );
//this.debug( 'historyFrameInfo:' + this.jsonStr( historyFrameInfo ) );
});
+// ------------------------------------------------------------------- check structure of empty history
spaceghost.thenOpen( spaceghost.baseUrl, function testPanelStructure(){
this.test.comment( 'history panel, new history' );
this.withFrame( this.selectors.frames.history, function(){
@@ -100,7 +119,6 @@
this.test.assertVisible( emptyMsgSelector, 'Empty history message is visible' );
this.test.assertSelectorHasText( emptyMsgSelector, emptyMsgStr,
'Message contains "' + emptyMsgStr + '"' );
-
});
});
@@ -163,88 +181,259 @@
});
});
-
// ------------------------------------------------------------------- check structure of NON empty history
-/*
// upload file: 1.txt
spaceghost.then( function upload(){
- this.test.comment( 'anon-user should be able to upload files' );
- spaceghost.uploadFile( '../../test-data/1.txt', function uploadCallback( _uploadInfo ){
+ this.test.comment( 'should be able to upload files' );
+ spaceghost.tools.uploadFile( filepathToUpload, function uploadCallback( _uploadInfo ){
this.debug( 'uploaded HDA info: ' + this.jsonStr( _uploadInfo ) );
var hasHda = _uploadInfo.hdaElement,
hasClass = _uploadInfo.hdaElement.attributes[ 'class' ],
- hasOkClass = _uploadInfo.hdaElement.attributes[ 'class' ].indexOf( 'historyItem-ok' ) !== -1;
+ hasOkClass = _uploadInfo.hdaElement.attributes[ 'class' ].indexOf( wrapperOkClassName ) !== -1;
this.test.assert( ( hasHda && hasClass && hasOkClass ), "Uploaded file: " + _uploadInfo.name );
uploadInfo = _uploadInfo;
});
});
-//TODO: for each uploaded file: 1 file per (standard) datatype (or some subset)
-// txt, tabular, sam, bam, fasta, fastq, bed, gff,
-*/
+spaceghost.then( function checkPanelStructure(){
+ this.test.comment( 'checking structure of non-empty panel' );
-// -------------------------------------------------------------------
-//history panel
- // structure of empty
- // upload file
- // structure of not empty
- // tags
- // annotation
- // history refresh
- // history options
- // structure
+ this.withFrame( this.selectors.frames.history, function(){
+ this.test.comment( "history name should exist, be visible, and have text " + unnamedName );
+ this.test.assertExists( nameSelector, nameSelector + ' exists' );
+ this.test.assertVisible( nameSelector, 'History name is visible' );
+ this.test.assertSelectorHasText( nameSelector, newHistoryName, 'History name is ' + newHistoryName );
- // deleted
+ this.test.comment( "history subtitle should display size and size should be " + onetxtFilesize + " bytes" );
+ var onetxtFilesize = require( 'fs' ).size( this.options.scriptDir + filepathToUpload ),
+ expectedSubtitle = onetxtFilesize + ' bytes';
+ this.test.assertExists( subtitleSelector, 'Found ' + subtitleSelector );
+ this.test.assertVisible( subtitleSelector, 'History subtitle is visible' );
+ this.test.assertSelectorHasText( subtitleSelector, expectedSubtitle,
+ 'History subtitle has "' + expectedSubtitle + '"' );
- // hidden
+ this.test.comment( "tags and annotation icons should be available" );
+ this.test.assertExists( tagIconSelector, 'Tag icon button found' );
+ this.test.assertExists( annoIconSelector, 'Annotation icon button found' );
- // persistant expansion (or in hdaView?)
+ this.test.comment( "A message about the current history being empty should NOT be displayed" );
+ this.test.assertExists( emptyMsgSelector, emptyMsgSelector + ' exists' );
+ this.test.assertNotVisible( emptyMsgSelector, 'Empty history message is NOT visible' );
+ });
+});
-//hdaView
-// with hpanel:
- // (we assume hda is in the ok state)
- // with collapsed hda:
- // can we see the hid?
- // can we see the title?
- // three primary action buttons:
- // they exist?
- // Do they have good hrefs, targets?
- // Are they enabled?
- // do they have proper tooltips?
- // display
- // edit
- // delete
- //??: click through?
+// ------------------------------------------------------------------- tags
+// keeping this light here - better for its own test file
+spaceghost.then( function openTags(){
+ this.test.comment( 'tag area should open when the history panel tag icon is clicked' );
+ this.withFrame( this.selectors.frames.history, function(){
+ this.mouseEvent( 'click', tagIconSelector );
+ this.wait( 1000, function(){
+ this.test.assertVisible( tagAreaSelector, 'Tag area is now displayed' );
+ });
+ });
+});
+spaceghost.then( function closeTags(){
+ this.test.comment( 'tag area should close when the history panel tag icon is clicked again' );
+ this.withFrame( this.selectors.frames.history, function(){
+ this.mouseEvent( 'click', tagIconSelector );
+ this.wait( 1000, function(){
+ this.test.assertNotVisible( tagAreaSelector, 'Tag area is now hidden' );
+ });
+ });
+});
- // with expaned hda:
- // can we see the hid, title, and primary display buttons?
+// ------------------------------------------------------------------- annotation
+// keeping this light here - better for its own test file
+spaceghost.then( function openAnnotation(){
+ this.test.comment( 'annotation area should open when the history panel annotation icon is clicked' );
+ this.withFrame( this.selectors.frames.history, function(){
+ this.mouseEvent( 'click', annoIconSelector );
+ this.wait( 1000, function(){
+ this.test.assertVisible( annoAreaSelector, 'Annotation area is now displayed' );
+ });
+ });
+});
+spaceghost.then( function closeAnnotation(){
+ this.test.comment( 'annotation area should close when the history panel annotation icon is clicked again' );
+ this.withFrame( this.selectors.frames.history, function(){
+ this.mouseEvent( 'click', annoIconSelector );
+ this.wait( 1000, function(){
+ this.test.assertNotVisible( annoAreaSelector, 'Annotation area is now hidden' );
+ });
+ });
+});
- // misc info: no dbkey specified - is there a '?' link leading to edit attr?
- // misc info: uploaded sam file
- // misc info: format: sam
+// ------------------------------------------------------------------- refresh button
+spaceghost.then( function refreshButton(){
+ this.test.comment( 'History panel should refresh when the history refresh icon is clicked' );
- // secondary actions:
- // download
- // info
- // rerun
- // visualizations
+ this.test.assertExists( refreshButtonSelector, "Found refresh button" );
+ this.test.assertVisible( refreshButtonSelector, "Refresh button is visible" );
+ this.test.assertVisible( refreshButtonSelector + ' ' + refreshButtonIconSelector, "Refresh icon is visible" );
+ this.test.assert( this.getElementAttribute( refreshButtonSelector, 'href' ) === refreshButtonHref,
+ "Refresh button has href: " + refreshButtonHref );
- // tags and annotations
- //TODO: to their own file? tested elsewhere?
+ this.assertNavigationRequested( refreshButtonHref, "History refreshed when clicking refresh icon", function(){
+ this.click( refreshButtonSelector );
+ });
+});
- // peek:
- // proper headers?
- // lines?
- // scrollbar?
+// ------------------------------------------------------------------- history options menu structure
+//NOTE: options menu should be functionally tested elsewhere
+spaceghost.then( function historyOptions(){
+ this.test.comment( 'History options icon should be in place and menu should have the proper structure' );
- // can re-collapse?
+ // check the button and icon
+ this.test.assertExists( this.historyoptions.data.selectors.button, "Found history options button" );
+ this.test.assertVisible( this.historyoptions.data.selectors.button, "History options button is visible" );
+ this.test.assertVisible( this.historyoptions.data.selectors.buttonIcon, "History options icon is visible" );
+ // open the menu
+ this.click( this.historyoptions.data.selectors.button );
+ this.test.assertVisible( this.historyoptions.data.selectors.menu,
+ "Menu is visible when options button is clicked" );
+ // check the options
+ for( var optionKey in this.historyoptions.data.labels.options ){
+ if( this.historyoptions.data.labels.options.hasOwnProperty( optionKey ) ){
+ var optionLabel = this.historyoptions.data.labels.options[ optionKey ],
+ optionXpath = this.historyoptions.data.selectors.optionXpathByLabelFn( optionLabel );
+ this.test.assertVisible( optionXpath, 'Option label is visible: ' + optionLabel );
+ }
+ }
+});
+// ------------------------------------------------------------------- deleted hdas aren't in the dom
+spaceghost.then( function(){
+ this.test.comment( 'deleted hdas shouldn\'t be in the history panel DOM' );
+ this.historypanel.deleteHda( '#' + uploadInfo.hdaElement.attributes.id, function(){
+ this.test.assertDoesntExist( '#' + uploadInfo.hdaElement.attributes.id, "Deleted HDA is not in the DOM" );
+ });
+});
+// ------------------------------------------------------------------- options allow showing/hiding deleted hdas
+spaceghost.then( function(){
+ this.test.comment( 'History options->' + includeDeletedOptionsLabel + ' shows deleted datasets' );
+ this.historyoptions.includeDeleted();
+ this.withFrame( this.selectors.frames.history, function(){
+ this.waitForSelector( nameSelector, function(){
+ this.test.assertExists( '#' + uploadInfo.hdaElement.attributes.id,
+ "Deleted HDA is in the DOM (using history options -> " + includeDeletedOptionsLabel + ")" );
+ this.test.assertVisible( '#' + uploadInfo.hdaElement.attributes.id,
+ "Deleted HDA is visible again (using history options -> " + includeDeletedOptionsLabel + ")" );
+ });
+ });
+});
+spaceghost.then( function(){
+ this.test.comment( 'History options->' + includeDeletedOptionsLabel + ' (again) re-hides deleted datasets' );
+
+ this.historyoptions.includeDeleted();
+ this.withFrame( this.selectors.frames.history, function(){
+ this.waitForSelector( nameSelector, function(){
+ this.test.assertDoesntExist( '#' + uploadInfo.hdaElement.attributes.id,
+ "Deleted HDA is not in the DOM (using history options -> " + includeDeletedOptionsLabel + ")" );
+ });
+ });
+});
+
+// undelete the uploaded file
+spaceghost.then( function(){
+ this.historyoptions.includeDeleted();
+ this.withFrame( this.selectors.frames.history, function(){
+ this.waitForSelector( nameSelector, function(){
+ //TODO: to conv. fn
+ this.click( '#' + uploadInfo.hdaElement.attributes.id + ' .historyItemUndelete' );
+ });
+ });
+});
+
+// ------------------------------------------------------------------- hidden hdas aren't shown
+// ------------------------------------------------------------------- history options allows showing hidden hdas
+// can't test this yet w/o a way to make hdas hidden thru the ui or api
+
+// ------------------------------------------------------------------- hdas can be expanded by clicking on the hda name
+// broken in webkit w/ jq 1.7
+spaceghost.then( function(){
+ this.test.comment( 'HDAs can be expanded by clicking on the name' );
+ var uploadedSelector = '#' + uploadInfo.hdaElement.attributes.id;
+
+ this.withFrame( this.selectors.frames.history, function(){
+ this.click( uploadedSelector + ' .historyItemTitle' );
+ this.debug( 'title: ' + this.debugElement( uploadedSelector + ' .historyItemTitle' ) );
+ this.debug( 'wrapper: ' + this.debugElement( uploadedSelector ) );
+
+ this.wait( 1000, function(){
+ this.test.assertExists( uploadedSelector + ' .historyItemBody', "Body for uploaded file is found" );
+ this.test.assertVisible( uploadedSelector + ' .hda-summary', "hda-summary is visible" );
+ });
+ });
+});
+
+// ------------------------------------------------------------------- expanded hdas are still expanded after a refresh
+spaceghost.then( function(){
+ this.test.comment( 'Expanded hdas are still expanded after a refresh' );
+ var uploadedSelector = '#' + uploadInfo.hdaElement.attributes.id;
+
+ this.click( refreshButtonSelector );
+ this.withFrame( this.selectors.frames.history, function(){
+ this.waitForSelector( nameSelector, function(){
+ this.test.assertExists( uploadedSelector + ' .historyItemBody', "Body for uploaded file is found" );
+ this.test.assertVisible( uploadedSelector + ' .hda-summary', "hda-summary is visible" );
+ });
+ });
+ // this will break: webkit + jq 1.7
+});
+
+// ------------------------------------------------------------------- expanded hdas collapse by clicking name again
+spaceghost.then( function(){
+ this.test.comment( 'Expanded hdas collapse by clicking name again' );
+ var uploadedSelector = '#' + uploadInfo.hdaElement.attributes.id;
+
+ this.withFrame( this.selectors.frames.history, function(){
+ this.click( uploadedSelector + ' .historyItemTitle' );
+
+ this.wait( 500, function(){
+ this.test.assertNotVisible( uploadedSelector + ' .hda-summary', "hda-summary is not visible" );
+ });
+ });
+});
+
+// ------------------------------------------------------------------- collapsed hdas are still collapsed after a refresh
+spaceghost.then( function(){
+ this.test.comment( 'Collapsed hdas are still collapsed after a refresh' );
+ var uploadedSelector = '#' + uploadInfo.hdaElement.attributes.id;
+
+ this.click( refreshButtonSelector );
+ this.withFrame( this.selectors.frames.history, function(){
+ this.waitForSelector( nameSelector, function(){
+ this.test.assertNotVisible( uploadedSelector + ' .hda-summary', "hda-summary is not visible" );
+ });
+ });
+});
+
+// ------------------------------------------------------------------- history options collapses all expanded hdas
+spaceghost.then( function(){
+ // expand again
+ this.withFrame( this.selectors.frames.history, function(){
+ this.click( '#' + uploadInfo.hdaElement.attributes.id + ' .historyItemTitle' );
+ this.wait( 500, function(){});
+ });
+});
+spaceghost.then( function(){
+ this.test.comment( 'History option collapses all expanded hdas' );
+ var uploadedSelector = '#' + uploadInfo.hdaElement.attributes.id;
+
+ this.historyoptions.collapseExpanded();
+ this.wait( 500, function(){
+ this.withFrame( this.selectors.frames.history, function(){
+ this.test.assertNotVisible( uploadedSelector + ' .hda-summary', "hda-summary is not visible" );
+ });
+ });
+});
// ===================================================================
spaceghost.run( function(){
diff -r 592af505e4e673aa891e9163bd237dbbc728ba62 -r 0a6a4752493eacfac2fb7a537d30be0333a58977 test/casperjs/modules/historyoptions.js
--- /dev/null
+++ b/test/casperjs/modules/historyoptions.js
@@ -0,0 +1,160 @@
+// =================================================================== module object, exports
+/** Creates a new historyoptions module object.
+ * @exported
+ */
+exports.create = function createHistoryOptions( spaceghost ){
+ return new HistoryOptions( spaceghost );
+};
+
+/** HistoryOptions object constructor.
+ * @param {SpaceGhost} spaceghost a spaceghost instance
+ */
+var HistoryOptions = function HistoryOptions( spaceghost ){
+ //??: circ ref?
+ this.spaceghost = spaceghost;
+};
+exports.HistoryOptions = HistoryOptions;
+
+HistoryOptions.prototype.toString = function toString(){
+ return this.spaceghost + '.HistoryOptions';
+};
+
+// -------------------------------------------------------------------
+/* TODO:
+
+
+*/
+// =================================================================== API (external)
+/** Just open the menu
+ */
+HistoryOptions.prototype.openMenu = function openMenu(){
+ this.spaceghost.click( this.data.selectors.button );
+};
+
+/** Click an option by Label
+ */
+HistoryOptions.prototype.clickOption = function clickOption( optionLabel ){
+ this.openMenu();
+ // casperjs clickLabel
+ var optionXpath = this.data.selectors.optionXpathByLabelFn( optionLabel );
+ this.spaceghost.click( optionXpath );
+};
+
+// -------------------------------------------------------------------
+// these options lead to controller pages - encapsulate those pages here
+/** corresponds to history options menu: 'Saved Histories'
+ * @param {String} historyName the name of the history
+ */
+//HistoryOptions.prototype.savedHistoryByName = function savedHistoryByName( historyName ){
+//};
+/** corresponds to history options menu: 'Histories Shared with Me'
+ * @param {String} historyName the name of the history
+ */
+//HistoryOptions.prototype.sharedHistoryByName = function sharedHistoryByName( historyName ){
+//};
+
+/** corresponds to history options menu: 'Create New'
+ */
+//HistoryOptions.prototype.createNew = function createNew(){
+//};
+
+/** corresponds to history options menu: 'Copy History'
+ */
+//HistoryOptions.prototype.copyHistory = function copyHistory(){
+//};
+
+/** corresponds to history options menu: 'Copy Datasets'
+ */
+//HistoryOptions.prototype.copyDatasets = function copyDatasets(){
+//};
+
+/** corresponds to history options menu: 'Extract Workflow'
+ */
+//HistoryOptions.prototype.extractWorkflow = function extractWorkflow(){
+//};
+
+/** corresponds to history options menu: 'Share or Publish'
+ */
+//HistoryOptions.prototype.shareHistoryViaLink = function shareHistoryViaLink(){
+//};
+/** corresponds to history options menu: 'Share or Publish'
+ */
+//HistoryOptions.prototype.publishHistory = function publishHistory(){
+//};
+/** corresponds to history options menu: 'Share or Publish'
+ */
+//HistoryOptions.prototype.shareHistoryWithUser = function shareHistoryWithUser(){
+//};
+
+/** corresponds to history options menu: 'Dataset Security'
+ */
+//HistoryOptions.prototype.managePermissions = function managePermissions(){
+//};
+/** corresponds to history options menu: 'Dataset Security'
+ */
+//HistoryOptions.prototype.accessPermissions = function accessPermissions(){
+//};
+
+/** corresponds to history options menu: 'Resume Paused Jobs'
+ */
+//HistoryOptions.prototype.resumePausedJobs = function resumePausedJobs(){
+//};
+
+
+// -------------------------------------------------------------------
+// these are easy, one click options (they don't open a new page)
+/** corresponds to history options menu: 'Collapse Expanded Datasets'
+ */
+HistoryOptions.prototype.collapseExpanded = function collapseExpanded(){
+ this.clickOption( this.data.labels.options.collapseExpanded );
+};
+/** corresponds to history options menu: 'Include Deleted Datasets'
+ */
+HistoryOptions.prototype.includeDeleted = function includeDeleted(){
+ this.clickOption( this.data.labels.options.includeDeleted );
+};
+/** corresponds to history options menu: 'Include Hidden Datasets'
+ */
+HistoryOptions.prototype.includeHidden = function includeHidden(){
+ this.clickOption( this.data.labels.options.includeHidden );
+};
+
+
+// =================================================================== SELECTORS
+//TODO: data is not a very good name
+HistoryOptions.prototype.data = {
+ selectors : {
+ button : '#history-options-button',
+ buttonIcon : '#history-options-button span.fa-icon-cog',
+ menu : '#history-options-button-menu',
+ optionXpathByLabelFn : function optionXpathByLabelFn( label ){
+ return xpath( '//ul[@id="history-options-button-menu"]/li/a[text()[contains(.,"' + label + '")]]' );
+ }
+ },
+ labels : {
+ options : {
+ //History Lists
+ savedHistories : "Saved Histories",
+ sharedHistories : "Histories Shared with Me",
+ //Current History
+ createNew : "Create New",
+ copyHistory : "Copy History",
+ copyDatasets : "Copy Datasets",
+ shareOrPublish : "Share or Publish",
+ extractWorkflow : "Extract Workflow",
+ datasetSecurity : "Dataset Security",
+ resumePausedJobs : "Resume Paused Jobs",
+ collapseExpanded : 'Collapse Expanded Datasets',
+ includeDeleted : 'Include Deleted Datasets',
+ includeHidden : 'Include Hidden Datasets',
+ unhideHiddenDatasets : "Unhide Hidden Datasets",
+ purgeDeletedDatasets : "Purge Deleted Datasets",
+ showStructure : "Show Structure",
+ exportToFile : "Export to File",
+ deleteHistory : "Delete",
+ deleteHistoryPermanently : "Delete Permanently",
+ //Other Actions
+ importFromFile : "Import from File"
+ }
+ }
+};
diff -r 592af505e4e673aa891e9163bd237dbbc728ba62 -r 0a6a4752493eacfac2fb7a537d30be0333a58977 test/casperjs/modules/historypanel.js
--- a/test/casperjs/modules/historypanel.js
+++ b/test/casperjs/modules/historypanel.js
@@ -1,5 +1,5 @@
// =================================================================== module object, exports
-/** Creates a new tools module object.
+/** Creates a new historypanel module object.
* @exported
*/
exports.create = function createHistoryPanel( spaceghost ){
@@ -24,13 +24,15 @@
// -------------------------------------------------------------------
/* TODO:
- run a tool
+ conv.fns:
+ expand hda (click title)
+ undelete hda
+ rename history
*/
// =================================================================== INTERNAL
// =================================================================== API (external)
-//TODO: to history module
/** Find the casper element info of the hda wrapper given the hda title and hid.
* NOTE: if more than one is found, will return the first found.
* precondition: you should wrap this with withFrame( 'galaxy_history' ) :(
@@ -116,3 +118,84 @@
});
});
};
+
+/** Find the casper element info of the hda wrapper given the hda title and hid.
+ * NOTE: if more than one is found, will return the first found.
+ * precondition: you should wrap this with withFrame( 'galaxy_history' ) :(
+ * @param {String} title the title of the hda
+ * @param {Int} hid (optional) the hid of the hda to look for
+ * @returns {Object|null} ElementInfo of the historyItemWrapper found, null if not found
+ */
+HistoryPanel.prototype.hdaElementInfoByTitle = function hdaElementInfoByTitle( title, hid ){
+ var titleContains = ( hid !== undefined )?( hid + ': ' + title ):( title ),
+ wrapperInfo = this.spaceghost.elementInfoOrNull(
+ //TODO??: how to put this in editable json file
+ xpath( '//span[contains(text(),"' + titleContains + '")]/parent::*/parent::*' ) );
+ //this.spaceghost.debug( 'wrapperInfo: ' + this.spaceghost.jsonStr( wrapperInfo ) );
+ return wrapperInfo;
+};
+//TODO!: this will break if the hda name has single or double quotes (which are permitted in names)
+
+/** Find the id of the hda wrapper given the hda title and hid.
+ * @param {String} title the title of the hda
+ * @param {Int} hid (optional) the hid of the hda to look for
+ * @returns {String|null} DOM id of the historyItemWrapper found, null if not found
+ */
+HistoryPanel.prototype.hdaIdByTitle = function hdaIdByTitle( title, hid ){
+ var elementInfo = this.hdaElementInfoByTitle( title, hid );
+ return (( elementInfo && elementInfo.attributes && elementInfo.attributes.id )?
+ ( elementInfo.attributes.id ):( null ));
+};
+
+/** Deletes an hda by finding an hda with the given title and clicking on the delete icon.
+ * NOTE: if more than one is found, the first found will be deleted.
+ * @param {String} hdaSelector a css or xpath selector for an historyItemWrapper
+ * @param {Function} whenDeletedFn function to be called when the hda is deleted (optional)
+ * @param {Function} timeoutFn function to be called if/when the deleted attempted times out (optional)
+ */
+HistoryPanel.prototype.deleteHda = function deleteHda( hdaSelector, whenDeletedFn, timeoutFn ){
+ whenDeletedFn = whenDeletedFn || function(){};
+ var spaceghost = this.spaceghost;
+
+ spaceghost.withFrame( spaceghost.selectors.frames.history, function deletingHda(){
+ //precondition: historyItemWrapper's (hda dom elements) should have an id
+ // we could use the selector directly, but better if it errors before an attempted delete
+ var hdaId = spaceghost.getElementInfo( hdaSelector ).attributes.id;
+ spaceghost.debug( 'hda id: ' + spaceghost.jsonStr( hdaId ) );
+
+ // get the delete icon and click it
+ //TODO: handle disabled delete icon?
+ var deleteIconSelector = 'a[id^="historyItemDeleter-"]',
+ thisDeleteIconSelector = '#' + hdaId + ' ' + deleteIconSelector;
+ spaceghost.click( thisDeleteIconSelector );
+
+ spaceghost.waitWhileSelector( '#' + hdaId,
+ function hdaNoLongerInDom(){
+ spaceghost.info( 'hda deleted: ' + hdaSelector );
+ whenDeletedFn.call( spaceghost );
+
+ }, function timeout(){
+ if( timeoutFn ){
+ timeoutFn.call( spaceghost );
+ } else {
+ throw new spaceghost.GalaxyError( 'HistoryPanelError: '
+ + 'timeout attempting to delete hda : ' + hdaSelector );
+ }
+ });
+ });
+};
+
+/** Expands an HDA.
+ * @param {String} hdaSelector a css or xpath selector for an historyItemWrapper
+ */
+HistoryPanel.prototype.expandHda = function expandHda( hdaSelector ){
+ var spaceghost = this.spaceghost,
+ historyFrameInfo = spaceghost.getElementInfo( 'iframe[name="galaxy_history"]' );
+
+ spaceghost.withFrame( spaceghost.selectors.frames.history, function expandingHda(){
+ var titleInfo = spaceghost.getElementInfo( hdaSelector + ' .historyItemTitle' );
+ spaceghost.page.sendEvent( 'mousedown',
+ historyFrameInfo.x + titleInfo.x + 1, historyFrameInfo.y + titleInfo.y - 5 );
+ });
+ return spaceghost;
+};
diff -r 592af505e4e673aa891e9163bd237dbbc728ba62 -r 0a6a4752493eacfac2fb7a537d30be0333a58977 test/casperjs/modules/tools.js
--- a/test/casperjs/modules/tools.js
+++ b/test/casperjs/modules/tools.js
@@ -154,7 +154,7 @@
uploadInfo = {};
// precondition: filepath is relative to scriptDir
- filepath = spaceghost.options.scriptDir + '/' + filepath;
+ filepath = spaceghost.options.scriptDir + filepath;
// upload the file erroring if a done message is not displayed, aggregate info about upload
spaceghost.info( 'uploading file: ' + filepath + ' (timeout after ' + timeoutAfterMs + ')' );
@@ -179,14 +179,16 @@
spaceghost.debug( 'beginning wait for upload file\'s ok state' );
// get the hda view DOM element from the upload name and hid
spaceghost.withFrame( spaceghost.selectors.frames.history, function(){
- var hdaInfo = spaceghost.historypanel.hdaElementInfoByTitle( uploadInfo.name, uploadInfo.hid );
- if( !hdaInfo ){
- throw new spaceghost.GalaxyError( 'Upload Error: uploaded file HDA not found: '
- + uploadInfo.hid + ', ' + uploadInfo.name );
- }
- spaceghost.debug( 'hdaInfo: ' + spaceghost.jsonStr( hdaInfo ) );
- uploadInfo.hdaElement = hdaInfo;
- // uploadInfo now has filepath, filename, name, hid, and hdaElement
+ spaceghost.waitForSelector( '#history-name', function(){
+ var hdaInfo = spaceghost.historypanel.hdaElementInfoByTitle( uploadInfo.name, uploadInfo.hid );
+ if( hdaInfo === null ){
+ throw new spaceghost.GalaxyError( 'Upload Error: uploaded file HDA not found: '
+ + uploadInfo.hid + ', ' + uploadInfo.name );
+ }
+ spaceghost.debug( 'hdaInfo: ' + spaceghost.jsonStr( hdaInfo ) );
+ uploadInfo.hdaElement = hdaInfo;
+ // uploadInfo now has filepath, filename, name, hid, and hdaElement
+ });
});
spaceghost.then( function waitForOk(){
@@ -217,5 +219,3 @@
};
//TODO: upload via url
//TODO: upload by textarea
-
-
diff -r 592af505e4e673aa891e9163bd237dbbc728ba62 -r 0a6a4752493eacfac2fb7a537d30be0333a58977 test/casperjs/modules/user.js
--- a/test/casperjs/modules/user.js
+++ b/test/casperjs/modules/user.js
@@ -68,7 +68,7 @@
* @param {String} email the users email address
* @param {String} password the users password
*/
-User.prototype._submitLogin = function logoutUser( email, password ){
+User.prototype._submitLogin = function _submitLogin( email, password ){
var spaceghost = this.spaceghost,
loginInfo = {
//NOTE: keys are used as name selectors in the fill fn - must match the names of the inputs
@@ -166,7 +166,7 @@
/** Log out the current user
* @returns {SpaceGhost} the spaceghost instance (for chaining)
*/
-User.prototype.logout = function logoutUser(){
+User.prototype.logout = function logout(){
var spaceghost = this.spaceghost;
spaceghost.thenOpen( spaceghost.baseUrl, function(){
//TODO: handle already logged out
@@ -205,5 +205,3 @@
domain = domain || 'test.test';
return username + Date.now() + '@' + domain;
};
-
-
diff -r 592af505e4e673aa891e9163bd237dbbc728ba62 -r 0a6a4752493eacfac2fb7a537d30be0333a58977 test/casperjs/page-data/selectors.json
--- /dev/null
+++ b/test/casperjs/page-data/selectors.json
@@ -0,0 +1,35 @@
+{
+ "cookies" : {
+ "galaxyCookieName" : "galaxysession"
+ },
+
+ "selectors" : {
+ "historyPanel" : {
+ "name" : "div#history-name",
+ "subtitle" : "div#history-subtitle-area",
+ "tagIcon" : "#history-tag.icon-button",
+ "annoIcon" : "#history-annotate.icon-button",
+ "emptyMsg" : ".infomessagesmall"
+ },
+
+ "bootstrap" : {
+ "activeTooltip" : ".bs-tooltip"
+ },
+
+ "editableText" : {
+ "class" : "editable-text",
+ "activeInput" : "input#renaming-active"
+ }
+ },
+
+ "text" : {
+ "historyPanel" : {
+ "newName" : "Unnamed history",
+ "initialSizeStr" : "0 bytes",
+ "emptyMsgStr" : "Your history is empty. Click 'Get Data' on the left pane to start",
+ "tooltips" : {
+ "anonUserName" : "You must be logged in to edit your history name"
+ }
+ }
+ }
+}
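For reference, a rough Python equivalent of consuming a page-data file like the one above (the JS side uses the readJSONFile helper added to spaceghost.js below; the path and keys follow this commit, everything else is illustrative):

import json

with open( 'test/casperjs/page-data/selectors.json' ) as fh:
    page_data = json.load( fh )

name_selector = page_data[ 'selectors' ][ 'historyPanel' ][ 'name' ]      # 'div#history-name'
empty_msg_str = page_data[ 'text' ][ 'historyPanel' ][ 'emptyMsgStr' ]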
diff -r 592af505e4e673aa891e9163bd237dbbc728ba62 -r 0a6a4752493eacfac2fb7a537d30be0333a58977 test/casperjs/spaceghost.js
--- a/test/casperjs/spaceghost.js
+++ b/test/casperjs/spaceghost.js
@@ -2,6 +2,8 @@
Use in test command
bug: assertStepsRaise raise errors (all the way) when used in 'casperjs test .'
+ normalize names of fns that use withFrame or then to 'then<action>'
+ make any callbacks optional (that can be)
Does it run:
casperjs usertests.js --url='http://localhost:8080'
@@ -174,6 +176,8 @@
/** Set up any SG specific options passed in on the cli.
*/
SpaceGhost.prototype._processCLIArguments = function _processCLIArguments(){
+ //this.debug( 'cli: ' + this.jsonStr( this.cli ) );
+
//TODO: init these programmitically
var CLI_OPTIONS = {
returnJsonOnly : { defaultsTo: false, flag: 'return-json', help: 'send output to stderr, json to stdout' },
@@ -248,7 +252,7 @@
// get any fixture data passed in as JSON in args
// (NOTE: currently the 2nd arg (with the url being 1st?)
- this.fixtureData = ( this.cli.has( 1 ) )?( JSON.parse( this.cli.get( 1 ) ) ):( {} );
+ this.fixtureData = ( this.cli.has( 0 ) )?( JSON.parse( this.cli.get( 0 ) ) ):( {} );
this.debug( 'fixtureData:' + this.jsonStr( this.fixtureData ) );
};
@@ -329,6 +333,14 @@
}
};
+/** Event handler for step/casper timeouts - throws PageError
+ */
+SpaceGhost.prototype._timeoutHandler = function _timeoutHandler(){
+ console.debug( 'timeout' );
+ //msg = msg.replace( 'PageError: ', '' );
+ throw new PageError( 'Timeout occurred' );
+};
+
/** Event handler for console messages from the page.
*/
SpaceGhost.prototype._pageConsoleHandler = function _pageConsoleHandler(){
@@ -372,6 +384,9 @@
// ........................ page errors
this.on( 'page.error', this._pageErrorHandler );
//this.on( 'load.failed', this._loadFailedHandler );
+ this.on( 'timeout', this._timeoutHandler );
+ this.on( 'step.timeout', this._timeoutHandler );
+ this.on( 'waitFor.timeout', this._timeoutHandler );
// ........................ page info/debugging
// these are already displayed at the casper info level
@@ -391,6 +406,7 @@
this.user = require( this.options.scriptDir + 'modules/user' ).create( this );
this.tools = require( this.options.scriptDir + 'modules/tools' ).create( this );
this.historypanel = require( this.options.scriptDir + 'modules/historypanel' ).create( this );
+ this.historyoptions = require( this.options.scriptDir + 'modules/historyoptions' ).create( this );
};
// =================================================================== PAGE CONTROL
@@ -527,6 +543,32 @@
this.tryStepsCatch( stepsFn, testTheError );
};
+/** Assert that a function causes a navigation request with (at least partially) the given url.
+ * NOTE: _should_ play well with steps (e.g. then, thenOpen, etc.)
+ * @param {String} url some portion of the expected url for the nav request
+ * @param {String} message the assertion message
+ * @param {Function} fnThatRequests a function that causes a navigation request (e.g. click a link)
+ */
+SpaceGhost.prototype.assertNavigationRequested = function assertNavigationRequested( expectedUrl, message,
+ fnThatRequests ){
+ var requested = false;
+ function captureNavReq( url, navigationType, navigationLocked, isMainFrame ){
+ this.debug( 'Checking navigation.requested for url: ' + expectedUrl );
+ // use || here to handle multiple requests, if any one url works -> test will pass
+ requested = requested || ( url.indexOf( expectedUrl ) !== -1 );
+ }
+ this.then( function(){
+ this.on( 'navigation.requested', captureNavReq );
+ });
+ this.then( function(){
+ fnThatRequests.call( this );
+ });
+ this.then( function(){
+ this.removeListener( 'navigation.requested', captureNavReq );
+ this.test.assert( requested, message );
+ });
+};
+
// =================================================================== CONVENIENCE
/** Wraps casper.getElementInfo in try, returning null if element not found instead of erroring.
* @param {String} selector css or xpath selector for the element to find
@@ -605,6 +647,12 @@
return JSON.stringify( obj, null, 2 );
};
+/** output to debug the JSON of the selector (or null if not found)
+ */
+SpaceGhost.prototype.debugElement = function debugElement( selector ){
+ this.debug( this.jsonStr( this.elementInfoOrNull( selector ) ) );
+};
+
/** Debug SG itself
*/
SpaceGhost.prototype.debugMe = function(){
@@ -720,6 +768,13 @@
return JSON.parse( require( 'fs' ).read( filepath ) );
};
+SpaceGhost.prototype.writeJSONFile = function writeJSONFile( filepath, object, mode ){
+ mode = mode || 'w';
+ //precondition: filepath is relative to script dir
+ filepath = this.options.scriptDir + filepath;
+ return require( 'fs' ).write( filepath, this.jsonStr( object ), mode );
+};
+
// =================================================================== EXPORTS
/**
*/
@@ -733,4 +788,3 @@
"use strict";
return new SpaceGhost(options);
};
-
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: greg: Add baseline support for reviewing the functional test results generated by installing tool shed repositories into a Galaxy instance and running the functional tests defined for the tools they contain.
by commits-noreply@bitbucket.org 05 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/592af505e4e6/
changeset: 592af505e4e6
user: greg
date: 2013-03-05 18:36:58
summary: Add baseline support for reviewing the functional test results generated by installing tool shed repositories into a Galaxy instance and running the functional tests defined for the tools they contain.
affected #: 4 files
diff -r dbe53e2c1eab2b377880e87c4d2bd770dc470b80 -r 592af505e4e673aa891e9163bd237dbbc728ba62 lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -672,20 +672,32 @@
status='error' ) )
@web.expose
- def display_tool_functional_test_errors( self, trans, repository_id, repository_metadata_id, **kwd ):
+ def display_tool_functional_test_results( self, trans, repository_id, repository_metadata_id, **kwd ):
+ """
+ The test framework in ~/test/install_and_test_tool_shed_repositories can be executed on a regularly defined schedule (e.g., via cron) to install appropriate
+ repositories from a tool shed into a Galaxy instance and run defined functional tests for the tools included in the repository. This process affects the values
+ if these columns in the repository_metadata table: tools_functionally_correct, do_not_test, time_last_tested and tool_test_errors. The tool_test_errors is
+ slightly mis-named (it should have been named tool_test_results) it will contain a dictionary that includes information about the test environment even if all
+ tests passed and the tools_functionally_correct column is set to True.
+ The value of the tool_test_errors column will be a dictionary with the key / value pairs:
+ "test_environment", {"architecture": "i386", "python_version": "2.5.4", "system": "Darwin 10.8.0"}
+ "test_errors" [ { "test_id":<some test id>, "stdout":<stdout of running the test>, "stderr":<stderr of running the test>, "traceback":<traceback of running the test>]
+ """
params = util.Params( kwd )
message = util.restore_text( params.get( 'message', '' ) )
status = params.get( 'status', 'done' )
- repository = suc.get_repository_by_id( trans.app, repository_id )
+ repository = suc.get_repository_by_id( trans, repository_id )
if repository:
repository_metadata = suc.get_repository_metadata_by_id( trans, repository_metadata_id )
changeset_revision = repository_metadata.changeset_revision
if repository_metadata:
metadata = repository_metadata.metadata
if metadata:
- return trans.fill_template( '/webapps/tool_shed/repository/display_tool_functional_test_errors.mako',
+ revision_label = suc.get_revision_label( trans, repository, repository_metadata.changeset_revision )
+ return trans.fill_template( '/webapps/tool_shed/repository/display_tool_functional_test_results.mako',
repository=repository,
repository_metadata=repository_metadata,
+ revision_label=revision_label,
message=message,
status=status )
else:
@@ -1636,7 +1648,6 @@
downloadable=False )
revision_label = suc.get_revision_label( trans, repository, repository.tip( trans.app ) )
repository_metadata = None
- repository_metadata_id = None
metadata = None
is_malicious = False
repository_dependencies = None
@@ -1644,7 +1655,6 @@
repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, id, changeset_revision )
if repository_metadata:
revision_label = suc.get_revision_label( trans, repository, changeset_revision )
- repository_metadata_id = trans.security.encode_id( repository_metadata.id )
metadata = repository_metadata.metadata
is_malicious = repository_metadata.malicious
else:
@@ -1654,7 +1664,6 @@
repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, id, previous_changeset_revision )
if repository_metadata:
revision_label = suc.get_revision_label( trans, repository, previous_changeset_revision )
- repository_metadata_id = trans.security.encode_id( repository_metadata.id )
metadata = repository_metadata.metadata
is_malicious = repository_metadata.malicious
if repository_metadata:
@@ -1702,7 +1711,7 @@
repo=repo,
repository=repository,
containers_dict=containers_dict,
- repository_metadata_id=repository_metadata_id,
+ repository_metadata=repository_metadata,
changeset_revision=changeset_revision,
reviewed_by_user=reviewed_by_user,
review_id=review_id,
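Putting the docstring above in concrete terms, a value of the tool_test_errors column would look roughly like this (made-up values, structure per the docstring):

tool_test_errors = {
    'test_environment': { 'architecture': 'i386',
                          'python_version': '2.5.4',
                          'system': 'Darwin 10.8.0' },
    'test_errors': [ { 'test_id': 'some.test.id',
                       'stdout': '<stdout of running the test>',
                       'stderr': '<stderr of running the test>',
                       'traceback': '<traceback of running the test>' } ]
}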
diff -r dbe53e2c1eab2b377880e87c4d2bd770dc470b80 -r 592af505e4e673aa891e9163bd237dbbc728ba62 lib/tool_shed/util/shed_util_common.py
--- a/lib/tool_shed/util/shed_util_common.py
+++ b/lib/tool_shed/util/shed_util_common.py
@@ -2134,13 +2134,12 @@
elif len( repo_info_tuple ) == 7:
description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = repo_info_tuple
return description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies
-def get_repository_by_id( app, id ):
+def get_repository_by_id( trans, id ):
"""Get a repository from the database via id."""
- sa_session = app.model.context.current
- if app.name == 'galaxy':
- return sa_session.query( app.model.ToolShedRepository ).get( id )
+ if trans.webapp.name == 'galaxy':
+ return trans.sa_session.query( trans.model.ToolShedRepository ).get( trans.security.decode_id( id ) )
else:
- return sa_session.query( app.model.Repository ).get( id )
+ return trans.sa_session.query( trans.model.Repository ).get( trans.security.decode_id( id ) )
def get_repository_by_name( app, name ):
"""Get a repository from the database via name."""
sa_session = app.model.context.current
diff -r dbe53e2c1eab2b377880e87c4d2bd770dc470b80 -r 592af505e4e673aa891e9163bd237dbbc728ba62 templates/webapps/tool_shed/repository/display_tool_functional_test_results.mako
--- /dev/null
+++ b/templates/webapps/tool_shed/repository/display_tool_functional_test_results.mako
@@ -0,0 +1,185 @@
+<%inherit file="/base.mako"/>
+<%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/webapps/tool_shed/common/common.mako" import="*" />
+<%namespace file="/webapps/tool_shed/repository/common.mako" import="*" />
+
+<%
+ from galaxy.web.framework.helpers import time_ago
+
+ changeset_revision = repository_metadata.changeset_revision
+ has_metadata = repository.metadata_revisions
+ has_readme = metadata and 'readme' in metadata
+ is_admin = trans.user_is_admin()
+ is_new = repository.is_new( trans.app )
+ is_deprecated = repository.deprecated
+
+ can_browse_contents = trans.webapp.name == 'tool_shed' and not is_new
+ can_contact_owner = trans.user and trans.user != repository.user
+ can_push = trans.app.security_agent.can_push( trans.app, trans.user, repository )
+ can_download = not is_new and ( not is_malicious or can_push )
+ can_manage = is_admin or repository.user == trans.user
+ can_rate = repository.user != trans.user
+ can_review_repository = has_metadata and not is_deprecated and trans.app.security_agent.user_can_review_repositories( trans.user )
+ can_upload = can_push
+ can_view_change_log = trans.webapp.name == 'tool_shed' and not is_new
+ if repository_metadata.tool_test_errors:
+ # The tool_test_errors column is mis-named (it should have been named tool_test_results); it will contain a dictionary that includes information
+ # about the test environment even if all tests passed and the repository_metadata.tools_functionally_correct column is set to True.
+ tool_test_errors = repository_metadata.tool_test_errors
+ test_environment_dict = tool_test_errors.get( 'test_environment', None )
+ test_errors = tool_test_errors.get( 'test_errors', [] )
+ else:
+ tool_test_errors = None
+ test_environment_dict = {}
+ test_errors = []
+
+ if can_push:
+ browse_label = 'Browse or delete repository tip files'
+ else:
+ browse_label = 'Browse repository tip files'
+%>
+
+<%!
+ def inherit(context):
+ if context.get('use_panels'):
+ return '/webapps/tool_shed/base_panels.mako'
+ else:
+ return '/base.mako'
+%>
+<%inherit file="${inherit(context)}"/>
+
+<br/><br/>
+<ul class="manage-table-actions">
+ %if is_new:
+ <a class="action-button" href="${h.url_for( controller='upload', action='upload', repository_id=trans.security.encode_id( repository.id ) )}">Upload files to repository</a>
+ %else:
+ <li><a class="action-button" id="repository-${repository.id}-popup" class="menubutton">Repository Actions</a></li>
+ <div popupmenu="repository-${repository.id}-popup">
+ %if can_review_repository:
+ %if reviewed_by_user:
+ <a class="action-button" href="${h.url_for( controller='repository_review', action='edit_review', id=review_id )}">Manage my review of this revision</a>
+ %else:
+ <a class="action-button" href="${h.url_for( controller='repository_review', action='create_review', id=trans.app.security.encode_id( repository.id ), changeset_revision=changeset_revision )}">Add a review to this revision</a>
+ %endif
+ %endif
+ %if can_manage:
+ <a class="action-button" href="${h.url_for( controller='repository', action='manage_repository', id=trans.app.security.encode_id( repository.id ), changeset_revision=changeset_revision )}">Manage repository</a>
+ %else:
+ <a class="action-button" href="${h.url_for( controller='repository', action='view_repository', id=trans.app.security.encode_id( repository.id ), changeset_revision=changeset_revision )}">View repository</a>
+ %endif
+ %if can_upload:
+ <a class="action-button" href="${h.url_for( controller='upload', action='upload', repository_id=trans.security.encode_id( repository.id ) )}">Upload files to repository</a>
+ %endif
+ %if can_view_change_log:
+ <a class="action-button" href="${h.url_for( controller='repository', action='view_changelog', id=trans.app.security.encode_id( repository.id ) )}">View change log</a>
+ %endif
+ %if can_browse_contents:
+ <a class="action-button" href="${h.url_for( controller='repository', action='browse_repository', id=trans.app.security.encode_id( repository.id ) )}">${browse_label}</a>
+ %endif
+ %if can_contact_owner:
+ <a class="action-button" href="${h.url_for( controller='repository', action='contact_owner', id=trans.security.encode_id( repository.id ) )}">Contact repository owner</a>
+ %endif
+ %if can_download:
+ <a class="action-button" href="${h.url_for( controller='repository', action='download', repository_id=trans.app.security.encode_id( repository.id ), changeset_revision=changeset_revision, file_type='gz' )}">Download as a .tar.gz file</a>
+ <a class="action-button" href="${h.url_for( controller='repository', action='download', repository_id=trans.app.security.encode_id( repository.id ), changeset_revision=changeset_revision, file_type='bz2' )}">Download as a .tar.bz2 file</a>
+ <a class="action-button" href="${h.url_for( controller='repository', action='download', repository_id=trans.app.security.encode_id( repository.id ), changeset_revision=changeset_revision, file_type='zip' )}">Download as a zip file</a>
+ %endif
+ </div>
+ %endif
+</ul>
+
+%if message:
+ ${render_msg( message, status )}
+%endif
+
+<div class="toolForm">
+ <div class="toolFormTitle">Repository revision</div>
+ <div class="toolFormBody">
+ <div class="form-row">
+ <label>Revision:</label>
+ %if can_view_change_log:
+ <a href="${h.url_for( controller='repository', action='view_changelog', id=trans.app.security.encode_id( repository.id ) )}">${revision_label}</a>
+ %else:
+ ${revision_label}
+ %endif
+ </div>
+ </div>
+</div>
+<p/>
+%if can_download:
+ <div class="toolForm">
+ <div class="toolFormTitle">Repository '${repository.name}'</div>
+ <div class="toolFormBody">
+ <div class="form-row">
+ <label>Clone this repository:</label>
+ ${render_clone_str( repository )}
+ </div>
+ </div>
+ </div>
+%else:
+ <b>Repository name:</b><br/>
+ ${repository.name}
+%endif
+%if tool_test_errors:
+ <p/>
+ <div class="toolForm">
+ <div class="toolFormTitle">Tool functional test results</div>
+ <div class="toolFormBody">
+ <div class="form-row">
+ <label>Time tested:</label>
+ ${time_ago( repository_metadata.time_last_tested ) | h}
+ </div>
+ <div class="form-row">
+ <table width="100%">
+ <tr bgcolor="#D8D8D8" width="100%"><td><b>Test environment</td></tr>
+ </table>
+ </div>
+ <div class="form-row">
+ <label>Architecture:</label>
+ ${test_environment_dict.get( 'architecture', 'unknown' ) | h}
+ <div style="clear: both"></div>
+ </div>
+ <div class="form-row">
+ <label>Python version:</label>
+ ${test_environment_dict.get( 'python_version', 'unknown' ) | h}
+ <div style="clear: both"></div>
+ </div>
+ <div class="form-row">
+ <label>Operating system:</label>
+ ${test_environment_dict.get( 'system', 'unknown' ) | h}
+ <div style="clear: both"></div>
+ </div>
+ <div class="form-row">
+ <table width="100%">
+ <tr bgcolor="#D8D8D8" width="100%"><td><b>Test results</td></tr>
+ </table>
+ </div>
+ <div class="form-row">
+ <table class="grid">
+ %for test_results_dict in test_errors:
+ <tr>
+ <td bgcolor="#FFFFCC"><b>Test id</b></td>
+ <td bgcolor="#FFFFCC">${test_results_dict.get( 'test_id', 'unknown' )}</td>
+ </tr>
+ %if repository_metadata.tools_functionally_correct:
+ <tr>
+ <td><b>Stdout</b></td>
+ <td>${test_results_dict.get( 'stdout', '' )}</td>
+ </tr>
+ %else:
+ <tr>
+ <td><b>Stderr</b></td>
+ <td>${test_results_dict.get( 'stderr', '' )}</td>
+ </tr>
+ <tr>
+ <td><b>Traceback</b></td>
+ <td>${test_results_dict.get( 'traceback', '' )}</td>
+ </tr>
+ %endif
+ %endfor
+ </table>
+ <div style="clear: both"></div>
+ </div>
+ </div>
+ </div>
+%endif
diff -r dbe53e2c1eab2b377880e87c4d2bd770dc470b80 -r 592af505e4e673aa891e9163bd237dbbc728ba62 templates/webapps/tool_shed/repository/manage_repository.mako
--- a/templates/webapps/tool_shed/repository/manage_repository.mako
+++ b/templates/webapps/tool_shed/repository/manage_repository.mako
@@ -26,6 +26,13 @@
can_undeprecate = trans.user and ( is_admin or repository.user == trans.user ) and is_deprecated
can_upload = can_push
can_view_change_log = not is_new
+ if repository_metadata:
+ if repository_metadata.includes_tools and repository_metadata.tool_test_errors is not None:
+ can_display_tool_functional_test_results = True
+ else:
+ can_display_tool_functional_test_results = False
+ else:
+ can_display_tool_functional_test_results = False
if can_push:
browse_label = 'Browse or delete repository tip files'
@@ -105,6 +112,9 @@
%if can_undeprecate:
<a class="action-button" href="${h.url_for( controller='repository', action='deprecate', id=trans.security.encode_id( repository.id ), mark_deprecated=False )}">Mark repository as not deprecated</a>
%endif
+ %if can_display_tool_functional_test_results:
+ <a class="action-button" href="${h.url_for( controller='repository', action='display_tool_functional_test_results', repository_id=trans.security.encode_id( repository.id ), repository_metadata_id=trans.security.encode_id( repository_metadata.id ) )}">View tool functional test results</a>
+ %endif
%if can_download:
<a class="action-button" href="${h.url_for( controller='repository', action='download', repository_id=trans.app.security.encode_id( repository.id ), changeset_revision=changeset_revision, file_type='gz' )}">Download as a .tar.gz file</a><a class="action-button" href="${h.url_for( controller='repository', action='download', repository_id=trans.app.security.encode_id( repository.id ), changeset_revision=changeset_revision, file_type='bz2' )}">Download as a .tar.bz2 file</a>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/b5f22685678d/
changeset: b5f22685678d
user: inithello
date: 2013-03-05 16:23:21
summary: Script to check repositories and determine whether functional tests are defined for all tools, and set the do_not_test flag accordingly. Cleaned up the install and test framework and functional test common methods.
affected #: 3 files
diff -r d1f875d0f213c0ba5d858b34ac76d314803a6efb -r b5f22685678d6938467ef17c336e154f88698289 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- /dev/null
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+
+import os, sys, logging, tempfile
+
+new_path = [ os.path.join( os.getcwd(), "lib" ), os.path.join( os.getcwd(), "test" ) ]
+new_path.extend( sys.path[1:] )
+sys.path = new_path
+
+log = logging.getLogger()
+log.setLevel( 10 )
+log.addHandler( logging.StreamHandler( sys.stdout ) )
+
+from galaxy import eggs
+import pkg_resources
+pkg_resources.require( "SQLAlchemy >= 0.4" )
+pkg_resources.require( 'mercurial' )
+from mercurial import hg, ui, commands
+
+import time, ConfigParser, shutil
+from datetime import datetime, timedelta
+from time import strftime
+from optparse import OptionParser
+
+import galaxy.webapps.tool_shed.config as tool_shed_config
+import galaxy.webapps.tool_shed.model.mapping
+import sqlalchemy as sa
+from galaxy.model.orm import and_, not_, distinct
+from galaxy.util.json import from_json_string, to_json_string
+from galaxy.web import url_for
+from tool_shed.util.shed_util_common import clone_repository, get_configured_ui
+
+from base.util import get_test_environment
+
+assert sys.version_info[:2] >= ( 2, 4 )
+
+def main():
+ '''Script that checks repositories to see if the tools contained within them have functional tests defined.'''
+ parser = OptionParser()
+ parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
+ parser.add_option( "-v", "--verbose", action="store_true", dest="verbose", help="verbose mode, print the name, owner, and changeset revision of each repository", default=False )
+ ( options, args ) = parser.parse_args()
+ ini_file = args[0]
+ config_parser = ConfigParser.ConfigParser( {'here':os.getcwd()} )
+ config_parser.read( ini_file )
+ config_dict = {}
+ for key, value in config_parser.items( "app:main" ):
+ config_dict[key] = value
+ config = tool_shed_config.Configuration( **config_dict )
+
+ now = strftime( "%Y-%m-%d %H:%M:%S" )
+ print "#############################################################################"
+ print "# %s - Checking repositories for tools with functional tests." % now
+ app = FlagRepositoriesApplication( config )
+
+ if options.info_only:
+ print "# Displaying info only ( --info_only )"
+ if options.verbose:
+ print "# Displaying extra information ( --verbose )"
+
+ check_and_flag_repositories( app, info_only=options.info_only, verbose=options.verbose )
+
+def check_and_flag_repositories( app, info_only=False, verbose=False ):
+ '''
+    This function iterates through all records in the repository_metadata table, checking each one for tool metadata,
+ then checking the tool metadata for tests.
+ Each tool's metadata should look something like:
+ {
+ "add_to_tool_panel": true,
+ "description": "",
+ "guid": "toolshed.url:9009/repos/owner/name/tool_id/1.2.3",
+ "id": "tool_wrapper",
+ "name": "Map with Tool Wrapper",
+ "requirements": [],
+ "tests": [
+ {
+ "inputs": [ [ "parameter", "value" ], [ "other_parameter", "other_value" ], ],
+ "name": "Test-1",
+ "outputs": [
+ [
+ "output_field_name",
+ "output_file_name.bed"
+ ]
+ ],
+ "required_files": [ '1.bed', '2.bed', '3.bed' ]
+ }
+ ],
+ "tool_config": "database/community_files/000/repo_1/tool_wrapper.xml",
+ "tool_type": "default",
+ "version": "1.2.3",
+ "version_string_cmd": null
+ }
+
+    If the "tests" attribute is missing or empty, this script will mark the metadata record (which is specific to a changeset revision of a repository)
+    as not to be tested. If every tool in the "tools" list has at least one valid "tests" entry, this script will do nothing, leaving the revision
+    available for the install and test repositories script to process. If the changeset revision does not have a test-data directory, this script will
+    also mark the revision as not to be tested.
+
+ If any error is encountered, the script will update the repository_metadata.tool_test_errors attribute with the following structure:
+ {
+ "test_environment":
+ {
+ "python_version": "2.7.2",
+ "architecture": "x86_64",
+ "system": "Darwin 12.2.0"
+ },
+ "test_errors":
+ [
+ {
+ "test_id": "Something that will easily identify what the problem is",
+ "stdout": "The output of the test, or a more detailed description of what was tested and why it failed."
+ },
+ ]
+ }
+ '''
+ start = time.time()
+ repository_ids_to_check = []
+ tool_count = 0
+ has_tests = 0
+ no_tests = 0
+ no_tools = 0
+ # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test
+ # or tools_functionally_correct. Also filter out changeset revisions that are not downloadable, because it's redundant to test a revision that
+ # a user can't install.
+ metadata_records_to_check = app.sa_session.query( app.model.RepositoryMetadata ) \
+ .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
+ app.model.RepositoryMetadata.table.c.do_not_test == False,
+ app.model.RepositoryMetadata.table.c.tools_functionally_correct == False ) ) \
+ .all()
+ for metadata_record in metadata_records_to_check:
+ name = metadata_record.repository.name
+ owner = metadata_record.repository.user.username
+ changeset_revision = str( metadata_record.changeset_revision )
+ repository_status = {}
+        # If this changeset revision has no tools, we don't need to do anything here; the install and test script filters for
+        # repositories that contain tools.
+ if 'tools' not in metadata_record.metadata:
+ no_tools += 1
+ continue
+ else:
+ # Initialize the repository_status dict with the test environment, but leave the test_errors empty.
+ repository_status[ 'test_environment' ] = get_test_environment()
+ repository_status[ 'test_errors' ] = []
+ # Loop through all the tools in this metadata record, checking each one for defined functional tests.
+ for tool_metadata in metadata_record.metadata[ 'tools' ]:
+ tool_count += 1
+ tool_id = tool_metadata[ 'id' ]
+ if verbose:
+ print '# Checking for functional tests in changeset revision %s of %s, tool ID %s.' % \
+ ( changeset_revision, name, tool_id )
+ # If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
+ # not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
+ # automated functional test framework produces.
+ if 'tests' not in tool_metadata or not tool_metadata[ 'tests' ]:
+ if verbose:
+ print '# No functional tests defined for %s.' % tool_id
+ if 'test_errors' not in repository_status:
+ repository_status[ 'test_errors' ] = []
+ test_id = 'Functional tests for %s' % tool_id
+ # The repository_metadata.tool_test_errors attribute should always have the following structure:
+ # {
+ # "environment":
+ # {
+ # "python_version": "2.7.2",
+ # "architecture": "x86_64",
+ # "system": "Darwin 12.2.0"
+ # },
+ # "test_errors":
+ # [
+ # {
+ # "test_id": "Something that will easily identify what the problem is",
+ # "stdout": "The output of the test, or a more detailed description of what was tested and why it failed."
+ # },
+ # ]
+ # }
+ # Optionally, "stderr" and "traceback" may be included in a test_errors dict, if they are relevant.
+ test_errors = dict( test_id=test_id,
+ stdout='No functional tests defined in changeset revision %s of repository %s owned by %s.' % \
+ ( changeset_revision, name, owner ) )
+ repository_status[ 'test_errors' ].append( test_errors )
+ no_tests += 1
+ else:
+ has_tests += 1
+ if verbose:
+ if not repository_status[ 'test_errors' ]:
+ print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ else:
+ print '# Some tools missing functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ has_test_data = False
+ # Clone the repository up to the changeset revision we're checking.
+ repo_dir = metadata_record.repository.repo_path( app )
+ repo = hg.repository( get_configured_ui(), repo_dir )
+ work_dir = tempfile.mkdtemp()
+ cloned_ok, error_message = clone_repository( repo_dir, work_dir, changeset_revision )
+ if cloned_ok:
+ # Iterate through all the directories in the cloned changeset revision and determine whether there's a
+ # directory named test-data. If this directory is not present, update the metadata record for the changeset
+ # revision we're checking.
+ for root, dirs, files in os.walk( work_dir ):
+ if '.hg' in dirs:
+ dirs.remove( '.hg' )
+ if 'test-data' in dirs:
+ has_test_data = True
+ break
+ # Remove the cloned path.
+ if os.path.exists( work_dir ):
+ shutil.rmtree( work_dir )
+ if not has_test_data:
+ if verbose:
+ print '# Test data missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ repository_status[ 'test_environment' ] = get_test_environment()
+ test_id = 'Find functional test data for %s' % metadata_record.repository.name
+ # The repository_metadata.tool_test_errors attribute should always have the following structure:
+ # {
+ # "test_environment":
+ # {
+ # "python_version": "2.7.2",
+ # "architecture": "x86_64",
+ # "system": "Darwin 12.2.0"
+ # },
+ # "test_errors":
+ # [
+ # {
+ # "test_id": "Something that will easily identify what the problem is",
+ # "stdout": "The output of the test, or a more detailed description of what was tested and why it failed."
+ # },
+ # ]
+ # }
+ # Optionally, "stderr" and "traceback" may be included in a test_errors dict, if they are relevant.
+ test_errors = dict( test_id=test_id,
+ stdout='No test data found for changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
+ repository_status[ 'test_errors' ].append( test_errors )
+ else:
+ if verbose:
+ print '# Test data found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ if not info_only:
+ if repository_status[ 'test_errors' ]:
+ metadata_record.do_not_test = True
+ metadata_record.tools_functionally_correct = False
+ metadata_record.tool_test_errors = to_json_string( repository_status )
+ metadata_record.time_last_tested = datetime.utcnow()
+ app.sa_session.add( metadata_record )
+ app.sa_session.flush()
+ stop = time.time()
+ print '# Checked %d tools in %d changeset revisions.' % ( tool_count, len( metadata_records_to_check ) )
+ print '# Found %d changeset revisions without tools.' % no_tools
+ print '# Found %d tools without functional tests.' % no_tests
+ print '# Found %d tools with functional tests.' % has_tests
+ if info_only:
+ print '# Database not updated, info_only set.'
+ print "# Elapsed time: ", stop - start
+ print "#############################################################################"
+
+class FlagRepositoriesApplication( object ):
+ """Encapsulates the state of a Universe application"""
+ def __init__( self, config ):
+ if config.database_connection is False:
+ config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % config.database
+ # Setup the database engine and ORM
+ self.model = galaxy.webapps.tool_shed.model.mapping.init( config.file_path, config.database_connection, engine_options={}, create_tables=False )
+ self.hgweb_config_manager = self.model.hgweb_config_manager
+ self.hgweb_config_manager.hgweb_config_dir = config.hgweb_config_dir
+ print "# Using configured hgweb.config file: ", self.hgweb_config_manager.hgweb_config
+ @property
+ def sa_session( self ):
+ """
+ Returns a SQLAlchemy session -- currently just gets the current
+ session from the threadlocal session context, but this is provided
+ to allow migration toward a more SQLAlchemy 0.4 style of use.
+ """
+ return self.model.context.current
+ def shutdown( self ):
+ pass
+
+if __name__ == "__main__": main()
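Stepping back from the diff, the core decision the script makes per metadata record is small. The sketch below is not the committed code; flag_missing_tests and its arguments are illustrative names, and the input is assumed to be a repository_metadata.metadata dict shaped like the example in the docstring above:

    def flag_missing_tests( metadata, test_environment ):
        # Mirror the tool_test_errors structure documented above.
        repository_status = dict( test_environment=test_environment, test_errors=[] )
        for tool_metadata in metadata.get( 'tools', [] ):
            if not tool_metadata.get( 'tests' ):
                repository_status[ 'test_errors' ].append(
                    dict( test_id='Functional tests for %s' % tool_metadata[ 'id' ],
                          stdout='No functional tests defined.' ) )
        # A non-empty test_errors list is what causes do_not_test to be set.
        return repository_status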
diff -r d1f875d0f213c0ba5d858b34ac76d314803a6efb -r b5f22685678d6938467ef17c336e154f88698289 test/base/util.py
--- a/test/base/util.py
+++ b/test/base/util.py
@@ -1,4 +1,4 @@
-import os, sys, logging
+import os, sys, logging, platform
log = logging.getLogger(__name__)
@@ -36,6 +36,14 @@
return None, repository_name, changeset_revision
return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision
+def get_test_environment():
+ rval = {}
+ rval[ 'python_version' ] = platform.python_version()
+ rval[ 'architecture' ] = platform.machine()
+    system, hostname, release, version, machine, processor = platform.uname()
+    rval[ 'system' ] = '%s %s' % ( system, release )
+ return rval
+
def parse_tool_panel_config( config, shed_tools_dict ):
"""
Parse a shed-related tool panel config to generate the shed_tools_dict. This only happens when testing tools installed from the tool shed.
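For reference, get_test_environment returns a plain dict; the sample below only illustrates the shape consumed by the check script above, with values taken from the docstring example (actual values vary by host):

    >>> from base.util import get_test_environment
    >>> get_test_environment()
    {'python_version': '2.7.2', 'architecture': 'x86_64', 'system': 'Darwin 12.2.0'}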
diff -r d1f875d0f213c0ba5d858b34ac76d314803a6efb -r b5f22685678d6938467ef17c336e154f88698289 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -180,7 +180,7 @@
do_not_test='false',
downloadable='true',
malicious='false',
- must_include_tools='true' ) )
+ includes_tools='true' ) )
api_url = get_api_url( base=location, parts=[ 'repository_revisions' ], params=params )
if format == 'json':
return json_from_url( api_url )
@@ -394,7 +394,7 @@
log.info( "The embedded Galaxy application is running on %s:%s" % ( galaxy_test_host, galaxy_test_port ) )
log.info( "Repositories will be installed from the tool shed at %s" % galaxy_tool_shed_url )
success = False
- repository_status = {}
+ repository_status = dict()
try:
# Iterate through a list of repository info dicts.
log.info( "Retrieving repositories to install from the URL:\n%s\n" % str( galaxy_tool_shed_url ) )
https://bitbucket.org/galaxy/galaxy-central/commits/dbe53e2c1eab/
changeset: dbe53e2c1eab
user: inithello
date: 2013-03-05 16:23:55
summary: Removed the must_include_tools filter from the repository revisions API controller, since repository_metadata now has an includes_tools flag.
affected #: 1 file
diff -r b5f22685678d6938467ef17c336e154f88698289 -r dbe53e2c1eab2b377880e87c4d2bd770dc470b80 lib/galaxy/webapps/tool_shed/api/repository_revisions.py
--- a/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
+++ b/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
@@ -47,27 +47,20 @@
do_not_test = kwd.get( 'do_not_test', None )
if do_not_test is not None:
clause_list.append( trans.model.RepositoryMetadata.table.c.do_not_test == util.string_as_bool( do_not_test ) )
- # Filter by must_include_tools if received.
- must_include_tools = kwd.get( 'must_include_tools', False )
+ # Filter by includes_tools if received.
+ includes_tools = kwd.get( 'includes_tools', None )
+ if includes_tools is not None:
+ clause_list.append( trans.model.RepositoryMetadata.table.c.includes_tools == util.string_as_bool( includes_tools ) )
try:
query = trans.sa_session.query( trans.app.model.RepositoryMetadata ) \
.filter( and_( *clause_list ) ) \
.order_by( trans.app.model.RepositoryMetadata.table.c.repository_id ) \
.all()
for repository_metadata in query:
- if must_include_tools:
- metadata = repository_metadata.metadata
- if 'tools' in metadata:
- ok_to_return = True
- else:
- ok_to_return = False
- else:
- ok_to_return = True
- if ok_to_return:
- item = repository_metadata.get_api_value( view='collection',
- value_mapper=default_value_mapper( trans, repository_metadata ) )
- item[ 'url' ] = web.url_for( 'repository_revision', id=trans.security.encode_id( repository_metadata.id ) )
- rval.append( item )
+ item = repository_metadata.get_api_value( view='collection',
+ value_mapper=default_value_mapper( trans, repository_metadata ) )
+ item[ 'url' ] = web.url_for( 'repository_revision', id=trans.security.encode_id( repository_metadata.id ) )
+ rval.append( item )
except Exception, e:
rval = "Error in the Tool Shed repository_revisions API in index: " + str( e )
log.error( rval + ": %s" % str( e ) )
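With the filter moved into the database query, a client can request only tool-containing revisions directly from the API. A minimal sketch, assuming a tool shed listening on localhost:9009 (the host and port are illustrative; the parameter names match the controller above):

    import json
    import urllib2

    url = 'http://localhost:9009/api/repository_revisions?includes_tools=true&downloadable=true'
    # The index method returns a list of collection-view dicts, each carrying a 'url' key.
    for revision in json.loads( urllib2.urlopen( url ).read() ):
        print revision[ 'url' ]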
commit/galaxy-central: greg: Add white space to the tool shed's admin and repository_review controllers per new Galaxy dev team standards and factor out grid utility methods.
by commits-noreply@bitbucket.org 05 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d1f875d0f213/
changeset: d1f875d0f213
user: greg
date: 2013-03-05 15:27:50
summary: Add white space to the tool shed's admin and repository_review controllers per new Galaxy dev team standards and factor out grid utility methods.
affected #: 5 files
diff -r 19d8ba6eddf37211ca69282f350bb67daa42958c -r d1f875d0f213c0ba5d858b34ac76d314803a6efb lib/galaxy/webapps/tool_shed/controllers/admin.py
--- a/lib/galaxy/webapps/tool_shed/controllers/admin.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/admin.py
@@ -93,6 +93,7 @@
changeset_revision=v ) )
# Render the list view
return self.repository_grid( trans, **kwd )
+
@web.expose
@web.require_admin
def browse_repository_metadata( self, trans, **kwd ):
@@ -113,6 +114,7 @@
action='browse_repositories',
**kwd ) )
return self.repository_metadata_grid( trans, **kwd )
+
@web.expose
@web.require_admin
def create_category( self, trans, **kwd ):
@@ -144,6 +146,7 @@
description=description,
message=message,
status=status )
+
@web.expose
@web.require_admin
def delete_repository( self, trans, **kwd ):
@@ -180,6 +183,7 @@
action='browse_repositories',
message=util.sanitize_text( message ),
status=status ) )
+
@web.expose
@web.require_admin
def delete_repository_metadata( self, trans, **kwd ):
@@ -204,6 +208,7 @@
action='browse_repository_metadata',
message=util.sanitize_text( message ),
status=status ) )
+
@web.expose
@web.require_admin
def edit_category( self, trans, **kwd ):
@@ -243,6 +248,7 @@
category=category,
message=message,
status=status )
+
@web.expose
@web.require_admin
def manage_categories( self, trans, **kwd ):
@@ -277,6 +283,7 @@
action='edit_category',
**kwd ) )
return self.manage_category_grid( trans, **kwd )
+
@web.expose
@web.require_admin
def regenerate_statistics( self, trans, **kwd ):
@@ -289,6 +296,7 @@
return trans.fill_template( '/webapps/tool_shed/admin/statistics.mako',
message=message,
status=status )
+
@web.expose
@web.require_admin
def reset_metadata_on_selected_repositories_in_tool_shed( self, trans, **kwd ):
@@ -302,6 +310,7 @@
repositories_select_field=repositories_select_field,
message=message,
status=status )
+
@web.expose
@web.require_admin
def undelete_repository( self, trans, **kwd ):
@@ -341,6 +350,7 @@
action='browse_repositories',
message=util.sanitize_text( message ),
status='done' ) )
+
@web.expose
@web.require_admin
def mark_category_deleted( self, trans, **kwd ):
@@ -367,6 +377,7 @@
action='manage_categories',
message=util.sanitize_text( message ),
status='done' ) )
+
@web.expose
@web.require_admin
def purge_category( self, trans, **kwd ):
@@ -398,6 +409,7 @@
action='manage_categories',
message=util.sanitize_text( message ),
status='done' ) )
+
@web.expose
@web.require_admin
def undelete_category( self, trans, **kwd ):
diff -r 19d8ba6eddf37211ca69282f350bb67daa42958c -r d1f875d0f213c0ba5d858b34ac76d314803a6efb lib/galaxy/webapps/tool_shed/controllers/repository_review.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository_review.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository_review.py
@@ -8,6 +8,7 @@
import tool_shed.util.shed_util_common as suc
from galaxy.util.odict import odict
import tool_shed.grids.repository_review_grids as repository_review_grids
+import tool_shed.grids.util as grids_util
from galaxy import eggs
eggs.require('mercurial')
@@ -48,7 +49,8 @@
id=repository_id,
changeset_revision=changeset_revision,
message=message,
- status=status ) )
+ status=status ) )
+
@web.expose
@web.require_login( "browse components" )
def browse_components( self, trans, **kwd ):
@@ -59,6 +61,7 @@
action='create_component',
**kwd ) )
return self.component_grid( trans, **kwd )
+
@web.expose
@web.require_login( "browse review" )
def browse_review( self, trans, **kwd ):
@@ -75,6 +78,7 @@
review=review,
message=message,
status=status )
+
def copy_review( self, trans, review_to_copy, review ):
for component_review in review_to_copy.component_reviews:
copied_component_review = trans.model.ComponentReview( repository_review_id=review.id,
@@ -89,6 +93,7 @@
review.rating = review_to_copy.rating
trans.sa_session.add( review )
trans.sa_session.flush()
+
@web.expose
@web.require_login( "create component" )
def create_component( self, trans, **kwd ):
@@ -119,6 +124,7 @@
description=description,
message=message,
status=status )
+
@web.expose
@web.require_login( "create review" )
def create_review( self, trans, **kwd ):
@@ -183,6 +189,7 @@
return trans.response.send_redirect( web.url_for( controller='repository_review',
action='view_or_manage_repository',
**kwd ) )
+
@web.expose
@web.require_login( "edit component" )
def edit_component( self, trans, **kwd ):
@@ -213,6 +220,7 @@
component=component,
message=message,
status=status )
+
@web.expose
@web.require_login( "edit review" )
def edit_review( self, trans, **kwd ):
@@ -324,10 +332,10 @@
selected_value = review.approved
else:
selected_value = trans.model.ComponentReview.approved_states.NO
- revision_approved_select_field = build_approved_select_field( trans,
- name='revision_approved',
- selected_value=selected_value,
- for_component=False )
+ revision_approved_select_field = grids_util.build_approved_select_field( trans,
+ name='revision_approved',
+ selected_value=selected_value,
+ for_component=False )
rev, changeset_revision_label = suc.get_rev_label_from_changeset_revision( repo, review.changeset_revision )
return trans.fill_template( '/webapps/tool_shed/repository_review/edit_review.mako',
repository=repository,
@@ -337,6 +345,7 @@
components_dict=components_dict,
message=message,
status=status )
+
@web.expose
@web.require_login( "manage components" )
def manage_components( self, trans, **kwd ):
@@ -357,6 +366,7 @@
kwd[ 'message' ] = message
kwd[ 'status' ] = status
return self.component_grid( trans, **kwd )
+
@web.expose
@web.require_login( "manage repositories reviewed by me" )
def manage_repositories_reviewed_by_me( self, trans, **kwd ):
@@ -371,6 +381,7 @@
**kwd ) )
self.repositories_reviewed_by_me_grid.title = 'Repositories reviewed by me'
return self.repositories_reviewed_by_me_grid( trans, **kwd )
+
@web.expose
@web.require_login( "manage repositories with reviews" )
def manage_repositories_with_reviews( self, trans, **kwd ):
@@ -386,6 +397,7 @@
action='view_or_manage_repository',
**kwd ) )
return self.repositories_with_reviews_grid( trans, **kwd )
+
@web.expose
@web.require_login( "manage repositories without reviews" )
def manage_repositories_without_reviews( self, trans, **kwd ):
@@ -400,6 +412,7 @@
action='view_or_manage_repository',
**kwd ) )
return self.repositories_without_reviews_grid( trans, **kwd )
+
@web.expose
@web.require_login( "manage repository reviews" )
def manage_repository_reviews( self, trans, mine=False, **kwd ):
@@ -447,6 +460,7 @@
mine=mine,
message=message,
status=status )
+
@web.expose
@web.require_login( "manage repository reviews of revision" )
def manage_repository_reviews_of_revision( self, trans, **kwd ):
@@ -470,6 +484,7 @@
installable=installable,
message=message,
status=status )
+
@web.expose
@web.require_login( "repository reviews by user" )
def repository_reviews_by_user( self, trans, **kwd ):
@@ -496,6 +511,7 @@
user = suc.get_user( trans, kwd[ 'id' ] )
self.repository_reviews_by_user_grid.title = "All repository revision reviews for user '%s'" % user.username
return self.repository_reviews_by_user_grid( trans, **kwd )
+
@web.expose
@web.require_login( "reviewed repositories i own" )
def reviewed_repositories_i_own( self, trans, **kwd ):
@@ -514,6 +530,7 @@
action='view_or_manage_repository',
**kwd ) )
return self.reviewed_repositories_i_own_grid( trans, **kwd )
+
@web.expose
@web.require_login( "select previous review" )
def select_previous_review( self, trans, **kwd ):
@@ -533,6 +550,7 @@
previous_reviews_dict=previous_reviews_dict,
message=message,
status=status )
+
@web.expose
@web.require_login( "view or manage repository" )
def view_or_manage_repository( self, trans, **kwd ):
@@ -545,16 +563,3 @@
return trans.response.send_redirect( web.url_for( controller='repository',
action='view_repository',
**kwd ) )
-
-# ----- Utility methods -----
-
-def build_approved_select_field( trans, name, selected_value=None, for_component=True ):
- options = [ ( 'No', trans.model.ComponentReview.approved_states.NO ),
- ( 'Yes', trans.model.ComponentReview.approved_states.YES ) ]
- if for_component:
- options.append( ( 'Not applicable', trans.model.ComponentReview.approved_states.NA ) )
- select_field = SelectField( name=name )
- for option_tup in options:
- selected = selected_value and option_tup[1] == selected_value
- select_field.add_option( option_tup[0], option_tup[1], selected=selected )
- return select_field
diff -r 19d8ba6eddf37211ca69282f350bb67daa42958c -r d1f875d0f213c0ba5d858b34ac76d314803a6efb lib/tool_shed/grids/util.py
--- a/lib/tool_shed/grids/util.py
+++ b/lib/tool_shed/grids/util.py
@@ -2,8 +2,18 @@
import tool_shed.util.shed_util_common as suc
from galaxy.web.form_builder import SelectField
-def build_changeset_revision_select_field( trans, repository, selected_value=None, add_id_to_name=True,
- downloadable=False, reviewed=False, not_reviewed=False ):
+def build_approved_select_field( trans, name, selected_value=None, for_component=True ):
+ options = [ ( 'No', trans.model.ComponentReview.approved_states.NO ),
+ ( 'Yes', trans.model.ComponentReview.approved_states.YES ) ]
+ if for_component:
+ options.append( ( 'Not applicable', trans.model.ComponentReview.approved_states.NA ) )
+ select_field = SelectField( name=name )
+ for option_tup in options:
+ selected = selected_value and option_tup[ 1 ] == selected_value
+ select_field.add_option( option_tup[ 0 ], option_tup[ 1 ], selected=selected )
+ return select_field
+
+def build_changeset_revision_select_field( trans, repository, selected_value=None, add_id_to_name=True, downloadable=False, reviewed=False, not_reviewed=False ):
"""Build a SelectField whose options are the changeset_rev strings of certain revisions of the received repository."""
options = []
changeset_tups = []
@@ -40,7 +50,7 @@
# the changeset revisions may not be sorted correctly because setting metadata over time will reset update_time.
for changeset_tup in sorted( changeset_tups ):
# Display the latest revision first.
- options.insert( 0, ( changeset_tup[1], changeset_tup[2] ) )
+ options.insert( 0, ( changeset_tup[ 1 ], changeset_tup[ 2 ] ) )
if add_id_to_name:
name = 'changeset_revision_%d' % repository.id
else:
@@ -49,6 +59,6 @@
refresh_on_change=True,
refresh_on_change_values=refresh_on_change_values )
for option_tup in options:
- selected = selected_value and option_tup[1] == selected_value
- select_field.add_option( option_tup[0], option_tup[1], selected=selected )
+ selected = selected_value and option_tup[ 1 ] == selected_value
+ select_field.add_option( option_tup[ 0 ], option_tup[ 1 ], selected=selected )
return select_field
diff -r 19d8ba6eddf37211ca69282f350bb67daa42958c -r d1f875d0f213c0ba5d858b34ac76d314803a6efb templates/webapps/tool_shed/repository_review/edit_review.mako
--- a/templates/webapps/tool_shed/repository_review/edit_review.mako
+++ b/templates/webapps/tool_shed/repository_review/edit_review.mako
@@ -4,7 +4,7 @@
<%
from galaxy.web.form_builder import CheckboxField
- from galaxy.webapps.tool_shed.controllers.repository_review import build_approved_select_field
+ from tool_shed.grids.util import build_approved_select_field
from galaxy.webapps.tool_shed.util.container_util import STRSEP
can_manage_repository = is_admin or repository.user == trans.user
%>
diff -r 19d8ba6eddf37211ca69282f350bb67daa42958c -r d1f875d0f213c0ba5d858b34ac76d314803a6efb templates/webapps/tool_shed/repository_review/reviews_of_changeset_revision.mako
--- a/templates/webapps/tool_shed/repository_review/reviews_of_changeset_revision.mako
+++ b/templates/webapps/tool_shed/repository_review/reviews_of_changeset_revision.mako
@@ -4,7 +4,7 @@
<%namespace file="/webapps/tool_shed/repository/common.mako" import="*" /><%
- from galaxy.webapps.tool_shed.controllers.repository_review import build_approved_select_field
+ from tool_shed.grids.util import build_approved_select_field
from galaxy.webapps.tool_shed.util.container_util import STRSEP
is_admin = trans.user_is_admin()
is_new = repository.is_new( trans.app )
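After this refactoring, controllers and templates share one helper via the new module. A minimal usage sketch, assuming the caller already holds a trans object as the controllers above do:

    from tool_shed.grids.util import build_approved_select_field

    # Build the revision-approval select field the same way edit_review.mako now does.
    select_field = build_approved_select_field( trans,
                                                name='revision_approved',
                                                selected_value=None,
                                                for_component=False )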
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/9de5110ddf65/
changeset: 9de5110ddf65
user: jmchilton
date: 2013-03-04 20:14:08
summary: Add `file_ext` attribute to image classes. Whitespace fixes for same classes.
affected #: 1 file
diff -r 93f0c6867e3427d582f6fba6ca2877ecf472b6d5 -r 9de5110ddf6573375cc631c703a20ef24e8a72e9 lib/galaxy/datatypes/images.py
--- a/lib/galaxy/datatypes/images.py
+++ b/lib/galaxy/datatypes/images.py
@@ -33,6 +33,7 @@
# we need to ensure that the implementation is such that image files cannot be uploaded
# to our main public instance.
+
class Image( data.Data ):
"""Class describing an image"""
def set_peek( self, dataset, is_multi_byte=False ):
@@ -56,94 +57,147 @@
return True
else:
return False
-
+
+
class Jpg( Image ):
+ file_ext = "jpeg"
+
def sniff(self, filename, image=None):
"""Determine if the file is in jpg format."""
return check_image_type( filename, ['JPEG'], image )
+
class Png( Image ):
+ file_ext = "png"
+
def sniff(self, filename, image=None):
"""Determine if the file is in png format."""
return check_image_type( filename, ['PNG'], image )
-
+
+
class Tiff( Image ):
+ file_ext = "tiff"
+
def sniff(self, filename, image=None):
"""Determine if the file is in tiff format."""
return check_image_type( filename, ['TIFF'], image )
-
+
+
class Bmp( Image ):
+ file_ext = "bmp"
+
def sniff(self, filename, image=None):
"""Determine if the file is in bmp format."""
return check_image_type( filename, ['BMP'], image )
+
class Gif( Image ):
+ file_ext = "gif"
+
def sniff(self, filename, image=None):
"""Determine if the file is in gif format."""
return check_image_type( filename, ['GIF'], image )
+
class Im( Image ):
+ file_ext = "im"
+
def sniff(self, filename, image=None):
"""Determine if the file is in im format."""
return check_image_type( filename, ['IM'], image )
+
class Pcd( Image ):
+ file_ext = "pcd"
+
def sniff(self, filename, image=None):
"""Determine if the file is in pcd format."""
return check_image_type( filename, ['PCD'], image )
+
class Pcx( Image ):
+ file_ext = "pcx"
+
def sniff(self, filename, image=None):
"""Determine if the file is in pcx format."""
return check_image_type( filename, ['PCX'], image )
+
class Ppm( Image ):
+ file_ext = "ppm"
+
def sniff(self, filename, image=None):
"""Determine if the file is in ppm format."""
return check_image_type( filename, ['PPM'], image )
+
class Psd( Image ):
+ file_ext = "psd"
+
def sniff(self, filename, image=None):
"""Determine if the file is in psd format."""
return check_image_type( filename, ['PSD'], image )
+
class Xbm( Image ):
+ file_ext = "xbm"
+
def sniff(self, filename, image=None):
"""Determine if the file is in XBM format."""
return check_image_type( filename, ['XBM'], image )
+
class Xpm( Image ):
+ file_ext = "xpm"
+
def sniff(self, filename, image=None):
"""Determine if the file is in XPM format."""
return check_image_type( filename, ['XPM'], image )
+
class Rgb( Image ):
+ file_ext = "rgb"
+
def sniff(self, filename, image=None):
"""Determine if the file is in RGB format."""
return check_image_type( filename, ['RGB'], image )
+
class Pbm( Image ):
+ file_ext = "pbm"
+
def sniff(self, filename, image=None):
"""Determine if the file is in PBM format"""
return check_image_type( filename, ['PBM'], image )
+
class Pgm( Image ):
+ file_ext = "pgm"
+
def sniff(self, filename, image=None):
"""Determine if the file is in PGM format"""
return check_image_type( filename, ['PGM'], image )
+
class Eps( Image ):
+ file_ext = "eps"
+
def sniff(self, filename, image=None):
"""Determine if the file is in eps format."""
return check_image_type( filename, ['EPS'], image )
class Rast( Image ):
+ file_ext = "rast"
+
def sniff(self, filename, image=None):
"""Determine if the file is in rast format"""
return check_image_type( filename, ['RAST'], image )
+
class Pdf( Image ):
+ file_ext = "pdf"
+
def sniff(self, filename):
"""Determine if the file is in pdf format."""
headers = get_headers(filename, None, 1)
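The pattern is uniform: each subclass declares its canonical file_ext and delegates detection to check_image_type. A hypothetical new datatype would follow the same shape (WebP is purely illustrative and not part of this commit; it assumes the underlying imaging library can identify the format):

    class Webp( Image ):
        file_ext = "webp"

        def sniff(self, filename, image=None):
            """Determine if the file is in WebP format."""
            return check_image_type( filename, ['WEBP'], image )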
https://bitbucket.org/galaxy/galaxy-central/commits/19d8ba6eddf3/
changeset: 19d8ba6eddf3
user: dannon
date: 2013-03-05 14:18:39
summary: Merged in galaxyp/galaxy-central-parallelism-refactorings (pull request #133)
Add `file_ext` attribute to image classes. Whitespace fixes for same classes.
affected #: 1 file