galaxy-commits
January 2014
- 1 participant
- 280 discussions
commit/galaxy-central: martenson: reports: addressing stringification bug
by commits-noreply@bitbucket.org 30 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/2ce1ba52c6a7/
Changeset: 2ce1ba52c6a7
User: martenson
Date: 2014-01-30 22:38:04
Summary: reports: addressing stringification bug
Affected #: 1 file
diff -r 72a210496cda233f28769dac11c116e0e6a71f18 -r 2ce1ba52c6a7b33676ffebb73053ab3f9e9baa21 lib/galaxy/webapps/reports/controllers/jobs.py
--- a/lib/galaxy/webapps/reports/controllers/jobs.py
+++ b/lib/galaxy/webapps/reports/controllers/jobs.py
@@ -29,7 +29,7 @@
return job.tool_id
class CreateTimeColumn( grids.DateTimeColumn ):
def get_value( self, trans, grid, job ):
- return job.create_time
+ return job.create_time.isoformat()
class UserColumn( grids.GridColumn ):
def get_value( self, trans, grid, job ):
if job.user:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
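
As a side note on the fix above: returning an ISO 8601 string instead of a raw datetime keeps the reports grid's rendering and any later JSON serialization predictable. A minimal standalone sketch (plain Python, not Galaxy code) of the difference:

import json
from datetime import datetime

create_time = datetime(2014, 1, 30, 22, 38, 4)

# A raw datetime relies on implicit stringification and is not JSON serializable:
#   json.dumps({'create_time': create_time})  ->  TypeError
# Returning the ISO string up front avoids both problems:
print(json.dumps({'create_time': create_time.isoformat()}))
# {"create_time": "2014-01-30T22:38:04"}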
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c77d83fe4c3e/
Changeset: c77d83fe4c3e
Branch: next-stable
User: jeremy goecks
Date: 2014-01-30 21:54:26
Summary: Fix bug that prevented relative path from being used for len_file_path config option.
Affected #: 2 files
diff -r fd4936ae82193639011ed06b9552ec1a8c8166e5 -r c77d83fe4c3e37d07c241bb1f76064a3d0b7f149 lib/galaxy/config.py
--- a/lib/galaxy/config.py
+++ b/lib/galaxy/config.py
@@ -67,7 +67,7 @@
self.enable_unique_workflow_defaults = string_as_bool( kwargs.get( 'enable_unique_workflow_defaults', False ) )
self.tool_path = resolve_path( kwargs.get( "tool_path", "tools" ), self.root )
self.tool_data_path = resolve_path( kwargs.get( "tool_data_path", "tool-data" ), os.getcwd() )
- self.len_file_path = kwargs.get( "len_file_path", resolve_path(os.path.join(self.tool_data_path, 'shared','ucsc','chrom'), self.root) )
+ self.len_file_path = resolve_path( kwargs.get( "len_file_path", os.path.join( self.tool_data_path, 'shared','ucsc','chrom') ), self.root )
self.test_conf = resolve_path( kwargs.get( "test_conf", "" ), self.root )
# The value of migrated_tools_config is the file reserved for containing only those tools that have been eliminated from the distribution
# and moved to the tool shed.
diff -r fd4936ae82193639011ed06b9552ec1a8c8166e5 -r c77d83fe4c3e37d07c241bb1f76064a3d0b7f149 lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -236,7 +236,7 @@
if not chrom_info:
# Default to built-in build.
chrom_info = os.path.join( trans.app.config.len_file_path, "%s.len" % input_dbkey )
- incoming[ "chromInfo" ] = chrom_info
+ incoming[ "chromInfo" ] = os.path.abspath( chrom_info )
inp_data.update( db_datasets )
# Determine output dataset permission/roles list
https://bitbucket.org/galaxy/galaxy-central/commits/72a210496cda/
Changeset: 72a210496cda
User: jeremy goecks
Date: 2014-01-30 21:54:54
Summary: Automated merge of next-stable to default branch
Affected #: 2 files
diff -r bf7251133a037cda8ca7025a15af4ab9efb326ba -r 72a210496cda233f28769dac11c116e0e6a71f18 lib/galaxy/config.py
--- a/lib/galaxy/config.py
+++ b/lib/galaxy/config.py
@@ -67,7 +67,7 @@
self.enable_unique_workflow_defaults = string_as_bool( kwargs.get( 'enable_unique_workflow_defaults', False ) )
self.tool_path = resolve_path( kwargs.get( "tool_path", "tools" ), self.root )
self.tool_data_path = resolve_path( kwargs.get( "tool_data_path", "tool-data" ), os.getcwd() )
- self.len_file_path = kwargs.get( "len_file_path", resolve_path(os.path.join(self.tool_data_path, 'shared','ucsc','chrom'), self.root) )
+ self.len_file_path = resolve_path( kwargs.get( "len_file_path", os.path.join( self.tool_data_path, 'shared','ucsc','chrom') ), self.root )
self.test_conf = resolve_path( kwargs.get( "test_conf", "" ), self.root )
# The value of migrated_tools_config is the file reserved for containing only those tools that have been eliminated from the distribution
# and moved to the tool shed.
diff -r bf7251133a037cda8ca7025a15af4ab9efb326ba -r 72a210496cda233f28769dac11c116e0e6a71f18 lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -236,7 +236,7 @@
if not chrom_info:
# Default to built-in build.
chrom_info = os.path.join( trans.app.config.len_file_path, "%s.len" % input_dbkey )
- incoming[ "chromInfo" ] = chrom_info
+ incoming[ "chromInfo" ] = os.path.abspath( chrom_info )
inp_data.update( db_datasets )
# Determine output dataset permission/roles list
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
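
For readers unfamiliar with the config helpers: the fix moves the kwargs.get() inside resolve_path(), so a relative len_file_path supplied in the config is now anchored at the Galaxy root just like the default value. A rough standalone sketch, assuming resolve_path() simply joins relative paths against a root:

import os

def resolve_path(path, root):
    # Assumed behavior of Galaxy's helper: absolute paths pass through,
    # relative paths are joined onto the given root.
    return path if os.path.isabs(path) else os.path.join(root, path)

root = '/srv/galaxy'

# Old order: a user-supplied relative value skipped resolution entirely.
old = {'len_file_path': 'my-chrom-data'}.get('len_file_path',
        resolve_path(os.path.join('tool-data', 'shared', 'ucsc', 'chrom'), root))
# New order: whichever value is used (kwarg or default) gets resolved.
new = resolve_path({'len_file_path': 'my-chrom-data'}.get('len_file_path',
        os.path.join('tool-data', 'shared', 'ucsc', 'chrom')), root)

print(old)  # my-chrom-data (left relative, so downstream code resolved it against the cwd)
print(new)  # /srv/galaxy/my-chrom-data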
commit/galaxy-central: carlfeberhard: Visualizations Registry: small fixes
by commits-noreply@bitbucket.org 30 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/bf7251133a03/
Changeset: bf7251133a03
User: carlfeberhard
Date: 2014-01-30 19:10:34
Summary: Visualizations Registry: small fixes
Affected #: 4 files
diff -r d9f6f3f2467195397828d452664c68e5e4c104ef -r bf7251133a037cda8ca7025a15af4ab9efb326ba lib/galaxy/datatypes/dataproviders/decorators.py
--- a/lib/galaxy/datatypes/dataproviders/decorators.py
+++ b/lib/galaxy/datatypes/dataproviders/decorators.py
@@ -18,6 +18,7 @@
from functools import wraps
#from types import MethodType
+import urllib2
import copy
import logging
@@ -143,8 +144,9 @@
'int' : int,
'float' : float,
'bool' : bool,
- 'list:str' : lambda s: list_from_query_string( s ),
- 'list:int' : lambda s: [ int( i ) for i in list_from_query_string( s ) ],
+ 'list:str' : lambda s: list_from_query_string( s ),
+ 'list:escaped' : lambda s: [ urllib2.unquote( e ) for e in list_from_query_string( s ) ],
+ 'list:int' : lambda s: [ int( i ) for i in list_from_query_string( s ) ],
}
settings = settings or {}
# yay! yet another set of query string parsers! <-- sarcasm
diff -r d9f6f3f2467195397828d452664c68e5e4c104ef -r bf7251133a037cda8ca7025a15af4ab9efb326ba lib/galaxy/datatypes/dataproviders/line.py
--- a/lib/galaxy/datatypes/dataproviders/line.py
+++ b/lib/galaxy/datatypes/dataproviders/line.py
@@ -78,7 +78,7 @@
line = line.strip( '\n' )
if not self.provide_blank and line == '':
return None
- elif line.startswith( self.comment_char ):
+ elif self.comment_char and line.startswith( self.comment_char ):
return None
return super( FilteredLineDataProvider, self ).filter( line )
@@ -94,7 +94,7 @@
the line it is considered valid and will be provided).
"""
settings = {
- 'regex_list' : 'list:str',
+ 'regex_list' : 'list:escaped',
'invert' : 'bool',
}
diff -r d9f6f3f2467195397828d452664c68e5e4c104ef -r bf7251133a037cda8ca7025a15af4ab9efb326ba lib/galaxy/visualization/registry.py
--- a/lib/galaxy/visualization/registry.py
+++ b/lib/galaxy/visualization/registry.py
@@ -41,23 +41,18 @@
NOTE: also searches dictionaries
"""
- #print '\n\t getattr_recursive:', item, attr_key
using_default = len( args ) >= 1
default = args[0] if using_default else None
- #print '\t defaults:', using_default, default
for attr_key in attr_key.split( '.' ):
try:
- #print '\t\t attr:', attr_key, 'item:', item
if isinstance( item, dict ):
item = item.__getitem__( attr_key )
else:
item = getattr( item, attr_key )
except ( KeyError, AttributeError ), err:
- #print '\t\t error:', err
if using_default:
- #print '\t\t\t default:', default
return default
raise
@@ -77,7 +72,6 @@
attr_key = last_key
try:
- #print '\t\t attr:', attr_key, 'item:', item
if isinstance( item, dict ):
return item.__contains__( attr_key )
else:
@@ -88,6 +82,7 @@
return True
+
# ------------------------------------------------------------------- the registry
class VisualizationsRegistry( pluginframework.PageServingPluginManager ):
"""
@@ -118,7 +113,7 @@
super( VisualizationsRegistry, self ).__init__( app, 'visualizations', **kwargs )
# what to use to parse query strings into resources/vars for the template
self.resource_parser = ResourceParser()
- log.debug( '%s loaded', str( self ) )
+ #log.debug( '%s loaded', str( self ) )
def is_plugin( self, plugin_path ):
"""
@@ -265,8 +260,6 @@
#precondition: the target_object should be usable by the visualization (accrd. to data_sources)
# convert params using vis.data_source.to_params
params = self.get_url_params( trans, target_object, param_data )
- #for param in params:
- # print param
# we want existing visualizations to work as normal but still be part of the registry (without mod'ing)
# so generate their urls differently
@@ -294,12 +287,8 @@
# assign takes precedence (goes last, overwrites)?
#NOTE this is only one level
- #print 'target_object:', target_object
- #print 'target_attr:', target_attr
-
if target_attr and hasattr_recursive( target_object, target_attr ):
params[ to_param_name ] = getattr_recursive( target_object, target_attr )
- #print 'params[ %s ]:' %( to_param_name ), params[ to_param_name ]
if assign:
params[ to_param_name ] = assign
@@ -394,7 +383,6 @@
returned[ 'name' ] = xml_tree.attrib.get( 'name', None )
if not returned[ 'name' ]:
raise ParsingException( 'visualization needs a name attribute' )
- print returned[ 'name' ]
# a (for now) text description of what the visualization does
description = xml_tree.find( 'description' )
@@ -532,7 +520,7 @@
raise ParsingException( 'data_source entry requires a model_class' )
if xml_tree.text not in self.ALLOWED_MODEL_CLASSES:
- log.debug( 'available data_source model_classes: %s' %( str( self.ALLOWED_MODEL_CLASSES ) ) )
+ #log.debug( 'available data_source model_classes: %s' %( str( self.ALLOWED_MODEL_CLASSES ) ) )
raise ParsingException( 'Invalid data_source model_class: %s' %( xml_tree.text ) )
# look up the model from the model module returning an empty data_source if not found
@@ -760,6 +748,7 @@
If param is required and not present, raises a `KeyError`.
"""
+ #log.debug( 'parse_parameter_dictionary, query_params:\n%s', query_params )
# first parse any params from any visualizations that were passed
query_params = self.get_params_from_visualization_param( trans, controller, param_config_dict, query_params )
@@ -842,8 +831,8 @@
return self.parse_parameter( trans, param_config, default )
def get_params_from_visualization_param( self, trans, controller, param_config_dict, query_params ):
- log.debug( 'parse_visualization_params: %s', param_config_dict )
- log.debug( ' : %s', query_params )
+ #log.debug( 'parse_visualization_params: %s', param_config_dict )
+ #log.debug( ' : %s', query_params )
# first, find the visualization in the parameters if any
visualization = None
@@ -854,25 +843,25 @@
if query_val is None:
continue
- log.debug( 'found visualization param: %s, %s', param_name, query_val )
+ #log.debug( 'found visualization param: %s, %s', param_name, query_val )
visualization = self.parse_parameter( trans, controller, param_config, query_val )
if visualization:
break
# if no vis is found, can't get any new params from it: return the original query_params
if not visualization:
- log.debug( 'visualization not found' )
+ #log.debug( 'visualization not found' )
return query_params
- log.debug( 'found visualization: %s', visualization )
+ #log.debug( 'found visualization: %s', visualization )
# next, attempt to copy any params from the visualizations config
visualization_config = visualization.latest_revision.config
- log.debug( '\t config: %s', visualization_config )
+ #log.debug( '\t config: %s', visualization_config )
params_from_visualization = {}
for param_name, param_config in param_config_dict.items():
if param_name in visualization_config:
params_from_visualization[ param_name ] = visualization_config[ param_name ]
- log.debug( 'params_from_visualization: %s', params_from_visualization )
+ #log.debug( 'params_from_visualization: %s', params_from_visualization )
# layer the query_params over the params from the visualization, returning the combined
params_from_visualization.update( query_params )
@@ -926,7 +915,7 @@
#TODO: subclass here?
elif param_type == 'visualization':
encoded_visualization_id = query_param
- log.debug( 'visualization param, id : %s', encoded_visualization_id )
+ #log.debug( 'visualization param, id : %s', encoded_visualization_id )
#TODO:?? some fallback if there's no get_X in controller that's passed?
parsed_param = controller.get_visualization( trans, encoded_visualization_id,
check_ownership=False, check_accessible=True )
@@ -948,5 +937,4 @@
dbkey = query_param
parsed_param = galaxy.util.sanitize_html.sanitize_html( dbkey, 'utf-8' )
- #print ( '%s, %s -> %s, %s' %( param_type, query_param, str( type( parsed_param ) ), parsed_param ) )
return parsed_param
diff -r d9f6f3f2467195397828d452664c68e5e4c104ef -r bf7251133a037cda8ca7025a15af4ab9efb326ba lib/galaxy/web/base/pluginframework.py
--- a/lib/galaxy/web/base/pluginframework.py
+++ b/lib/galaxy/web/base/pluginframework.py
@@ -62,7 +62,7 @@
:param skip_bad_plugins: whether to skip plugins that cause
exceptions when loaded or to raise that exception
"""
- log.debug( 'PluginManager.init: %s, %s', directories_setting, kwargs )
+ #log.debug( 'PluginManager.init: %s, %s', directories_setting, kwargs )
self.directories = []
self.skip_bad_plugins = skip_bad_plugins
self.plugins = odict.odict()
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
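
The interesting piece above is the new 'list:escaped' setting type: regex_list values now arrive URL-escaped, so commas inside a regex survive the list split and are unescaped afterwards. A standalone sketch of the idea (Python 3's urllib.parse.unquote stands in for urllib2.unquote, and list_from_query_string is a hypothetical stand-in for Galaxy's helper):

from urllib.parse import unquote

def list_from_query_string(value):
    # Hypothetical helper: split a comma-separated query-string value into a list.
    return value.split(',')

setting_parsers = {
    'list:str':     lambda s: list_from_query_string(s),
    'list:escaped': lambda s: [unquote(e) for e in list_from_query_string(s)],
    'list:int':     lambda s: [int(i) for i in list_from_query_string(s)],
}

raw = '%5Echr%2C.*%24,comment'               # two regexes, the first containing a comma
print(setting_parsers['list:str'](raw))      # ['%5Echr%2C.*%24', 'comment']
print(setting_parsers['list:escaped'](raw))  # ['^chr,.*$', 'comment']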
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/fd4936ae8219/
Changeset: fd4936ae8219
Branch: next-stable
User: greg
Date: 2014-01-30 17:20:04
Summary: Fixes in the tool shed's install and test framework for handling repositories containing tools that install correctly but with one or more dependencies that do not.
Affected #: 2 files
diff -r 593be4d22c470abe6bdbbfa51f2a8e7ac035de33 -r fd4936ae82193639011ed06b9552ec1a8c8166e5 test/install_and_test_tool_shed_repositories/base/util.py
--- a/test/install_and_test_tool_shed_repositories/base/util.py
+++ b/test/install_and_test_tool_shed_repositories/base/util.py
@@ -735,6 +735,8 @@
repository_name = str( repository.name )
repository_owner = str( repository.owner )
repository_changeset_revision = str( repository.changeset_revision )
+ print 'Populating dependency install containers for revision %s of repository %s owned by %s.' % \
+ ( repository_changeset_revision, repository_name, repository_owner )
processed_successful_repository_installations = install_and_test_statistics_dict.get( 'successful_repository_installations', [] )
if repository_identifier_tup not in processed_successful_repository_installations:
install_and_test_statistics_dict[ 'successful_repository_installations' ].append( repository_identifier_tup )
@@ -844,12 +846,17 @@
repository_name = str( repository.name )
repository_owner = str( repository.owner )
repository_changeset_revision = str( repository.changeset_revision )
+ print 'Potentially populating install containers for repository dependencies of revision %s of repository %s owned by %s.' % \
+ ( repository_changeset_revision, repository_name, repository_owner )
repository_dependencies_dicts, error_message = get_repository_dependencies_dicts( galaxy_tool_shed_url, repository_metadata_id )
if error_message:
- print 'Cannot check or populate repository dependency install containers for version %s of repository %s owned by %s ' % \
+ print 'Cannot check or populate repository dependency install containers for revision %s of repository %s owned by %s ' % \
( repository_changeset_revision, repository_name, repository_owner )
print 'due to the following error getting repository_dependencies_dicts:\n%s' % str( error_message )
else:
+ if not repository_dependencies_dicts:
+ print 'Revision %s of repository %s owned by %s has no repository dependencies.' % \
+ ( repository_changeset_revision, repository_name, repository_owner )
for repository_dependencies_dict in repository_dependencies_dicts:
if not isinstance( repository_dependencies_dict, dict ):
print 'Skipping invalid repository_dependencies_dict: %s' % str( repository_dependencies_dict )
@@ -951,7 +958,7 @@
can_update_tool_shed )
print 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
( changeset_revision, name, owner, str( response_dict ) )
- print'============================================================='
+ print'\n============================================================='
else:
# The required repository's installation failed.
tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = str( required_repository.error_message )
@@ -965,7 +972,7 @@
can_update_tool_shed )
print 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
( changeset_revision, name, owner, str( response_dict ) )
- print'============================================================='
+ print'\n============================================================='
else:
print 'Cannot retrieve revision %s of required repository %s owned by %s from the database ' % \
( changeset_revision, name, owner )
@@ -1056,7 +1063,7 @@
name = str( name )
owner = str( owner )
changeset_revision = str( changeset_revision )
- print '============================================================='
+ print '\n============================================================='
print 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
( changeset_revision, name, owner, str( tool_test_results_dict ) )
print 'Updating tool_test_results for repository_metadata id %s.' % metadata_revision_id
diff -r 593be4d22c470abe6bdbbfa51f2a8e7ac035de33 -r fd4936ae82193639011ed06b9552ec1a8c8166e5 test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -251,6 +251,19 @@
repository_identifier_tup,
install_and_test_statistics_dict,
tool_test_results_dict )
+ if params.get( 'test_install_error', False ):
+ # The repository was successfully installed, but one or more dependencies had installation errors,
+ # so we'll populate the test result containers since we cannot execute any tests.
+ response_dict = \
+ install_and_test_base_util.save_test_results_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
+ tool_test_results_dicts,
+ tool_test_results_dict,
+ repository_dict,
+ params,
+ can_update_tool_shed )
+ print 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( response_dict ) )
+ print'\n============================================================='
# Populate the installation containers (success or error) for the repository's immediate repository
# dependencies whose containers are not yet populated.
install_and_test_base_util.populate_install_containers_for_repository_dependencies( app,
@@ -266,6 +279,10 @@
print 'Cannot execute tests for tools in revision %s of repository %s owned by %s ' % \
( changeset_revision, name, owner )
print 'because one or more dependencies has installation errors.'
+ # The repository was installed successfully, but one or more dependencies had installation errors. Since
+ # we cannot test the tools due to these errors, we'll remove tests and tools were created during the repository
+ # installation process so nose will not discover them and attempt to execute them.
+ remove_tests( app )
else:
print 'Revision %s of repository %s owned by %s installed successfully, so running tool tests.' % \
( changeset_revision, name, owner )
@@ -279,6 +296,9 @@
from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
# If the repository has a test-data directory we write the generated shed_tools_dict to a temporary
# file so the functional test framework can find it.
+ # TODO: Eliminate the need for this shed_tools_dict since it grows large over the course of each test run.
+ # If it cannot be eliminated altogether, reinitialize it with each new repository install so at this point
+ # it contains only entries for the current repository dependency hierarchy being tested.
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
print 'Saved generated shed_tools_dict to %s\nContents: %s' % ( galaxy_shed_tools_dict, shed_tools_dict )
try:
@@ -605,6 +625,7 @@
owner = str( repository.owner )
changeset_revision = str( repository.changeset_revision )
repository_identifier_tup = ( name, owner, changeset_revision )
+ print 'Testing tools contained in revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
# Generate the test methods for this installed repository. We need to pass testing_shed_tools=True here
# or twill will look in $GALAXY_HOME/test-data for test data, which may result in missing or invalid test
# files.
https://bitbucket.org/galaxy/galaxy-central/commits/d9f6f3f24671/
Changeset: d9f6f3f24671
User: greg
Date: 2014-01-30 17:20:33
Summary: Merged from next-stable
Affected #: 2 files
diff -r b67a464b8588525e17fd64910b9b9f3bcadb82ae -r d9f6f3f2467195397828d452664c68e5e4c104ef test/install_and_test_tool_shed_repositories/base/util.py
--- a/test/install_and_test_tool_shed_repositories/base/util.py
+++ b/test/install_and_test_tool_shed_repositories/base/util.py
@@ -735,6 +735,8 @@
repository_name = str( repository.name )
repository_owner = str( repository.owner )
repository_changeset_revision = str( repository.changeset_revision )
+ print 'Populating dependency install containers for revision %s of repository %s owned by %s.' % \
+ ( repository_changeset_revision, repository_name, repository_owner )
processed_successful_repository_installations = install_and_test_statistics_dict.get( 'successful_repository_installations', [] )
if repository_identifier_tup not in processed_successful_repository_installations:
install_and_test_statistics_dict[ 'successful_repository_installations' ].append( repository_identifier_tup )
@@ -844,12 +846,17 @@
repository_name = str( repository.name )
repository_owner = str( repository.owner )
repository_changeset_revision = str( repository.changeset_revision )
+ print 'Potentially populating install containers for repository dependencies of revision %s of repository %s owned by %s.' % \
+ ( repository_changeset_revision, repository_name, repository_owner )
repository_dependencies_dicts, error_message = get_repository_dependencies_dicts( galaxy_tool_shed_url, repository_metadata_id )
if error_message:
- print 'Cannot check or populate repository dependency install containers for version %s of repository %s owned by %s ' % \
+ print 'Cannot check or populate repository dependency install containers for revision %s of repository %s owned by %s ' % \
( repository_changeset_revision, repository_name, repository_owner )
print 'due to the following error getting repository_dependencies_dicts:\n%s' % str( error_message )
else:
+ if not repository_dependencies_dicts:
+ print 'Revision %s of repository %s owned by %s has no repository dependencies.' % \
+ ( repository_changeset_revision, repository_name, repository_owner )
for repository_dependencies_dict in repository_dependencies_dicts:
if not isinstance( repository_dependencies_dict, dict ):
print 'Skipping invalid repository_dependencies_dict: %s' % str( repository_dependencies_dict )
@@ -951,7 +958,7 @@
can_update_tool_shed )
print 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
( changeset_revision, name, owner, str( response_dict ) )
- print'============================================================='
+ print'\n============================================================='
else:
# The required repository's installation failed.
tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = str( required_repository.error_message )
@@ -965,7 +972,7 @@
can_update_tool_shed )
print 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
( changeset_revision, name, owner, str( response_dict ) )
- print'============================================================='
+ print'\n============================================================='
else:
print 'Cannot retrieve revision %s of required repository %s owned by %s from the database ' % \
( changeset_revision, name, owner )
@@ -1056,7 +1063,7 @@
name = str( name )
owner = str( owner )
changeset_revision = str( changeset_revision )
- print '============================================================='
+ print '\n============================================================='
print 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
( changeset_revision, name, owner, str( tool_test_results_dict ) )
print 'Updating tool_test_results for repository_metadata id %s.' % metadata_revision_id
diff -r b67a464b8588525e17fd64910b9b9f3bcadb82ae -r d9f6f3f2467195397828d452664c68e5e4c104ef test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -251,6 +251,19 @@
repository_identifier_tup,
install_and_test_statistics_dict,
tool_test_results_dict )
+ if params.get( 'test_install_error', False ):
+ # The repository was successfully installed, but one or more dependencies had installation errors,
+ # so we'll populate the test result containers since we cannot execute any tests.
+ response_dict = \
+ install_and_test_base_util.save_test_results_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
+ tool_test_results_dicts,
+ tool_test_results_dict,
+ repository_dict,
+ params,
+ can_update_tool_shed )
+ print 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( response_dict ) )
+ print'\n============================================================='
# Populate the installation containers (success or error) for the repository's immediate repository
# dependencies whose containers are not yet populated.
install_and_test_base_util.populate_install_containers_for_repository_dependencies( app,
@@ -266,6 +279,10 @@
print 'Cannot execute tests for tools in revision %s of repository %s owned by %s ' % \
( changeset_revision, name, owner )
print 'because one or more dependencies has installation errors.'
+ # The repository was installed successfully, but one or more dependencies had installation errors. Since
+ # we cannot test the tools due to these errors, we'll remove tests and tools were created during the repository
+ # installation process so nose will not discover them and attempt to execute them.
+ remove_tests( app )
else:
print 'Revision %s of repository %s owned by %s installed successfully, so running tool tests.' % \
( changeset_revision, name, owner )
@@ -279,6 +296,9 @@
from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
# If the repository has a test-data directory we write the generated shed_tools_dict to a temporary
# file so the functional test framework can find it.
+ # TODO: Eliminate the need for this shed_tools_dict since it grows large over the course of each test run.
+ # If it cannot be eliminated altogether, reinitialize it with each new repository install so at this point
+ # it contains only entries for the current repository dependency hierarchy being tested.
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( shed_tools_dict ) )
print 'Saved generated shed_tools_dict to %s\nContents: %s' % ( galaxy_shed_tools_dict, shed_tools_dict )
try:
@@ -605,6 +625,7 @@
owner = str( repository.owner )
changeset_revision = str( repository.changeset_revision )
repository_identifier_tup = ( name, owner, changeset_revision )
+ print 'Testing tools contained in revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
# Generate the test methods for this installed repository. We need to pass testing_shed_tools=True here
# or twill will look in $GALAXY_HOME/test-data for test data, which may result in missing or invalid test
# files.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: jmchilton: Fix division by zero error in quota.
by commits-noreply@bitbucket.org 29 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/b67a464b8588/
Changeset: b67a464b8588
User: jmchilton
Date: 2014-01-30 01:35:54
Summary: Fix division by zero error in quota.
Affected #: 1 file
diff -r 286b0d3b0485cfd1e5434dda86528662549d4468 -r b67a464b8588525e17fd64910b9b9f3bcadb82ae lib/galaxy/quota/__init__.py
--- a/lib/galaxy/quota/__init__.py
+++ b/lib/galaxy/quota/__init__.py
@@ -138,7 +138,11 @@
# get the usage, if it wasn't passed
if usage is False:
usage = self.get_usage( trans, user, history )
- return min( ( int( float( usage ) / quota * 100 ), 100 ) )
+ try:
+ return min( ( int( float( usage ) / quota * 100 ), 100 ) )
+ except ZeroDivisionError:
+ return 100
+
def set_entity_quota_associations( self, quotas=[], users=[], groups=[], delete_existing_assocs=True ):
for quota in quotas:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
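
The guarded calculation, as a tiny standalone sketch (not the Galaxy class itself): a zero quota now reports 100% used instead of raising.

def usage_percent(usage, quota):
    # Mirrors the patched logic: cap the percentage at 100 and treat a
    # zero quota as fully consumed rather than dividing by zero.
    try:
        return min(int(float(usage) / quota * 100), 100)
    except ZeroDivisionError:
        return 100

print(usage_percent(512, 1024))   # 50
print(usage_percent(2048, 1024))  # 100 (capped)
print(usage_percent(512, 0))      # 100 (previously raised ZeroDivisionError)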
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e034999e8b1a/
Changeset: e034999e8b1a
User: jmchilton
Date: 2014-01-29 15:36:32
Summary: Bugfix: History import when 'cleanup_job=never'.
Affected #: 1 file
diff -r 6a7cb977238c76359f39c8484b88cdd1daff4b1f -r e034999e8b1a2820f81e6fe64c623b7417e38148 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -789,8 +789,8 @@
# If the job was deleted, call tool specific fail actions (used for e.g. external metadata) and clean up
if self.tool:
self.tool.job_failed( self, message, exception )
- if self.app.config.cleanup_job == 'always' or (self.app.config.cleanup_job == 'onsuccess' and job.state == job.states.DELETED):
- self.cleanup()
+ delete_files = self.app.config.cleanup_job == 'always' or (self.app.config.cleanup_job == 'onsuccess' and job.state == job.states.DELETED)
+ self.cleanup( delete_files=delete_files )
def pause( self, job=None, message=None ):
if job is None:
@@ -1067,22 +1067,27 @@
self.sa_session.flush()
log.debug( 'job %d ended' % self.job_id )
- if self.app.config.cleanup_job == 'always' or ( not stderr and self.app.config.cleanup_job == 'onsuccess' ):
- self.cleanup()
+ # TODO: Also fix "not stderr" below, use final_job_state.
+ delete_files = self.app.config.cleanup_job == 'always' or ( not stderr and self.app.config.cleanup_job == 'onsuccess' )
+ self.cleanup( delete_files=delete_files )
def check_tool_output( self, stdout, stderr, tool_exit_code, job ):
return check_output( self.tool, stdout, stderr, tool_exit_code, job )
- def cleanup( self ):
- # remove temporary files
+ def cleanup( self, delete_files=True ):
+ # At least one of these tool cleanup actions (job import), is needed
+ # for thetool to work properly, that is why one might want to run
+ # cleanup but not delete files.
try:
- for fname in self.extra_filenames:
- os.remove( fname )
- self.external_output_metadata.cleanup_external_metadata( self.sa_session )
+ if delete_files:
+ for fname in self.extra_filenames:
+ os.remove( fname )
+ self.external_output_metadata.cleanup_external_metadata( self.sa_session )
galaxy.tools.imp_exp.JobExportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session )
galaxy.tools.imp_exp.JobImportHistoryArchiveWrapper( self.app, self.job_id ).cleanup_after_job()
galaxy.tools.genome_index.GenomeIndexToolWrapper( self.job_id ).postprocessing( self.sa_session, self.app )
- self.app.object_store.delete(self.get_job(), base_dir='job_work', entire_dir=True, dir_only=True, extra_dir=str(self.job_id))
+ if delete_files:
+ self.app.object_store.delete(self.get_job(), base_dir='job_work', entire_dir=True, dir_only=True, extra_dir=str(self.job_id))
except:
log.exception( "Unable to cleanup job %d" % self.job_id )
@@ -1520,8 +1525,8 @@
# if the job was deleted, don't finish it
if task.state == task.states.DELETED:
# Job was deleted by an administrator
- if self.app.config.cleanup_job in ( 'always', 'onsuccess' ):
- self.cleanup()
+ delete_files = self.app.config.cleanup_job in ( 'always', 'onsuccess' )
+ self.cleanup( delete_files=delete_files )
return
elif task.state == task.states.ERROR:
self.fail( task.info )
https://bitbucket.org/galaxy/galaxy-central/commits/40d94d94900b/
Changeset: 40d94d94900b
User: jmchilton
Date: 2014-01-29 15:36:32
Summary: Bugfix: cleanup_job for newer job error checking logic.
Affected #: 1 file
diff -r e034999e8b1a2820f81e6fe64c623b7417e38148 -r 40d94d94900b82be76e11b778626d09a421e4966 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -1067,8 +1067,7 @@
self.sa_session.flush()
log.debug( 'job %d ended' % self.job_id )
- # TODO: Also fix "not stderr" below, use final_job_state.
- delete_files = self.app.config.cleanup_job == 'always' or ( not stderr and self.app.config.cleanup_job == 'onsuccess' )
+ delete_files = self.app.config.cleanup_job == 'always' or ( job.state == job.states.OK and self.app.config.cleanup_job == 'onsuccess' )
self.cleanup( delete_files=delete_files )
def check_tool_output( self, stdout, stderr, tool_exit_code, job ):
https://bitbucket.org/galaxy/galaxy-central/commits/07dceba219c3/
Changeset: 07dceba219c3
User: jmchilton
Date: 2014-01-29 15:36:32
Summary: Eliminate unneeded if in jobs/__init__.py.
Checking if job.state is ERROR when that cannot be (method returns self.fail above this if that were the case). Even if it were somehow possible, subsequent code would fail badly with final_job_state not being set.
Affected #: 1 file
diff -r 40d94d94900b82be76e11b778626d09a421e4966 -r 07dceba219c315e2857f105abfdaf030efa1f16c lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -883,11 +883,10 @@
# We set final_job_state to use for dataset management, but *don't* set
# job.state until after dataset collection to prevent history issues
- if job.states.ERROR != job.state:
- if ( self.check_tool_output( stdout, stderr, tool_exit_code, job )):
- final_job_state = job.states.OK
- else:
- final_job_state = job.states.ERROR
+ if ( self.check_tool_output( stdout, stderr, tool_exit_code, job ) ):
+ final_job_state = job.states.OK
+ else:
+ final_job_state = job.states.ERROR
if self.version_string_cmd:
version_filename = self.get_version_string_path()
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
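
Taken together, the three commits separate "run the post-job cleanup hooks" from "delete the job's temporary files", with deletion decided by the cleanup_job setting and the final job state. A condensed sketch of that decision, assuming the three documented values 'always', 'onsuccess' and 'never':

def should_delete_files(cleanup_job, job_ok):
    # Files are removed when cleanup is unconditional, or when it is
    # success-only and the job finished OK; the cleanup hooks run either way.
    return cleanup_job == 'always' or (cleanup_job == 'onsuccess' and job_ok)

for setting in ('always', 'onsuccess', 'never'):
    print(setting, should_delete_files(setting, True), should_delete_files(setting, False))
# always True True
# onsuccess True False
# never False False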
commit/galaxy-central: jmchilton: Fix for a failing API test in buildbot.
by commits-noreply@bitbucket.org 29 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/6a7cb977238c/
Changeset: 6a7cb977238c
User: jmchilton
Date: 2014-01-29 07:27:30
Summary: Fix for a failing API test in buildbot.
Just noticed these are actually running tonight, that is good news.
Affected #: 1 file
diff -r 60203bad003cc6b4bc054500248e5a69cbe9b8bc -r 6a7cb977238c76359f39c8484b88cdd1daff4b1f test/functional/api/test_page_revisions.py
--- a/test/functional/api/test_page_revisions.py
+++ b/test/functional/api/test_page_revisions.py
@@ -1,5 +1,5 @@
from galaxy.exceptions import error_codes
-from functional.api.pages import BasePageApiTestCase
+from functional.api.test_pages import BasePageApiTestCase
class PageRevisionsApiTestCase( BasePageApiTestCase ):
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
11 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/b928679cc4ac/
Changeset: b928679cc4ac
User: jmchilton
Date: 2014-01-28 21:35:14
Summary: PEP-8 and style fixes for various files related to history import/export.
Affected #: 3 files
diff -r 0ab0eb81d9828b0527d68f404a36acc64359f5f8 -r b928679cc4acd843e8a6dcfb24c89f5a0b40d3a6 lib/galaxy/tools/imp_exp/__init__.py
--- a/lib/galaxy/tools/imp_exp/__init__.py
+++ b/lib/galaxy/tools/imp_exp/__init__.py
@@ -1,4 +1,8 @@
-import os, shutil, logging, tempfile, json
+import os
+import shutil
+import logging
+import tempfile
+import json
from galaxy import model
from galaxy.tools.parameters.basic import UnvalidatedValue
from galaxy.web.framework.helpers import to_unicode
@@ -8,6 +12,7 @@
log = logging.getLogger(__name__)
+
def load_history_imp_exp_tools( toolbox ):
""" Adds tools for importing/exporting histories to archives. """
# Use same process as that used in load_external_metadata_tool; see that
@@ -42,6 +47,7 @@
toolbox.tools_by_id[ history_imp_tool.id ] = history_imp_tool
log.debug( "Loaded history import tool: %s", history_imp_tool.id )
+
class JobImportHistoryArchiveWrapper( object, UsesHistoryMixin, UsesAnnotations ):
"""
Class provides support for performing jobs that import a history from
@@ -144,23 +150,23 @@
metadata = dataset_attrs['metadata']
# Create dataset and HDA.
- hda = model.HistoryDatasetAssociation( name = dataset_attrs['name'].encode( 'utf-8' ),
- extension = dataset_attrs['extension'],
- info = dataset_attrs['info'].encode( 'utf-8' ),
- blurb = dataset_attrs['blurb'],
- peek = dataset_attrs['peek'],
- designation = dataset_attrs['designation'],
- visible = dataset_attrs['visible'],
- dbkey = metadata['dbkey'],
- metadata = metadata,
- history = new_history,
- create_dataset = True,
- sa_session = self.sa_session )
+ hda = model.HistoryDatasetAssociation( name=dataset_attrs['name'].encode( 'utf-8' ),
+ extension=dataset_attrs['extension'],
+ info=dataset_attrs['info'].encode( 'utf-8' ),
+ blurb=dataset_attrs['blurb'],
+ peek=dataset_attrs['peek'],
+ designation=dataset_attrs['designation'],
+ visible=dataset_attrs['visible'],
+ dbkey=metadata['dbkey'],
+ metadata=metadata,
+ history=new_history,
+ create_dataset=True,
+ sa_session=self.sa_session )
hda.state = hda.states.OK
self.sa_session.add( hda )
self.sa_session.flush()
- new_history.add_dataset( hda, genome_build = None )
- hda.hid = dataset_attrs['hid'] # Overwrite default hid set when HDA added to history.
+ new_history.add_dataset( hda, genome_build=None )
+ hda.hid = dataset_attrs['hid'] # Overwrite default hid set when HDA added to history.
# TODO: Is there a way to recover permissions? Is this needed?
#permissions = trans.app.security_agent.history_get_default_permissions( new_history )
#trans.app.security_agent.set_all_dataset_permissions( hda.dataset, permissions )
@@ -273,6 +279,7 @@
jiha.job.stderr += "Error cleaning up history import job: %s" % e
self.sa_session.flush()
+
class JobExportHistoryArchiveWrapper( object, UsesHistoryMixin, UsesAnnotations ):
"""
Class provides support for performing jobs that export a history to an
@@ -317,23 +324,23 @@
""" Encode an HDA, default encoding for everything else. """
if isinstance( obj, trans.app.model.HistoryDatasetAssociation ):
return {
- "__HistoryDatasetAssociation__" : True,
- "create_time" : obj.create_time.__str__(),
- "update_time" : obj.update_time.__str__(),
- "hid" : obj.hid,
- "name" : to_unicode( obj.name ),
- "info" : to_unicode( obj.info ),
- "blurb" : obj.blurb,
- "peek" : obj.peek,
- "extension" : obj.extension,
- "metadata" : prepare_metadata( dict( obj.metadata.items() ) ),
- "parent_id" : obj.parent_id,
- "designation" : obj.designation,
- "deleted" : obj.deleted,
- "visible" : obj.visible,
- "file_name" : obj.file_name,
- "annotation" : to_unicode( getattr( obj, 'annotation', '' ) ),
- "tags" : get_item_tag_dict( obj ),
+ "__HistoryDatasetAssociation__": True,
+ "create_time": obj.create_time.__str__(),
+ "update_time": obj.update_time.__str__(),
+ "hid": obj.hid,
+ "name": to_unicode( obj.name ),
+ "info": to_unicode( obj.info ),
+ "blurb": obj.blurb,
+ "peek": obj.peek,
+ "extension": obj.extension,
+ "metadata": prepare_metadata( dict( obj.metadata.items() ) ),
+ "parent_id": obj.parent_id,
+ "designation": obj.designation,
+ "deleted": obj.deleted,
+ "visible": obj.visible,
+ "file_name": obj.file_name,
+ "annotation": to_unicode( getattr( obj, 'annotation', '' ) ),
+ "tags": get_item_tag_dict( obj ),
}
if isinstance( obj, UnvalidatedValue ):
return obj.__str__()
@@ -347,15 +354,15 @@
# Write history attributes to file.
history = jeha.history
history_attrs = {
- "create_time" : history.create_time.__str__(),
- "update_time" : history.update_time.__str__(),
- "name" : to_unicode( history.name ),
- "hid_counter" : history.hid_counter,
- "genome_build" : history.genome_build,
- "annotation" : to_unicode( self.get_item_annotation_str( trans.sa_session, history.user, history ) ),
- "tags" : get_item_tag_dict( history ),
- "includes_hidden_datasets" : include_hidden,
- "includes_deleted_datasets" : include_deleted
+ "create_time": history.create_time.__str__(),
+ "update_time": history.update_time.__str__(),
+ "name": to_unicode( history.name ),
+ "hid_counter": history.hid_counter,
+ "genome_build": history.genome_build,
+ "annotation": to_unicode( self.get_item_annotation_str( trans.sa_session, history.user, history ) ),
+ "tags": get_item_tag_dict( history ),
+ "includes_hidden_datasets": include_hidden,
+ "includes_deleted_datasets": include_deleted
}
history_attrs_filename = tempfile.NamedTemporaryFile( dir=temp_output_dir ).name
history_attrs_out = open( history_attrs_filename, 'w' )
@@ -391,7 +398,7 @@
# Get the associated job, if any. If this hda was copied from another,
# we need to find the job that created the origial hda
job_hda = hda
- while job_hda.copied_from_history_dataset_association: #should this check library datasets as well?
+ while job_hda.copied_from_history_dataset_association: # should this check library datasets as well?
job_hda = job_hda.copied_from_history_dataset_association
if not job_hda.creating_job_associations:
# No viable HDA found.
@@ -472,4 +479,3 @@
shutil.rmtree( temp_dir )
except Exception, e:
log.debug( 'Error deleting directory containing attribute files (%s): %s' % ( temp_dir, e ) )
-
diff -r 0ab0eb81d9828b0527d68f404a36acc64359f5f8 -r b928679cc4acd843e8a6dcfb24c89f5a0b40d3a6 lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py
--- a/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py
+++ b/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py
@@ -6,19 +6,25 @@
--[url|file] source type, either a URL or a file.
"""
-import sys, optparse, tarfile, tempfile, urllib2, math
+import sys
+import optparse
+import tarfile
+import tempfile
+import urllib2
+import math
# Set max size of archive/file that will be handled to be 100 GB. This is
# arbitrary and should be adjusted as needed.
MAX_SIZE = 100 * math.pow( 2, 30 )
+
def url_to_file( url, dest_file ):
"""
Transfer a file from a remote URL to a temporary file.
"""
try:
url_reader = urllib2.urlopen( url )
- CHUNK = 10 * 1024 # 10k
+ CHUNK = 10 * 1024 # 10k
total = 0
fp = open( dest_file, 'wb')
while True:
@@ -35,6 +41,7 @@
print "Exception getting file from URL: %s" % e, sys.stderr
return None
+
def unpack_archive( archive_file, dest_dir ):
"""
Unpack a tar and/or gzipped archive into a destination directory.
@@ -63,4 +70,4 @@
# Unpack archive.
unpack_archive( archive_file, dest_dir )
except Exception, e:
- print "Error unpacking tar/gz archive: %s" % e, sys.stderr
\ No newline at end of file
+ print "Error unpacking tar/gz archive: %s" % e, sys.stderr
diff -r 0ab0eb81d9828b0527d68f404a36acc64359f5f8 -r b928679cc4acd843e8a6dcfb24c89f5a0b40d3a6 lib/galaxy/webapps/galaxy/api/histories.py
--- a/lib/galaxy/webapps/galaxy/api/histories.py
+++ b/lib/galaxy/webapps/galaxy/api/histories.py
@@ -11,15 +11,17 @@
from galaxy import web
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
-from galaxy.util import string_as_bool, restore_text
-from galaxy.util.sanitize_html import sanitize_html
-from galaxy.web.base.controller import BaseAPIController, UsesHistoryMixin, UsesTagsMixin
+from galaxy.util import string_as_bool
+from galaxy.util import restore_text
+from galaxy.web.base.controller import BaseAPIController
+from galaxy.web.base.controller import UsesHistoryMixin
+from galaxy.web.base.controller import UsesTagsMixin
from galaxy.web import url_for
-from galaxy.model.orm import desc
import logging
log = logging.getLogger( __name__ )
+
class HistoriesController( BaseAPIController, UsesHistoryMixin, UsesTagsMixin ):
@expose_api_anonymous
@@ -46,14 +48,14 @@
histories = self.get_user_histories( trans, user=trans.user, only_deleted=deleted )
#for history in query:
for history in histories:
- item = history.to_dict(value_mapper={'id':trans.security.encode_id})
+ item = history.to_dict(value_mapper={'id': trans.security.encode_id})
item['url'] = url_for( 'history', id=trans.security.encode_id( history.id ) )
rval.append( item )
elif trans.galaxy_session.current_history:
#No user, this must be session authentication with an anonymous user.
history = trans.galaxy_session.current_history
- item = history.to_dict(value_mapper={'id':trans.security.encode_id})
+ item = history.to_dict(value_mapper={'id': trans.security.encode_id})
item['url'] = url_for( 'history', id=trans.security.encode_id( history.id ) )
rval.append(item)
@@ -257,7 +259,7 @@
log.exception( 'Histories API, delete: uncaught HTTPInternalServerError: %s, %s\n%s',
history_id, str( kwd ), str( http_server_err ) )
raise
- except HTTPException, http_exc:
+ except HTTPException:
raise
except Exception, exc:
log.exception( 'Histories API, delete: uncaught exception: %s, %s\n%s',
https://bitbucket.org/galaxy/galaxy-central/commits/f4485c8dd5f5/
Changeset: f4485c8dd5f5
Branch: job-search
User: Kyle Ellrott
Date: 2014-01-22 02:30:47
Summary: Adding in /api/jobs and refining the Job.to_dict method
Affected #: 4 files
diff -r 54defa390a91ec1db34141f97711e673218cde13 -r f4485c8dd5f5392f1cc8a1ed296fb772064f1e10 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -209,8 +209,8 @@
class Job( object, Dictifiable ):
- dict_collection_visible_keys = [ 'id', 'state', 'exit_code' ]
- dict_element_visible_keys = [ 'id', 'state', 'exit_code' ]
+ dict_collection_visible_keys = [ 'id', 'state', 'exit_code', 'update_time', 'create_time' ]
+ dict_element_visible_keys = [ 'id', 'state', 'exit_code', 'update_time', 'create_time' ]
"""
A job represents a request to run a tool given input datasets, tool
@@ -416,30 +416,31 @@
dataset.info = 'Job output deleted by user before job completed'
def to_dict( self, view='collection' ):
rval = super( Job, self ).to_dict( view=view )
- rval['tool_name'] = self.tool_id
- param_dict = dict( [ ( p.name, p.value ) for p in self.parameters ] )
- rval['params'] = param_dict
+ if view == 'element':
+ rval['tool_name'] = self.tool_id
+ param_dict = dict( [ ( p.name, p.value ) for p in self.parameters ] )
+ rval['params'] = param_dict
- input_dict = {}
- for i in self.input_datasets:
- if i.dataset is not None:
- input_dict[i.name] = {"hda_id" : i.dataset.id}
- for i in self.input_library_datasets:
- if i.dataset is not None:
- input_dict[i.name] = {"ldda_id" : i.dataset.id}
- for k in input_dict:
- if k in param_dict:
- del param_dict[k]
- rval['inputs'] = input_dict
+ input_dict = {}
+ for i in self.input_datasets:
+ if i.dataset is not None:
+ input_dict[i.name] = {"id" : i.dataset.id, "src" : "hda"}
+ for i in self.input_library_datasets:
+ if i.dataset is not None:
+ input_dict[i.name] = {"id" : i.dataset.id, "src" : "ldda"}
+ for k in input_dict:
+ if k in param_dict:
+ del param_dict[k]
+ rval['inputs'] = input_dict
- output_dict = {}
- for i in self.output_datasets:
- if i.dataset is not None:
- output_dict[i.name] = {"hda_id" : i.dataset.id}
- for i in self.output_library_datasets:
- if i.dataset is not None:
- output_dict[i.name] = {"ldda_id" : i.dataset.id}
- rval['outputs'] = output_dict
+ output_dict = {}
+ for i in self.output_datasets:
+ if i.dataset is not None:
+ output_dict[i.name] = {"id" : i.dataset.id, "src" : "hda"}
+ for i in self.output_library_datasets:
+ if i.dataset is not None:
+ output_dict[i.name] = {"id" : i.dataset.id, "src" : "ldda"}
+ rval['outputs'] = output_dict
return rval
diff -r 54defa390a91ec1db34141f97711e673218cde13 -r f4485c8dd5f5392f1cc8a1ed296fb772064f1e10 lib/galaxy/webapps/galaxy/api/jobs.py
--- /dev/null
+++ b/lib/galaxy/webapps/galaxy/api/jobs.py
@@ -0,0 +1,84 @@
+"""
+API operations on a jobs.
+
+.. seealso:: :class:`galaxy.model.Jobs`
+"""
+
+import pkg_resources
+pkg_resources.require( "Paste" )
+from paste.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPException
+
+from sqlalchemy import or_
+
+from galaxy import web
+from galaxy.web import _future_expose_api as expose_api
+from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
+from galaxy.util import string_as_bool, restore_text
+from galaxy.util.sanitize_html import sanitize_html
+from galaxy.web.base.controller import BaseAPIController
+from galaxy.web import url_for
+from galaxy.model.orm import desc
+
+import logging
+log = logging.getLogger( __name__ )
+
+class HistoriesController( BaseAPIController ):
+
+ @web.expose_api
+ def index( self, trans, **kwd ):
+ """
+ index( trans, state=None )
+ * GET /api/jobs:
+ return jobs for current user
+
+ :type state: string or list
+ :param state: limit listing of jobs to those that match one of the included states. If none, all are returned.
+ Valid Galaxy job states include:
+ 'new', 'upload', 'waiting', 'queued', 'running', 'ok', 'error', 'paused', 'deleted', 'deleted_new'
+
+ :rtype: list
+ :returns: list of dictionaries containing summary job information
+ """
+
+ state = kwd.get('state', None)
+ query = trans.sa_session.query(trans.app.model.Job).filter(
+ trans.app.model.Job.user == trans.user )
+ if state is not None:
+ if isinstance(state, basestring):
+ query = query.filter( trans.app.model.Job.state == state )
+ elif isinstance(state, list):
+ t = []
+ for s in state:
+ t.append( trans.app.model.Job.state == s )
+ query = query.filter( or_( *t ) )
+
+ out = []
+ for job in query.order_by(
+ trans.app.model.Job.update_time.desc()
+ ).all():
+ out.append( self.encode_all_ids( trans, job.to_dict('collection'), True) )
+ return out
+
+ @web.expose_api
+ def show( self, trans, id, **kwd ):
+ decoded_job_id = trans.security.decode_id(id)
+ query = trans.sa_session.query(trans.app.model.Job).filter(
+ trans.app.model.Job.user == trans.user,
+ trans.app.model.Job.id == decoded_job_id)
+ job = query.first()
+ if job is None:
+ return None
+ return self.encode_all_ids( trans, job.to_dict('element'), True)
+
+ @expose_api
+ def create( self, trans, payload, **kwd ):
+ error = None
+ if 'tool_id' not in payload:
+ error = "No tool ID"
+
+ tool_id = payload.get('tool_id')
+
+ if error is not None:
+ return { "error" : error }
+
+
diff -r 54defa390a91ec1db34141f97711e673218cde13 -r f4485c8dd5f5392f1cc8a1ed296fb772064f1e10 lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -78,7 +78,12 @@
# -- Execute tool. --
# Get tool.
- tool = trans.app.toolbox.get_tool( payload[ 'tool_id' ] ) if 'tool_id' in payload else None
+ tool = None
+ if 'tool_id' in payload:
+ tool = trans.app.toolbox.get_tool( payload[ 'tool_id' ] )
+ if 'tool_name' in payload:
+ #in job descriptions it is called 'tool_name' to avoid having the name 'crushed'
+ tool = trans.app.toolbox.get_tool( payload[ 'tool_name' ] )
if not tool:
trans.response.status = 404
return { "message": { "type": "error", "text" : trans.app.model.Dataset.conversion_messages.NO_TOOL } }
diff -r 54defa390a91ec1db34141f97711e673218cde13 -r f4485c8dd5f5392f1cc8a1ed296fb772064f1e10 lib/galaxy/webapps/galaxy/buildapp.py
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -228,6 +228,12 @@
path_prefix='/api/libraries/:library_id',
parent_resources=dict( member_name='library', collection_name='libraries' ) )
+ webapp.mapper.resource( 'job',
+ 'jobs',
+ path_prefix='/api' )
+ #webapp.mapper.connect( 'job_item', '/api/jobs/:job_id', controller='jobs', action='show', conditions=dict( method=['GET'] ) )
+
+
_add_item_extended_metadata_controller( webapp,
name_prefix="library_dataset_",
path_prefix='/api/libraries/:library_id/contents/:library_content_id' )
https://bitbucket.org/galaxy/galaxy-central/commits/f78bab3d44f0/
Changeset: f78bab3d44f0
Branch: job-search
User: Kyle Ellrott
Date: 2014-01-22 23:09:36
Summary: Adding job search method that attempts to find previous tool runs with the same requested inputs and outputs.
Affected #: 2 files
diff -r f4485c8dd5f5392f1cc8a1ed296fb772064f1e10 -r f78bab3d44f00b245beebce45cac2a2287c8f4ce lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -416,8 +416,8 @@
dataset.info = 'Job output deleted by user before job completed'
def to_dict( self, view='collection' ):
rval = super( Job, self ).to_dict( view=view )
+ rval['tool_name'] = self.tool_id
if view == 'element':
- rval['tool_name'] = self.tool_id
param_dict = dict( [ ( p.name, p.value ) for p in self.parameters ] )
rval['params'] = param_dict
diff -r f4485c8dd5f5392f1cc8a1ed296fb772064f1e10 -r f78bab3d44f00b245beebce45cac2a2287c8f4ce lib/galaxy/webapps/galaxy/api/jobs.py
--- a/lib/galaxy/webapps/galaxy/api/jobs.py
+++ b/lib/galaxy/webapps/galaxy/api/jobs.py
@@ -8,21 +8,22 @@
pkg_resources.require( "Paste" )
from paste.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPException
-from sqlalchemy import or_
-
+from sqlalchemy import or_, and_
+from sqlalchemy.orm import aliased
+import json
from galaxy import web
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
from galaxy.util import string_as_bool, restore_text
from galaxy.util.sanitize_html import sanitize_html
-from galaxy.web.base.controller import BaseAPIController
+from galaxy.web.base.controller import BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesLibraryMixinItems
from galaxy.web import url_for
from galaxy.model.orm import desc
import logging
log = logging.getLogger( __name__ )
-class HistoriesController( BaseAPIController ):
+class HistoriesController( BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesLibraryMixinItems ):
@web.expose_api
def index( self, trans, **kwd ):
@@ -61,6 +62,18 @@
@web.expose_api
def show( self, trans, id, **kwd ):
+ """
+ show( trans, id )
+ * GET /api/jobs/{job_id}:
+ return a single job for the current user
+
+ :type id: string
+ :param id: Specific job id
+
+ :rtype: dictionary
+ :returns: dictionary containing full description of job data
+ """
+
decoded_job_id = trans.security.decode_id(id)
query = trans.sa_session.query(trans.app.model.Job).filter(
trans.app.model.Job.user == trans.user,
@@ -72,13 +85,100 @@
@expose_api
def create( self, trans, payload, **kwd ):
+ """
+ show( trans, payload )
+ * POST /api/jobs:
+ return jobs for the current user that match the requested job run
+
+ :type payload: dict
+ :param payload: Dictionary containing description of requested job. This is in the same format as
+ a request to POST /api/tools would take to initiate a job
+
+ :rtype: list
+ :returns: list of dictionaries containing summary job information of the jobs that match the requested job run
+
+ This method is designed to scan the list of previously run jobs and find records of jobs that had
+ the exact same input parameters and datasets. This can be used to minimize the amount of repeated work, and simply
+ recycle the old results.
+ """
+
error = None
- if 'tool_id' not in payload:
+ tool_id = None
+ if 'tool_id' in payload:
+ tool_id = payload.get('tool_id')
+ if 'tool_name' in payload:
+ tool_id = payload.get('tool_name')
+
+ tool = trans.app.toolbox.get_tool( tool_id )
+ if tool is None:
+ error = "Requested tool not found"
+ if 'inputs' not in payload:
+ error = "No inputs defined"
+ if tool_id is None:
error = "No tool ID"
-
- tool_id = payload.get('tool_id')
-
if error is not None:
return { "error" : error }
+ inputs = payload['inputs']
+ input_data = {}
+ input_param = {}
+ for k, v in inputs.items():
+ if isinstance(v,dict):
+ if 'id' in v:
+ try:
+ if 'src' not in v or v['src'] == 'hda':
+ dataset = self.get_dataset( trans, v['id'], check_ownership=False, check_accessible=True )
+ else:
+ dataset = self.get_library_dataset_dataset_association( trans, v['id'] )
+ except Exception, e:
+ return { "error" : str( e ) }
+ if dataset is None:
+ return { "error" : "Dataset %s not found" % (v['id']) }
+ input_data[k] = dataset.dataset_id
+ else:
+ input_param[k] = json.dumps(v)
+
+ query = trans.sa_session.query( trans.app.model.Job ).filter(
+ trans.app.model.Job.tool_id == tool_id,
+ trans.app.model.Job.user == trans.user
+ ).filter(
+ or_(
+ trans.app.model.Job.state == 'running',
+ trans.app.model.Job.state == 'queued',
+ trans.app.model.Job.state == 'waiting',
+ trans.app.model.Job.state == 'running',
+ trans.app.model.Job.state == 'ok',
+ )
+ )
+
+ for k,v in input_param.items():
+ a = aliased(trans.app.model.JobParameter)
+ query = query.filter( and_(
+ trans.app.model.Job.id == a.job_id,
+ a.name == k,
+ a.value == v
+ ))
+
+ for k,v in input_data.items():
+ """
+ Here we are attempting to link the inputs to the underlying dataset (not the dataset association)
+ This way, if the calculation was done using a copied HDA (copied from the library or another history)
+ the search will still find the job
+ """
+ a = aliased(trans.app.model.JobToInputDatasetAssociation)
+ b = aliased(trans.app.model.HistoryDatasetAssociation)
+ query = query.filter( and_(
+ trans.app.model.Job.id == a.job_id,
+ a.dataset_id == b.id,
+ b.deleted == False,
+ b.dataset_id == v
+ ))
+ out = []
+ for job in query.all():
+ """
+ check to make sure none of the output files have been deleted
+ """
+ if all(list(a.dataset.deleted == False for a in job.output_datasets)):
+ out.append( self.encode_all_ids( trans, job.to_dict('element'), True) )
+ return out
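
The parameter matching above relies on one aliased JobParameter per requested name/value pair, so several constraints can be applied to the same job row without colliding. A minimal standalone sketch of that pattern; the helper name and its arguments are illustrative and not part of the changeset:

    from sqlalchemy import and_
    from sqlalchemy.orm import aliased

    def filter_jobs_by_params( query, Job, JobParameter, wanted ):
        # One alias per name/value pair: each requested parameter joins
        # against its own JobParameter row, so the filters do not collide.
        for name, value in wanted.items():
            p = aliased( JobParameter )
            query = query.filter( and_(
                Job.id == p.job_id,
                p.name == name,
                p.value == value
            ) )
        return query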
https://bitbucket.org/galaxy/galaxy-central/commits/3284b02d842a/
Changeset: 3284b02d842a
Branch: job-search
User: kellrott
Date: 2014-01-24 21:05:03
Summary: Modified patch behavior to match comments on the pull request
Affected #: 5 files
diff -r f78bab3d44f00b245beebce45cac2a2287c8f4ce -r 3284b02d842a48c92bab4669982231ab0ac3ca98 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -416,7 +416,7 @@
dataset.info = 'Job output deleted by user before job completed'
def to_dict( self, view='collection' ):
rval = super( Job, self ).to_dict( view=view )
- rval['tool_name'] = self.tool_id
+ rval['tool_id'] = self.tool_id
if view == 'element':
param_dict = dict( [ ( p.name, p.value ) for p in self.parameters ] )
rval['params'] = param_dict
diff -r f78bab3d44f00b245beebce45cac2a2287c8f4ce -r 3284b02d842a48c92bab4669982231ab0ac3ca98 lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -161,7 +161,7 @@
if type( rval ) != dict:
return rval
for k, v in rval.items():
- if (k == 'id' or k.endswith( '_id' )) and v is not None:
+ if (k == 'id' or k.endswith( '_id' )) and v is not None and k not in ['tool_id']:
try:
rval[k] = trans.security.encode_id( v )
except:
diff -r f78bab3d44f00b245beebce45cac2a2287c8f4ce -r 3284b02d842a48c92bab4669982231ab0ac3ca98 lib/galaxy/webapps/galaxy/api/jobs.py
--- a/lib/galaxy/webapps/galaxy/api/jobs.py
+++ b/lib/galaxy/webapps/galaxy/api/jobs.py
@@ -23,7 +23,7 @@
import logging
log = logging.getLogger( __name__ )
-class HistoriesController( BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesLibraryMixinItems ):
+class JobController( BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesLibraryMixinItems ):
@web.expose_api
def index( self, trans, **kwd ):
@@ -85,9 +85,13 @@
@expose_api
def create( self, trans, payload, **kwd ):
+ raise NotImplementedError()
+
+ @expose_api
+ def search(self, trans, payload, **kwd):
"""
- show( trans, payload )
- * POST /api/jobs:
+ search( trans, payload )
+ * POST /api/jobs/search:
return jobs for the current user that match the requested job run
:type payload: dict
@@ -106,8 +110,6 @@
tool_id = None
if 'tool_id' in payload:
tool_id = payload.get('tool_id')
- if 'tool_name' in payload:
- tool_id = payload.get('tool_name')
tool = trans.app.toolbox.get_tool( tool_id )
if tool is None:
@@ -142,15 +144,28 @@
query = trans.sa_session.query( trans.app.model.Job ).filter(
trans.app.model.Job.tool_id == tool_id,
trans.app.model.Job.user == trans.user
- ).filter(
- or_(
- trans.app.model.Job.state == 'running',
- trans.app.model.Job.state == 'queued',
- trans.app.model.Job.state == 'waiting',
- trans.app.model.Job.state == 'running',
- trans.app.model.Job.state == 'ok',
+ )
+
+ if 'state' not in payload:
+ query = query.filter(
+ or_(
+ trans.app.model.Job.state == 'running',
+ trans.app.model.Job.state == 'queued',
+ trans.app.model.Job.state == 'waiting',
+ trans.app.model.Job.state == 'running',
+ trans.app.model.Job.state == 'ok',
+ )
)
- )
+ else:
+ if isinstance(payload['state'], basestring):
+ query = query.filter( trans.app.model.Job.state == payload['state'] )
+ elif isinstance(payload['state'], list):
+ o = []
+ for s in payload['state']:
+ o.append( trans.app.model.Job.state == s )
+ query = query.filter(
+ or_(*o)
+ )
for k,v in input_param.items():
a = aliased(trans.app.model.JobParameter)
diff -r f78bab3d44f00b245beebce45cac2a2287c8f4ce -r 3284b02d842a48c92bab4669982231ab0ac3ca98 lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -78,12 +78,7 @@
# -- Execute tool. --
# Get tool.
- tool = None
- if 'tool_id' in payload:
- tool = trans.app.toolbox.get_tool( payload[ 'tool_id' ] )
- if 'tool_name' in payload:
- #in job descriptions it is called 'tool_name' to avoid having the name 'crushed'
- tool = trans.app.toolbox.get_tool( payload[ 'tool_name' ] )
+ tool = trans.app.toolbox.get_tool( payload[ 'tool_id' ] ) if 'tool_id' in payload else None
if not tool:
trans.response.status = 404
return { "message": { "type": "error", "text" : trans.app.model.Dataset.conversion_messages.NO_TOOL } }
diff -r f78bab3d44f00b245beebce45cac2a2287c8f4ce -r 3284b02d842a48c92bab4669982231ab0ac3ca98 lib/galaxy/webapps/galaxy/buildapp.py
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -231,7 +231,7 @@
webapp.mapper.resource( 'job',
'jobs',
path_prefix='/api' )
- #webapp.mapper.connect( 'job_item', '/api/jobs/:job_id', controller='jobs', action='show', conditions=dict( method=['GET'] ) )
+ webapp.mapper.connect( 'job_search', '/api/jobs/search', controller='jobs', action='search', conditions=dict( method=['POST'] ) )
_add_item_extended_metadata_controller( webapp,
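
With the route above in place, a search is a POST to /api/jobs/search whose body has the same shape as a tool-run request, plus an optional state filter. A sketch of that payload as a plain Python mapping; the dataset id is a placeholder, and how the mapping is serialized on the wire depends on the client:

    encoded_hda_id = "f2db41e1fa331b3e"     # placeholder encoded dataset id

    # Same shape a tool-run request uses, plus an optional 'state' filter,
    # which may be a single state string or a list of states.
    search_payload = {
        "tool_id": "cat1",
        "inputs": {
            "input1": { "src": "hda", "id": encoded_hda_id },
        },
        "state": [ "ok", "running" ],
    }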
https://bitbucket.org/galaxy/galaxy-central/commits/558fb7b96b49/
Changeset: 558fb7b96b49
Branch: job-search
User: kellrott
Date: 2014-01-24 22:59:36
Summary: Adding more standard exceptions
Affected #: 1 file
diff -r 3284b02d842a48c92bab4669982231ab0ac3ca98 -r 558fb7b96b49bdcec0321a0c8d50afbf37ff972b lib/galaxy/webapps/galaxy/api/jobs.py
--- a/lib/galaxy/webapps/galaxy/api/jobs.py
+++ b/lib/galaxy/webapps/galaxy/api/jobs.py
@@ -19,6 +19,7 @@
from galaxy.web.base.controller import BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesLibraryMixinItems
from galaxy.web import url_for
from galaxy.model.orm import desc
+from galaxy import exceptions
import logging
log = logging.getLogger( __name__ )
@@ -73,14 +74,16 @@
:rtype: dictionary
:returns: dictionary containing full description of job data
"""
-
- decoded_job_id = trans.security.decode_id(id)
+ try:
+ decoded_job_id = trans.security.decode_id(id)
+ except:
+ raise exceptions.ObjectAttributeInvalidException()
query = trans.sa_session.query(trans.app.model.Job).filter(
trans.app.model.Job.user == trans.user,
trans.app.model.Job.id == decoded_job_id)
job = query.first()
if job is None:
- return None
+ raise exceptions.ObjectNotFound()
return self.encode_all_ids( trans, job.to_dict('element'), True)
@expose_api
@@ -106,20 +109,17 @@
recycle the old results.
"""
- error = None
tool_id = None
if 'tool_id' in payload:
tool_id = payload.get('tool_id')
+ if tool_id is None:
+ raise exceptions.ObjectAttributeMissingException("No tool id")
tool = trans.app.toolbox.get_tool( tool_id )
if tool is None:
- error = "Requested tool not found"
+ raise exceptions.ObjectNotFound( "Requested tool not found" )
if 'inputs' not in payload:
- error = "No inputs defined"
- if tool_id is None:
- error = "No tool ID"
- if error is not None:
- return { "error" : error }
+ raise exceptions.ObjectAttributeMissingException("No inputs defined")
inputs = payload['inputs']
@@ -136,7 +136,7 @@
except Exception, e:
return { "error" : str( e ) }
if dataset is None:
- return { "error" : "Dataset %s not found" % (v['id']) }
+ raise exceptions.ObjectNotFound("Dataset %s not found" % (v['id']))
input_data[k] = dataset.dataset_id
else:
input_param[k] = json.dumps(v)
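
From a client's point of view, raising these exceptions instead of returning None or an error dictionary means a malformed or unknown job id now comes back as an error response. A rough sketch with placeholder URL and key, without assuming a particular status code:

    import requests

    GALAXY_URL = "http://localhost:8080"    # placeholder Galaxy instance
    API_KEY = "YOUR_API_KEY"                # placeholder user API key

    response = requests.get( "%s/api/jobs/%s" % ( GALAXY_URL, "not-a-real-id" ),
                             params={ "key": API_KEY } )
    if response.status_code != 200:
        # A bad or unknown id now surfaces as an error response instead of an
        # empty body; the exact status code comes from the API framework.
        print response.status_code, response.text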
https://bitbucket.org/galaxy/galaxy-central/commits/95747eacf09c/
Changeset: 95747eacf09c
Branch: job-search
User: jmchilton
Date: 2014-01-29 06:29:15
Summary: PEP-8 and other style adjustments for new jobs API.
Affected #: 1 file
diff -r 558fb7b96b49bdcec0321a0c8d50afbf37ff972b -r 95747eacf09c21335323131382de433409b86e16 lib/galaxy/webapps/galaxy/api/jobs.py
--- a/lib/galaxy/webapps/galaxy/api/jobs.py
+++ b/lib/galaxy/webapps/galaxy/api/jobs.py
@@ -4,26 +4,20 @@
.. seealso:: :class:`galaxy.model.Jobs`
"""
-import pkg_resources
-pkg_resources.require( "Paste" )
-from paste.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPException
-
from sqlalchemy import or_, and_
from sqlalchemy.orm import aliased
import json
from galaxy import web
from galaxy.web import _future_expose_api as expose_api
-from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
-from galaxy.util import string_as_bool, restore_text
-from galaxy.util.sanitize_html import sanitize_html
-from galaxy.web.base.controller import BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesLibraryMixinItems
-from galaxy.web import url_for
-from galaxy.model.orm import desc
+from galaxy.web.base.controller import BaseAPIController
+from galaxy.web.base.controller import UsesHistoryDatasetAssociationMixin
+from galaxy.web.base.controller import UsesLibraryMixinItems
from galaxy import exceptions
import logging
log = logging.getLogger( __name__ )
+
class JobController( BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesLibraryMixinItems ):
@web.expose_api
@@ -42,13 +36,14 @@
:returns: list of dictionaries containing summary job information
"""
- state = kwd.get('state', None)
- query = trans.sa_session.query(trans.app.model.Job).filter(
- trans.app.model.Job.user == trans.user )
+ state = kwd.get( 'state', None )
+ query = trans.sa_session.query( trans.app.model.Job ).filter(
+ trans.app.model.Job.user == trans.user
+ )
if state is not None:
- if isinstance(state, basestring):
+ if isinstance( state, basestring ):
query = query.filter( trans.app.model.Job.state == state )
- elif isinstance(state, list):
+ elif isinstance( state, list ):
t = []
for s in state:
t.append( trans.app.model.Job.state == s )
@@ -58,7 +53,7 @@
for job in query.order_by(
trans.app.model.Job.update_time.desc()
).all():
- out.append( self.encode_all_ids( trans, job.to_dict('collection'), True) )
+ out.append( self.encode_all_ids( trans, job.to_dict( 'collection' ), True ) )
return out
@web.expose_api
@@ -78,20 +73,21 @@
decoded_job_id = trans.security.decode_id(id)
except:
raise exceptions.ObjectAttributeInvalidException()
- query = trans.sa_session.query(trans.app.model.Job).filter(
+ query = trans.sa_session.query( trans.app.model.Job ).filter(
trans.app.model.Job.user == trans.user,
- trans.app.model.Job.id == decoded_job_id)
+ trans.app.model.Job.id == decoded_job_id
+ )
job = query.first()
if job is None:
raise exceptions.ObjectNotFound()
- return self.encode_all_ids( trans, job.to_dict('element'), True)
+ return self.encode_all_ids( trans, job.to_dict( 'element' ), True )
@expose_api
def create( self, trans, payload, **kwd ):
raise NotImplementedError()
@expose_api
- def search(self, trans, payload, **kwd):
+ def search( self, trans, payload, **kwd ):
"""
search( trans, payload )
* POST /api/jobs/search:
@@ -104,96 +100,97 @@
:rtype: list
:returns: list of dictionaries containing summary job information of the jobs that match the requested job run
- This method is designed to scan the list of previously run jobs and find records of jobs that had
- the exact same input parameters and datasets. This can be used to minimize the amount of repeated work, and simply
+ This method is designed to scan the list of previously run jobs and find records of jobs that had
+ the exact same input parameters and datasets. This can be used to minimize the amount of repeated work, and simply
recycle the old results.
"""
tool_id = None
if 'tool_id' in payload:
- tool_id = payload.get('tool_id')
+ tool_id = payload.get( 'tool_id' )
if tool_id is None:
- raise exceptions.ObjectAttributeMissingException("No tool id")
+ raise exceptions.ObjectAttributeMissingException( "No tool id" )
tool = trans.app.toolbox.get_tool( tool_id )
if tool is None:
raise exceptions.ObjectNotFound( "Requested tool not found" )
if 'inputs' not in payload:
- raise exceptions.ObjectAttributeMissingException("No inputs defined")
+ raise exceptions.ObjectAttributeMissingException( "No inputs defined" )
- inputs = payload['inputs']
+ inputs = payload[ 'inputs' ]
input_data = {}
input_param = {}
for k, v in inputs.items():
- if isinstance(v,dict):
+ if isinstance( v, dict ):
if 'id' in v:
try:
- if 'src' not in v or v['src'] == 'hda':
+ if 'src' not in v or v[ 'src' ] == 'hda':
dataset = self.get_dataset( trans, v['id'], check_ownership=False, check_accessible=True )
else:
dataset = self.get_library_dataset_dataset_association( trans, v['id'] )
except Exception, e:
return { "error" : str( e ) }
if dataset is None:
- raise exceptions.ObjectNotFound("Dataset %s not found" % (v['id']))
+ raise exceptions.ObjectNotFound( "Dataset %s not found" % ( v[ 'id' ] ) )
input_data[k] = dataset.dataset_id
else:
- input_param[k] = json.dumps(v)
+ input_param[k] = json.dumps( v )
- query = trans.sa_session.query( trans.app.model.Job ).filter(
- trans.app.model.Job.tool_id == tool_id,
+ query = trans.sa_session.query( trans.app.model.Job ).filter(
+ trans.app.model.Job.tool_id == tool_id,
trans.app.model.Job.user == trans.user
)
if 'state' not in payload:
query = query.filter(
- or_(
- trans.app.model.Job.state == 'running',
- trans.app.model.Job.state == 'queued',
- trans.app.model.Job.state == 'waiting',
+ or_(
trans.app.model.Job.state == 'running',
- trans.app.model.Job.state == 'ok',
+ trans.app.model.Job.state == 'queued',
+ trans.app.model.Job.state == 'waiting',
+ trans.app.model.Job.state == 'running',
+ trans.app.model.Job.state == 'ok',
)
)
else:
- if isinstance(payload['state'], basestring):
- query = query.filter( trans.app.model.Job.state == payload['state'] )
- elif isinstance(payload['state'], list):
+ if isinstance( payload[ 'state' ], basestring ):
+ query = query.filter( trans.app.model.Job.state == payload[ 'state' ] )
+ elif isinstance( payload[ 'state' ], list ):
o = []
- for s in payload['state']:
+ for s in payload[ 'state' ]:
o.append( trans.app.model.Job.state == s )
query = query.filter(
- or_(*o)
+ or_( *o )
)
- for k,v in input_param.items():
- a = aliased(trans.app.model.JobParameter)
+ for k, v in input_param.items():
+ a = aliased( trans.app.model.JobParameter )
query = query.filter( and_(
trans.app.model.Job.id == a.job_id,
a.name == k,
a.value == v
- ))
+ ) )
- for k,v in input_data.items():
+ for k, v in input_data.items():
"""
Here we are attempting to link the inputs to the underlying dataset (not the dataset association)
This way, if the calculation was done using a copied HDA (copied from the library or another history)
the search will still find the job
"""
- a = aliased(trans.app.model.JobToInputDatasetAssociation)
- b = aliased(trans.app.model.HistoryDatasetAssociation)
+ a = aliased( trans.app.model.JobToInputDatasetAssociation )
+ b = aliased( trans.app.model.HistoryDatasetAssociation )
query = query.filter( and_(
trans.app.model.Job.id == a.job_id,
a.dataset_id == b.id,
b.deleted == False,
b.dataset_id == v
- ))
+ ) )
+
out = []
for job in query.all():
"""
check to make sure none of the output files have been deleted
"""
- if all(list(a.dataset.deleted == False for a in job.output_datasets)):
- out.append( self.encode_all_ids( trans, job.to_dict('element'), True) )
+ if all( list( a.dataset.deleted == False for a in job.output_datasets ) ):
+ out.append( self.encode_all_ids( trans, job.to_dict( 'element' ), True ) )
return out
https://bitbucket.org/galaxy/galaxy-central/commits/e20e9d60eb80/
Changeset: e20e9d60eb80
Branch: job-search
User: jmchilton
Date: 2014-01-29 06:34:04
Summary: Tweak Jobs API to only use new style API decorator, exception handling.
Affected #: 1 file
diff -r 95747eacf09c21335323131382de433409b86e16 -r e20e9d60eb8017a241976912bfceb30dd316b20e lib/galaxy/webapps/galaxy/api/jobs.py
--- a/lib/galaxy/webapps/galaxy/api/jobs.py
+++ b/lib/galaxy/webapps/galaxy/api/jobs.py
@@ -7,7 +7,6 @@
from sqlalchemy import or_, and_
from sqlalchemy.orm import aliased
import json
-from galaxy import web
from galaxy.web import _future_expose_api as expose_api
from galaxy.web.base.controller import BaseAPIController
from galaxy.web.base.controller import UsesHistoryDatasetAssociationMixin
@@ -20,7 +19,7 @@
class JobController( BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesLibraryMixinItems ):
- @web.expose_api
+ @expose_api
def index( self, trans, **kwd ):
"""
index( trans, state=None )
@@ -56,7 +55,7 @@
out.append( self.encode_all_ids( trans, job.to_dict( 'collection' ), True ) )
return out
- @web.expose_api
+ @expose_api
def show( self, trans, id, **kwd ):
"""
show( trans, id )
@@ -124,13 +123,10 @@
for k, v in inputs.items():
if isinstance( v, dict ):
if 'id' in v:
- try:
- if 'src' not in v or v[ 'src' ] == 'hda':
- dataset = self.get_dataset( trans, v['id'], check_ownership=False, check_accessible=True )
- else:
- dataset = self.get_library_dataset_dataset_association( trans, v['id'] )
- except Exception, e:
- return { "error" : str( e ) }
+ if 'src' not in v or v[ 'src' ] == 'hda':
+ dataset = self.get_dataset( trans, v['id'], check_ownership=False, check_accessible=True )
+ else:
+ dataset = self.get_library_dataset_dataset_association( trans, v['id'] )
if dataset is None:
raise exceptions.ObjectNotFound( "Dataset %s not found" % ( v[ 'id' ] ) )
input_data[k] = dataset.dataset_id
https://bitbucket.org/galaxy/galaxy-central/commits/8d408c81c6c4/
Changeset: 8d408c81c6c4
Branch: job-search
User: jmchilton
Date: 2014-01-29 06:34:29
Summary: Add functional tests for new jobs API.
Affected #: 1 file
diff -r e20e9d60eb8017a241976912bfceb30dd316b20e -r 8d408c81c6c4abafce7461cab6c5e4b677e238d5 test/functional/api/test_jobs.py
--- /dev/null
+++ b/test/functional/api/test_jobs.py
@@ -0,0 +1,118 @@
+import json
+from operator import itemgetter
+
+from base import api
+
+from .helpers import TestsDatasets
+
+
+class JobsApiTestCase( api.ApiTestCase, TestsDatasets ):
+
+ def test_index( self ):
+ # Create HDA to ensure at least one job exists...
+ self.__history_with_new_dataset()
+ jobs_response = self._get( "jobs" )
+
+ self._assert_status_code_is( jobs_response, 200 )
+
+ jobs = jobs_response.json()
+ assert isinstance( jobs, list )
+ assert "upload1" in map( itemgetter( "tool_id" ), jobs )
+
+ def test_index_state_filter( self ):
+ # Initial number of ok jobs
+ original_count = len( self.__uploads_with_state( "ok" ) )
+
+ # Run through dataset upload to ensure the number of uploads
+ # grows by at least 1.
+ self.__history_with_ok_dataset()
+
+ # Verify number of ok jobs is actually greater.
+ new_count = len( self.__uploads_with_state( "ok" ) )
+ assert original_count < new_count
+
+ def test_index_multiple_states_filter( self ):
+ # Initial number of ok jobs
+ original_count = len( self.__uploads_with_state( "ok", "new" ) )
+
+ # Run through dataset upload to ensure the number of uploads
+ # grows by at least 1.
+ self.__history_with_ok_dataset()
+
+ # Verify number of ok jobs is actually greater.
+ new_count = len( self.__uploads_with_state( "new", "ok" ) )
+ assert original_count < new_count, new_count
+
+ def test_show( self ):
+ # Create HDA to ensure at least one job exists...
+ self.__history_with_new_dataset()
+
+ jobs_response = self._get( "jobs" )
+ first_job = jobs_response.json()[ 0 ]
+ self._assert_has_key( first_job, 'id', 'state', 'exit_code', 'update_time', 'create_time' )
+
+ job_id = first_job[ "id" ]
+ show_jobs_response = self._get( "jobs/%s" % job_id )
+ self._assert_status_code_is( show_jobs_response, 200 )
+
+ job_details = show_jobs_response.json()
+ self._assert_has_key( job_details, 'id', 'state', 'exit_code', 'update_time', 'create_time' )
+
+ def test_search( self ):
+ history_id, dataset_id = self.__history_with_ok_dataset()
+
+ inputs = json.dumps(
+ dict(
+ input1=dict(
+ src='hda',
+ id=dataset_id,
+ )
+ )
+ )
+ search_payload = dict(
+ tool_id="cat1",
+ inputs=inputs,
+ state="ok",
+ )
+
+ empty_search_response = self._post( "jobs/search", data=search_payload )
+ self._assert_status_code_is( empty_search_response, 200 )
+ assert len( empty_search_response.json() ) == 0
+
+ self.__run_cat_tool( history_id, dataset_id )
+ self._wait_for_history( history_id, assert_ok=True )
+
+ search_response = self._post( "jobs/search", data=search_payload )
+ self._assert_status_code_is( empty_search_response, 200 )
+ assert len( search_response.json() ) == 1, search_response.json()
+
+ def __run_cat_tool( self, history_id, dataset_id ):
+ # Code duplication with test_jobs.py, eliminate
+ payload = self._run_tool_payload(
+ tool_id='cat1',
+ inputs=dict(
+ input1=dict(
+ src='hda',
+ id=dataset_id
+ ),
+ ),
+ history_id=history_id,
+ )
+ self._post( "tools", data=payload )
+
+ def __uploads_with_state( self, *states ):
+ jobs_response = self._get( "jobs", data=dict( state=states ) )
+ self._assert_status_code_is( jobs_response, 200 )
+ jobs = jobs_response.json()
+ assert not filter( lambda j: j[ "state" ] not in states, jobs )
+ return filter( lambda j: j[ "tool_id" ] == "upload1", jobs )
+
+ def __history_with_new_dataset( self ):
+ history_id = self._new_history()
+ dataset_id = self._new_dataset( history_id )[ "id" ]
+ return history_id, dataset_id
+
+ def __history_with_ok_dataset( self ):
+ history_id, dataset_id = self.__history_with_new_dataset()
+ self._wait_for_history( history_id, assert_ok=True )
+ return history_id, dataset_id
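
The assertions above pin down the summary dictionary returned per job. Roughly, one entry of the GET /api/jobs listing now looks like the following sketch; all values are made up, and the keys follow Job.dict_collection_visible_keys plus the always-added tool_id:

    # Illustrative values only; keys come from Job.dict_collection_visible_keys
    # plus the 'tool_id' that to_dict() always adds.
    example_job_summary = {
        "id": "f2db41e1fa331b3e",           # encoded job id
        "tool_id": "upload1",
        "state": "ok",
        "exit_code": 0,
        "create_time": "2014-01-29 06:34:29",
        "update_time": "2014-01-29 06:35:01",
    }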
https://bitbucket.org/galaxy/galaxy-central/commits/6d8959c8866c/
Changeset: 6d8959c8866c
User: jmchilton
Date: 2014-01-29 06:57:46
Summary: Merge job-search branch.
Affected #: 6 files
diff -r d3bc7ea60b05f3025de3dca2684f1bce0be2c0eb -r 6d8959c8866c699ee0e9461db2a1fcc0381bfac2 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -211,8 +211,8 @@
class Job( object, Dictifiable ):
- dict_collection_visible_keys = [ 'id', 'state', 'exit_code' ]
- dict_element_visible_keys = [ 'id', 'state', 'exit_code' ]
+ dict_collection_visible_keys = [ 'id', 'state', 'exit_code', 'update_time', 'create_time' ]
+ dict_element_visible_keys = [ 'id', 'state', 'exit_code', 'update_time', 'create_time' ]
"""
A job represents a request to run a tool given input datasets, tool
@@ -418,30 +418,31 @@
dataset.info = 'Job output deleted by user before job completed'
def to_dict( self, view='collection' ):
rval = super( Job, self ).to_dict( view=view )
- rval['tool_name'] = self.tool_id
- param_dict = dict( [ ( p.name, p.value ) for p in self.parameters ] )
- rval['params'] = param_dict
+ rval['tool_id'] = self.tool_id
+ if view == 'element':
+ param_dict = dict( [ ( p.name, p.value ) for p in self.parameters ] )
+ rval['params'] = param_dict
- input_dict = {}
- for i in self.input_datasets:
- if i.dataset is not None:
- input_dict[i.name] = {"hda_id" : i.dataset.id}
- for i in self.input_library_datasets:
- if i.dataset is not None:
- input_dict[i.name] = {"ldda_id" : i.dataset.id}
- for k in input_dict:
- if k in param_dict:
- del param_dict[k]
- rval['inputs'] = input_dict
+ input_dict = {}
+ for i in self.input_datasets:
+ if i.dataset is not None:
+ input_dict[i.name] = {"id" : i.dataset.id, "src" : "hda"}
+ for i in self.input_library_datasets:
+ if i.dataset is not None:
+ input_dict[i.name] = {"id" : i.dataset.id, "src" : "ldda"}
+ for k in input_dict:
+ if k in param_dict:
+ del param_dict[k]
+ rval['inputs'] = input_dict
- output_dict = {}
- for i in self.output_datasets:
- if i.dataset is not None:
- output_dict[i.name] = {"hda_id" : i.dataset.id}
- for i in self.output_library_datasets:
- if i.dataset is not None:
- output_dict[i.name] = {"ldda_id" : i.dataset.id}
- rval['outputs'] = output_dict
+ output_dict = {}
+ for i in self.output_datasets:
+ if i.dataset is not None:
+ output_dict[i.name] = {"id" : i.dataset.id, "src" : "hda"}
+ for i in self.output_library_datasets:
+ if i.dataset is not None:
+ output_dict[i.name] = {"id" : i.dataset.id, "src" : "ldda"}
+ rval['outputs'] = output_dict
return rval
diff -r d3bc7ea60b05f3025de3dca2684f1bce0be2c0eb -r 6d8959c8866c699ee0e9461db2a1fcc0381bfac2 lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -160,7 +160,7 @@
if type( rval ) != dict:
return rval
for k, v in rval.items():
- if (k == 'id' or k.endswith( '_id' )) and v is not None:
+ if (k == 'id' or k.endswith( '_id' )) and v is not None and k not in ['tool_id']:
try:
rval[k] = trans.security.encode_id( v )
except:
diff -r d3bc7ea60b05f3025de3dca2684f1bce0be2c0eb -r 6d8959c8866c699ee0e9461db2a1fcc0381bfac2 lib/galaxy/webapps/galaxy/api/jobs.py
--- /dev/null
+++ b/lib/galaxy/webapps/galaxy/api/jobs.py
@@ -0,0 +1,192 @@
+"""
+API operations on jobs.
+
+.. seealso:: :class:`galaxy.model.Jobs`
+"""
+
+from sqlalchemy import or_, and_
+from sqlalchemy.orm import aliased
+import json
+from galaxy.web import _future_expose_api as expose_api
+from galaxy.web.base.controller import BaseAPIController
+from galaxy.web.base.controller import UsesHistoryDatasetAssociationMixin
+from galaxy.web.base.controller import UsesLibraryMixinItems
+from galaxy import exceptions
+
+import logging
+log = logging.getLogger( __name__ )
+
+
+class JobController( BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesLibraryMixinItems ):
+
+ @expose_api
+ def index( self, trans, **kwd ):
+ """
+ index( trans, state=None )
+ * GET /api/jobs:
+ return jobs for current user
+
+ :type state: string or list
+ :param state: limit listing of jobs to those that match one of the included states. If none, all are returned.
+ Valid Galaxy job states include:
+ 'new', 'upload', 'waiting', 'queued', 'running', 'ok', 'error', 'paused', 'deleted', 'deleted_new'
+
+ :rtype: list
+ :returns: list of dictionaries containing summary job information
+ """
+
+ state = kwd.get( 'state', None )
+ query = trans.sa_session.query( trans.app.model.Job ).filter(
+ trans.app.model.Job.user == trans.user
+ )
+ if state is not None:
+ if isinstance( state, basestring ):
+ query = query.filter( trans.app.model.Job.state == state )
+ elif isinstance( state, list ):
+ t = []
+ for s in state:
+ t.append( trans.app.model.Job.state == s )
+ query = query.filter( or_( *t ) )
+
+ out = []
+ for job in query.order_by(
+ trans.app.model.Job.update_time.desc()
+ ).all():
+ out.append( self.encode_all_ids( trans, job.to_dict( 'collection' ), True ) )
+ return out
+
+ @expose_api
+ def show( self, trans, id, **kwd ):
+ """
+ show( trans, id )
+ * GET /api/jobs/{job_id}:
+ return a single job for the current user
+
+ :type id: string
+ :param id: Specific job id
+
+ :rtype: dictionary
+ :returns: dictionary containing full description of job data
+ """
+ try:
+ decoded_job_id = trans.security.decode_id(id)
+ except:
+ raise exceptions.ObjectAttributeInvalidException()
+ query = trans.sa_session.query( trans.app.model.Job ).filter(
+ trans.app.model.Job.user == trans.user,
+ trans.app.model.Job.id == decoded_job_id
+ )
+ job = query.first()
+ if job is None:
+ raise exceptions.ObjectNotFound()
+ return self.encode_all_ids( trans, job.to_dict( 'element' ), True )
+
+ @expose_api
+ def create( self, trans, payload, **kwd ):
+ raise NotImplementedError()
+
+ @expose_api
+ def search( self, trans, payload, **kwd ):
+ """
+ search( trans, payload )
+ * POST /api/jobs/search:
+ return jobs for the current user that match the requested job run
+
+ :type payload: dict
+ :param payload: Dictionary containing description of requested job. This is in the same format as
+ a request to POST /api/tools would take to initiate a job
+
+ :rtype: list
+ :returns: list of dictionaries containing summary job information of the jobs that match the requested job run
+
+ This method is designed to scan the list of previously run jobs and find records of jobs that had
+ the exact same input parameters and datasets. This can be used to minimize the amount of repeated work, and simply
+ recycle the old results.
+ """
+
+ tool_id = None
+ if 'tool_id' in payload:
+ tool_id = payload.get( 'tool_id' )
+ if tool_id is None:
+ raise exceptions.ObjectAttributeMissingException( "No tool id" )
+
+ tool = trans.app.toolbox.get_tool( tool_id )
+ if tool is None:
+ raise exceptions.ObjectNotFound( "Requested tool not found" )
+ if 'inputs' not in payload:
+ raise exceptions.ObjectAttributeMissingException( "No inputs defined" )
+
+ inputs = payload[ 'inputs' ]
+
+ input_data = {}
+ input_param = {}
+ for k, v in inputs.items():
+ if isinstance( v, dict ):
+ if 'id' in v:
+ if 'src' not in v or v[ 'src' ] == 'hda':
+ dataset = self.get_dataset( trans, v['id'], check_ownership=False, check_accessible=True )
+ else:
+ dataset = self.get_library_dataset_dataset_association( trans, v['id'] )
+ if dataset is None:
+ raise exceptions.ObjectNotFound( "Dataset %s not found" % ( v[ 'id' ] ) )
+ input_data[k] = dataset.dataset_id
+ else:
+ input_param[k] = json.dumps( v )
+
+ query = trans.sa_session.query( trans.app.model.Job ).filter(
+ trans.app.model.Job.tool_id == tool_id,
+ trans.app.model.Job.user == trans.user
+ )
+
+ if 'state' not in payload:
+ query = query.filter(
+ or_(
+ trans.app.model.Job.state == 'running',
+ trans.app.model.Job.state == 'queued',
+ trans.app.model.Job.state == 'waiting',
+ trans.app.model.Job.state == 'running',
+ trans.app.model.Job.state == 'ok',
+ )
+ )
+ else:
+ if isinstance( payload[ 'state' ], basestring ):
+ query = query.filter( trans.app.model.Job.state == payload[ 'state' ] )
+ elif isinstance( payload[ 'state' ], list ):
+ o = []
+ for s in payload[ 'state' ]:
+ o.append( trans.app.model.Job.state == s )
+ query = query.filter(
+ or_( *o )
+ )
+
+ for k, v in input_param.items():
+ a = aliased( trans.app.model.JobParameter )
+ query = query.filter( and_(
+ trans.app.model.Job.id == a.job_id,
+ a.name == k,
+ a.value == v
+ ) )
+
+ for k, v in input_data.items():
+ """
+ Here we are attempting to link the inputs to the underlying dataset (not the dataset association)
+ This way, if the calculation was done using a copied HDA (copied from the library or another history)
+ the search will still find the job
+ """
+ a = aliased( trans.app.model.JobToInputDatasetAssociation )
+ b = aliased( trans.app.model.HistoryDatasetAssociation )
+ query = query.filter( and_(
+ trans.app.model.Job.id == a.job_id,
+ a.dataset_id == b.id,
+ b.deleted == False,
+ b.dataset_id == v
+ ) )
+
+ out = []
+ for job in query.all():
+ """
+ check to make sure none of the output files have been deleted
+ """
+ if all( list( a.dataset.deleted == False for a in job.output_datasets ) ):
+ out.append( self.encode_all_ids( trans, job.to_dict( 'element' ), True ) )
+ return out
diff -r d3bc7ea60b05f3025de3dca2684f1bce0be2c0eb -r 6d8959c8866c699ee0e9461db2a1fcc0381bfac2 lib/galaxy/webapps/galaxy/buildapp.py
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -228,6 +228,12 @@
path_prefix='/api/libraries/:library_id',
parent_resources=dict( member_name='library', collection_name='libraries' ) )
+ webapp.mapper.resource( 'job',
+ 'jobs',
+ path_prefix='/api' )
+ webapp.mapper.connect( 'job_search', '/api/jobs/search', controller='jobs', action='search', conditions=dict( method=['POST'] ) )
+
+
_add_item_extended_metadata_controller( webapp,
name_prefix="library_dataset_",
path_prefix='/api/libraries/:library_id/contents/:library_content_id' )
diff -r d3bc7ea60b05f3025de3dca2684f1bce0be2c0eb -r 6d8959c8866c699ee0e9461db2a1fcc0381bfac2 test/functional/api/test_jobs.py
--- /dev/null
+++ b/test/functional/api/test_jobs.py
@@ -0,0 +1,118 @@
+import json
+from operator import itemgetter
+
+from base import api
+
+from .helpers import TestsDatasets
+
+
+class JobsApiTestCase( api.ApiTestCase, TestsDatasets ):
+
+ def test_index( self ):
+ # Create HDA to ensure at least one job exists...
+ self.__history_with_new_dataset()
+ jobs_response = self._get( "jobs" )
+
+ self._assert_status_code_is( jobs_response, 200 )
+
+ jobs = jobs_response.json()
+ assert isinstance( jobs, list )
+ assert "upload1" in map( itemgetter( "tool_id" ), jobs )
+
+ def test_index_state_filter( self ):
+ # Initial number of ok jobs
+ original_count = len( self.__uploads_with_state( "ok" ) )
+
+ # Run through dataset upload to ensure the number of uploads
+ # grows by at least 1.
+ self.__history_with_ok_dataset()
+
+ # Verify number of ok jobs is actually greater.
+ new_count = len( self.__uploads_with_state( "ok" ) )
+ assert original_count < new_count
+
+ def test_index_multiple_states_filter( self ):
+ # Initial number of ok jobs
+ original_count = len( self.__uploads_with_state( "ok", "new" ) )
+
+ # Run through dataset upload to ensure the number of uploads
+ # grows by at least 1.
+ self.__history_with_ok_dataset()
+
+ # Verify number of ok jobs is actually greater.
+ new_count = len( self.__uploads_with_state( "new", "ok" ) )
+ assert original_count < new_count, new_count
+
+ def test_show( self ):
+ # Create HDA to ensure at least one job exists...
+ self.__history_with_new_dataset()
+
+ jobs_response = self._get( "jobs" )
+ first_job = jobs_response.json()[ 0 ]
+ self._assert_has_key( first_job, 'id', 'state', 'exit_code', 'update_time', 'create_time' )
+
+ job_id = first_job[ "id" ]
+ show_jobs_response = self._get( "jobs/%s" % job_id )
+ self._assert_status_code_is( show_jobs_response, 200 )
+
+ job_details = show_jobs_response.json()
+ self._assert_has_key( job_details, 'id', 'state', 'exit_code', 'update_time', 'create_time' )
+
+ def test_search( self ):
+ history_id, dataset_id = self.__history_with_ok_dataset()
+
+ inputs = json.dumps(
+ dict(
+ input1=dict(
+ src='hda',
+ id=dataset_id,
+ )
+ )
+ )
+ search_payload = dict(
+ tool_id="cat1",
+ inputs=inputs,
+ state="ok",
+ )
+
+ empty_search_response = self._post( "jobs/search", data=search_payload )
+ self._assert_status_code_is( empty_search_response, 200 )
+ assert len( empty_search_response.json() ) == 0
+
+ self.__run_cat_tool( history_id, dataset_id )
+ self._wait_for_history( history_id, assert_ok=True )
+
+ search_response = self._post( "jobs/search", data=search_payload )
+ self._assert_status_code_is( empty_search_response, 200 )
+ assert len( search_response.json() ) == 1, search_response.json()
+
+ def __run_cat_tool( self, history_id, dataset_id ):
+ # Code duplication with test_jobs.py, eliminate
+ payload = self._run_tool_payload(
+ tool_id='cat1',
+ inputs=dict(
+ input1=dict(
+ src='hda',
+ id=dataset_id
+ ),
+ ),
+ history_id=history_id,
+ )
+ self._post( "tools", data=payload )
+
+ def __uploads_with_state( self, *states ):
+ jobs_response = self._get( "jobs", data=dict( state=states ) )
+ self._assert_status_code_is( jobs_response, 200 )
+ jobs = jobs_response.json()
+ assert not filter( lambda j: j[ "state" ] not in states, jobs )
+ return filter( lambda j: j[ "tool_id" ] == "upload1", jobs )
+
+ def __history_with_new_dataset( self ):
+ history_id = self._new_history()
+ dataset_id = self._new_dataset( history_id )[ "id" ]
+ return history_id, dataset_id
+
+ def __history_with_ok_dataset( self ):
+ history_id, dataset_id = self.__history_with_new_dataset()
+ self._wait_for_history( history_id, assert_ok=True )
+ return history_id, dataset_id
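
Putting the merged pieces together, the intended "recycle old results" flow is: search first, and only run the tool if nothing matches. A rough end-to-end sketch that mirrors the functional test's payload encoding (inputs sent as a JSON string); the URL, key and dataset id are placeholders:

    import json
    import requests

    GALAXY_URL = "http://localhost:8080"    # placeholder Galaxy instance
    API_KEY = "YOUR_API_KEY"                # placeholder user API key
    hda_id = "f2db41e1fa331b3e"             # placeholder encoded dataset id

    payload = {
        "tool_id": "cat1",
        "inputs": json.dumps( { "input1": { "src": "hda", "id": hda_id } } ),
        "state": "ok",
    }

    # Ask whether an equivalent, already-finished job exists...
    matches = requests.post( "%s/api/jobs/search" % GALAXY_URL,
                             params={ "key": API_KEY }, data=payload ).json()
    if matches:
        previous_job = matches[ 0 ]    # reuse its outputs instead of re-running
    else:
        # ...otherwise submit the run through POST /api/tools as usual.
        pass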
https://bitbucket.org/galaxy/galaxy-central/commits/1dd912d65cfb/
Changeset: 1dd912d65cfb
Branch: job-search
User: jmchilton
Date: 2014-01-29 06:58:04
Summary: Close branch job-search.
Affected #: 0 files
https://bitbucket.org/galaxy/galaxy-central/commits/60203bad003c/
Changeset: 60203bad003c
User: jmchilton
Date: 2014-01-29 07:05:15
Summary: Merge.
Affected #: 3 files
diff -r 6d8959c8866c699ee0e9461db2a1fcc0381bfac2 -r 60203bad003cc6b4bc054500248e5a69cbe9b8bc lib/galaxy/tools/imp_exp/__init__.py
--- a/lib/galaxy/tools/imp_exp/__init__.py
+++ b/lib/galaxy/tools/imp_exp/__init__.py
@@ -1,4 +1,8 @@
-import os, shutil, logging, tempfile, json
+import os
+import shutil
+import logging
+import tempfile
+import json
from galaxy import model
from galaxy.tools.parameters.basic import UnvalidatedValue
from galaxy.web.framework.helpers import to_unicode
@@ -8,6 +12,7 @@
log = logging.getLogger(__name__)
+
def load_history_imp_exp_tools( toolbox ):
""" Adds tools for importing/exporting histories to archives. """
# Use same process as that used in load_external_metadata_tool; see that
@@ -42,6 +47,7 @@
toolbox.tools_by_id[ history_imp_tool.id ] = history_imp_tool
log.debug( "Loaded history import tool: %s", history_imp_tool.id )
+
class JobImportHistoryArchiveWrapper( object, UsesHistoryMixin, UsesAnnotations ):
"""
Class provides support for performing jobs that import a history from
@@ -144,23 +150,23 @@
metadata = dataset_attrs['metadata']
# Create dataset and HDA.
- hda = model.HistoryDatasetAssociation( name = dataset_attrs['name'].encode( 'utf-8' ),
- extension = dataset_attrs['extension'],
- info = dataset_attrs['info'].encode( 'utf-8' ),
- blurb = dataset_attrs['blurb'],
- peek = dataset_attrs['peek'],
- designation = dataset_attrs['designation'],
- visible = dataset_attrs['visible'],
- dbkey = metadata['dbkey'],
- metadata = metadata,
- history = new_history,
- create_dataset = True,
- sa_session = self.sa_session )
+ hda = model.HistoryDatasetAssociation( name=dataset_attrs['name'].encode( 'utf-8' ),
+ extension=dataset_attrs['extension'],
+ info=dataset_attrs['info'].encode( 'utf-8' ),
+ blurb=dataset_attrs['blurb'],
+ peek=dataset_attrs['peek'],
+ designation=dataset_attrs['designation'],
+ visible=dataset_attrs['visible'],
+ dbkey=metadata['dbkey'],
+ metadata=metadata,
+ history=new_history,
+ create_dataset=True,
+ sa_session=self.sa_session )
hda.state = hda.states.OK
self.sa_session.add( hda )
self.sa_session.flush()
- new_history.add_dataset( hda, genome_build = None )
- hda.hid = dataset_attrs['hid'] # Overwrite default hid set when HDA added to history.
+ new_history.add_dataset( hda, genome_build=None )
+ hda.hid = dataset_attrs['hid'] # Overwrite default hid set when HDA added to history.
# TODO: Is there a way to recover permissions? Is this needed?
#permissions = trans.app.security_agent.history_get_default_permissions( new_history )
#trans.app.security_agent.set_all_dataset_permissions( hda.dataset, permissions )
@@ -273,6 +279,7 @@
jiha.job.stderr += "Error cleaning up history import job: %s" % e
self.sa_session.flush()
+
class JobExportHistoryArchiveWrapper( object, UsesHistoryMixin, UsesAnnotations ):
"""
Class provides support for performing jobs that export a history to an
@@ -317,23 +324,23 @@
""" Encode an HDA, default encoding for everything else. """
if isinstance( obj, trans.app.model.HistoryDatasetAssociation ):
return {
- "__HistoryDatasetAssociation__" : True,
- "create_time" : obj.create_time.__str__(),
- "update_time" : obj.update_time.__str__(),
- "hid" : obj.hid,
- "name" : to_unicode( obj.name ),
- "info" : to_unicode( obj.info ),
- "blurb" : obj.blurb,
- "peek" : obj.peek,
- "extension" : obj.extension,
- "metadata" : prepare_metadata( dict( obj.metadata.items() ) ),
- "parent_id" : obj.parent_id,
- "designation" : obj.designation,
- "deleted" : obj.deleted,
- "visible" : obj.visible,
- "file_name" : obj.file_name,
- "annotation" : to_unicode( getattr( obj, 'annotation', '' ) ),
- "tags" : get_item_tag_dict( obj ),
+ "__HistoryDatasetAssociation__": True,
+ "create_time": obj.create_time.__str__(),
+ "update_time": obj.update_time.__str__(),
+ "hid": obj.hid,
+ "name": to_unicode( obj.name ),
+ "info": to_unicode( obj.info ),
+ "blurb": obj.blurb,
+ "peek": obj.peek,
+ "extension": obj.extension,
+ "metadata": prepare_metadata( dict( obj.metadata.items() ) ),
+ "parent_id": obj.parent_id,
+ "designation": obj.designation,
+ "deleted": obj.deleted,
+ "visible": obj.visible,
+ "file_name": obj.file_name,
+ "annotation": to_unicode( getattr( obj, 'annotation', '' ) ),
+ "tags": get_item_tag_dict( obj ),
}
if isinstance( obj, UnvalidatedValue ):
return obj.__str__()
@@ -347,15 +354,15 @@
# Write history attributes to file.
history = jeha.history
history_attrs = {
- "create_time" : history.create_time.__str__(),
- "update_time" : history.update_time.__str__(),
- "name" : to_unicode( history.name ),
- "hid_counter" : history.hid_counter,
- "genome_build" : history.genome_build,
- "annotation" : to_unicode( self.get_item_annotation_str( trans.sa_session, history.user, history ) ),
- "tags" : get_item_tag_dict( history ),
- "includes_hidden_datasets" : include_hidden,
- "includes_deleted_datasets" : include_deleted
+ "create_time": history.create_time.__str__(),
+ "update_time": history.update_time.__str__(),
+ "name": to_unicode( history.name ),
+ "hid_counter": history.hid_counter,
+ "genome_build": history.genome_build,
+ "annotation": to_unicode( self.get_item_annotation_str( trans.sa_session, history.user, history ) ),
+ "tags": get_item_tag_dict( history ),
+ "includes_hidden_datasets": include_hidden,
+ "includes_deleted_datasets": include_deleted
}
history_attrs_filename = tempfile.NamedTemporaryFile( dir=temp_output_dir ).name
history_attrs_out = open( history_attrs_filename, 'w' )
@@ -391,7 +398,7 @@
# Get the associated job, if any. If this hda was copied from another,
# we need to find the job that created the original hda
job_hda = hda
- while job_hda.copied_from_history_dataset_association: #should this check library datasets as well?
+ while job_hda.copied_from_history_dataset_association: # should this check library datasets as well?
job_hda = job_hda.copied_from_history_dataset_association
if not job_hda.creating_job_associations:
# No viable HDA found.
@@ -472,4 +479,3 @@
shutil.rmtree( temp_dir )
except Exception, e:
log.debug( 'Error deleting directory containing attribute files (%s): %s' % ( temp_dir, e ) )
-
diff -r 6d8959c8866c699ee0e9461db2a1fcc0381bfac2 -r 60203bad003cc6b4bc054500248e5a69cbe9b8bc lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py
--- a/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py
+++ b/lib/galaxy/tools/imp_exp/unpack_tar_gz_archive.py
@@ -6,19 +6,25 @@
--[url|file] source type, either a URL or a file.
"""
-import sys, optparse, tarfile, tempfile, urllib2, math
+import sys
+import optparse
+import tarfile
+import tempfile
+import urllib2
+import math
# Set max size of archive/file that will be handled to be 100 GB. This is
# arbitrary and should be adjusted as needed.
MAX_SIZE = 100 * math.pow( 2, 30 )
+
def url_to_file( url, dest_file ):
"""
Transfer a file from a remote URL to a temporary file.
"""
try:
url_reader = urllib2.urlopen( url )
- CHUNK = 10 * 1024 # 10k
+ CHUNK = 10 * 1024 # 10k
total = 0
fp = open( dest_file, 'wb')
while True:
@@ -35,6 +41,7 @@
print "Exception getting file from URL: %s" % e, sys.stderr
return None
+
def unpack_archive( archive_file, dest_dir ):
"""
Unpack a tar and/or gzipped archive into a destination directory.
@@ -63,4 +70,4 @@
# Unpack archive.
unpack_archive( archive_file, dest_dir )
except Exception, e:
- print "Error unpacking tar/gz archive: %s" % e, sys.stderr
\ No newline at end of file
+ print "Error unpacking tar/gz archive: %s" % e, sys.stderr
diff -r 6d8959c8866c699ee0e9461db2a1fcc0381bfac2 -r 60203bad003cc6b4bc054500248e5a69cbe9b8bc lib/galaxy/webapps/galaxy/api/histories.py
--- a/lib/galaxy/webapps/galaxy/api/histories.py
+++ b/lib/galaxy/webapps/galaxy/api/histories.py
@@ -11,15 +11,17 @@
from galaxy import web
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
-from galaxy.util import string_as_bool, restore_text
-from galaxy.util.sanitize_html import sanitize_html
-from galaxy.web.base.controller import BaseAPIController, UsesHistoryMixin, UsesTagsMixin
+from galaxy.util import string_as_bool
+from galaxy.util import restore_text
+from galaxy.web.base.controller import BaseAPIController
+from galaxy.web.base.controller import UsesHistoryMixin
+from galaxy.web.base.controller import UsesTagsMixin
from galaxy.web import url_for
-from galaxy.model.orm import desc
import logging
log = logging.getLogger( __name__ )
+
class HistoriesController( BaseAPIController, UsesHistoryMixin, UsesTagsMixin ):
@expose_api_anonymous
@@ -46,14 +48,14 @@
histories = self.get_user_histories( trans, user=trans.user, only_deleted=deleted )
#for history in query:
for history in histories:
- item = history.to_dict(value_mapper={'id':trans.security.encode_id})
+ item = history.to_dict(value_mapper={'id': trans.security.encode_id})
item['url'] = url_for( 'history', id=trans.security.encode_id( history.id ) )
rval.append( item )
elif trans.galaxy_session.current_history:
#No user, this must be session authentication with an anonymous user.
history = trans.galaxy_session.current_history
- item = history.to_dict(value_mapper={'id':trans.security.encode_id})
+ item = history.to_dict(value_mapper={'id': trans.security.encode_id})
item['url'] = url_for( 'history', id=trans.security.encode_id( history.id ) )
rval.append(item)
@@ -257,7 +259,7 @@
log.exception( 'Histories API, delete: uncaught HTTPInternalServerError: %s, %s\n%s',
history_id, str( kwd ), str( http_server_err ) )
raise
- except HTTPException, http_exc:
+ except HTTPException:
raise
except Exception, exc:
log.exception( 'Histories API, delete: uncaught exception: %s, %s\n%s',
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d3bc7ea60b05/
Changeset: d3bc7ea60b05
User: guerler
Date: 2014-01-29 00:29:23
Summary: Merge
Affected #: 0 files
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: guerler: Grids: Option to pick between persistent and transient messages
by commits-noreply@bitbucket.org 28 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/593be4d22c47/
Changeset: 593be4d22c47
Branch: next-stable
User: guerler
Date: 2014-01-28 18:26:40
Summary: Grids: Option to pick between persistent and transient messages
Affected #: 7 files
diff -r 44538b70d174dd36836a88123a865e1147c016b5 -r 593be4d22c470abe6bdbbfa51f2a8e7ac035de33 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py
+++ b/lib/galaxy/web/framework/helpers/grids.py
@@ -24,6 +24,7 @@
template = "grid_base.mako"
async_template = "grid_base_async.mako"
use_async = False
+ use_hide_message = True
global_actions = []
columns = []
operations = []
@@ -286,6 +287,7 @@
status = status,
message = message,
use_panels=self.use_panels,
+ use_hide_message=self.use_hide_message,
advanced_search=self.advanced_search,
show_item_checkboxes = ( self.show_item_checkboxes or
kwargs.get( 'show_item_checkboxes', '' ) in [ 'True', 'true' ] ),
diff -r 44538b70d174dd36836a88123a865e1147c016b5 -r 593be4d22c470abe6bdbbfa51f2a8e7ac035de33 lib/tool_shed/grids/admin_grids.py
--- a/lib/tool_shed/grids/admin_grids.py
+++ b/lib/tool_shed/grids/admin_grids.py
@@ -508,6 +508,7 @@
model_class = model.RepositoryMetadata
template='/webapps/tool_shed/repository/grid.mako'
default_sort_key = "name"
+ use_hide_message = False
columns = [
IdColumn( "Id",
visible=False,
diff -r 44538b70d174dd36836a88123a865e1147c016b5 -r 593be4d22c470abe6bdbbfa51f2a8e7ac035de33 lib/tool_shed/grids/repository_grids.py
--- a/lib/tool_shed/grids/repository_grids.py
+++ b/lib/tool_shed/grids/repository_grids.py
@@ -236,6 +236,7 @@
model_class = model.Repository
template='/webapps/tool_shed/repository/grid.mako'
default_sort_key = "name"
+ use_hide_message = False
columns = [
NameColumn( "Name",
key="name",
@@ -344,6 +345,7 @@
model_class = model.RepositoryMetadata
template='/webapps/tool_shed/repository/grid.mako'
default_sort_key = "Repository.name"
+ use_hide_message = False
columns = [
NameColumn( "Repository name",
link=( lambda item: dict( operation="view_or_manage_repository", id=item.id ) ),
@@ -652,7 +654,7 @@
columns = [ col for col in RepositoriesMissingToolTestComponentsGrid.columns ]
operations = []
use_paging = False
-
+
def build_initial_query( self, trans, **kwd ):
# First get all repositories that the current user is authorized to update.
username = trans.user.username
diff -r 44538b70d174dd36836a88123a865e1147c016b5 -r 593be4d22c470abe6bdbbfa51f2a8e7ac035de33 lib/tool_shed/grids/repository_review_grids.py
--- a/lib/tool_shed/grids/repository_review_grids.py
+++ b/lib/tool_shed/grids/repository_review_grids.py
@@ -36,6 +36,7 @@
model_class = model.Component
template='/webapps/tool_shed/repository_review/grid.mako'
default_sort_key = "name"
+ use_hide_message = False
columns = [
NameColumn( "Name",
key="Component.name",
@@ -333,6 +334,7 @@
model_class = model.RepositoryReview
template='/webapps/tool_shed/repository_review/grid.mako'
default_sort_key = 'repository_id'
+ use_hide_message = False
columns = [
RepositoryNameColumn( "Repository Name",
model_class=model.Repository,
diff -r 44538b70d174dd36836a88123a865e1147c016b5 -r 593be4d22c470abe6bdbbfa51f2a8e7ac035de33 static/scripts/galaxy.grids.js
--- a/static/scripts/galaxy.grids.js
+++ b/static/scripts/galaxy.grids.js
@@ -194,7 +194,9 @@
// update message
if (options.message) {
$('#grid-message').html(this.template_message(options));
- setTimeout( function() { $('#grid-message').html(''); }, 5000);
+ if (options.use_hide_message) {
+ setTimeout( function() { $('#grid-message').html(''); }, 5000);
+ }
}
// configure elements
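
On the Python side, a grid opts out of the auto-hide by setting the new class attribute, exactly as the tool shed grids above do; the grid template passes the flag through to galaxy.grids.js, which then skips the five-second timeout that clears #grid-message. A minimal hypothetical subclass:

    from galaxy.web.framework.helpers import grids

    class PersistentMessageGrid( grids.Grid ):
        # Hypothetical subclass: keep status messages on screen instead of
        # letting the client clear them after five seconds.
        use_hide_message = False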
diff -r 44538b70d174dd36836a88123a865e1147c016b5 -r 593be4d22c470abe6bdbbfa51f2a8e7ac035de33 static/scripts/packed/galaxy.grids.js
--- a/static/scripts/packed/galaxy.grids.js
+++ b/static/scripts/packed/galaxy.grids.js
@@ -1,1 +1,1 @@
-jQuery.ajaxSettings.traditional=true;define(["mvc/ui"],function(){var a=Backbone.Model.extend({defaults:{url_base:"",async:false,async_ops:[],categorical_filters:[],filters:{},sort_key:null,show_item_checkboxes:false,advanced_search:false,cur_page:1,num_pages:1,operation:undefined,item_ids:undefined},can_async_op:function(c){return _.indexOf(this.attributes.async_ops,c)!==-1},add_filter:function(g,h,d){if(d){var e=this.attributes.filters[g],c;if(e===null||e===undefined){c=h}else{if(typeof(e)=="string"){if(e=="All"){c=h}else{var f=[];f[0]=e;f[1]=h;c=f}}else{c=e;c.push(h)}}this.attributes.filters[g]=c}else{this.attributes.filters[g]=h}},remove_filter:function(d,g){var c=this.attributes.filters[d];if(c===null||c===undefined){return false}var f=true;if(typeof(c)==="string"){if(c=="All"){f=false}else{delete this.attributes.filters[d]}}else{var e=_.indexOf(c,g);if(e!==-1){c.splice(e,1)}else{f=false}}return f},get_url_data:function(){var c={async:this.attributes.async,sort:this.attributes.sort_key,page:this.attributes.cur_page,show_item_checkboxes:this.attributes.show_item_checkboxes,advanced_search:this.attributes.advanced_search};if(this.attributes.operation){c.operation=this.attributes.operation}if(this.attributes.item_ids){c.id=this.attributes.item_ids}var d=this;_.each(_.pairs(d.attributes.filters),function(e){c["f-"+e[0]]=e[1]});return c},get_url:function(c){return this.get("url_base")+"?"+$.param(this.get_url_data())+"&"+$.param(c)}});var b=Backbone.View.extend({grid:null,initialize:function(c){this.init_grid(c);this.init_grid_controls();$("input[type=text]").each(function(){$(this).click(function(){$(this).select()}).keyup(function(){$(this).css("font-style","normal")})})},handle_refresh:function(c){if(c){if($.inArray("history",c)>-1){if(top.Galaxy&&top.Galaxy.currHistoryPanel){top.Galaxy.currHistoryPanel.loadCurrentHistory()}}}},init_grid:function(e){this.grid=new a(e);var d=this.grid.attributes;this.handle_refresh(d.refresh_frames);var c=this.grid.get("url_base");c=c.replace(/^.*\/\/[^\/]+/,"");this.grid.set("url_base",c);$("#grid-table-body").html(this.template_body(d));$("#grid-table-footer").html(this.template_footer(d));if(d.message){$("#grid-message").html(this.template_message(d));setTimeout(function(){$("#grid-message").html("")},5000)}this.init_grid_elements();init_refresh_on_change()},init_grid_controls:function(){$(".submit-image").each(function(){$(this).mousedown(function(){$(this).addClass("gray-background")});$(this).mouseup(function(){$(this).removeClass("gray-background")})});var c=this;$(".sort-link").each(function(){$(this).click(function(){c.set_sort_condition($(this).attr("sort_key"));return false})});$(".categorical-filter > a").each(function(){$(this).click(function(){c.set_categorical_filter($(this).attr("filter_key"),$(this).attr("filter_val"));return false})});$(".text-filter-form").each(function(){$(this).submit(function(){var g=$(this).attr("column_key");var f=$("#input-"+g+"-filter");var h=f.val();f.val("");c.add_filter_condition(g,h);return false})});var d=$("#input-tags-filter");if(d.length){d.autocomplete(this.grid.history_tag_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}var e=$("#input-name-filter");if(e.length){e.autocomplete(this.grid.history_name_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}$(".advanced-search-toggle").each(function(){$(this).click(function(){$("#standard-search").slideToggle("fast");$("#advanced-search").slideToggle("fast");return 
false})})},init_grid_elements:function(){$(".grid").each(function(){var s=$(this).find("input.grid-row-select-checkbox");var r=$(this).find("span.grid-selected-count");var t=function(){r.text($(s).filter(":checked").length)};$(s).each(function(){$(this).change(t)});t()});if($(".community_rating_star").length!==0){$(".community_rating_star").rating({})}var q=this.grid.attributes;var p=this;$(".page-link > a").each(function(){$(this).click(function(){p.set_page($(this).attr("page_num"));return false})});$(".use-inbound").each(function(){$(this).click(function(r){p.execute({href:$(this).attr("href"),inbound:true});return false})});$(".use-outbound").each(function(){$(this).click(function(r){p.execute({href:$(this).attr("href")});return false})});var f=q.items.length;if(f==0){return}for(var k in q.items){var o=q.items[k];var l=$("#grid-"+k+"-popup");l.off();var d=new PopupMenu(l);for(var h in q.operations){var e=q.operations[h];var m=e.label;var c=o.operation_config[m];var g=o.encode_id;if(c.allowed&&e.allow_popup){var n={html:e.label,href:c.url_args,target:c.target,confirmation_text:e.confirm,inbound:e.inbound};n.func=function(t){t.preventDefault();var s=$(t.target).html();var r=this.findItemByHtml(s);p.execute(r)};d.addItem(n)}}}},add_filter_condition:function(e,g){if(g===""){return false}this.grid.add_filter(e,g,true);var f=$("<span>"+g+"<a href='javascript:void(0);'><span class='delete-search-icon' /></span></a>");f.addClass("text-filter-val");var d=this;f.click(function(){d.grid.remove_filter(e,g);$(this).remove();d.go_page_one();d.execute()});var c=$("#"+e+"-filtering-criteria");c.append(f);this.go_page_one();this.execute()},set_sort_condition:function(h){var g=this.grid.get("sort_key");var f=h;if(g.indexOf(h)!==-1){if(g.substring(0,1)!=="-"){f="-"+h}else{}}$(".sort-arrow").remove();var e=(f.substring(0,1)=="-")?"↑":"↓";var c=$("<span>"+e+"</span>").addClass("sort-arrow");var d=$("#"+h+"-header");d.append(c);this.grid.set("sort_key",f);this.go_page_one();this.execute()},set_categorical_filter:function(e,g){var d=this.grid.get("categorical_filters")[e],f=this.grid.get("filters")[e];var c=this;$("."+e+"-filter").each(function(){var m=$.trim($(this).text());var k=d[m];var l=k[e];if(l==g){$(this).empty();$(this).addClass("current-filter");$(this).append(m)}else{if(l==f){$(this).empty();var h=$("<a href='#'>"+m+"</a>");h.click(function(){c.set_categorical_filter(e,l)});$(this).removeClass("current-filter");$(this).append(h)}}});this.grid.add_filter(e,g);this.go_page_one();this.execute()},set_page:function(c){var d=this;$(".page-link").each(function(){var k=$(this).attr("id"),g=parseInt(k.split("-")[2],10),e=d.grid.get("cur_page"),h;if(g===c){h=$(this).children().text();$(this).empty();$(this).addClass("inactive-link");$(this).text(h)}else{if(g===e){h=$(this).text();$(this).empty();$(this).removeClass("inactive-link");var f=$("<a href='#'>"+h+"</a>");f.click(function(){d.set_page(g)});$(this).append(f)}}});if(c==="all"){this.grid.set("cur_page",c)}else{this.grid.set("cur_page",parseInt(c,10))}this.execute()},submit_operation:function(f,g){var e=$('input[name="id"]:checked').length;if(!e>0){return false}var d=$(f).val();var c=[];$("input[name=id]:checked").each(function(){c.push($(this).val())});this.execute({operation:d,id:c,confirmation_text:g});return true},execute:function(n){var f=null;var e=null;var g=null;var c=null;var m=null;if(n){e=n.href;g=n.operation;f=n.id;c=n.confirmation_text;m=n.inbound;if(e!==undefined&&e.indexOf("operation=")!=-1){var l=e.split("?");if(l.length>1){var 
k=l[1];var d=k.split("&");for(var h=0;h<d.length;h++){if(d[h].indexOf("operation")!=-1){g=d[h].split("=")[1];g=g.replace(/\+/g," ")}else{if(d[h].indexOf("id")!=-1){f=d[h].split("=")[1]}}}}}}if(g&&f){if(c&&c!=""&&c!="None"&&c!="null"){if(!confirm(c)){return false}}g=g.toLowerCase();this.grid.set({operation:g,item_ids:f});if(this.grid.can_async_op(g)){this.update_grid()}else{this.go_to(m,"")}return false}if(e){this.go_to(m,e);return false}if(this.grid.get("async")){this.update_grid()}else{this.go_to(m,"")}return false},go_to:function(f,d){var e=this.grid.get("async");this.grid.set("async",false);advanced_search=$("#advanced-search").is(":visible");this.grid.set("advanced_search",advanced_search);if(!d){d=this.grid.get("url_base")+"?"+$.param(this.grid.get_url_data())}this.grid.set({operation:undefined,item_ids:undefined,async:e});if(f){var c=$(".grid-header").closest(".inbound");if(c.length!==0){c.load(d);return}}window.location=d},update_grid:function(){var d=(this.grid.get("operation")?"POST":"GET");$(".loading-elt-overlay").show();var c=this;$.ajax({type:d,url:c.grid.get("url_base"),data:c.grid.get_url_data(),error:function(e){alert("Grid refresh failed")},success:function(e){c.init_grid($.parseJSON(e));$(".loading-elt-overlay").hide()},complete:function(){c.grid.set({operation:undefined,item_ids:undefined})}})},check_all_items:function(){var c=document.getElementById("check_all"),d=document.getElementsByTagName("input"),f=0,e;if(c.checked===true){for(e=0;e<d.length;e++){if(d[e].name.indexOf("id")!==-1){d[e].checked=true;f++}}}else{for(e=0;e<d.length;e++){if(d[e].name.indexOf("id")!==-1){d[e].checked=false}}}this.init_grid_elements()},go_page_one:function(){var c=this.grid.get("cur_page");if(c!==null&&c!==undefined&&c!=="all"){this.grid.set("cur_page",1)}},template_body:function(t){var m="";var u=0;var g=t.items.length;if(g==0){m+='<tr><td colspan="100"><em>No Items</em></td></tr>';u=1}for(var h in t.items){var r=t.items[h];var c=r.encode_id;var k="grid-"+h+"-popup";m+="<tr ";if(t.current_item_id==r.id){m+='class="current"'}m+=">";if(t.show_item_checkboxes){m+='<td style="width: 1.5em;"><input type="checkbox" name="id" value="'+c+'" id="'+c+'" class="grid-row-select-checkbox" /></td>'}for(j in t.columns){var f=t.columns[j];if(f.visible){var e="";if(f.nowrap){e='style="white-space:nowrap;"'}var s=r.column_config[f.label];var l=s.link;var n=s.value;var q=s.inbound;if(jQuery.type(n)==="string"){n=n.replace(/\/\//g,"/")}var d="";var p="";if(f.attach_popup){d="grid-"+h+"-popup";p="menubutton";if(l!=""){p+=" split"}p+=" popup"}m+="<td "+e+">";if(l){if(t.operations.length!=0){m+='<div id="'+d+'" class="'+p+'" style="float: left;">'}var o="";if(q){o="use-inbound"}else{o="use-outbound"}m+='<a class="label '+o+'" href="'+l+'" onclick="return false;">'+n+"</a>";if(t.operations.length!=0){m+="</div>"}}else{m+='<div id="'+d+'" class="'+p+'"><label id="'+f.label_id_prefix+c+'" for="'+c+'">'+n+"</label></div>"}m+="</td>"}}m+="</tr>";u++}return m},template_footer:function(q){var m="";if(q.use_paging&&q.num_pages>1){var o=q.num_page_links;var c=q.cur_page_num;var p=q.num_pages;var l=o/2;var k=c-l;var g=0;if(k==0){k=1;g=l-(c-k)}var f=l+g;var e=c+f;if(e<=p){max_offset=0}else{e=p;max_offset=f-(e+1-c)}if(max_offset!=0){k-=max_offset;if(k<1){k=1}}m+='<tr id="page-links-row">';if(q.show_item_checkboxes){m+="<td></td>"}m+='<td colspan="100"><span id="page-link-container">Page:';if(k>1){m+='<span class="page-link" id="page-link-1"><a href="'+this.grid.get_url({page:n})+'" page_num="1" onclick="return 
false;">1</a></span> ...'}for(var n=k;n<e+1;n++){if(n==q.cur_page_num){m+='<span class="page-link inactive-link" id="page-link-'+n+'">'+n+"</span>"}else{m+='<span class="page-link" id="page-link-'+n+'"><a href="'+this.grid.get_url({page:n})+'" onclick="return false;" page_num="'+n+'">'+n+"</a></span>"}}if(e<p){m+='...<span class="page-link" id="page-link-'+p+'"><a href="'+this.grid.get_url({page:p})+'" onclick="return false;" page_num="'+p+'">'+p+"</a></span>"}m+="</span>";m+='<span class="page-link" id="show-all-link-span"> | <a href="'+this.grid.get_url({page:"all"})+'" onclick="return false;" page_num="all">Show All</a></span></td></tr>'}if(q.show_item_checkboxes){m+='<tr><input type="hidden" id="operation" name="operation" value=""><td></td><td colspan="100">For <span class="grid-selected-count"></span> selected '+q.get_class_plural+": ";for(i in q.operations){var d=q.operations[i];if(d.allow_multiple){m+='<input type="button" value="'+d.label+'" class="action-button" onclick="gridView.submit_operation(this, \''+d.confirm+"')\"> "}}m+="</td></tr>"}var h=false;for(i in q.operations){if(q.operations[i].global_operation){h=true;break}}if(h){m+='<tr><td colspan="100">';for(i in q.operations){var d=q.operations[i];if(d.global_operation){m+='<a class="action-button" href="'+d.global_operation+'">'+d.label+"</a>"}}m+="</td></tr>"}if(q.legend){m+='<tr><td colspan="100">'+q.legend+"</td></tr>"}return m},template_message:function(c){return'<p><div class="'+c.status+'message transient-message">'+c.message+'</div><div style="clear: both"></div></p>'}});return{Grid:a,GridView:b}});
\ No newline at end of file
+jQuery.ajaxSettings.traditional=true;define(["mvc/ui"],function(){var a=Backbone.Model.extend({defaults:{url_base:"",async:false,async_ops:[],categorical_filters:[],filters:{},sort_key:null,show_item_checkboxes:false,advanced_search:false,cur_page:1,num_pages:1,operation:undefined,item_ids:undefined},can_async_op:function(c){return _.indexOf(this.attributes.async_ops,c)!==-1},add_filter:function(g,h,d){if(d){var e=this.attributes.filters[g],c;if(e===null||e===undefined){c=h}else{if(typeof(e)=="string"){if(e=="All"){c=h}else{var f=[];f[0]=e;f[1]=h;c=f}}else{c=e;c.push(h)}}this.attributes.filters[g]=c}else{this.attributes.filters[g]=h}},remove_filter:function(d,g){var c=this.attributes.filters[d];if(c===null||c===undefined){return false}var f=true;if(typeof(c)==="string"){if(c=="All"){f=false}else{delete this.attributes.filters[d]}}else{var e=_.indexOf(c,g);if(e!==-1){c.splice(e,1)}else{f=false}}return f},get_url_data:function(){var c={async:this.attributes.async,sort:this.attributes.sort_key,page:this.attributes.cur_page,show_item_checkboxes:this.attributes.show_item_checkboxes,advanced_search:this.attributes.advanced_search};if(this.attributes.operation){c.operation=this.attributes.operation}if(this.attributes.item_ids){c.id=this.attributes.item_ids}var d=this;_.each(_.pairs(d.attributes.filters),function(e){c["f-"+e[0]]=e[1]});return c},get_url:function(c){return this.get("url_base")+"?"+$.param(this.get_url_data())+"&"+$.param(c)}});var b=Backbone.View.extend({grid:null,initialize:function(c){this.init_grid(c);this.init_grid_controls();$("input[type=text]").each(function(){$(this).click(function(){$(this).select()}).keyup(function(){$(this).css("font-style","normal")})})},handle_refresh:function(c){if(c){if($.inArray("history",c)>-1){if(top.Galaxy&&top.Galaxy.currHistoryPanel){top.Galaxy.currHistoryPanel.loadCurrentHistory()}}}},init_grid:function(e){this.grid=new a(e);var d=this.grid.attributes;this.handle_refresh(d.refresh_frames);var c=this.grid.get("url_base");c=c.replace(/^.*\/\/[^\/]+/,"");this.grid.set("url_base",c);$("#grid-table-body").html(this.template_body(d));$("#grid-table-footer").html(this.template_footer(d));if(d.message){$("#grid-message").html(this.template_message(d));if(d.use_hide_message){setTimeout(function(){$("#grid-message").html("")},5000)}}this.init_grid_elements();init_refresh_on_change()},init_grid_controls:function(){$(".submit-image").each(function(){$(this).mousedown(function(){$(this).addClass("gray-background")});$(this).mouseup(function(){$(this).removeClass("gray-background")})});var c=this;$(".sort-link").each(function(){$(this).click(function(){c.set_sort_condition($(this).attr("sort_key"));return false})});$(".categorical-filter > a").each(function(){$(this).click(function(){c.set_categorical_filter($(this).attr("filter_key"),$(this).attr("filter_val"));return false})});$(".text-filter-form").each(function(){$(this).submit(function(){var g=$(this).attr("column_key");var f=$("#input-"+g+"-filter");var h=f.val();f.val("");c.add_filter_condition(g,h);return false})});var d=$("#input-tags-filter");if(d.length){d.autocomplete(this.grid.history_tag_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}var e=$("#input-name-filter");if(e.length){e.autocomplete(this.grid.history_name_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}$(".advanced-search-toggle").each(function(){$(this).click(function(){$("#standard-search").slideToggle("fast");$("#advanced-search").slideToggle("fast");return 
false})})},init_grid_elements:function(){$(".grid").each(function(){var s=$(this).find("input.grid-row-select-checkbox");var r=$(this).find("span.grid-selected-count");var t=function(){r.text($(s).filter(":checked").length)};$(s).each(function(){$(this).change(t)});t()});if($(".community_rating_star").length!==0){$(".community_rating_star").rating({})}var q=this.grid.attributes;var p=this;$(".page-link > a").each(function(){$(this).click(function(){p.set_page($(this).attr("page_num"));return false})});$(".use-inbound").each(function(){$(this).click(function(r){p.execute({href:$(this).attr("href"),inbound:true});return false})});$(".use-outbound").each(function(){$(this).click(function(r){p.execute({href:$(this).attr("href")});return false})});var f=q.items.length;if(f==0){return}for(var k in q.items){var o=q.items[k];var l=$("#grid-"+k+"-popup");l.off();var d=new PopupMenu(l);for(var h in q.operations){var e=q.operations[h];var m=e.label;var c=o.operation_config[m];var g=o.encode_id;if(c.allowed&&e.allow_popup){var n={html:e.label,href:c.url_args,target:c.target,confirmation_text:e.confirm,inbound:e.inbound};n.func=function(t){t.preventDefault();var s=$(t.target).html();var r=this.findItemByHtml(s);p.execute(r)};d.addItem(n)}}}},add_filter_condition:function(e,g){if(g===""){return false}this.grid.add_filter(e,g,true);var f=$("<span>"+g+"<a href='javascript:void(0);'><span class='delete-search-icon' /></span></a>");f.addClass("text-filter-val");var d=this;f.click(function(){d.grid.remove_filter(e,g);$(this).remove();d.go_page_one();d.execute()});var c=$("#"+e+"-filtering-criteria");c.append(f);this.go_page_one();this.execute()},set_sort_condition:function(h){var g=this.grid.get("sort_key");var f=h;if(g.indexOf(h)!==-1){if(g.substring(0,1)!=="-"){f="-"+h}else{}}$(".sort-arrow").remove();var e=(f.substring(0,1)=="-")?"↑":"↓";var c=$("<span>"+e+"</span>").addClass("sort-arrow");var d=$("#"+h+"-header");d.append(c);this.grid.set("sort_key",f);this.go_page_one();this.execute()},set_categorical_filter:function(e,g){var d=this.grid.get("categorical_filters")[e],f=this.grid.get("filters")[e];var c=this;$("."+e+"-filter").each(function(){var m=$.trim($(this).text());var k=d[m];var l=k[e];if(l==g){$(this).empty();$(this).addClass("current-filter");$(this).append(m)}else{if(l==f){$(this).empty();var h=$("<a href='#'>"+m+"</a>");h.click(function(){c.set_categorical_filter(e,l)});$(this).removeClass("current-filter");$(this).append(h)}}});this.grid.add_filter(e,g);this.go_page_one();this.execute()},set_page:function(c){var d=this;$(".page-link").each(function(){var k=$(this).attr("id"),g=parseInt(k.split("-")[2],10),e=d.grid.get("cur_page"),h;if(g===c){h=$(this).children().text();$(this).empty();$(this).addClass("inactive-link");$(this).text(h)}else{if(g===e){h=$(this).text();$(this).empty();$(this).removeClass("inactive-link");var f=$("<a href='#'>"+h+"</a>");f.click(function(){d.set_page(g)});$(this).append(f)}}});if(c==="all"){this.grid.set("cur_page",c)}else{this.grid.set("cur_page",parseInt(c,10))}this.execute()},submit_operation:function(f,g){var e=$('input[name="id"]:checked').length;if(!e>0){return false}var d=$(f).val();var c=[];$("input[name=id]:checked").each(function(){c.push($(this).val())});this.execute({operation:d,id:c,confirmation_text:g});return true},execute:function(n){var f=null;var e=null;var g=null;var c=null;var m=null;if(n){e=n.href;g=n.operation;f=n.id;c=n.confirmation_text;m=n.inbound;if(e!==undefined&&e.indexOf("operation=")!=-1){var l=e.split("?");if(l.length>1){var 
k=l[1];var d=k.split("&");for(var h=0;h<d.length;h++){if(d[h].indexOf("operation")!=-1){g=d[h].split("=")[1];g=g.replace(/\+/g," ")}else{if(d[h].indexOf("id")!=-1){f=d[h].split("=")[1]}}}}}}if(g&&f){if(c&&c!=""&&c!="None"&&c!="null"){if(!confirm(c)){return false}}g=g.toLowerCase();this.grid.set({operation:g,item_ids:f});if(this.grid.can_async_op(g)){this.update_grid()}else{this.go_to(m,"")}return false}if(e){this.go_to(m,e);return false}if(this.grid.get("async")){this.update_grid()}else{this.go_to(m,"")}return false},go_to:function(f,d){var e=this.grid.get("async");this.grid.set("async",false);advanced_search=$("#advanced-search").is(":visible");this.grid.set("advanced_search",advanced_search);if(!d){d=this.grid.get("url_base")+"?"+$.param(this.grid.get_url_data())}this.grid.set({operation:undefined,item_ids:undefined,async:e});if(f){var c=$(".grid-header").closest(".inbound");if(c.length!==0){c.load(d);return}}window.location=d},update_grid:function(){var d=(this.grid.get("operation")?"POST":"GET");$(".loading-elt-overlay").show();var c=this;$.ajax({type:d,url:c.grid.get("url_base"),data:c.grid.get_url_data(),error:function(e){alert("Grid refresh failed")},success:function(e){c.init_grid($.parseJSON(e));$(".loading-elt-overlay").hide()},complete:function(){c.grid.set({operation:undefined,item_ids:undefined})}})},check_all_items:function(){var c=document.getElementById("check_all"),d=document.getElementsByTagName("input"),f=0,e;if(c.checked===true){for(e=0;e<d.length;e++){if(d[e].name.indexOf("id")!==-1){d[e].checked=true;f++}}}else{for(e=0;e<d.length;e++){if(d[e].name.indexOf("id")!==-1){d[e].checked=false}}}this.init_grid_elements()},go_page_one:function(){var c=this.grid.get("cur_page");if(c!==null&&c!==undefined&&c!=="all"){this.grid.set("cur_page",1)}},template_body:function(t){var m="";var u=0;var g=t.items.length;if(g==0){m+='<tr><td colspan="100"><em>No Items</em></td></tr>';u=1}for(var h in t.items){var r=t.items[h];var c=r.encode_id;var k="grid-"+h+"-popup";m+="<tr ";if(t.current_item_id==r.id){m+='class="current"'}m+=">";if(t.show_item_checkboxes){m+='<td style="width: 1.5em;"><input type="checkbox" name="id" value="'+c+'" id="'+c+'" class="grid-row-select-checkbox" /></td>'}for(j in t.columns){var f=t.columns[j];if(f.visible){var e="";if(f.nowrap){e='style="white-space:nowrap;"'}var s=r.column_config[f.label];var l=s.link;var n=s.value;var q=s.inbound;if(jQuery.type(n)==="string"){n=n.replace(/\/\//g,"/")}var d="";var p="";if(f.attach_popup){d="grid-"+h+"-popup";p="menubutton";if(l!=""){p+=" split"}p+=" popup"}m+="<td "+e+">";if(l){if(t.operations.length!=0){m+='<div id="'+d+'" class="'+p+'" style="float: left;">'}var o="";if(q){o="use-inbound"}else{o="use-outbound"}m+='<a class="label '+o+'" href="'+l+'" onclick="return false;">'+n+"</a>";if(t.operations.length!=0){m+="</div>"}}else{m+='<div id="'+d+'" class="'+p+'"><label id="'+f.label_id_prefix+c+'" for="'+c+'">'+n+"</label></div>"}m+="</td>"}}m+="</tr>";u++}return m},template_footer:function(q){var m="";if(q.use_paging&&q.num_pages>1){var o=q.num_page_links;var c=q.cur_page_num;var p=q.num_pages;var l=o/2;var k=c-l;var g=0;if(k==0){k=1;g=l-(c-k)}var f=l+g;var e=c+f;if(e<=p){max_offset=0}else{e=p;max_offset=f-(e+1-c)}if(max_offset!=0){k-=max_offset;if(k<1){k=1}}m+='<tr id="page-links-row">';if(q.show_item_checkboxes){m+="<td></td>"}m+='<td colspan="100"><span id="page-link-container">Page:';if(k>1){m+='<span class="page-link" id="page-link-1"><a href="'+this.grid.get_url({page:n})+'" page_num="1" onclick="return 
false;">1</a></span> ...'}for(var n=k;n<e+1;n++){if(n==q.cur_page_num){m+='<span class="page-link inactive-link" id="page-link-'+n+'">'+n+"</span>"}else{m+='<span class="page-link" id="page-link-'+n+'"><a href="'+this.grid.get_url({page:n})+'" onclick="return false;" page_num="'+n+'">'+n+"</a></span>"}}if(e<p){m+='...<span class="page-link" id="page-link-'+p+'"><a href="'+this.grid.get_url({page:p})+'" onclick="return false;" page_num="'+p+'">'+p+"</a></span>"}m+="</span>";m+='<span class="page-link" id="show-all-link-span"> | <a href="'+this.grid.get_url({page:"all"})+'" onclick="return false;" page_num="all">Show All</a></span></td></tr>'}if(q.show_item_checkboxes){m+='<tr><input type="hidden" id="operation" name="operation" value=""><td></td><td colspan="100">For <span class="grid-selected-count"></span> selected '+q.get_class_plural+": ";for(i in q.operations){var d=q.operations[i];if(d.allow_multiple){m+='<input type="button" value="'+d.label+'" class="action-button" onclick="gridView.submit_operation(this, \''+d.confirm+"')\"> "}}m+="</td></tr>"}var h=false;for(i in q.operations){if(q.operations[i].global_operation){h=true;break}}if(h){m+='<tr><td colspan="100">';for(i in q.operations){var d=q.operations[i];if(d.global_operation){m+='<a class="action-button" href="'+d.global_operation+'">'+d.label+"</a>"}}m+="</td></tr>"}if(q.legend){m+='<tr><td colspan="100">'+q.legend+"</td></tr>"}return m},template_message:function(c){return'<p><div class="'+c.status+'message transient-message">'+c.message+'</div><div style="clear: both"></div></p>'}});return{Grid:a,GridView:b}});
\ No newline at end of file
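The packed hunks above differ in only one place: init_grid no longer unconditionally clears #grid-message after five seconds, but does so only when the grid config carries a truthy use_hide_message flag. A minimal unpacked sketch of that behavior, assuming jQuery is loaded (the showGridMessage name is illustrative, not from the source):

// Render a grid status message and, if the grid opts in, hide it after 5 seconds.
// `options` mirrors the config dict built in grid_base.mako (see the next hunk);
// the markup is a simplified form of template_message from the packed file.
function showGridMessage( options ) {
    $( '#grid-message' ).html(
        '<div class="' + options.status + 'message transient-message">' + options.message + '</div>'
    );
    // New in this commit: the auto-hide only runs when use_hide_message is set.
    if ( options.use_hide_message ) {
        setTimeout( function() { $( '#grid-message' ).html( '' ); }, 5000 );
    }
}

Grids that need a persistent message can presumably disable use_hide_message; grids that leave it enabled keep the previous auto-hide behavior.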
diff -r 44538b70d174dd36836a88123a865e1147c016b5 -r 593be4d22c470abe6bdbbfa51f2a8e7ac035de33 templates/grid_base.mako
--- a/templates/grid_base.mako
+++ b/templates/grid_base.mako
@@ -64,6 +64,7 @@
'legend' : grid.legend,
'current_item_id' : False,
'use_panels' : context.get('use_panels'),
+ 'use_hide_message' : grid.use_hide_message,
'insert' : insert,
'default_filter_dict' : default_filter_dict,
'advanced_search' : advanced_search,
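On the template side, grid_base.mako adds grid.use_hide_message to the config dict it serializes for the client, so the flag arrives alongside the existing keys. Roughly, the object the client-side code receives looks like this (only the keys come from the hunk; the values are placeholders):

// Shape of the grid config consumed by galaxy.grids.js after this change;
// concrete values are illustrative, not taken from the commit.
var gridConfig = {
    legend: null,
    current_item_id: false,
    use_panels: false,
    use_hide_message: true,   // new: gates the 5-second auto-hide shown above
    insert: null,
    default_filter_dict: {},
    advanced_search: false
    // ...plus the other keys built earlier in grid_base.mako
};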
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.