galaxy-commits
November 2013
commit/galaxy-central: jmchilton: get_initial_value history fix. Allows ToolDataParameters inside of repeats when using API.
by commits-noreply@bitbucket.org 14 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/418993fac75d/
Changeset: 418993fac75d
User: jmchilton
Date: 2013-11-14 19:35:27
Summary: get_initial_value history fix. Allows ToolDataParameters inside of repeats when using API.
Affected #: 1 file
diff -r 3402a11b008865ee37ee29c9d7d3d5ea27c6fe84 -r 418993fac75d07297dc317e183cf401fd9fbb869 lib/galaxy/tools/parameters/grouping.py
--- a/lib/galaxy/tools/parameters/grouping.py
+++ b/lib/galaxy/tools/parameters/grouping.py
@@ -113,7 +113,7 @@
for i in range( self.default ):
rval_dict = { '__index__': i}
for input in self.inputs.itervalues():
- rval_dict[ input.name ] = input.get_initial_value( trans, context )
+ rval_dict[ input.name ] = input.get_initial_value( trans, context, history=history )
rval.append( rval_dict )
return rval
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
16 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/2edddde7517d/
Changeset: 2edddde7517d
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Some PEP-8 fixes for lib/galaxy/tools/parameters/basic.py.
Affected #: 1 file
diff -r 1df960b4892ae935840455de9ac058e396ec9410 -r 2edddde7517d7957fe75a87fa7d45b80dedc156f lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -2,14 +2,20 @@
Basic tool parameters.
"""
-import logging, string, sys, os, os.path, urllib
+import logging
+import string
+import sys
+import os
+import os.path
+import urllib
from elementtree.ElementTree import XML, Element
from galaxy import config, datatypes, util
from galaxy.web import form_builder
from galaxy.util.bunch import Bunch
from galaxy.util import string_as_bool, sanitize_param, unicodify
from sanitize import ToolParameterSanitizer
-import validation, dynamic_options
+import validation
+import dynamic_options
# For BaseURLToolParameter
from galaxy.web import url_for
from galaxy.model.item_attrs import Dictifiable
@@ -53,8 +59,10 @@
def get_label( self ):
"""Return user friendly name for the parameter"""
- if self.label: return self.label
- else: return self.name
+ if self.label:
+ return self.label
+ else:
+ return self.name
def get_html_field( self, trans=None, value=None, other_values={} ):
raise TypeError( "Abstract Method" )
@@ -87,7 +95,7 @@
if a value has already been chosen from the history. This is to support the capability to
choose each dataset once
"""
- return self.get_initial_value(trans, context, history=history);
+ return self.get_initial_value(trans, context, history=history)
def get_required_enctype( self ):
"""
@@ -166,7 +174,7 @@
return value
def validate( self, value, history=None ):
- if value=="" and self.optional:
+ if value == "" and self.optional:
return
for validator in self.validators:
validator.validate( value, history )
@@ -219,7 +227,8 @@
self.area = string_as_bool( elem.get( 'area', False ) )
def get_html_field( self, trans=None, value=None, other_values={} ):
- if value is None: value = self.value
+ if value is None:
+ value = self.value
if self.area:
return form_builder.TextArea( self.name, self.size, value )
else:
@@ -228,6 +237,7 @@
def get_initial_value( self, trans, context, history=None ):
return self.value
+
class IntegerToolParameter( TextToolParameter ):
"""
Parameter that takes an integer value.
@@ -412,7 +422,7 @@
checked = self.checked
if value is not None:
checked = form_builder.CheckboxField.is_checked( value )
- return form_builder.CheckboxField( self.name, checked, refresh_on_change = self.refresh_on_change )
+ return form_builder.CheckboxField( self.name, checked, refresh_on_change=self.refresh_on_change )
def from_html( self, value, trans=None, other_values={} ):
return form_builder.CheckboxField.is_checked( value )
@@ -461,7 +471,7 @@
self.ajax = string_as_bool( elem.get( 'ajax-upload' ) )
def get_html_field( self, trans=None, value=None, other_values={} ):
- return form_builder.FileField( self.name, ajax = self.ajax, value = value )
+ return form_builder.FileField( self.name, ajax=self.ajax, value=value )
def from_html( self, value, trans=None, other_values={} ):
# Middleware or proxies may encode files in special ways (TODO: this
@@ -476,8 +486,8 @@
assert local_filename.startswith( upload_store ), \
"Filename provided by nginx is not in correct directory"
value = dict(
- filename = value["name"],
- local_filename = local_filename
+ filename=value["name"],
+ local_filename=local_filename
)
return value
@@ -533,7 +543,7 @@
user_ftp_dir = None
else:
user_ftp_dir = trans.user_ftp_dir
- return form_builder.FTPFileField( self.name, user_ftp_dir, trans.app.config.ftp_upload_site, value = value )
+ return form_builder.FTPFileField( self.name, user_ftp_dir, trans.app.config.ftp_upload_site, value=value )
def from_html( self, value, trans=None, other_values={} ):
try:
@@ -754,8 +764,9 @@
else:
return form_builder.TextField( self.name, value=(value or "") )
if value is not None:
- if not isinstance( value, list ): value = [ value ]
- field = form_builder.SelectField( self.name, self.multiple, self.display, self.refresh_on_change, refresh_on_change_values = self.refresh_on_change_values )
+ if not isinstance( value, list ):
+ value = [ value ]
+ field = form_builder.SelectField( self.name, self.multiple, self.display, self.refresh_on_change, refresh_on_change_values=self.refresh_on_change_values )
options = self.get_options( trans, context )
for text, optval, selected in options:
if isinstance( optval, UnvalidatedValue ):
@@ -793,7 +804,7 @@
rval.append( v )
return rval
else:
- value_is_none = ( value == "None" and "None" not in legal_values )
+ value_is_none = ( value == "None" and "None" not in legal_values )
if value_is_none:
if self.multiple:
if self.optional:
@@ -943,7 +954,7 @@
options = []
try:
options = self.get_options( trans, {} )
- except AssertionError, assertion:
+ except AssertionError:
# we dont/cant set other_values (the {} above), so params that require other params to be filled will error:
# required dependency in filter_options
# associated DataToolParam in get_column_list
@@ -1531,8 +1542,9 @@
TODO: There should be an alternate display that allows single selects to be
displayed as radio buttons and multiple selects as a set of checkboxes
- TODO: The following must be fixed to test correctly for the new security_check tag in the DataToolParameter ( the last test below is broken )
- Nate's next pass at the dataset security stuff will dramatically alter this anyway.
+ TODO: The following must be fixed to test correctly for the new security_check tag in
+ the DataToolParameter ( the last test below is broken ) Nate's next pass at the dataset
+ security stuff will dramatically alter this anyway.
"""
def __init__( self, tool, elem, trans=None):
@@ -1579,8 +1591,8 @@
# Load conversions required for the dataset input
self.conversions = []
for conv_elem in elem.findall( "conversion" ):
- name = conv_elem.get( "name" ) #name for commandline substitution
- conv_extensions = conv_elem.get( "type" ) #target datatype extension
+ name = conv_elem.get( "name" ) # name for commandline substitution
+ conv_extensions = conv_elem.get( "type" ) # target datatype extension
# FIXME: conv_extensions should be able to be an ordered list
assert None not in [ name, type ], 'A name (%s) and type (%s) are required for explicit conversion' % ( name, type )
conv_types = tool.app.datatypes_registry.get_datatype_by_extension( conv_extensions.lower() )
@@ -1592,14 +1604,15 @@
try:
filter_value = self.options.get_options( trans, other_values )[0][0]
except IndexError:
- pass #no valid options
+ pass # no valid options
assert trans is not None, "DataToolParameter requires a trans"
history = trans.get_history()
assert history is not None, "DataToolParameter requires a history"
if value is not None:
if type( value ) != list:
value = [ value ]
- field = form_builder.SelectField( self.name, self.multiple, None, self.refresh_on_change, refresh_on_change_values = self.refresh_on_change_values )
+ field = form_builder.SelectField( self.name, self.multiple, None, self.refresh_on_change, refresh_on_change_values=self.refresh_on_change_values )
+
# CRUCIAL: the dataset_collector function needs to be local to DataToolParameter.get_html_field()
def dataset_collector( hdas, parent_hid ):
current_user_roles = trans.get_current_user_roles()
@@ -1654,7 +1667,7 @@
return field
def get_initial_value( self, trans, context, history=None ):
- return self.get_initial_value_from_history_prevent_repeats(trans, context, None, history=history);
+ return self.get_initial_value_from_history_prevent_repeats(trans, context, None, history=history)
def get_initial_value_from_history_prevent_repeats( self, trans, context, already_used, history=None ):
"""
@@ -1676,7 +1689,8 @@
try:
filter_value = self.options.get_options( trans, context )[0][0]
except IndexError:
- pass #no valid options
+ pass # no valid options
+
def dataset_collector( datasets ):
def is_convertable( dataset ):
target_ext, converted_dataset = dataset.find_conversion_destination( self.formats )
https://bitbucket.org/galaxy/galaxy-central/commits/4f68a886036c/
Changeset: 4f68a886036c
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: PEP-8 cleanup of lib/galaxy/tools/actions/upload.py.
Affected #: 1 file
diff -r 2edddde7517d7957fe75a87fa7d45b80dedc156f -r 4f68a886036c934194d4a625cb2fcccf32e64aba lib/galaxy/tools/actions/upload.py
--- a/lib/galaxy/tools/actions/upload.py
+++ b/lib/galaxy/tools/actions/upload.py
@@ -4,8 +4,10 @@
import logging
log = logging.getLogger( __name__ )
+
class UploadToolAction( ToolAction ):
- def execute( self, tool, trans, incoming={}, set_output_hid = True, history=None, **kwargs ):
+
+ def execute( self, tool, trans, incoming={}, set_output_hid=True, history=None, **kwargs ):
dataset_upload_inputs = []
for input_name, input in tool.inputs.iteritems():
if input.type == "upload_dataset":
https://bitbucket.org/galaxy/galaxy-central/commits/510cbdd44e77/
Changeset: 510cbdd44e77
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: PEP-8 cleanups for lib/galaxy/tools/actions/upload_common.py.
Affected #: 1 file
diff -r 4f68a886036c934194d4a625cb2fcccf32e64aba -r 510cbdd44e772629b49d1ae134af199a006ee28d lib/galaxy/tools/actions/upload_common.py
--- a/lib/galaxy/tools/actions/upload_common.py
+++ b/lib/galaxy/tools/actions/upload_common.py
@@ -1,4 +1,8 @@
-import os, tempfile, StringIO, pwd, subprocess
+import os
+import tempfile
+import StringIO
+import pwd
+import subprocess
from cgi import FieldStorage
from galaxy import datatypes, util
from galaxy.util.odict import odict
@@ -10,13 +14,13 @@
import logging
log = logging.getLogger( __name__ )
+
def persist_uploads( params ):
"""
Turn any uploads in the submitted form to persisted files.
"""
if 'files' in params:
new_files = []
- temp_files = []
for upload_dataset in params['files']:
f = upload_dataset['file_data']
if isinstance( f, FieldStorage ):
@@ -24,8 +28,8 @@
assert f.file.name != '<fdopen>'
local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
f.file.close()
- upload_dataset['file_data'] = dict( filename = f.filename,
- local_filename = local_filename )
+ upload_dataset['file_data'] = dict( filename=f.filename,
+ local_filename=local_filename )
elif type( f ) == dict and 'filename' and 'local_filename' not in f:
raise Exception( 'Uploaded file was encoded in a way not understood by Galaxy.' )
if upload_dataset['url_paste'] and upload_dataset['url_paste'].strip() != '':
@@ -35,6 +39,8 @@
new_files.append( upload_dataset )
params['files'] = new_files
return params
+
+
def handle_library_params( trans, params, folder_id, replace_dataset=None ):
# FIXME: the received params has already been parsed by util.Params() by the time it reaches here,
# so no complex objects remain. This is not good because it does not allow for those objects to be
@@ -62,6 +68,8 @@
role = trans.sa_session.query( trans.app.model.Role ).get( role_id )
library_bunch.roles.append( role )
return library_bunch
+
+
def get_precreated_datasets( trans, params, data_obj, controller='root' ):
"""
Get any precreated datasets (when using asynchronous uploads).
@@ -90,6 +98,8 @@
else:
rval.append( data )
return rval
+
+
def get_precreated_dataset( precreated_datasets, name ):
"""
Return a dataset matching a name from the list of precreated (via async
@@ -101,21 +111,24 @@
return precreated_datasets.pop( names.index( name ) )
else:
return None
+
+
def cleanup_unused_precreated_datasets( precreated_datasets ):
for data in precreated_datasets:
log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( data.id ) )
data.state = data.states.ERROR
data.info = 'No file contents were available.'
+
def __new_history_upload( trans, uploaded_dataset, history=None, state=None ):
if not history:
history = trans.history
- hda = trans.app.model.HistoryDatasetAssociation( name = uploaded_dataset.name,
- extension = uploaded_dataset.file_type,
- dbkey = uploaded_dataset.dbkey,
- history = history,
- create_dataset = True,
- sa_session = trans.sa_session )
+ hda = trans.app.model.HistoryDatasetAssociation( name=uploaded_dataset.name,
+ extension=uploaded_dataset.file_type,
+ dbkey=uploaded_dataset.dbkey,
+ history=history,
+ create_dataset=True,
+ sa_session=trans.sa_session )
if state:
hda.state = state
else:
@@ -128,6 +141,7 @@
trans.sa_session.flush()
return hda
+
def __new_library_upload( trans, cntrller, uploaded_dataset, library_bunch, state=None ):
current_user_roles = trans.get_current_user_roles()
if not ( ( trans.user_is_admin() and cntrller in [ 'library_admin', 'api' ] ) or trans.app.security_agent.can_add_library_item( current_user_roles, library_bunch.folder ) ):
@@ -156,13 +170,13 @@
trans.sa_session.add( ld )
trans.sa_session.flush()
trans.app.security_agent.copy_library_permissions( trans, folder, ld )
- ldda = trans.app.model.LibraryDatasetDatasetAssociation( name = uploaded_dataset.name,
- extension = uploaded_dataset.file_type,
- dbkey = uploaded_dataset.dbkey,
- library_dataset = ld,
- user = trans.user,
- create_dataset = True,
- sa_session = trans.sa_session )
+ ldda = trans.app.model.LibraryDatasetDatasetAssociation( name=uploaded_dataset.name,
+ extension=uploaded_dataset.file_type,
+ dbkey=uploaded_dataset.dbkey,
+ library_dataset=ld,
+ user=trans.user,
+ create_dataset=True,
+ sa_session=trans.sa_session )
trans.sa_session.add( ldda )
if state:
ldda.state = state
@@ -210,12 +224,14 @@
trans.sa_session.flush()
return ldda
+
def new_upload( trans, cntrller, uploaded_dataset, library_bunch=None, history=None, state=None ):
if library_bunch:
return __new_library_upload( trans, cntrller, uploaded_dataset, library_bunch, state )
else:
return __new_history_upload( trans, uploaded_dataset, history=history, state=state )
+
def get_uploaded_datasets( trans, cntrller, params, precreated_datasets, dataset_upload_inputs, library_bunch=None, history=None ):
uploaded_datasets = []
for dataset_upload_input in dataset_upload_inputs:
@@ -256,6 +272,8 @@
history.genome_build = uploaded_dataset.dbkey
uploaded_dataset.data = data
return uploaded_datasets
+
+
def create_paramfile( trans, uploaded_datasets ):
"""
Create the upload tool's JSON "param" file.
@@ -284,14 +302,14 @@
setattr( data.metadata, meta_name, meta_value )
trans.sa_session.add( data )
trans.sa_session.flush()
- json = dict( file_type = uploaded_dataset.file_type,
- dataset_id = data.dataset.id,
- dbkey = uploaded_dataset.dbkey,
- type = uploaded_dataset.type,
- metadata = uploaded_dataset.metadata,
- primary_file = uploaded_dataset.primary_file,
- composite_file_paths = uploaded_dataset.composite_files,
- composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
+ json = dict( file_type=uploaded_dataset.file_type,
+ dataset_id=data.dataset.id,
+ dbkey=uploaded_dataset.dbkey,
+ type=uploaded_dataset.type,
+ metadata=uploaded_dataset.metadata,
+ primary_file=uploaded_dataset.primary_file,
+ composite_file_paths=uploaded_dataset.composite_files,
+ composite_files=dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
else:
try:
is_binary = uploaded_dataset.datatype.is_binary
@@ -305,18 +323,18 @@
uuid_str = uploaded_dataset.uuid
except:
uuid_str = None
- json = dict( file_type = uploaded_dataset.file_type,
- ext = uploaded_dataset.ext,
- name = uploaded_dataset.name,
- dataset_id = data.dataset.id,
- dbkey = uploaded_dataset.dbkey,
- type = uploaded_dataset.type,
- is_binary = is_binary,
- link_data_only = link_data_only,
- uuid = uuid_str,
- space_to_tab = uploaded_dataset.space_to_tab,
- in_place = trans.app.config.external_chown_script is None,
- path = uploaded_dataset.path )
+ json = dict( file_type=uploaded_dataset.file_type,
+ ext=uploaded_dataset.ext,
+ name=uploaded_dataset.name,
+ dataset_id=data.dataset.id,
+ dbkey=uploaded_dataset.dbkey,
+ type=uploaded_dataset.type,
+ is_binary=is_binary,
+ link_data_only=link_data_only,
+ uuid=uuid_str,
+ space_to_tab=uploaded_dataset.space_to_tab,
+ in_place=trans.app.config.external_chown_script is None,
+ path=uploaded_dataset.path )
# TODO: This will have to change when we start bundling inputs.
# Also, in_place above causes the file to be left behind since the
# user cannot remove it unless the parent directory is writable.
@@ -327,6 +345,8 @@
if trans.app.config.external_chown_script:
_chown( json_file_path )
return json_file_path
+
+
def create_job( trans, params, tool, json_file_path, data_list, folder=None, history=None ):
"""
Create the upload job.
@@ -383,6 +403,8 @@
for i, v in enumerate( data_list ):
output[ 'output%i' % i ] = v
return job, output
+
+
def active_folders( trans, folder ):
# Stolen from galaxy.web.controllers.library_common (importing from which causes a circular issues).
# Much faster way of retrieving all active sub-folders within a given folder than the
https://bitbucket.org/galaxy/galaxy-central/commits/83970bc291d2/
Changeset: 83970bc291d2
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Break up row_for_param in tool_form.mako into smaller pieces.
Affected #: 2 files
diff -r 510cbdd44e772629b49d1ae134af199a006ee28d -r 83970bc291d2866ccc8782111a64a01e48bea9d1 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1359,7 +1359,7 @@
options = []
for filter_key, filter_value in self.filtered.iteritems():
dataset = other_values[filter_key]
- if dataset.__class__.__name__.endswith( "DatasetFilenameWrapper" ): #this is a bad way to check for this, but problems importing class ( due to circular imports? )
+ if dataset.__class__.__name__.endswith( "DatasetFilenameWrapper" ): # this is a bad way to check for this, but problems importing class ( due to circular imports? )
dataset = dataset.dataset
if dataset:
for meta_key, meta_dict in filter_value.iteritems():
diff -r 510cbdd44e772629b49d1ae134af199a006ee28d -r 83970bc291d2866ccc8782111a64a01e48bea9d1 templates/webapps/galaxy/tool_form.mako
--- a/templates/webapps/galaxy/tool_form.mako
+++ b/templates/webapps/galaxy/tool_form.mako
@@ -218,12 +218,11 @@
else:
cls = "form-row"
- label = param.get_label()
-
field = param.get_html_field( trans, parent_state[ param.name ], other_values )
field.refresh_on_change = param.refresh_on_change
- # Field may contain characters submitted by user and these characters may be unicode; handle non-ascii characters gracefully.
+ # Field may contain characters submitted by user and these characters may
+ # be unicode; handle non-ascii characters gracefully.
field_html = field.get_html( prefix )
if type( field_html ) is not unicode:
field_html = unicode( field_html, 'utf-8', 'replace' )
@@ -232,25 +231,39 @@
return field_html
%><div class="${cls}">
- %if label:
- <label for="${param.name}">${label}:</label>
- %endif
- <div class="form-row-input">${field_html}</div>
- %if parent_errors.has_key( param.name ):
- <div class="form-row-error-message">
- <div><img style="vertical-align: middle;" src="${h.url_for('/static/style/error_small.png')}"> <span style="vertical-align: middle;">${parent_errors[param.name]}</span></div>
- </div>
- %endif
+ ${label_for_param( param )}
+ ${input_for_param( param, field_html )}
+ ${errors_for_param( param, parent_errors )}
+ ${help_for_param( param )}
+ <div style="clear: both;"></div>
+ </div>
+</%def>
- %if param.help:
- <div class="toolParamHelp" style="clear: both;">
- ${param.help}
- </div>
- %endif
+<%def name="input_for_param( param, field_html )">
+ <div class="form-row-input">${field_html}</div>
+</%def>
- <div style="clear: both;"></div>
+<%def name="label_for_param( param )">
+ <% label = param.get_label()%>
+ %if label:
+ <label for="${param.name}">${label}:</label>
+ %endif
+</%def>
- </div>
+<%def name="errors_for_param( param, parent_errors )">
+ %if parent_errors.has_key( param.name ):
+ <div class="form-row-error-message">
+ <div><img style="vertical-align: middle;" src="${h.url_for('/static/style/error_small.png')}"> <span style="vertical-align: middle;">${parent_errors[param.name]}</span></div>
+ </div>
+ %endif
+</%def>
+
+<%def name="help_for_param( param )">
+ %if param.help:
+ <div class="toolParamHelp" style="clear: both;">
+ ${param.help}
+ </div>
+ %endif
</%def><%def name="row_for_rerun()">
https://bitbucket.org/galaxy/galaxy-central/commits/fcb39c3fdd3e/
Changeset: fcb39c3fdd3e
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Break up tools.handle_input - split out param checking.
Affected #: 1 file
diff -r 83970bc291d2866ccc8782111a64a01e48bea9d1 -r fcb39c3fdd3ece51e9d4753a91880a634e6419e2 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1926,24 +1926,8 @@
if len(incoming):
self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
return "tool_form.mako", dict( errors={}, tool_state=state, param_values={}, incoming={} )
- # Process incoming data
- if not( self.check_values ):
- # If `self.check_values` is false we don't do any checking or
- # processing on input This is used to pass raw values
- # through to/from external sites. FIXME: This should be handled
- # more cleanly, there is no reason why external sites need to
- # post back to the same URL that the tool interface uses.
- errors = {}
- params = incoming
- else:
- # Update state for all inputs on the current page taking new
- # values from `incoming`.
- errors = self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
- # If the tool provides a `validate_input` hook, call it.
- validate_input = self.get_hook( 'validate_input' )
- if validate_input:
- validate_input( trans, errors, state.inputs, self.inputs_by_page[state.page] )
- params = state.inputs
+
+ errors, params = self.__check_param_values( trans, incoming, state, old_errors )
# Did the user actually click next / execute or is this just
# a refresh?
if 'runtool_btn' in incoming or 'URL' in incoming or 'ajax_upload' in incoming:
@@ -1996,6 +1980,28 @@
if not self.display_interface:
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
return 'tool_form.mako', dict( errors=errors, tool_state=state )
+
+ def __check_param_values( self, trans, incoming, state, old_errors ):
+ # Process incoming data
+ if not( self.check_values ):
+ # If `self.check_values` is false we don't do any checking or
+ # processing on input This is used to pass raw values
+ # through to/from external sites. FIXME: This should be handled
+ # more cleanly, there is no reason why external sites need to
+ # post back to the same URL that the tool interface uses.
+ errors = {}
+ params = incoming
+ else:
+ # Update state for all inputs on the current page taking new
+ # values from `incoming`.
+ errors = self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
+ # If the tool provides a `validate_input` hook, call it.
+ validate_input = self.get_hook( 'validate_input' )
+ if validate_input:
+ validate_input( trans, errors, state.inputs, self.inputs_by_page[state.page] )
+ params = state.inputs
+ return errors, params
+
def find_fieldstorage( self, x ):
if isinstance( x, FieldStorage ):
raise InterruptedUpload( None )
https://bitbucket.org/galaxy/galaxy-central/commits/0c0692e52ff7/
Changeset: 0c0692e52ff7
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Break up tools.handle_input - split out state creation.
Affected #: 1 file
diff -r fcb39c3fdd3ece51e9d4753a91880a634e6419e2 -r 0c0692e52ff7ed81b7784ecaea7dff7dcdd28fa5 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1908,13 +1908,8 @@
to the form or execute the tool (only if 'execute' was clicked and
there were no errors).
"""
- # Get the state or create if not found
- if "tool_state" in incoming:
- encoded_state = string_to_object( incoming["tool_state"] )
- state = DefaultToolState()
- state.decode( encoded_state, self, trans.app )
- else:
- state = self.new_state( trans, history=history )
+ state, state_new = self.__fetch_state( trans, incoming, history )
+ if state_new:
# This feels a bit like a hack. It allows forcing full processing
# of inputs even when there is no state in the incoming dictionary
# by providing either 'runtool_btn' (the name of the submit button
@@ -1928,6 +1923,7 @@
return "tool_form.mako", dict( errors={}, tool_state=state, param_values={}, incoming={} )
errors, params = self.__check_param_values( trans, incoming, state, old_errors )
+
# Did the user actually click next / execute or is this just
# a refresh?
if 'runtool_btn' in incoming or 'URL' in incoming or 'ajax_upload' in incoming:
@@ -1981,6 +1977,18 @@
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
return 'tool_form.mako', dict( errors=errors, tool_state=state )
+ def __fetch_state( self, trans, incoming, history ):
+ # Get the state or create if not found
+ if "tool_state" in incoming:
+ encoded_state = string_to_object( incoming["tool_state"] )
+ state = DefaultToolState()
+ state.decode( encoded_state, self, trans.app )
+ new = False
+ else:
+ state = self.new_state( trans, history=history )
+ new = True
+ return state, new
+
def __check_param_values( self, trans, incoming, state, old_errors ):
# Process incoming data
if not( self.check_values ):
https://bitbucket.org/galaxy/galaxy-central/commits/c4632090a444/
Changeset: c4632090a444
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Break up tools.handle_input - split out refresh state logic.
Affected #: 1 file
diff -r 0c0692e52ff7ed81b7784ecaea7dff7dcdd28fa5 -r c4632090a444751660a354e997873da5fd14cfe1 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1924,9 +1924,11 @@
errors, params = self.__check_param_values( trans, incoming, state, old_errors )
- # Did the user actually click next / execute or is this just
- # a refresh?
- if 'runtool_btn' in incoming or 'URL' in incoming or 'ajax_upload' in incoming:
+ if self.__should_refresh_state( incoming ):
+ return self.__handle_state_refresh( trans, state, errors )
+ else:
+ # User actually clicked next or execute.
+
# If there were errors, we stay on the same page and display
# error messages
if errors:
@@ -1962,7 +1964,11 @@
if not self.display_interface:
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
return 'tool_form.mako', dict( errors=errors, tool_state=state )
- else:
+
+ def __should_refresh_state( self, incoming ):
+ return not( 'runtool_btn' in incoming or 'URL' in incoming or 'ajax_upload' in incoming )
+
+ def __handle_state_refresh( self, trans, state, errors ):
try:
self.find_fieldstorage( state.inputs )
except InterruptedUpload:
@@ -1974,7 +1980,7 @@
pass
# Just a refresh, render the form with updated state and errors.
if not self.display_interface:
- return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+ return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
return 'tool_form.mako', dict( errors=errors, tool_state=state )
def __fetch_state( self, trans, incoming, history ):
https://bitbucket.org/galaxy/galaxy-central/commits/2cf8be3f467b/
Changeset: 2cf8be3f467b
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Break up tools.handle_input - split out page advance logic.
Affected #: 1 file
diff -r c4632090a444751660a354e997873da5fd14cfe1 -r 2cf8be3f467ba645d175782ae4c85d942c0cd88f lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1958,12 +1958,7 @@
return 'message.mako', dict( status='error', message=message, refresh_frames=[] )
# Otherwise move on to the next page
else:
- state.page += 1
- # Fill in the default values for the next page
- self.fill_in_new_state( trans, self.inputs_by_page[ state.page ], state.inputs )
- if not self.display_interface:
- return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
- return 'tool_form.mako', dict( errors=errors, tool_state=state )
+ return self.__handle_page_advance( trans, state, errors )
def __should_refresh_state( self, incoming ):
return not( 'runtool_btn' in incoming or 'URL' in incoming or 'ajax_upload' in incoming )
@@ -1983,6 +1978,14 @@
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
return 'tool_form.mako', dict( errors=errors, tool_state=state )
+ def __handle_page_advance( self, trans, state, errors ):
+ state.page += 1
+ # Fill in the default values for the next page
+ self.fill_in_new_state( trans, self.inputs_by_page[ state.page ], state.inputs )
+ if not self.display_interface:
+ return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+ return 'tool_form.mako', dict( errors=errors, tool_state=state )
+
def __fetch_state( self, trans, incoming, history ):
# Get the state or create if not found
if "tool_state" in incoming:
https://bitbucket.org/galaxy/galaxy-central/commits/5541fd3245c5/
Changeset: 5541fd3245c5
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Break up tools.handle_input - split out actual tool execution initiation.
Affected #: 1 file
diff -r 2cf8be3f467ba645d175782ae4c85d942c0cd88f -r 5541fd3245c571c1cbe7a51faa734cd6aa052d4e lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1936,26 +1936,7 @@
return "tool_form.mako", dict( errors=errors, tool_state=state, incoming=incoming, error_message=error_message )
# If we've completed the last page we can execute the tool
elif state.page == self.last_page:
- try:
- rerun_remap_job_id = None
- if 'rerun_remap_job_id' in incoming:
- rerun_remap_job_id = trans.app.security.decode_id(incoming['rerun_remap_job_id'])
- _, out_data = self.execute( trans, incoming=params, history=history, rerun_remap_job_id=rerun_remap_job_id )
- except httpexceptions.HTTPFound, e:
- #if it's a paste redirect exception, pass it up the stack
- raise e
- except Exception, e:
- log.exception('Exception caught while attempting tool execution:')
- return 'message.mako', dict( status='error', message='Error executing tool: %s' % str(e), refresh_frames=[] )
- try:
- assert isinstance( out_data, odict )
- return 'tool_executed.mako', dict( out_data=out_data )
- except:
- if isinstance( out_data, str ):
- message = out_data
- else:
- message = 'Failure executing tool (odict not returned from tool execution)'
- return 'message.mako', dict( status='error', message=message, refresh_frames=[] )
+ return self.__handle_tool_execute( trans, incoming, params, history )
# Otherwise move on to the next page
else:
return self.__handle_page_advance( trans, state, errors )
@@ -1963,6 +1944,28 @@
def __should_refresh_state( self, incoming ):
return not( 'runtool_btn' in incoming or 'URL' in incoming or 'ajax_upload' in incoming )
+ def __handle_tool_execute( self, trans, incoming, params, history ):
+ try:
+ rerun_remap_job_id = None
+ if 'rerun_remap_job_id' in incoming:
+ rerun_remap_job_id = trans.app.security.decode_id(incoming['rerun_remap_job_id'])
+ _, out_data = self.execute( trans, incoming=params, history=history, rerun_remap_job_id=rerun_remap_job_id )
+ except httpexceptions.HTTPFound, e:
+ #if it's a paste redirect exception, pass it up the stack
+ raise e
+ except Exception, e:
+ log.exception('Exception caught while attempting tool execution:')
+ return 'message.mako', dict( status='error', message='Error executing tool: %s' % str(e), refresh_frames=[] )
+ try:
+ assert isinstance( out_data, odict )
+ return 'tool_executed.mako', dict( out_data=out_data )
+ except:
+ if isinstance( out_data, str ):
+ message = out_data
+ else:
+ message = 'Failure executing tool (odict not returned from tool execution)'
+ return 'message.mako', dict( status='error', message=message, refresh_frames=[] )
+
def __handle_state_refresh( self, trans, state, errors ):
try:
self.find_fieldstorage( state.inputs )
https://bitbucket.org/galaxy/galaxy-central/commits/d4f8824c11b1/
Changeset: d4f8824c11b1
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Don't pass API key to datatype display method.
Some datatypes cannot take the keyword parameter and none of them should need it.
Affected #: 1 file
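Before the diff, a minimal standalone sketch of the pattern being applied (function name is hypothetical; the real change is in the API controller below):

def call_display_data(display_data, trans, hda, **kwd):
    # Copy the incoming keyword arguments and drop the reserved API
    # key, since some datatype display_data() implementations cannot
    # accept a 'key' keyword and none of them should need it.
    display_kwd = kwd.copy()
    display_kwd.pop("key", None)
    return display_data(trans, hda, **display_kwd)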
diff -r 5541fd3245c571c1cbe7a51faa734cd6aa052d4e -r d4f8824c11b1cb868f3a0a06c3d533cad123ccee lib/galaxy/webapps/galaxy/api/datasets.py
--- a/lib/galaxy/webapps/galaxy/api/datasets.py
+++ b/lib/galaxy/webapps/galaxy/api/datasets.py
@@ -269,7 +269,13 @@
hda = self.get_history_dataset_association( trans, history, history_content_id,
check_ownership=True, check_accessible=True )
- rval = hda.datatype.display_data( trans, hda, preview, filename, to_ext, chunk, **kwd )
+ display_kwd = kwd.copy()
+ try:
+ del display_kwd["key"]
+ except KeyError:
+ pass
+ rval = hda.datatype.display_data( trans, hda, preview, filename, to_ext, chunk, **display_kwd )
+
except Exception, exception:
log.error( "Error getting display data for dataset (%s) from history (%s): %s",
https://bitbucket.org/galaxy/galaxy-central/commits/4f7e715db123/
Changeset: 4f7e715db123
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Allow easier use of tool repeat and conditional params via the API.
For now, this still uses the flat parameter format of the web UI. So

    {...
      "a_repeat_0|a_repeat_param": "value"
    }

instead of the more ideal:

    {...
      "a_repeat": [
        { "a_repeat_param": "value" }
      ]
    }

To implement this, there is a new path through the tool state processing code that sets the state based on the supplied inputs, instead of requiring iterative calls (one for each repeat addition, for instance) like the UI, or specifying a tool_state externally. To select this path through the code, simply do not include a tool_state parameter when using the tools API.
Affected #: 2 files
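For illustration, a sketch of an API call using the flat format (tool id, parameter names, host, and API key are all hypothetical; assumes the requests library):

import requests

# No "tool_state" key is included, so the server takes the new
# "populate" path and builds the complete tool state from these
# flat, web-UI-style parameter names in a single call.
payload = {
    "key": "YOUR_API_KEY",                # hypothetical API key
    "tool_id": "my_tool",                 # hypothetical tool id
    "history_id": "ENCODED_HISTORY_ID",   # hypothetical encoded id
    "inputs": {
        "a_repeat_0|a_repeat_param": "value",
        "a_repeat_1|a_repeat_param": "other_value",
    },
}
response = requests.post("https://galaxy.example.org/api/tools", json=payload)
print(response.json())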
diff -r d4f8824c11b1cb868f3a0a06c3d533cad123ccee -r 4f7e715db123cb3d31258fbe828854c36719af9e lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1901,12 +1901,18 @@
callback( "", input, value[input.name] )
else:
input.visit_inputs( "", value[input.name], callback )
- def handle_input( self, trans, incoming, history=None, old_errors=None ):
+ def handle_input( self, trans, incoming, history=None, old_errors=None, process_state='update' ):
"""
Process incoming parameters for this tool from the dict `incoming`,
update the tool state (or create if none existed), and either return
to the form or execute the tool (only if 'execute' was clicked and
there were no errors).
+
+ process_state can be either 'update' (to incrementally build up the state
+ over several calls - one repeat per handle for instance) or 'populate'
+ force a complete build of the state and submission all at once (like
+ from API). May want an incremental version of the API also at some point,
+ that is why this is not just called for_api.
"""
state, state_new = self.__fetch_state( trans, incoming, history )
if state_new:
@@ -1922,8 +1928,7 @@
self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
return "tool_form.mako", dict( errors={}, tool_state=state, param_values={}, incoming={} )
- errors, params = self.__check_param_values( trans, incoming, state, old_errors )
-
+ errors, params = self.__check_param_values( trans, incoming, state, old_errors, process_state, history=history )
if self.__should_refresh_state( incoming ):
return self.__handle_state_refresh( trans, state, errors )
else:
@@ -2001,7 +2006,7 @@
new = True
return state, new
- def __check_param_values( self, trans, incoming, state, old_errors ):
+ def __check_param_values( self, trans, incoming, state, old_errors, process_state, history ):
# Process incoming data
if not( self.check_values ):
# If `self.check_values` is false we don't do any checking or
@@ -2014,7 +2019,12 @@
else:
# Update state for all inputs on the current page taking new
# values from `incoming`.
- errors = self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
+ if process_state == "update":
+ errors = self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
+ elif process_state == "populate":
+ errors = self.populate_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, history )
+ else:
+ raise Exception("Unknown process_state type %s" % process_state)
# If the tool provides a `validate_input` hook, call it.
validate_input = self.get_hook( 'validate_input' )
if validate_input:
@@ -2059,6 +2069,137 @@
return 'message.mako', dict( status='error',
message='Your upload was interrupted. If this was uninentional, please retry it.',
refresh_frames=[], cont=None )
+
+ def populate_state( self, trans, inputs, state, incoming, history, prefix="", context=None ):
+ errors = dict()
+ # Push this level onto the context stack
+ context = ExpressionContext( state, context )
+ for input in inputs.itervalues():
+ key = prefix + input.name
+ if isinstance( input, Repeat ):
+ group_state = state[input.name]
+ # Create list of empty errors for each previously existing state
+ group_errors = [ ]
+ any_group_errors = False
+ rep_index = 0
+ while True:
+ rep_name = "%s_%d" % ( key, rep_index )
+ if not any( [ key.startswith(rep_name) for key in incoming.keys() ] ):
+ break
+ if rep_index < input.max:
+ new_state = {}
+ new_state['__index__'] = rep_index
+ self.fill_in_new_state( trans, input.inputs, new_state, context, history=history )
+ group_state.append( new_state )
+ group_errors.append( {} )
+ rep_errors = self.populate_state( trans,
+ input.inputs,
+ new_state,
+ incoming,
+ history,
+ prefix=rep_name + "|",
+ context=context )
+ if rep_errors:
+ any_group_errors = True
+ group_errors[rep_index].update( rep_errors )
+
+ else:
+ group_errors[-1] = { '__index__': 'Cannot add repeat (max size=%i).' % input.max }
+ any_group_errors = True
+ rep_index += 1
+ elif isinstance( input, Conditional ):
+ group_state = state[input.name]
+ group_prefix = "%s|" % ( key )
+ # Deal with the 'test' element and see if it's value changed
+ if input.value_ref and not input.value_ref_in_group:
+ # We are referencing an existent parameter, which is not
+ # part of this group
+ test_param_key = prefix + input.test_param.name
+ else:
+ test_param_key = group_prefix + input.test_param.name
+ test_param_error = None
+ test_incoming = get_incoming_value( incoming, test_param_key, None )
+
+ # Get value of test param and determine current case
+ value, test_param_error = \
+ check_param( trans, input.test_param, test_incoming, context )
+ current_case = input.get_current_case( value, trans )
+ # Current case has changed, throw away old state
+ group_state = state[input.name] = {}
+ # TODO: we should try to preserve values if we can
+ self.fill_in_new_state( trans, input.cases[current_case].inputs, group_state, context, history=history )
+ group_errors = self.populate_state( trans,
+ input.cases[current_case].inputs,
+ group_state,
+ incoming,
+ history,
+ prefix=group_prefix,
+ context=context,
+ )
+ if test_param_error:
+ group_errors[ input.test_param.name ] = test_param_error
+ if group_errors:
+ errors[ input.name ] = group_errors
+ # Store the current case in a special value
+ group_state['__current_case__'] = current_case
+ # Store the value of the test element
+ group_state[ input.test_param.name ] = value
+ elif isinstance( input, UploadDataset ):
+ group_state = state[input.name]
+ group_errors = []
+ any_group_errors = False
+ d_type = input.get_datatype( trans, context )
+ writable_files = d_type.writable_files
+ #remove extra files
+ while len( group_state ) > len( writable_files ):
+ del group_state[-1]
+ # Update state
+ max_index = -1
+ for i, rep_state in enumerate( group_state ):
+ rep_index = rep_state['__index__']
+ max_index = max( max_index, rep_index )
+ rep_prefix = "%s_%d|" % ( key, rep_index )
+ rep_errors = self.populate_state( trans,
+ input.inputs,
+ rep_state,
+ incoming,
+ history,
+ prefix=rep_prefix,
+ context=context)
+ if rep_errors:
+ any_group_errors = True
+ group_errors.append( rep_errors )
+ else:
+ group_errors.append( {} )
+ # Add new fileupload as needed
+ offset = 1
+ while len( writable_files ) > len( group_state ):
+ new_state = {}
+ new_state['__index__'] = max_index + offset
+ offset += 1
+ self.fill_in_new_state( trans, input.inputs, new_state, context )
+ group_state.append( new_state )
+ if any_group_errors:
+ group_errors.append( {} )
+ # Were there *any* errors for any repetition?
+ if any_group_errors:
+ errors[input.name] = group_errors
+ else:
+ if key not in incoming \
+ and "__force_update__" + key not in incoming:
+ # No new value provided, and we are only updating, so keep
+ # the old value (which should already be in the state) and
+ # preserve the old error message.
+ pass
+ else:
+ incoming_value = get_incoming_value( incoming, key, None )
+ value, error = check_param( trans, input, incoming_value, context )
+ # If a callback was provided, allow it to process the value
+ if error:
+ errors[ input.name ] = error
+ state[ input.name ] = value
+ return errors
+
def update_state( self, trans, inputs, state, incoming, prefix="", context=None,
update_only=False, old_errors={}, item_callback=None ):
"""
diff -r d4f8824c11b1cb868f3a0a06c3d533cad123ccee -r 4f7e715db123cb3d31258fbe828854c36719af9e lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -9,6 +9,9 @@
import logging
log = logging.getLogger( __name__ )
+DEFAULT_STATE_PROCESSING = "update" # See comment below.
+
+
class ToolsController( BaseAPIController, UsesVisualizationMixin ):
"""
RESTful controller for interactions with tools.
@@ -113,7 +116,15 @@
# TODO: encode data ids and decode ids.
# TODO: handle dbkeys
params = util.Params( inputs, sanitize = False )
- template, vars = tool.handle_input( trans, params.__dict__, history=target_history )
+ # process_state must be 'populate' or 'update'. If 'populate', fully
+ # expand repeat and conditionals when building up state, if 'update'
+ # state must be built up over several iterative calls to the API -
+ # mimicing behavior of web controller. Mimic the the web controller
+ # and modify state outright if "tool_state" is contain in input params,
+ # else "populate" the tool state from scratch using payload.
+ incoming = params.__dict__
+ process_state = "update" if "tool_state" in incoming else "populate"
+ template, vars = tool.handle_input( trans, incoming, history=target_history, process_state=process_state )
if 'errors' in vars:
trans.response.status = 400
return { "message": { "type": "error", "data" : vars[ 'errors' ] } }
https://bitbucket.org/galaxy/galaxy-central/commits/336525248012/
Changeset: 336525248012
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: When parsing tool inputs - distinguish between those coming from HTML post and those from JSON.
Most of the time they have the same value parsing rules right now, but the parsing rules for booleans would be rather hackish if applied to JSON clients (only a list with two values is considered true). Ultimately this could simplify from_html in DataToolParameter, which has grown unwieldy.
TODO: Refactor the state building logic out into its own class; too many variables are being passed around over and over at this point.
Affected #: 4 files
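As a rough standalone illustration of why the two sources need different boolean rules (a sketch, not the actual Galaxy implementation, which the diff below expresses via CheckboxField.is_checked and string_as_bool):

def boolean_from_html(value):
    # The HTML form encoding posts a checked checkbox as a list with
    # two values (e.g. ['true', 'true']); anything else is unchecked.
    return isinstance(value, list) and len(value) == 2

def boolean_from_json(value):
    # A JSON client just sends true/false or a simple string.
    return str(value).lower() in ("true", "yes", "on", "1")

assert boolean_from_html(["true", "true"])
assert not boolean_from_html("true")
assert boolean_from_json(True) and not boolean_from_json(False)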
diff -r 4f7e715db123cb3d31258fbe828854c36719af9e -r 33652524801287703e2abefecf2c676243c83b25 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1901,7 +1901,7 @@
callback( "", input, value[input.name] )
else:
input.visit_inputs( "", value[input.name], callback )
- def handle_input( self, trans, incoming, history=None, old_errors=None, process_state='update' ):
+ def handle_input( self, trans, incoming, history=None, old_errors=None, process_state='update', source='html' ):
"""
Process incoming parameters for this tool from the dict `incoming`,
update the tool state (or create if none existed), and either return
@@ -1925,10 +1925,10 @@
if not self.display_interface:
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
if len(incoming):
- self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
+ self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {}, source=source )
return "tool_form.mako", dict( errors={}, tool_state=state, param_values={}, incoming={} )
- errors, params = self.__check_param_values( trans, incoming, state, old_errors, process_state, history=history )
+ errors, params = self.__check_param_values( trans, incoming, state, old_errors, process_state, history=history, source=source )
if self.__should_refresh_state( incoming ):
return self.__handle_state_refresh( trans, state, errors )
else:
@@ -2006,7 +2006,7 @@
new = True
return state, new
- def __check_param_values( self, trans, incoming, state, old_errors, process_state, history ):
+ def __check_param_values( self, trans, incoming, state, old_errors, process_state, history, source ):
# Process incoming data
if not( self.check_values ):
# If `self.check_values` is false we don't do any checking or
@@ -2020,9 +2020,9 @@
# Update state for all inputs on the current page taking new
# values from `incoming`.
if process_state == "update":
- errors = self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
+ errors = self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {}, source=source )
elif process_state == "populate":
- errors = self.populate_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, history )
+ errors = self.populate_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, history, source=source )
else:
raise Exception("Unknown process_state type %s" % process_state)
# If the tool provides a `validate_input` hook, call it.
@@ -2070,7 +2070,7 @@
message='Your upload was interrupted. If this was uninentional, please retry it.',
refresh_frames=[], cont=None )
- def populate_state( self, trans, inputs, state, incoming, history, prefix="", context=None ):
+ def populate_state( self, trans, inputs, state, incoming, history, source, prefix="", context=None ):
errors = dict()
# Push this level onto the context stack
context = ExpressionContext( state, context )
@@ -2097,6 +2097,7 @@
new_state,
incoming,
history,
+ source,
prefix=rep_name + "|",
context=context )
if rep_errors:
@@ -2122,7 +2123,7 @@
# Get value of test param and determine current case
value, test_param_error = \
- check_param( trans, input.test_param, test_incoming, context )
+ check_param( trans, input.test_param, test_incoming, context, source=source )
current_case = input.get_current_case( value, trans )
# Current case has changed, throw away old state
group_state = state[input.name] = {}
@@ -2133,6 +2134,7 @@
group_state,
incoming,
history,
+ source,
prefix=group_prefix,
context=context,
)
@@ -2164,6 +2166,7 @@
rep_state,
incoming,
history,
+ source,
prefix=rep_prefix,
context=context)
if rep_errors:
@@ -2193,14 +2196,14 @@
pass
else:
incoming_value = get_incoming_value( incoming, key, None )
- value, error = check_param( trans, input, incoming_value, context )
+ value, error = check_param( trans, input, incoming_value, context, source=source )
# If a callback was provided, allow it to process the value
if error:
errors[ input.name ] = error
state[ input.name ] = value
return errors
- def update_state( self, trans, inputs, state, incoming, prefix="", context=None,
+ def update_state( self, trans, inputs, state, incoming, source='html', prefix="", context=None,
update_only=False, old_errors={}, item_callback=None ):
"""
Update the tool state in `state` using the user input in `incoming`.
@@ -2258,6 +2261,7 @@
input.inputs,
rep_state,
incoming,
+ source=source,
prefix=rep_prefix,
context=context,
update_only=update_only,
@@ -2306,7 +2310,7 @@
else:
# Get value of test param and determine current case
value, test_param_error = \
- check_param( trans, input.test_param, test_incoming, context )
+ check_param( trans, input.test_param, test_incoming, context, source=source )
current_case = input.get_current_case( value, trans )
if current_case != old_current_case:
# Current case has changed, throw away old state
@@ -2323,6 +2327,7 @@
incoming,
prefix=group_prefix,
context=context,
+ source=source,
update_only=update_only,
old_errors=group_old_errors,
item_callback=item_callback )
@@ -2364,6 +2369,7 @@
incoming,
prefix=rep_prefix,
context=context,
+ source=source,
update_only=update_only,
old_errors=rep_old_errors,
item_callback=item_callback )
@@ -2396,7 +2402,7 @@
errors[ input.name ] = old_errors[ input.name ]
else:
incoming_value = get_incoming_value( incoming, key, None )
- value, error = check_param( trans, input, incoming_value, context )
+ value, error = check_param( trans, input, incoming_value, context, source=source )
# If a callback was provided, allow it to process the value
if item_callback:
old_value = state.get( input.name, None )
diff -r 4f7e715db123cb3d31258fbe828854c36719af9e -r 33652524801287703e2abefecf2c676243c83b25 lib/galaxy/tools/parameters/__init__.py
--- a/lib/galaxy/tools/parameters/__init__.py
+++ b/lib/galaxy/tools/parameters/__init__.py
@@ -40,7 +40,7 @@
if new_value:
input_values[input.name] = new_value
-def check_param( trans, param, incoming_value, param_values ):
+def check_param( trans, param, incoming_value, param_values, source='html' ):
"""
Check the value of a single parameter `param`. The value in
`incoming_value` is converted from its HTML encoding and validated.
@@ -53,7 +53,10 @@
try:
if value is not None or isinstance(param, DataToolParameter):
# Convert value from HTML representation
- value = param.from_html( value, trans, param_values )
+ if source == 'html':
+ value = param.from_html( value, trans, param_values )
+ else:
+ value = param.from_json( value, trans, param_values )
# Allow the value to be converted if neccesary
filtered_value = param.filter_value( value, trans, param_values )
# Then do any further validation on the value
diff -r 4f7e715db123cb3d31258fbe828854c36719af9e -r 33652524801287703e2abefecf2c676243c83b25 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -81,6 +81,9 @@
"""
return value
+ def from_json( self, value, trans=None, other_values={} ):
+ return self.from_html( value, trans, other_values )
+
def get_initial_value( self, trans, context, history=None ):
"""
Return the starting value of the parameter
@@ -427,6 +430,9 @@
def from_html( self, value, trans=None, other_values={} ):
return form_builder.CheckboxField.is_checked( value )
+ def from_json( self, value, trans=None, other_values={} ):
+ return string_as_bool( value )
+
def to_html_value( self, value, app ):
if value:
return [ 'true', 'true' ]
diff -r 4f7e715db123cb3d31258fbe828854c36719af9e -r 33652524801287703e2abefecf2c676243c83b25 lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -124,7 +124,7 @@
# else "populate" the tool state from scratch using payload.
incoming = params.__dict__
process_state = "update" if "tool_state" in incoming else "populate"
- template, vars = tool.handle_input( trans, incoming, history=target_history, process_state=process_state )
+ template, vars = tool.handle_input( trans, incoming, history=target_history, process_state=process_state, source="json" )
if 'errors' in vars:
trans.response.status = 400
return { "message": { "type": "error", "data" : vars[ 'errors' ] } }
https://bitbucket.org/galaxy/galaxy-central/commits/9e503de59f9b/
Changeset: 9e503de59f9b
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Allow raw display of data via API.
Matching functionality available with sessions at /display in the root controller. The test framework uses this functionality in the root controller to read test result data, so an API-driven approach to testing will need similar functionality. I am not sure I am entirely comfortable with serving raw data this way, but I guess it is no worse than the existing /display route. This is marked as beta and there is a note that it may be dropped at some point in the future.
Affected #: 1 file
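A minimal sketch of a client using the new query parameter (IDs and host are hypothetical; assumes the requests library):

import requests

url = ("https://galaxy.example.org/api/histories/ENCODED_HISTORY_ID"
       "/contents/ENCODED_DATASET_ID/display")
# raw=true returns the file contents without datatype processing;
# omitting it (or raw=false) keeps the default display behavior.
resp = requests.get(url, params={"raw": "true", "key": "YOUR_API_KEY"})
print(resp.text)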
diff -r 33652524801287703e2abefecf2c676243c83b25 -r 9e503de59f9b939e81cfa26645d7595d3c8a5df8 lib/galaxy/webapps/galaxy/api/datasets.py
--- a/lib/galaxy/webapps/galaxy/api/datasets.py
+++ b/lib/galaxy/webapps/galaxy/api/datasets.py
@@ -7,7 +7,7 @@
from galaxy.web.base.controller import UsesHistoryMixin
from galaxy.web.framework.helpers import is_true
from galaxy.datatypes import dataproviders
-
+from galaxy.util import string_as_bool_or_none
import logging
log = logging.getLogger( __name__ )
@@ -245,11 +245,16 @@
@web.expose_api_raw_anonymous
def display( self, trans, history_content_id, history_id,
- preview=False, filename=None, to_ext=None, chunk=None, **kwd ):
+ preview=False, filename=None, to_ext=None, chunk=None, raw=False, **kwd ):
"""
GET /api/histories/{encoded_history_id}/contents/{encoded_content_id}/display
Displays history content (dataset).
+
+ The query parameter 'raw' should be considered experimental and may be dropped at
+ some point in the future without warning. Generally, data should be processed by its
+ datatype prior to display (the default if raw is unspecified or explicitly false).
"""
+ raw = string_as_bool_or_none( raw )
# Huge amount of code overlap with lib/galaxy/webapps/galaxy/api/history_content:show here.
rval = ''
try:
@@ -274,8 +279,10 @@
del display_kwd["key"]
except KeyError:
pass
- rval = hda.datatype.display_data( trans, hda, preview, filename, to_ext, chunk, **display_kwd )
-
+ if raw:
+ rval = open( hda.file_name )
+ else:
+ rval = hda.datatype.display_data( trans, hda, preview, filename, to_ext, chunk, **display_kwd )
except Exception, exception:
log.error( "Error getting display data for dataset (%s) from history (%s): %s",
https://bitbucket.org/galaxy/galaxy-central/commits/468fe654a360/
Changeset: 468fe654a360
User: jmchilton
Date: 2013-11-11 08:45:32
Summary: Fix specifying dbkeys with API uploads.
Essentially a redo of changeset 83103cff8757. In order to fix uploads when no history was specified, the only available dbkey allowed was '?'. As a result of that changeset, users uploading files via the API could only specify '?'. Now the stock db builds are also available - though builds in the history are still not available (TODO) and user custom builds may not be available (TODO: check this).
Affected #: 2 files
diff -r 9e503de59f9b939e81cfa26645d7595d3c8a5df8 -r 468fe654a360f41e5f20f40d2b1461a074076969 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1023,16 +1023,13 @@
self.static_options = [ ( value, key, False ) for key, value in util.dbnames ]
def get_options( self, trans, other_values ):
- if not trans.history:
- yield 'unspecified', '?', False
- else:
+ last_used_build = object()
+ if trans.history:
last_used_build = trans.history.genome_build
- for dbkey, build_name in trans.db_builds:
- yield build_name, dbkey, ( dbkey == last_used_build )
+ for dbkey, build_name in trans.db_builds:
+ yield build_name, dbkey, ( dbkey == last_used_build )
def get_legal_values( self, trans, other_values ):
- if not trans.history:
- return set( '?' )
return set( dbkey for dbkey, _ in trans.db_builds )
def to_dict( self, trans, view='collection', value_mapper=None ):
diff -r 9e503de59f9b939e81cfa26645d7595d3c8a5df8 -r 468fe654a360f41e5f20f40d2b1461a074076969 lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py
+++ b/lib/galaxy/web/framework/__init__.py
@@ -1070,10 +1070,11 @@
the user (chromInfo in history).
"""
dbnames = list()
- datasets = self.sa_session.query( self.app.model.HistoryDatasetAssociation ) \
- .filter_by( deleted=False, history_id=self.history.id, extension="len" )
- for dataset in datasets:
- dbnames.append( (dataset.dbkey, dataset.name) )
+ if self.history:
+ datasets = self.sa_session.query( self.app.model.HistoryDatasetAssociation ) \
+ .filter_by( deleted=False, history_id=self.history.id, extension="len" )
+ for dataset in datasets:
+ dbnames.append( (dataset.dbkey, dataset.name) )
user = self.get_user()
if user and 'dbkeys' in user.preferences:
user_keys = from_json_string( user.preferences['dbkeys'] )
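Note the object() sentinel introduced above: with no history there is no last-used build, and a fresh object() can never compare equal to any dbkey string, so every build is yielded unselected instead of collapsing the list to '?'. The pattern in isolation (the build list below is a made-up example):

    def get_options(db_builds, history_build=None):
        # Sentinel that never equals a real dbkey string.
        last_used_build = object()
        if history_build is not None:
            last_used_build = history_build
        for dbkey, build_name in db_builds:
            yield build_name, dbkey, (dbkey == last_used_build)

    builds = [('hg19', 'Human (hg19)'), ('mm9', 'Mouse (mm9)')]
    # Without a history nothing is pre-selected, but all builds remain legal:
    assert all(not selected for _, _, selected in get_options(builds))
    # With a history the matching build is flagged:
    assert [s for _, _, s in get_options(builds, 'mm9')] == [False, True]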
https://bitbucket.org/galaxy/galaxy-central/commits/2769dec77197/
Changeset: 2769dec77197
User: jmchilton
Date: 2013-11-14 19:32:24
Summary: Clean up 4f7e715 slightly.
Affected #: 1 file
diff -r 468fe654a360f41e5f20f40d2b1461a074076969 -r 2769dec771976d29ac5904bff55fa448672d8bd6 lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -9,8 +9,6 @@
import logging
log = logging.getLogger( __name__ )
-DEFAULT_STATE_PROCESSING = "update" # See comment below.
-
class ToolsController( BaseAPIController, UsesVisualizationMixin ):
"""
@@ -116,12 +114,16 @@
# TODO: encode data ids and decode ids.
# TODO: handle dbkeys
params = util.Params( inputs, sanitize = False )
- # process_state must be 'populate' or 'update'. If 'populate', fully
- # expand repeat and conditionals when building up state, if 'update'
- # state must be built up over several iterative calls to the API -
- # mimicing behavior of web controller. Mimic the the web controller
- # and modify state outright if "tool_state" is contain in input params,
- # else "populate" the tool state from scratch using payload.
+ # process_state will be 'populate' or 'update'. When no tool
+ # state is specified in input - it will be 'populate', and
+ # tool will fully expand repeat and conditionals when building
+ # up state. If tool state is found in input
+ # parameters, process_state will be 'update' and complex
+ # submissions (with repeats and conditionals) must be built up
+ # over several iterative calls to the API - mimicking behavior
+ # of web controller (though frankly API never returns
+ # tool_state so this "legacy" behavior is probably impossible
+ # through API currently).
incoming = params.__dict__
process_state = "update" if "tool_state" in incoming else "populate"
template, vars = tool.handle_input( trans, incoming, history=target_history, process_state=process_state, source="json" )
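In practice this means a one-shot API payload takes the 'populate' path, while only a payload that round-trips the (never actually returned) tool_state would take 'update'. The selection reduces to:

    incoming = {'input1': 'hg19', 'queries_0|input2': 'mm9'}  # hypothetical payload
    process_state = 'update' if 'tool_state' in incoming else 'populate'
    assert process_state == 'populate'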
https://bitbucket.org/galaxy/galaxy-central/commits/3402a11b0088/
Changeset: 3402a11b0088
User: jmchilton
Date: 2013-11-14 19:34:05
Summary: Merged in jmchilton/galaxy-central-fork-1 (pull request #256)
API Enhancements Required for API Driven Tool Testing
Affected #: 9 files
diff -r ac744f96faa5d4a61c73ca2e20655f818af3dddd -r 3402a11b008865ee37ee29c9d7d3d5ea27c6fe84 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1901,20 +1901,21 @@
callback( "", input, value[input.name] )
else:
input.visit_inputs( "", value[input.name], callback )
- def handle_input( self, trans, incoming, history=None, old_errors=None ):
+ def handle_input( self, trans, incoming, history=None, old_errors=None, process_state='update', source='html' ):
"""
Process incoming parameters for this tool from the dict `incoming`,
update the tool state (or create if none existed), and either return
to the form or execute the tool (only if 'execute' was clicked and
there were no errors).
+
+ process_state can be either 'update' (to incrementally build up the state
+ over several calls - one repeat per handle for instance) or 'populate' to
+ force a complete build of the state and submission all at once (like
+ from the API). We may want an incremental version of the API at some
+ point; that is why this is not just called for_api.
"""
- # Get the state or create if not found
- if "tool_state" in incoming:
- encoded_state = string_to_object( incoming["tool_state"] )
- state = DefaultToolState()
- state.decode( encoded_state, self, trans.app )
- else:
- state = self.new_state( trans, history=history )
+ state, state_new = self.__fetch_state( trans, incoming, history )
+ if state_new:
# This feels a bit like a hack. It allows forcing full processing
# of inputs even when there is no state in the incoming dictionary
# by providing either 'runtool_btn' (the name of the submit button
@@ -1924,8 +1925,88 @@
if not self.display_interface:
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
if len(incoming):
- self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
+ self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {}, source=source )
return "tool_form.mako", dict( errors={}, tool_state=state, param_values={}, incoming={} )
+
+ errors, params = self.__check_param_values( trans, incoming, state, old_errors, process_state, history=history, source=source )
+ if self.__should_refresh_state( incoming ):
+ return self.__handle_state_refresh( trans, state, errors )
+ else:
+ # User actually clicked next or execute.
+
+ # If there were errors, we stay on the same page and display
+ # error messages
+ if errors:
+ error_message = "One or more errors were found in the input you provided. The specific errors are marked below."
+ return "tool_form.mako", dict( errors=errors, tool_state=state, incoming=incoming, error_message=error_message )
+ # If we've completed the last page we can execute the tool
+ elif state.page == self.last_page:
+ return self.__handle_tool_execute( trans, incoming, params, history )
+ # Otherwise move on to the next page
+ else:
+ return self.__handle_page_advance( trans, state, errors )
+
+ def __should_refresh_state( self, incoming ):
+ return not( 'runtool_btn' in incoming or 'URL' in incoming or 'ajax_upload' in incoming )
+
+ def __handle_tool_execute( self, trans, incoming, params, history ):
+ try:
+ rerun_remap_job_id = None
+ if 'rerun_remap_job_id' in incoming:
+ rerun_remap_job_id = trans.app.security.decode_id(incoming['rerun_remap_job_id'])
+ _, out_data = self.execute( trans, incoming=params, history=history, rerun_remap_job_id=rerun_remap_job_id )
+ except httpexceptions.HTTPFound, e:
+ #if it's a paste redirect exception, pass it up the stack
+ raise e
+ except Exception, e:
+ log.exception('Exception caught while attempting tool execution:')
+ return 'message.mako', dict( status='error', message='Error executing tool: %s' % str(e), refresh_frames=[] )
+ try:
+ assert isinstance( out_data, odict )
+ return 'tool_executed.mako', dict( out_data=out_data )
+ except:
+ if isinstance( out_data, str ):
+ message = out_data
+ else:
+ message = 'Failure executing tool (odict not returned from tool execution)'
+ return 'message.mako', dict( status='error', message=message, refresh_frames=[] )
+
+ def __handle_state_refresh( self, trans, state, errors ):
+ try:
+ self.find_fieldstorage( state.inputs )
+ except InterruptedUpload:
+ # If inputs contain a file it won't persist. Most likely this
+ # is an interrupted upload. We should probably find a more
+ # standard method of determining an incomplete POST.
+ return self.handle_interrupted( trans, state.inputs )
+ except:
+ pass
+ # Just a refresh, render the form with updated state and errors.
+ if not self.display_interface:
+ return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+ return 'tool_form.mako', dict( errors=errors, tool_state=state )
+
+ def __handle_page_advance( self, trans, state, errors ):
+ state.page += 1
+ # Fill in the default values for the next page
+ self.fill_in_new_state( trans, self.inputs_by_page[ state.page ], state.inputs )
+ if not self.display_interface:
+ return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+ return 'tool_form.mako', dict( errors=errors, tool_state=state )
+
+ def __fetch_state( self, trans, incoming, history ):
+ # Get the state or create if not found
+ if "tool_state" in incoming:
+ encoded_state = string_to_object( incoming["tool_state"] )
+ state = DefaultToolState()
+ state.decode( encoded_state, self, trans.app )
+ new = False
+ else:
+ state = self.new_state( trans, history=history )
+ new = True
+ return state, new
+
+ def __check_param_values( self, trans, incoming, state, old_errors, process_state, history, source ):
# Process incoming data
if not( self.check_values ):
# If `self.check_values` is false we don't do any checking or
@@ -1938,64 +2019,19 @@
else:
# Update state for all inputs on the current page taking new
# values from `incoming`.
- errors = self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
+ if process_state == "update":
+ errors = self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {}, source=source )
+ elif process_state == "populate":
+ errors = self.populate_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, history, source=source )
+ else:
+ raise Exception("Unknown process_state type %s" % process_state)
# If the tool provides a `validate_input` hook, call it.
validate_input = self.get_hook( 'validate_input' )
if validate_input:
validate_input( trans, errors, state.inputs, self.inputs_by_page[state.page] )
params = state.inputs
- # Did the user actually click next / execute or is this just
- # a refresh?
- if 'runtool_btn' in incoming or 'URL' in incoming or 'ajax_upload' in incoming:
- # If there were errors, we stay on the same page and display
- # error messages
- if errors:
- error_message = "One or more errors were found in the input you provided. The specific errors are marked below."
- return "tool_form.mako", dict( errors=errors, tool_state=state, incoming=incoming, error_message=error_message )
- # If we've completed the last page we can execute the tool
- elif state.page == self.last_page:
- try:
- rerun_remap_job_id = None
- if 'rerun_remap_job_id' in incoming:
- rerun_remap_job_id = trans.app.security.decode_id(incoming['rerun_remap_job_id'])
- _, out_data = self.execute( trans, incoming=params, history=history, rerun_remap_job_id=rerun_remap_job_id )
- except httpexceptions.HTTPFound, e:
- #if it's a paste redirect exception, pass it up the stack
- raise e
- except Exception, e:
- log.exception('Exception caught while attempting tool execution:')
- return 'message.mako', dict( status='error', message='Error executing tool: %s' % str(e), refresh_frames=[] )
- try:
- assert isinstance( out_data, odict )
- return 'tool_executed.mako', dict( out_data=out_data )
- except:
- if isinstance( out_data, str ):
- message = out_data
- else:
- message = 'Failure executing tool (odict not returned from tool execution)'
- return 'message.mako', dict( status='error', message=message, refresh_frames=[] )
- # Otherwise move on to the next page
- else:
- state.page += 1
- # Fill in the default values for the next page
- self.fill_in_new_state( trans, self.inputs_by_page[ state.page ], state.inputs )
- if not self.display_interface:
- return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
- return 'tool_form.mako', dict( errors=errors, tool_state=state )
- else:
- try:
- self.find_fieldstorage( state.inputs )
- except InterruptedUpload:
- # If inputs contain a file it won't persist. Most likely this
- # is an interrupted upload. We should probably find a more
- # standard method of determining an incomplete POST.
- return self.handle_interrupted( trans, state.inputs )
- except:
- pass
- # Just a refresh, render the form with updated state and errors.
- if not self.display_interface:
- return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
- return 'tool_form.mako', dict( errors=errors, tool_state=state )
+ return errors, params
+
def find_fieldstorage( self, x ):
if isinstance( x, FieldStorage ):
raise InterruptedUpload( None )
@@ -2033,7 +2069,141 @@
return 'message.mako', dict( status='error',
message='Your upload was interrupted. If this was unintentional, please retry it.',
refresh_frames=[], cont=None )
- def update_state( self, trans, inputs, state, incoming, prefix="", context=None,
+
+ def populate_state( self, trans, inputs, state, incoming, history, source, prefix="", context=None ):
+ errors = dict()
+ # Push this level onto the context stack
+ context = ExpressionContext( state, context )
+ for input in inputs.itervalues():
+ key = prefix + input.name
+ if isinstance( input, Repeat ):
+ group_state = state[input.name]
+ # Create list of empty errors for each previously existing state
+ group_errors = [ ]
+ any_group_errors = False
+ rep_index = 0
+ while True:
+ rep_name = "%s_%d" % ( key, rep_index )
+ if not any( [ key.startswith(rep_name) for key in incoming.keys() ] ):
+ break
+ if rep_index < input.max:
+ new_state = {}
+ new_state['__index__'] = rep_index
+ self.fill_in_new_state( trans, input.inputs, new_state, context, history=history )
+ group_state.append( new_state )
+ group_errors.append( {} )
+ rep_errors = self.populate_state( trans,
+ input.inputs,
+ new_state,
+ incoming,
+ history,
+ source,
+ prefix=rep_name + "|",
+ context=context )
+ if rep_errors:
+ any_group_errors = True
+ group_errors[rep_index].update( rep_errors )
+
+ else:
+ group_errors[-1] = { '__index__': 'Cannot add repeat (max size=%i).' % input.max }
+ any_group_errors = True
+ rep_index += 1
+ elif isinstance( input, Conditional ):
+ group_state = state[input.name]
+ group_prefix = "%s|" % ( key )
+ # Deal with the 'test' element and see if it's value changed
+ if input.value_ref and not input.value_ref_in_group:
+ # We are referencing an existent parameter, which is not
+ # part of this group
+ test_param_key = prefix + input.test_param.name
+ else:
+ test_param_key = group_prefix + input.test_param.name
+ test_param_error = None
+ test_incoming = get_incoming_value( incoming, test_param_key, None )
+
+ # Get value of test param and determine current case
+ value, test_param_error = \
+ check_param( trans, input.test_param, test_incoming, context, source=source )
+ current_case = input.get_current_case( value, trans )
+ # Current case has changed, throw away old state
+ group_state = state[input.name] = {}
+ # TODO: we should try to preserve values if we can
+ self.fill_in_new_state( trans, input.cases[current_case].inputs, group_state, context, history=history )
+ group_errors = self.populate_state( trans,
+ input.cases[current_case].inputs,
+ group_state,
+ incoming,
+ history,
+ source,
+ prefix=group_prefix,
+ context=context,
+ )
+ if test_param_error:
+ group_errors[ input.test_param.name ] = test_param_error
+ if group_errors:
+ errors[ input.name ] = group_errors
+ # Store the current case in a special value
+ group_state['__current_case__'] = current_case
+ # Store the value of the test element
+ group_state[ input.test_param.name ] = value
+ elif isinstance( input, UploadDataset ):
+ group_state = state[input.name]
+ group_errors = []
+ any_group_errors = False
+ d_type = input.get_datatype( trans, context )
+ writable_files = d_type.writable_files
+ #remove extra files
+ while len( group_state ) > len( writable_files ):
+ del group_state[-1]
+ # Update state
+ max_index = -1
+ for i, rep_state in enumerate( group_state ):
+ rep_index = rep_state['__index__']
+ max_index = max( max_index, rep_index )
+ rep_prefix = "%s_%d|" % ( key, rep_index )
+ rep_errors = self.populate_state( trans,
+ input.inputs,
+ rep_state,
+ incoming,
+ history,
+ source,
+ prefix=rep_prefix,
+ context=context)
+ if rep_errors:
+ any_group_errors = True
+ group_errors.append( rep_errors )
+ else:
+ group_errors.append( {} )
+ # Add new fileupload as needed
+ offset = 1
+ while len( writable_files ) > len( group_state ):
+ new_state = {}
+ new_state['__index__'] = max_index + offset
+ offset += 1
+ self.fill_in_new_state( trans, input.inputs, new_state, context )
+ group_state.append( new_state )
+ if any_group_errors:
+ group_errors.append( {} )
+ # Were there *any* errors for any repetition?
+ if any_group_errors:
+ errors[input.name] = group_errors
+ else:
+ if key not in incoming \
+ and "__force_update__" + key not in incoming:
+ # No new value provided, and we are only updating, so keep
+ # the old value (which should already be in the state) and
+ # preserve the old error message.
+ pass
+ else:
+ incoming_value = get_incoming_value( incoming, key, None )
+ value, error = check_param( trans, input, incoming_value, context, source=source )
+ # If a callback was provided, allow it to process the value
+ if error:
+ errors[ input.name ] = error
+ state[ input.name ] = value
+ return errors
+
+ def update_state( self, trans, inputs, state, incoming, source='html', prefix="", context=None,
update_only=False, old_errors={}, item_callback=None ):
"""
Update the tool state in `state` using the user input in `incoming`.
@@ -2091,6 +2261,7 @@
input.inputs,
rep_state,
incoming,
+ source=source,
prefix=rep_prefix,
context=context,
update_only=update_only,
@@ -2139,7 +2310,7 @@
else:
# Get value of test param and determine current case
value, test_param_error = \
- check_param( trans, input.test_param, test_incoming, context )
+ check_param( trans, input.test_param, test_incoming, context, source=source )
current_case = input.get_current_case( value, trans )
if current_case != old_current_case:
# Current case has changed, throw away old state
@@ -2156,6 +2327,7 @@
incoming,
prefix=group_prefix,
context=context,
+ source=source,
update_only=update_only,
old_errors=group_old_errors,
item_callback=item_callback )
@@ -2197,6 +2369,7 @@
incoming,
prefix=rep_prefix,
context=context,
+ source=source,
update_only=update_only,
old_errors=rep_old_errors,
item_callback=item_callback )
@@ -2229,7 +2402,7 @@
errors[ input.name ] = old_errors[ input.name ]
else:
incoming_value = get_incoming_value( incoming, key, None )
- value, error = check_param( trans, input, incoming_value, context )
+ value, error = check_param( trans, input, incoming_value, context, source=source )
# If a callback was provided, allow it to process the value
if item_callback:
old_value = state.get( input.name, None )
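The populate_state loop above discovers repeat blocks purely from incoming key prefixes of the form '<prefix><repeat name>_<index>|'. A sketch of that scan with hypothetical parameter names:

    incoming = {
        'input1': 'c.bed',
        'queries_0|input2': 'a.bed',  # first repeat element
        'queries_1|input2': 'b.bed',  # second repeat element
    }
    rep_index = 0
    while True:
        rep_name = 'queries_%d' % rep_index
        if not any(key.startswith(rep_name) for key in incoming.keys()):
            break
        rep_index += 1
    assert rep_index == 2  # two repeat elements discovered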
diff -r ac744f96faa5d4a61c73ca2e20655f818af3dddd -r 3402a11b008865ee37ee29c9d7d3d5ea27c6fe84 lib/galaxy/tools/actions/upload.py
--- a/lib/galaxy/tools/actions/upload.py
+++ b/lib/galaxy/tools/actions/upload.py
@@ -4,8 +4,10 @@
import logging
log = logging.getLogger( __name__ )
+
class UploadToolAction( ToolAction ):
- def execute( self, tool, trans, incoming={}, set_output_hid = True, history=None, **kwargs ):
+
+ def execute( self, tool, trans, incoming={}, set_output_hid=True, history=None, **kwargs ):
dataset_upload_inputs = []
for input_name, input in tool.inputs.iteritems():
if input.type == "upload_dataset":
diff -r ac744f96faa5d4a61c73ca2e20655f818af3dddd -r 3402a11b008865ee37ee29c9d7d3d5ea27c6fe84 lib/galaxy/tools/actions/upload_common.py
--- a/lib/galaxy/tools/actions/upload_common.py
+++ b/lib/galaxy/tools/actions/upload_common.py
@@ -1,4 +1,8 @@
-import os, tempfile, StringIO, pwd, subprocess
+import os
+import tempfile
+import StringIO
+import pwd
+import subprocess
from cgi import FieldStorage
from galaxy import datatypes, util
from galaxy.util.odict import odict
@@ -10,13 +14,13 @@
import logging
log = logging.getLogger( __name__ )
+
def persist_uploads( params ):
"""
Turn any uploads in the submitted form to persisted files.
"""
if 'files' in params:
new_files = []
- temp_files = []
for upload_dataset in params['files']:
f = upload_dataset['file_data']
if isinstance( f, FieldStorage ):
@@ -24,8 +28,8 @@
assert f.file.name != '<fdopen>'
local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
f.file.close()
- upload_dataset['file_data'] = dict( filename = f.filename,
- local_filename = local_filename )
+ upload_dataset['file_data'] = dict( filename=f.filename,
+ local_filename=local_filename )
elif type( f ) == dict and 'filename' and 'local_filename' not in f:
raise Exception( 'Uploaded file was encoded in a way not understood by Galaxy.' )
if upload_dataset['url_paste'] and upload_dataset['url_paste'].strip() != '':
@@ -35,6 +39,8 @@
new_files.append( upload_dataset )
params['files'] = new_files
return params
+
+
def handle_library_params( trans, params, folder_id, replace_dataset=None ):
# FIXME: the received params has already been parsed by util.Params() by the time it reaches here,
# so no complex objects remain. This is not good because it does not allow for those objects to be
@@ -62,6 +68,8 @@
role = trans.sa_session.query( trans.app.model.Role ).get( role_id )
library_bunch.roles.append( role )
return library_bunch
+
+
def get_precreated_datasets( trans, params, data_obj, controller='root' ):
"""
Get any precreated datasets (when using asynchronous uploads).
@@ -90,6 +98,8 @@
else:
rval.append( data )
return rval
+
+
def get_precreated_dataset( precreated_datasets, name ):
"""
Return a dataset matching a name from the list of precreated (via async
@@ -101,21 +111,24 @@
return precreated_datasets.pop( names.index( name ) )
else:
return None
+
+
def cleanup_unused_precreated_datasets( precreated_datasets ):
for data in precreated_datasets:
log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( data.id ) )
data.state = data.states.ERROR
data.info = 'No file contents were available.'
+
def __new_history_upload( trans, uploaded_dataset, history=None, state=None ):
if not history:
history = trans.history
- hda = trans.app.model.HistoryDatasetAssociation( name = uploaded_dataset.name,
- extension = uploaded_dataset.file_type,
- dbkey = uploaded_dataset.dbkey,
- history = history,
- create_dataset = True,
- sa_session = trans.sa_session )
+ hda = trans.app.model.HistoryDatasetAssociation( name=uploaded_dataset.name,
+ extension=uploaded_dataset.file_type,
+ dbkey=uploaded_dataset.dbkey,
+ history=history,
+ create_dataset=True,
+ sa_session=trans.sa_session )
if state:
hda.state = state
else:
@@ -128,6 +141,7 @@
trans.sa_session.flush()
return hda
+
def __new_library_upload( trans, cntrller, uploaded_dataset, library_bunch, state=None ):
current_user_roles = trans.get_current_user_roles()
if not ( ( trans.user_is_admin() and cntrller in [ 'library_admin', 'api' ] ) or trans.app.security_agent.can_add_library_item( current_user_roles, library_bunch.folder ) ):
@@ -156,13 +170,13 @@
trans.sa_session.add( ld )
trans.sa_session.flush()
trans.app.security_agent.copy_library_permissions( trans, folder, ld )
- ldda = trans.app.model.LibraryDatasetDatasetAssociation( name = uploaded_dataset.name,
- extension = uploaded_dataset.file_type,
- dbkey = uploaded_dataset.dbkey,
- library_dataset = ld,
- user = trans.user,
- create_dataset = True,
- sa_session = trans.sa_session )
+ ldda = trans.app.model.LibraryDatasetDatasetAssociation( name=uploaded_dataset.name,
+ extension=uploaded_dataset.file_type,
+ dbkey=uploaded_dataset.dbkey,
+ library_dataset=ld,
+ user=trans.user,
+ create_dataset=True,
+ sa_session=trans.sa_session )
trans.sa_session.add( ldda )
if state:
ldda.state = state
@@ -210,12 +224,14 @@
trans.sa_session.flush()
return ldda
+
def new_upload( trans, cntrller, uploaded_dataset, library_bunch=None, history=None, state=None ):
if library_bunch:
return __new_library_upload( trans, cntrller, uploaded_dataset, library_bunch, state )
else:
return __new_history_upload( trans, uploaded_dataset, history=history, state=state )
+
def get_uploaded_datasets( trans, cntrller, params, precreated_datasets, dataset_upload_inputs, library_bunch=None, history=None ):
uploaded_datasets = []
for dataset_upload_input in dataset_upload_inputs:
@@ -256,6 +272,8 @@
history.genome_build = uploaded_dataset.dbkey
uploaded_dataset.data = data
return uploaded_datasets
+
+
def create_paramfile( trans, uploaded_datasets ):
"""
Create the upload tool's JSON "param" file.
@@ -284,14 +302,14 @@
setattr( data.metadata, meta_name, meta_value )
trans.sa_session.add( data )
trans.sa_session.flush()
- json = dict( file_type = uploaded_dataset.file_type,
- dataset_id = data.dataset.id,
- dbkey = uploaded_dataset.dbkey,
- type = uploaded_dataset.type,
- metadata = uploaded_dataset.metadata,
- primary_file = uploaded_dataset.primary_file,
- composite_file_paths = uploaded_dataset.composite_files,
- composite_files = dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
+ json = dict( file_type=uploaded_dataset.file_type,
+ dataset_id=data.dataset.id,
+ dbkey=uploaded_dataset.dbkey,
+ type=uploaded_dataset.type,
+ metadata=uploaded_dataset.metadata,
+ primary_file=uploaded_dataset.primary_file,
+ composite_file_paths=uploaded_dataset.composite_files,
+ composite_files=dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
else:
try:
is_binary = uploaded_dataset.datatype.is_binary
@@ -305,18 +323,18 @@
uuid_str = uploaded_dataset.uuid
except:
uuid_str = None
- json = dict( file_type = uploaded_dataset.file_type,
- ext = uploaded_dataset.ext,
- name = uploaded_dataset.name,
- dataset_id = data.dataset.id,
- dbkey = uploaded_dataset.dbkey,
- type = uploaded_dataset.type,
- is_binary = is_binary,
- link_data_only = link_data_only,
- uuid = uuid_str,
- space_to_tab = uploaded_dataset.space_to_tab,
- in_place = trans.app.config.external_chown_script is None,
- path = uploaded_dataset.path )
+ json = dict( file_type=uploaded_dataset.file_type,
+ ext=uploaded_dataset.ext,
+ name=uploaded_dataset.name,
+ dataset_id=data.dataset.id,
+ dbkey=uploaded_dataset.dbkey,
+ type=uploaded_dataset.type,
+ is_binary=is_binary,
+ link_data_only=link_data_only,
+ uuid=uuid_str,
+ space_to_tab=uploaded_dataset.space_to_tab,
+ in_place=trans.app.config.external_chown_script is None,
+ path=uploaded_dataset.path )
# TODO: This will have to change when we start bundling inputs.
# Also, in_place above causes the file to be left behind since the
# user cannot remove it unless the parent directory is writable.
@@ -327,6 +345,8 @@
if trans.app.config.external_chown_script:
_chown( json_file_path )
return json_file_path
+
+
def create_job( trans, params, tool, json_file_path, data_list, folder=None, history=None ):
"""
Create the upload job.
@@ -383,6 +403,8 @@
for i, v in enumerate( data_list ):
output[ 'output%i' % i ] = v
return job, output
+
+
def active_folders( trans, folder ):
# Stolen from galaxy.web.controllers.library_common (importing from which causes a circular issues).
# Much faster way of retrieving all active sub-folders within a given folder than the
diff -r ac744f96faa5d4a61c73ca2e20655f818af3dddd -r 3402a11b008865ee37ee29c9d7d3d5ea27c6fe84 lib/galaxy/tools/parameters/__init__.py
--- a/lib/galaxy/tools/parameters/__init__.py
+++ b/lib/galaxy/tools/parameters/__init__.py
@@ -40,7 +40,7 @@
if new_value:
input_values[input.name] = new_value
-def check_param( trans, param, incoming_value, param_values ):
+def check_param( trans, param, incoming_value, param_values, source='html' ):
"""
Check the value of a single parameter `param`. The value in
`incoming_value` is converted from its HTML encoding and validated.
@@ -53,7 +53,10 @@
try:
if value is not None or isinstance(param, DataToolParameter):
# Convert value from HTML representation
- value = param.from_html( value, trans, param_values )
+ if source == 'html':
+ value = param.from_html( value, trans, param_values )
+ else:
+ value = param.from_json( value, trans, param_values )
# Allow the value to be converted if necessary
filtered_value = param.filter_value( value, trans, param_values )
# Then do any further validation on the value
diff -r ac744f96faa5d4a61c73ca2e20655f818af3dddd -r 3402a11b008865ee37ee29c9d7d3d5ea27c6fe84 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -2,14 +2,20 @@
Basic tool parameters.
"""
-import logging, string, sys, os, os.path, urllib
+import logging
+import string
+import sys
+import os
+import os.path
+import urllib
from elementtree.ElementTree import XML, Element
from galaxy import config, datatypes, util
from galaxy.web import form_builder
from galaxy.util.bunch import Bunch
from galaxy.util import string_as_bool, sanitize_param, unicodify
from sanitize import ToolParameterSanitizer
-import validation, dynamic_options
+import validation
+import dynamic_options
# For BaseURLToolParameter
from galaxy.web import url_for
from galaxy.model.item_attrs import Dictifiable
@@ -53,8 +59,10 @@
def get_label( self ):
"""Return user friendly name for the parameter"""
- if self.label: return self.label
- else: return self.name
+ if self.label:
+ return self.label
+ else:
+ return self.name
def get_html_field( self, trans=None, value=None, other_values={} ):
raise TypeError( "Abstract Method" )
@@ -73,6 +81,9 @@
"""
return value
+ def from_json( self, value, trans=None, other_values={} ):
+ return self.from_html( value, trans, other_values )
+
def get_initial_value( self, trans, context, history=None ):
"""
Return the starting value of the parameter
@@ -87,7 +98,7 @@
if a value has already been chosen from the history. This is to support the capability to
choose each dataset once
"""
- return self.get_initial_value(trans, context, history=history);
+ return self.get_initial_value(trans, context, history=history)
def get_required_enctype( self ):
"""
@@ -166,7 +177,7 @@
return value
def validate( self, value, history=None ):
- if value=="" and self.optional:
+ if value == "" and self.optional:
return
for validator in self.validators:
validator.validate( value, history )
@@ -219,7 +230,8 @@
self.area = string_as_bool( elem.get( 'area', False ) )
def get_html_field( self, trans=None, value=None, other_values={} ):
- if value is None: value = self.value
+ if value is None:
+ value = self.value
if self.area:
return form_builder.TextArea( self.name, self.size, value )
else:
@@ -228,6 +240,7 @@
def get_initial_value( self, trans, context, history=None ):
return self.value
+
class IntegerToolParameter( TextToolParameter ):
"""
Parameter that takes an integer value.
@@ -412,11 +425,14 @@
checked = self.checked
if value is not None:
checked = form_builder.CheckboxField.is_checked( value )
- return form_builder.CheckboxField( self.name, checked, refresh_on_change = self.refresh_on_change )
+ return form_builder.CheckboxField( self.name, checked, refresh_on_change=self.refresh_on_change )
def from_html( self, value, trans=None, other_values={} ):
return form_builder.CheckboxField.is_checked( value )
+ def from_json( self, value, trans=None, other_values={} ):
+ return string_as_bool( value )
+
def to_html_value( self, value, app ):
if value:
return [ 'true', 'true' ]
@@ -461,7 +477,7 @@
self.ajax = string_as_bool( elem.get( 'ajax-upload' ) )
def get_html_field( self, trans=None, value=None, other_values={} ):
- return form_builder.FileField( self.name, ajax = self.ajax, value = value )
+ return form_builder.FileField( self.name, ajax=self.ajax, value=value )
def from_html( self, value, trans=None, other_values={} ):
# Middleware or proxies may encode files in special ways (TODO: this
@@ -476,8 +492,8 @@
assert local_filename.startswith( upload_store ), \
"Filename provided by nginx is not in correct directory"
value = dict(
- filename = value["name"],
- local_filename = local_filename
+ filename=value["name"],
+ local_filename=local_filename
)
return value
@@ -533,7 +549,7 @@
user_ftp_dir = None
else:
user_ftp_dir = trans.user_ftp_dir
- return form_builder.FTPFileField( self.name, user_ftp_dir, trans.app.config.ftp_upload_site, value = value )
+ return form_builder.FTPFileField( self.name, user_ftp_dir, trans.app.config.ftp_upload_site, value=value )
def from_html( self, value, trans=None, other_values={} ):
try:
@@ -754,8 +770,9 @@
else:
return form_builder.TextField( self.name, value=(value or "") )
if value is not None:
- if not isinstance( value, list ): value = [ value ]
- field = form_builder.SelectField( self.name, self.multiple, self.display, self.refresh_on_change, refresh_on_change_values = self.refresh_on_change_values )
+ if not isinstance( value, list ):
+ value = [ value ]
+ field = form_builder.SelectField( self.name, self.multiple, self.display, self.refresh_on_change, refresh_on_change_values=self.refresh_on_change_values )
options = self.get_options( trans, context )
for text, optval, selected in options:
if isinstance( optval, UnvalidatedValue ):
@@ -793,7 +810,7 @@
rval.append( v )
return rval
else:
- value_is_none = ( value == "None" and "None" not in legal_values )
+ value_is_none = ( value == "None" and "None" not in legal_values )
if value_is_none:
if self.multiple:
if self.optional:
@@ -943,7 +960,7 @@
options = []
try:
options = self.get_options( trans, {} )
- except AssertionError, assertion:
+ except AssertionError:
# we dont/cant set other_values (the {} above), so params that require other params to be filled will error:
# required dependency in filter_options
# associated DataToolParam in get_column_list
@@ -1006,16 +1023,13 @@
self.static_options = [ ( value, key, False ) for key, value in util.dbnames ]
def get_options( self, trans, other_values ):
- if not trans.history:
- yield 'unspecified', '?', False
- else:
+ last_used_build = object()
+ if trans.history:
last_used_build = trans.history.genome_build
- for dbkey, build_name in trans.db_builds:
- yield build_name, dbkey, ( dbkey == last_used_build )
+ for dbkey, build_name in trans.db_builds:
+ yield build_name, dbkey, ( dbkey == last_used_build )
def get_legal_values( self, trans, other_values ):
- if not trans.history:
- return set( '?' )
return set( dbkey for dbkey, _ in trans.db_builds )
def to_dict( self, trans, view='collection', value_mapper=None ):
@@ -1348,7 +1362,7 @@
options = []
for filter_key, filter_value in self.filtered.iteritems():
dataset = other_values[filter_key]
- if dataset.__class__.__name__.endswith( "DatasetFilenameWrapper" ): #this is a bad way to check for this, but problems importing class ( due to circular imports? )
+ if dataset.__class__.__name__.endswith( "DatasetFilenameWrapper" ): # this is a bad way to check for this, but problems importing class ( due to circular imports? )
dataset = dataset.dataset
if dataset:
for meta_key, meta_dict in filter_value.iteritems():
@@ -1531,8 +1545,9 @@
TODO: There should be an alternate display that allows single selects to be
displayed as radio buttons and multiple selects as a set of checkboxes
- TODO: The following must be fixed to test correctly for the new security_check tag in the DataToolParameter ( the last test below is broken )
- Nate's next pass at the dataset security stuff will dramatically alter this anyway.
+ TODO: The following must be fixed to test correctly for the new security_check tag in
+ the DataToolParameter ( the last test below is broken ) Nate's next pass at the dataset
+ security stuff will dramatically alter this anyway.
"""
def __init__( self, tool, elem, trans=None):
@@ -1579,8 +1594,8 @@
# Load conversions required for the dataset input
self.conversions = []
for conv_elem in elem.findall( "conversion" ):
- name = conv_elem.get( "name" ) #name for commandline substitution
- conv_extensions = conv_elem.get( "type" ) #target datatype extension
+ name = conv_elem.get( "name" ) # name for commandline substitution
+ conv_extensions = conv_elem.get( "type" ) # target datatype extension
# FIXME: conv_extensions should be able to be an ordered list
assert None not in [ name, type ], 'A name (%s) and type (%s) are required for explicit conversion' % ( name, type )
conv_types = tool.app.datatypes_registry.get_datatype_by_extension( conv_extensions.lower() )
@@ -1592,14 +1607,15 @@
try:
filter_value = self.options.get_options( trans, other_values )[0][0]
except IndexError:
- pass #no valid options
+ pass # no valid options
assert trans is not None, "DataToolParameter requires a trans"
history = trans.get_history()
assert history is not None, "DataToolParameter requires a history"
if value is not None:
if type( value ) != list:
value = [ value ]
- field = form_builder.SelectField( self.name, self.multiple, None, self.refresh_on_change, refresh_on_change_values = self.refresh_on_change_values )
+ field = form_builder.SelectField( self.name, self.multiple, None, self.refresh_on_change, refresh_on_change_values=self.refresh_on_change_values )
+
# CRUCIAL: the dataset_collector function needs to be local to DataToolParameter.get_html_field()
def dataset_collector( hdas, parent_hid ):
current_user_roles = trans.get_current_user_roles()
@@ -1654,7 +1670,7 @@
return field
def get_initial_value( self, trans, context, history=None ):
- return self.get_initial_value_from_history_prevent_repeats(trans, context, None, history=history);
+ return self.get_initial_value_from_history_prevent_repeats(trans, context, None, history=history)
def get_initial_value_from_history_prevent_repeats( self, trans, context, already_used, history=None ):
"""
@@ -1676,7 +1692,8 @@
try:
filter_value = self.options.get_options( trans, context )[0][0]
except IndexError:
- pass #no valid options
+ pass # no valid options
+
def dataset_collector( datasets ):
def is_convertable( dataset ):
target_ext, converted_dataset = dataset.find_conversion_destination( self.formats )
diff -r ac744f96faa5d4a61c73ca2e20655f818af3dddd -r 3402a11b008865ee37ee29c9d7d3d5ea27c6fe84 lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py
+++ b/lib/galaxy/web/framework/__init__.py
@@ -1070,10 +1070,11 @@
the user (chromInfo in history).
"""
dbnames = list()
- datasets = self.sa_session.query( self.app.model.HistoryDatasetAssociation ) \
- .filter_by( deleted=False, history_id=self.history.id, extension="len" )
- for dataset in datasets:
- dbnames.append( (dataset.dbkey, dataset.name) )
+ if self.history:
+ datasets = self.sa_session.query( self.app.model.HistoryDatasetAssociation ) \
+ .filter_by( deleted=False, history_id=self.history.id, extension="len" )
+ for dataset in datasets:
+ dbnames.append( (dataset.dbkey, dataset.name) )
user = self.get_user()
if user and 'dbkeys' in user.preferences:
user_keys = from_json_string( user.preferences['dbkeys'] )
diff -r ac744f96faa5d4a61c73ca2e20655f818af3dddd -r 3402a11b008865ee37ee29c9d7d3d5ea27c6fe84 lib/galaxy/webapps/galaxy/api/datasets.py
--- a/lib/galaxy/webapps/galaxy/api/datasets.py
+++ b/lib/galaxy/webapps/galaxy/api/datasets.py
@@ -7,7 +7,7 @@
from galaxy.web.base.controller import UsesHistoryMixin
from galaxy.web.framework.helpers import is_true
from galaxy.datatypes import dataproviders
-
+from galaxy.util import string_as_bool_or_none
import logging
log = logging.getLogger( __name__ )
@@ -245,11 +245,16 @@
@web.expose_api_raw_anonymous
def display( self, trans, history_content_id, history_id,
- preview=False, filename=None, to_ext=None, chunk=None, **kwd ):
+ preview=False, filename=None, to_ext=None, chunk=None, raw=False, **kwd ):
"""
GET /api/histories/{encoded_history_id}/contents/{encoded_content_id}/display
Displays history content (dataset).
+
+ The query parameter 'raw' should be considered experimental and may be dropped at
+ some point in the future without warning. Generally, data should be processed by its
+ datatype prior to display (the default if raw is unspecified or explicitly false).
"""
+ raw = string_as_bool_or_none( raw )
# Huge amount of code overlap with lib/galaxy/webapps/galaxy/api/history_content:show here.
rval = ''
try:
@@ -269,7 +274,15 @@
hda = self.get_history_dataset_association( trans, history, history_content_id,
check_ownership=True, check_accessible=True )
- rval = hda.datatype.display_data( trans, hda, preview, filename, to_ext, chunk, **kwd )
+ display_kwd = kwd.copy()
+ try:
+ del display_kwd["key"]
+ except KeyError:
+ pass
+ if raw:
+ rval = open( hda.file_name )
+ else:
+ rval = hda.datatype.display_data( trans, hda, preview, filename, to_ext, chunk, **display_kwd )
except Exception, exception:
log.error( "Error getting display data for dataset (%s) from history (%s): %s",
diff -r ac744f96faa5d4a61c73ca2e20655f818af3dddd -r 3402a11b008865ee37ee29c9d7d3d5ea27c6fe84 lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -9,6 +9,7 @@
import logging
log = logging.getLogger( __name__ )
+
class ToolsController( BaseAPIController, UsesVisualizationMixin ):
"""
RESTful controller for interactions with tools.
@@ -113,7 +114,19 @@
# TODO: encode data ids and decode ids.
# TODO: handle dbkeys
params = util.Params( inputs, sanitize = False )
- template, vars = tool.handle_input( trans, params.__dict__, history=target_history )
+ # process_state will be 'populate' or 'update'. When no tool
+ # state is specified in input - it will be 'populate', and
+ # tool will fully expand repeat and conditionals when building
+ # up state. If tool state is found in input
+ # parameters, process_state will be 'update' and complex
+ # submissions (with repeats and conditionals) must be built up
+ # over several iterative calls to the API - mimicking behavior
+ # of web controller (though frankly API never returns
+ # tool_state so this "legacy" behavior is probably impossible
+ # through API currently).
+ incoming = params.__dict__
+ process_state = "update" if "tool_state" in incoming else "populate"
+ template, vars = tool.handle_input( trans, incoming, history=target_history, process_state=process_state, source="json" )
if 'errors' in vars:
trans.response.status = 400
return { "message": { "type": "error", "data" : vars[ 'errors' ] } }
diff -r ac744f96faa5d4a61c73ca2e20655f818af3dddd -r 3402a11b008865ee37ee29c9d7d3d5ea27c6fe84 templates/webapps/galaxy/tool_form.mako
--- a/templates/webapps/galaxy/tool_form.mako
+++ b/templates/webapps/galaxy/tool_form.mako
@@ -218,12 +218,11 @@
else:
cls = "form-row"
- label = param.get_label()
-
field = param.get_html_field( trans, parent_state[ param.name ], other_values )
field.refresh_on_change = param.refresh_on_change
- # Field may contain characters submitted by user and these characters may be unicode; handle non-ascii characters gracefully.
+ # Field may contain characters submitted by user and these characters may
+ # be unicode; handle non-ascii characters gracefully.
field_html = field.get_html( prefix )
if type( field_html ) is not unicode:
field_html = unicode( field_html, 'utf-8', 'replace' )
@@ -232,25 +231,39 @@
return field_html
%><div class="${cls}">
- %if label:
- <label for="${param.name}">${label}:</label>
- %endif
- <div class="form-row-input">${field_html}</div>
- %if parent_errors.has_key( param.name ):
- <div class="form-row-error-message">
- <div><img style="vertical-align: middle;" src="${h.url_for('/static/style/error_small.png')}"> <span style="vertical-align: middle;">${parent_errors[param.name]}</span></div>
- </div>
- %endif
+ ${label_for_param( param )}
+ ${input_for_param( param, field_html )}
+ ${errors_for_param( param, parent_errors )}
+ ${help_for_param( param )}
+ <div style="clear: both;"></div>
+ </div>
+</%def>
- %if param.help:
- <div class="toolParamHelp" style="clear: both;">
- ${param.help}
- </div>
- %endif
+<%def name="input_for_param( param, field_html )">
+ <div class="form-row-input">${field_html}</div>
+</%def>
- <div style="clear: both;"></div>
+<%def name="label_for_param( param )">
+ <% label = param.get_label()%>
+ %if label:
+ <label for="${param.name}">${label}:</label>
+ %endif
+</%def>
- </div>
+<%def name="errors_for_param( param, parent_errors )">
+ %if parent_errors.has_key( param.name ):
+ <div class="form-row-error-message">
+ <div><img style="vertical-align: middle;" src="${h.url_for('/static/style/error_small.png')}"> <span style="vertical-align: middle;">${parent_errors[param.name]}</span></div>
+ </div>
+ %endif
+</%def>
+
+<%def name="help_for_param( param )">
+ %if param.help:
+ <div class="toolParamHelp" style="clear: both;">
+ ${param.help}
+ </div>
+ %endif
</%def><%def name="row_for_rerun()">
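Taken together, these changes let a single /api/tools request carry a fully expanded submission, repeats included. An illustrative request - the tool id, encoded ids, API key, and exact payload schema are placeholders here, shown only in outline:

    import json
    import requests

    payload = {
        'tool_id': 'cat1',                                    # placeholder tool
        'history_id': '<encoded history id>',
        'inputs': {
            'input1': {'src': 'hda', 'id': '<encoded dataset id>'},
            'queries_0|input2': {'src': 'hda', 'id': '<encoded dataset id>'},
        },
    }
    response = requests.post('http://localhost:8080/api/tools',
                             data=json.dumps(payload),
                             params={'key': '<api key>'},
                             headers={'Content-Type': 'application/json'})
    print json.loads(response.text)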
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
10 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/08ca76c1804d/
Changeset: 08ca76c1804d
User: BjoernGruening
Date: 2013-11-14 17:39:49
Summary: add setup_perl_environment as Tool Shed functionality
Affected #: 2 files
diff -r 516df888b97a164b7ff2d8235f5d4ea6462e185d -r 08ca76c1804d9307feb3db95fdcecfe0d74af20b lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -441,6 +441,99 @@
return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
if return_code:
return
+ elif action_type == 'setup_perl_environment':
+ # setup a Perl environment
+ # <action type="setup_perl_environment">
+ # <repository name="package_perl_5_18" owner="bgruening">
+ # <package name="perl" version="5.18.1" />
+ # </repository>
+ # <!-- allow downloading and installing a Perl package from cpan.org-->
+ # <package>XML::Parser</package>
+ # <package>http://search.cpan.org/CPAN/authors/id/C/CJ/CJFIELDS/BioPerl-1.6.922.tar.gz</package>
+ # </action>
+ filtered_actions = actions[ 1: ]
+
+ if action_dict.get( 'env_shell_file_paths', False ):
+ install_environment.add_env_shell_file_paths( action_dict[ 'env_shell_file_paths' ] )
+ else:
+ log.warning( 'Missing Perl environment. Please check if your specified Perl installation exists.' )
+ return
+
+ dir = os.path.curdir
+ current_dir = os.path.abspath( os.path.join( work_dir, dir ) )
+ with lcd( current_dir ):
+ with settings( warn_only=True ):
+
+ for package in action_dict[ 'perl_packages' ]:
+ """
+ If set to a true value then MakeMaker's prompt function will always return the default without waiting for user input.
+ """
+ cmd = '''export PERL_MM_USE_DEFAULT=1 && '''
+
+ if package.find('://') != -1:
+ # we assume a URL to a Perl module tarball
+ url = package
+ package_name = url.split( '/' )[ -1 ]
+ dir = td_common_util.url_download( work_dir, package_name, url, extract=True )
+ # search for Build.PL or Makefile.PL (ExtUtils::MakeMaker vs. Module::Build)
+
+ tmp_work_dir = os.path.join( work_dir, dir)
+ if os.path.exists( os.path.join( tmp_work_dir, 'Makefile.PL' ) ):
+
+ cmd += '''perl Makefile.PL INSTALL_BASE=$INSTALL_DIR && make && make install'''
+ elif os.path.exists( os.path.join( tmp_work_dir, 'Build.PL' ) ):
+ cmd += '''perl Build.PL --install_base $INSTALL_DIR && perl Build && perl Build install'''
+ else:
+ log.warning( 'No Makefile.PL or Build.PL file found in %s. Skip installation of %s.' % ( url, package_name ) )
+ return
+ with lcd( tmp_work_dir ):
+ cmd = install_environment.build_command( td_common_util.evaluate_template( cmd, install_dir ) )
+ return_code = handle_command( app, tool_dependency, install_dir, cmd )
+ if return_code:
+ return
+ else:
+ # perl package from CPAN without version number
+ # cpanm should be installed with the parent perl distribution, otherwise this will not work
+ cmd += '''cpanm --local-lib=$INSTALL_DIR %s''' % ( package )
+
+ cmd = install_environment.build_command( td_common_util.evaluate_template( cmd, install_dir ) )
+ return_code = handle_command( app, tool_dependency, install_dir, cmd )
+ if return_code:
+ return
+
+ # Ruby libraries are installed to $INSTALL_DIR (install_dir), we now set the GEM_PATH path to that directory
+ # TODO: That code is used a lot for the different environments and should be refactored, once the environments are integrated
+ installed_env_dict = install_environment.environment_dict()
+ perl5lib_path = installed_env_dict.get('PERL5LIB', False)
+ perlbin_path = installed_env_dict.get('PATH', False)
+
+ if not perl5lib_path or not perlbin_path:
+ log.warning( 'Missing RRUBYLIB or/and RUBY_HOME environment variable. Please check if your specified Ruby installation is valid.' )
+
+ modify_env_command_dict = dict( name="PATH", action="set_to", value=perlbin_path )
+ env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
+ return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
+ if return_code:
+ return
+
+ modify_env_command_dict = dict( name="PERL5LIB", action="set_to", value=perl5lib_path )
+ env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
+ return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
+ if return_code:
+ return
+
+ modify_env_command_dict = dict( name="PERL5LIB", action="prepend_to", value=os.path.join(install_dir, 'lib', 'perl5') )
+ env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
+ return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
+ if return_code:
+ return
+
+ modify_env_command_dict = dict( name="PATH", action="prepend_to", value=os.path.join(install_dir, 'bin') )
+ env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
+ return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
+ if return_code:
+ return
+
else:
# We're handling a complex repository dependency where we only have a set_environment tag set.
diff -r 516df888b97a164b7ff2d8235f5d4ea6462e185d -r 08ca76c1804d9307feb3db95fdcecfe0d74af20b lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
@@ -719,6 +719,36 @@
action_dict[ 'ruby_packages' ] = ruby_packages
else:
continue
+ elif action_type == 'setup_perl_environment':
+ # setup a Perl environment
+ # <action type="setup_perl_environment">
+ # <repository name="package_perl_5_18" owner="bgruening">
+ # <package name="perl" version="5.18.1" />
+ # </repository>
+ # <!-- allow downloading and installing a Perl package from cpan.org-->
+ # <package>XML::Parser</package>
+ # <package>http://search.cpan.org/CPAN/authors/id/C/CJ/CJFIELDS/BioPerl-1.6.922.tar.gz</package>
+ # </action>
+
+ env_shell_file_paths = td_common_util.get_env_shell_file_paths( app, action_elem.find('repository') )
+ all_env_shell_file_paths.extend( env_shell_file_paths )
+ if all_env_shell_file_paths:
+ action_dict[ 'env_shell_file_paths' ] = all_env_shell_file_paths
+ perl_packages = list()
+ for env_elem in action_elem:
+ if env_elem.tag == 'package':
+ """
+ A valid package definition can be:
+ XML::Parser
+ http://search.cpan.org/CPAN/authors/id/C/CJ/CJFIELDS/BioPerl-1.6.922.tar.gz
+ Unfortunately, CPAN does not support versioning. If you want real reproducibility,
+ you need to specify the tarball path and the right order of different tarballs manually.
+ """
+ perl_packages.append( env_elem.text.strip() )
+ if perl_packages:
+ action_dict[ 'perl_packages' ] = perl_packages
+ else:
+ continue
elif action_type == 'make_install':
# make; make install; allow providing make options
if action_elem.text:
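The branching above boils down to: bare CPAN names go through cpanm, while tarball URLs are unpacked and built from source via Makefile.PL or Build.PL. Condensed into a sketch (the package values are examples; $INSTALL_DIR is expanded later by the install environment, as in the diff):

    def perl_install_command(package):
        # MakeMaker prompts auto-answer with defaults during batch installs.
        cmd = 'export PERL_MM_USE_DEFAULT=1 && '
        if '://' in package:
            # Tarball URL: after download/extract, prefer Makefile.PL,
            # falling back to Build.PL (Module::Build).
            cmd += 'perl Makefile.PL INSTALL_BASE=$INSTALL_DIR && make && make install'
        else:
            # Bare CPAN name: delegate resolution and build to cpanm.
            cmd += 'cpanm --local-lib=$INSTALL_DIR %s' % package
        return cmd

    assert 'cpanm' in perl_install_command('XML::Parser')
    assert 'Makefile.PL' in perl_install_command(
        'http://search.cpan.org/CPAN/authors/id/C/CJ/CJFIELDS/BioPerl-1.6.922.tar.gz')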
https://bitbucket.org/galaxy/galaxy-central/commits/bfc098707375/
Changeset: bfc098707375
User: BjoernGruening
Date: 2013-11-14 17:39:49
Summary: fix c&p leftovers in comments
Affected #: 1 file
diff -r 08ca76c1804d9307feb3db95fdcecfe0d74af20b -r bfc098707375ff25cad706a9929ac29aef84e38c lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -501,14 +501,14 @@
if return_code:
return
- # Ruby libraries are installed to $INSTALL_DIR (install_dir), we now set the GEM_PATH path to that directory
+ # Perl libraries are installed to $INSTALL_DIR (install_dir), we now set the PERL5LIB path to that directory
# TODO: That code is used a lot for the different environments and should be refactored, once the environments are integrated
installed_env_dict = install_environment.environment_dict()
perl5lib_path = installed_env_dict.get('PERL5LIB', False)
perlbin_path = installed_env_dict.get('PATH', False)
if not perl5lib_path or not perlbin_path:
- log.warning( 'Missing RRUBYLIB or/and RUBY_HOME environment variable. Please check if your specified Ruby installation is valid.' )
+ log.warning( 'Missing PERL5LIB or/and PATH environment variable. Please check if your specified Perl installation is valid.' )
modify_env_command_dict = dict( name="PATH", action="set_to", value=perlbin_path )
env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
https://bitbucket.org/galaxy/galaxy-central/commits/78ebe94f1492/
Changeset: 78ebe94f1492
User: jmchilton
Date: 2013-11-14 17:39:49
Summary: Introduce utility method parse_setup_environment_repositories.
Used to reduce code duplication in install_util and to provide a target for unit tests of the new functionality. Add unit tests. Add new functionality for parsing out which packages should be recursive runtime dependencies (not implemented on the fabric side yet).
Affected #: 4 files
diff -r bfc098707375ff25cad706a9929ac29aef84e38c -r 78ebe94f1492e145b298539042a9d9f68e5f0ae1 lib/galaxy/util/__init__.py
--- a/lib/galaxy/util/__init__.py
+++ b/lib/galaxy/util/__init__.py
@@ -137,6 +137,11 @@
ElementInclude.include(root)
return tree
+
+def parse_xml_string(xml_string):
+ tree = ElementTree.fromstring(xml_string)
+ return tree
+
def xml_to_string( elem, pretty=False ):
"""Returns a string from an xml tree"""
if pretty:
diff -r bfc098707375ff25cad706a9929ac29aef84e38c -r 78ebe94f1492e145b298539042a9d9f68e5f0ae1 lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
@@ -665,11 +665,7 @@
# <!-- allow installing an R packages -->
# <package>https://github.com/bgruening/download_store/raw/master/DESeq2-1_0_18/BiocGe…</package>
# </action>
- env_shell_file_paths = td_common_util.get_env_shell_file_paths( app, action_elem.find('repository') )
-
- all_env_shell_file_paths.extend( env_shell_file_paths )
- if all_env_shell_file_paths:
- action_dict[ 'env_shell_file_paths' ] = all_env_shell_file_paths
+ td_common_util.parse_setup_environment_repositories( app, all_env_shell_file_paths, action_elem, action_dict )
r_packages = list()
for env_elem in action_elem:
if env_elem.tag == 'package':
@@ -690,11 +686,7 @@
# <package>protk=1.2.4</package>
# <package>http://url-to-some-gem-file.de/protk.gem</package>
# </action>
-
- env_shell_file_paths = td_common_util.get_env_shell_file_paths( app, action_elem.find('repository') )
- all_env_shell_file_paths.extend( env_shell_file_paths )
- if all_env_shell_file_paths:
- action_dict[ 'env_shell_file_paths' ] = all_env_shell_file_paths
+ td_common_util.parse_setup_environment_repositories( app, all_env_shell_file_paths, action_elem, action_dict )
ruby_packages = list()
for env_elem in action_elem:
if env_elem.tag == 'package':
diff -r bfc098707375ff25cad706a9929ac29aef84e38c -r 78ebe94f1492e145b298539042a9d9f68e5f0ae1 lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
@@ -193,6 +193,7 @@
return os.path.abspath( os.path.join( root, name ) )
return None
+
def get_env_shell_file_paths( app, elem ):
# Currently only the following tag set is supported.
# <repository toolshed="http://localhost:9009/" name="package_numpy_1_7" owner="test" changeset_revision="c84c6a8be056">
@@ -383,7 +384,7 @@
# platform. Append the child element to the list of elements to process.
actions_elem_list.append( child_element )
elif child_element.tag == 'action':
- # Any <action> tags within an <actions_group> tag set must come after all <actions> tags.
+ # Any <action> tags within an <actions_group> tag set must come after all <actions> tags.
if actions_elems_processed == actions_elem_count:
# If all <actions> elements have been processed, then this <action> element can be appended to the list of actions to
# execute within this group.
@@ -409,6 +410,16 @@
continue
return actions_elem_tuples
+
+def parse_setup_environment_repositories( app, all_env_shell_file_paths, action_elem, action_dict ):
+ env_shell_file_paths = get_env_shell_file_paths( app, action_elem.find('repository') )
+
+ all_env_shell_file_paths.extend( env_shell_file_paths )
+ if all_env_shell_file_paths:
+ action_dict[ 'env_shell_file_paths' ] = all_env_shell_file_paths
+ action_dict[ 'action_shell_file_paths' ] = env_shell_file_paths
+
+
def url_download( install_dir, downloaded_file_name, download_url, extract=True ):
file_path = os.path.join( install_dir, downloaded_file_name )
src = None
diff -r bfc098707375ff25cad706a9929ac29aef84e38c -r 78ebe94f1492e145b298539042a9d9f68e5f0ae1 test/unit/tool_shed/test_td_common_util.py
--- /dev/null
+++ b/test/unit/tool_shed/test_td_common_util.py
@@ -0,0 +1,57 @@
+from contextlib import contextmanager
+from galaxy.util import parse_xml_string
+
+from tool_shed.galaxy_install.tool_dependencies import td_common_util
+
+
+class MockApp( object ):
+
+ def __init__( self ):
+ pass
+
+
+def test_parse_setup_environment_repositories( ):
+ xml = """<action name="setup_r_environment">
+ <repository name="package_r_3_0_1" owner="bgruening" toolshed="toolshed.g2.bx.psu.edu" changeset_revision="1234567">
+ <package name="R" version="3.0.1" />
+ </repository>
+ </action>
+ """
+ mock_app = MockApp()
+ action_elem = parse_xml_string( xml )
+ required_for_install_env_sh = '/path/to/existing.sh'
+ all_env_paths = [ required_for_install_env_sh ]
+ action_dict = {}
+
+ r_env_sh = '/path/to/go/env.sh'
+
+ def mock_get_env_shell_file_paths( app, elem):
+ assert app == mock_app
+ assert elem.get( 'name' ) == "package_r_3_0_1"
+ return [ r_env_sh ]
+
+ with __mock_common_util_method("get_env_shell_file_paths", mock_get_env_shell_file_paths):
+ td_common_util.parse_setup_environment_repositories( mock_app, all_env_paths, action_elem, action_dict )
+ ## Verify old env files weren't deleted.
+ assert required_for_install_env_sh in all_env_paths
+ ## Verify new ones added.
+ assert r_env_sh in all_env_paths
+ ## env_shell_file_paths includes everything
+ assert all( [env in action_dict[ 'env_shell_file_paths' ] for env in all_env_paths] )
+
+ ## action_shell_file_paths includes only env files defined
+ ## inside the setup_ action element.
+ assert required_for_install_env_sh not in action_dict[ 'action_shell_file_paths' ]
+ assert r_env_sh in action_dict[ 'action_shell_file_paths' ]
+
+
+## Poor man's mocking. Need to get a real mocking library as a real Galaxy development
+## dependency.
+@contextmanager
+def __mock_common_util_method(name, mock_method):
+ real_method = getattr(td_common_util, name)
+ try:
+ setattr(td_common_util, name, mock_method)
+ yield
+ finally:
+ setattr(td_common_util, name, real_method)
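To recap the refactoring in runnable form (an editor's sketch mirroring the diff, not code from the commit): the helper extends the cumulative list, stores it under 'env_shell_file_paths', and records the per-action paths under 'action_shell_file_paths'.

    # Self-contained sketch of the bookkeeping in parse_setup_environment_repositories;
    # env_shell_file_paths stands in for get_env_shell_file_paths( app, <repository> ).
    def parse_setup_environment_repositories_sketch( env_shell_file_paths,
                                                     all_env_shell_file_paths, action_dict ):
        all_env_shell_file_paths.extend( env_shell_file_paths )
        if all_env_shell_file_paths:
            action_dict[ 'env_shell_file_paths' ] = all_env_shell_file_paths
        action_dict[ 'action_shell_file_paths' ] = env_shell_file_paths

    all_paths = [ '/path/to/existing.sh' ]
    action_dict = {}
    parse_setup_environment_repositories_sketch( [ '/path/to/R/env.sh' ], all_paths, action_dict )
    assert action_dict[ 'env_shell_file_paths' ] == [ '/path/to/existing.sh', '/path/to/R/env.sh' ]
    assert action_dict[ 'action_shell_file_paths' ] == [ '/path/to/R/env.sh' ]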
https://bitbucket.org/galaxy/galaxy-central/commits/443628e7b555/
Changeset: 443628e7b555
User: jmchilton
Date: 2013-11-14 17:39:49
Summary: Update perl to use parse_setup_environment_repositories.
Affected #: 1 file
diff -r 78ebe94f1492e145b298539042a9d9f68e5f0ae1 -r 443628e7b555600d33937bebe0ae03f637650025 lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
@@ -721,11 +721,7 @@
# <package>XML::Parser</package>
# <package>http://search.cpan.org/CPAN/authors/id/C/CJ/CJFIELDS/BioPerl-1.6.922.tar.gz</package>
# </action>
-
- env_shell_file_paths = td_common_util.get_env_shell_file_paths( app, action_elem.find('repository') )
- all_env_shell_file_paths.extend( env_shell_file_paths )
- if all_env_shell_file_paths:
- action_dict[ 'env_shell_file_paths' ] = all_env_shell_file_paths
+ td_common_util.parse_setup_environment_repositories( app, all_env_shell_file_paths, action_elem, action_dict )
perl_packages = list()
for env_elem in action_elem:
if env_elem.tag == 'package':
https://bitbucket.org/galaxy/galaxy-central/commits/bc18a1706c12/
Changeset: bc18a1706c12
User: jmchilton
Date: 2013-11-14 17:39:49
Summary: Add unit test for all existing functionality in td_common_util.create_or_update_env_shell_file
Affected #: 1 file
diff -r 443628e7b555600d33937bebe0ae03f637650025 -r bc18a1706c12849df23985c0fc5f04feff5bd956 test/unit/tool_shed/test_td_common_util.py
--- a/test/unit/tool_shed/test_td_common_util.py
+++ b/test/unit/tool_shed/test_td_common_util.py
@@ -1,15 +1,35 @@
+from os.path import join
from contextlib import contextmanager
from galaxy.util import parse_xml_string
from tool_shed.galaxy_install.tool_dependencies import td_common_util
+TEST_DEPENDENCIES_DIR = "/opt/galaxy/dependencies"
+TEST_INSTALL_DIR = "%s/test_install_dir" % TEST_DEPENDENCIES_DIR
+
+
class MockApp( object ):
def __init__( self ):
pass
+def test_create_or_update_env_shell_file( ):
+ test_path = "/usr/share/R/libs"
+ line, path = td_common_util.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict(action="append_to", name="R_LIBS", value=test_path))
+ assert path == join( TEST_INSTALL_DIR, "env.sh" )
+ assert line == "R_LIBS=$R_LIBS:/usr/share/R/libs; export R_LIBS"
+
+ line, path = td_common_util.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict(action="prepend_to", name="R_LIBS", value=test_path))
+ assert path == join( TEST_INSTALL_DIR, "env.sh" )
+ assert line == "R_LIBS=/usr/share/R/libs:$R_LIBS; export R_LIBS"
+
+ line, path = td_common_util.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict(action="set_to", name="R_LIBS", value=test_path))
+ assert path == join( TEST_INSTALL_DIR, "env.sh" )
+ assert line == "R_LIBS=/usr/share/R/libs; export R_LIBS"
+
+
def test_parse_setup_environment_repositories( ):
xml = """<action name="setup_r_environment"><repository name="package_r_3_0_1" owner="bgruening" toolshed="toolshed.g2.bx.psu.edu" changeset_revision="1234567">
https://bitbucket.org/galaxy/galaxy-central/commits/8e7cface9bf1/
Changeset: 8e7cface9bf1
User: jmchilton
Date: 2013-11-14 17:39:49
Summary: Add new action to create_or_update_env_shell_file for sourcing other env.sh files.
With unit test.
Affected #: 2 files
diff -r bc18a1706c12849df23985c0fc5f04feff5bd956 -r 8e7cface9bf1060c41ebbda99e025572abbafe5b lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
@@ -146,16 +146,21 @@
return None
def create_or_update_env_shell_file( install_dir, env_var_dict ):
- env_var_name = env_var_dict[ 'name' ]
env_var_action = env_var_dict[ 'action' ]
env_var_value = env_var_dict[ 'value' ]
- if env_var_action == 'prepend_to':
- changed_value = '%s:$%s' % ( env_var_value, env_var_name )
- elif env_var_action == 'set_to':
- changed_value = '%s' % env_var_value
- elif env_var_action == 'append_to':
- changed_value = '$%s:%s' % ( env_var_name, env_var_value )
- line = "%s=%s; export %s" % ( env_var_name, changed_value, env_var_name )
+ if env_var_action in ['prepend_to', 'set_to', 'append_to']:
+ env_var_name = env_var_dict[ 'name' ]
+ if env_var_action == 'prepend_to':
+ changed_value = '%s:$%s' % ( env_var_value, env_var_name )
+ elif env_var_action == 'set_to':
+ changed_value = '%s' % env_var_value
+ elif env_var_action == 'append_to':
+ changed_value = '$%s:%s' % ( env_var_name, env_var_value )
+ line = "%s=%s; export %s" % ( env_var_name, changed_value, env_var_name )
+ elif env_var_action == "source":
+ line = ". %s" % env_var_value
+ else:
+ raise Exception( "Unknown shell file action %s" % env_var_action )
env_shell_file_path = os.path.join( install_dir, 'env.sh' )
return line, env_shell_file_path
diff -r bc18a1706c12849df23985c0fc5f04feff5bd956 -r 8e7cface9bf1060c41ebbda99e025572abbafe5b test/unit/tool_shed/test_td_common_util.py
--- a/test/unit/tool_shed/test_td_common_util.py
+++ b/test/unit/tool_shed/test_td_common_util.py
@@ -29,6 +29,10 @@
assert path == join( TEST_INSTALL_DIR, "env.sh" )
assert line == "R_LIBS=/usr/share/R/libs; export R_LIBS"
+ line, path = td_common_util.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict(action="source", value=test_path))
+ assert path == join( TEST_INSTALL_DIR, "env.sh" )
+ assert line == ". /usr/share/R/libs"
+
def test_parse_setup_environment_repositories( ):
xml = """<action name="setup_r_environment">
https://bitbucket.org/galaxy/galaxy-central/commits/cf33c63f9a9b/
Changeset: cf33c63f9a9b
User: jmchilton
Date: 2013-11-14 17:39:49
Summary: Introduce higher level abstraction for building up env.sh files in fabric_util.py.
Use it to reduce duplicated code in fabric_util.py. Implement unit tests describing this new class.
Affected #: 2 files
diff -r 8e7cface9bf1060c41ebbda99e025572abbafe5b -r cf33c63f9a9b14b395d02d459ae6e30236236a67 lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -200,6 +200,20 @@
return True
+class EnvFileBuilder( object ):
+
+ def __init__( self, install_dir ):
+ self.install_dir = install_dir
+ self.return_code = 0
+
+ def append_line( self, skip_if_contained=True, make_executable=True, **kwds ):
+ env_var_dict = dict(**kwds)
+ env_entry, env_file = td_common_util.create_or_update_env_shell_file( self.install_dir, env_var_dict )
+ return_code = file_append( env_entry, env_file, skip_if_contained=skip_if_contained, make_executable=make_executable )
+ self.return_code = self.return_code or return_code
+ return self.return_code
+
+
class InstallEnvironment( object ):
"""
Object describing the environment built up as part of the process of building
@@ -373,11 +387,9 @@
return
# R libraries are installed to $INSTALL_DIR (install_dir), we now set the R_LIBS path to that directory
- # TODO: That code is used a lot for the different environments and should be refactored, once the environments are integrated
- modify_env_command_dict = dict( name="R_LIBS", action="prepend_to", value=install_dir )
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
-
+ env_file_builder = EnvFileBuilder( install_dir )
+ env_file_builder.append_line( name="R_LIBS", action="prepend_to", value=install_dir )
+ return_code = env_file_builder.return_code
if return_code:
return
elif action_type == 'setup_ruby_environment':
@@ -428,17 +440,10 @@
if return_code:
return
- # Ruby libraries are installed to $INSTALL_DIR (install_dir), we now set the GEM_PATH path to that directory
- # TODO: That code is used a lot for the different environments and should be refactored, once the environments are integrated
- modify_env_command_dict = dict( name="GEM_PATH", action="prepend_to", value=install_dir )
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
- if return_code:
- return
-
- modify_env_command_dict = dict( name="PATH", action="prepend_to", value=os.path.join(install_dir, 'bin') )
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
+ env_file_builder = EnvFileBuilder( install_dir )
+ env_file_builder.append_line( name="GEM_PATH", action="prepend_to", value=install_dir )
+ env_file_builder.append_line( name="PATH", action="prepend_to", value=os.path.join(install_dir, 'bin') )
+ return_code = env_file_builder.return_code
if return_code:
return
elif action_type == 'setup_perl_environment':
@@ -573,13 +578,14 @@
# in the set_environment action.
cmds = install_environment.environment_commands( 'set_environment' )
env_var_dicts = action_dict[ 'environment_variable' ]
+ env_file_builder = EnvFileBuilder( install_dir )
for env_var_dict in env_var_dicts:
# Check for the presence of the $ENV[] key string and populate it if possible.
env_var_dict = handle_environment_variables( app, tool_dependency, install_dir, env_var_dict, cmds )
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, env_var_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
- if return_code:
- return
+ env_file_builder.append_line( **env_var_dict )
+ return_code = env_file_builder.return_code
+ if return_code:
+ return
elif action_type == 'set_environment_for_install':
# Currently the only action supported in this category is a list of paths to one or more tool dependency env.sh files,
# the environment setting in each of which will be injected into the environment for all <action type="shell_command">
@@ -618,14 +624,10 @@
if not os.path.exists( output.stdout ):
log.error( "virtualenv's site-packages directory '%s' does not exist", output.stdout )
return
- modify_env_command_dict = dict( name="PYTHONPATH", action="prepend_to", value=output.stdout )
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
- if return_code:
- return
- modify_env_command_dict = dict( name="PATH", action="prepend_to", value=os.path.join( venv_directory, "bin" ) )
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
+ env_file_builder = EnvFileBuilder( install_dir )
+ env_file_builder.append_line( name="PYTHONPATH", action="prepend_to", value=output.stdout )
+ env_file_builder.append_line( name="PATH", action="prepend_to", value=os.path.join( venv_directory, "bin" ) )
+ return_code = env_file_builder.return_code
if return_code:
return
elif action_type == 'shell_command':
diff -r 8e7cface9bf1060c41ebbda99e025572abbafe5b -r cf33c63f9a9b14b395d02d459ae6e30236236a67 test/unit/tool_shed/test_fabric_util.py
--- /dev/null
+++ b/test/unit/tool_shed/test_fabric_util.py
@@ -0,0 +1,45 @@
+from contextlib import contextmanager
+from tool_shed.galaxy_install.tool_dependencies import fabric_util
+
+
+def test_env_file_builder():
+ install_dir = "/opt/galaxy/dependencies/foo/"
+ env_file_builder = fabric_util.EnvFileBuilder( install_dir )
+ added_lines = []
+ mock_return = dict(value=0)
+
+ def mock_file_append( text, file_path, **kwds ):
+ added_lines.append(text)
+ return mock_return["value"]
+
+ with __mock_fabric_util_method("file_append", mock_file_append):
+ env_file_builder.append_line( name="PATH", action="prepend_to", value="/usr/bin/local/R" )
+ assert added_lines == [ "PATH=/usr/bin/local/R:$PATH; export PATH" ]
+ assert env_file_builder.return_code == 0
+
+ # Reset mock lines
+ del added_lines[:]
+ # Next time file_append will fail
+ mock_return["value"] = 1
+
+ env_file_builder.append_line( action="source", value="/usr/bin/local/R/env.sh" )
+ assert added_lines == [ ". /usr/bin/local/R/env.sh" ]
+ # Check failure
+ assert env_file_builder.return_code == 1
+
+ mock_return["value"] = 0
+ env_file_builder.append_line( name="LD_LIBRARY_PATH", action="append_to", value="/usr/bin/local/R/lib" )
+ # Verify even though last append succeeded, previous failure still recorded.
+ assert env_file_builder.return_code == 1
+
+
+## Poor man's mocking. Need to get a real mocking library as a real Galaxy development
+## dependency.
+@contextmanager
+def __mock_fabric_util_method(name, mock_method):
+ real_method = getattr(fabric_util, name)
+ try:
+ setattr(fabric_util, name, mock_method)
+ yield
+ finally:
+ setattr(fabric_util, name, real_method)
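The calling convention the builder enables (an editor's sketch with hypothetical paths): queue up several lines, then consult one sticky return code instead of checking after every file_append.

    from tool_shed.galaxy_install.tool_dependencies.fabric_util import EnvFileBuilder

    def build_env_sketch( install_dir ):
        builder = EnvFileBuilder( install_dir )
        builder.append_line( name="R_LIBS", action="prepend_to", value=install_dir )
        builder.append_line( action="source", value="/hypothetical/dependency/env.sh" )
        # return_code stays non-zero once any append fails (the "or" accumulation above).
        return builder.return_code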
https://bitbucket.org/galaxy/galaxy-central/commits/27edf6bd91d1/
Changeset: 27edf6bd91d1
User: jmchilton
Date: 2013-11-14 17:39:49
Summary: Pull in dependent R, Ruby, and Perl complete environments recursively at runtime.
Affected #: 1 file
diff -r cf33c63f9a9b14b395d02d459ae6e30236236a67 -r 27edf6bd91d1b9b2153680adec361674c1f9b113 lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -388,6 +388,7 @@
# R libraries are installed to $INSTALL_DIR (install_dir), we now set the R_LIBS path to that directory
env_file_builder = EnvFileBuilder( install_dir )
+ handle_action_shell_file_paths( env_file_builder, action_dict ) # Pull in R environment (runtime).
env_file_builder.append_line( name="R_LIBS", action="prepend_to", value=install_dir )
return_code = env_file_builder.return_code
if return_code:
@@ -441,6 +442,7 @@
return
env_file_builder = EnvFileBuilder( install_dir )
+ handle_action_shell_file_paths( env_file_builder, action_dict ) # Pull in ruby dependencies (runtime).
env_file_builder.append_line( name="GEM_PATH", action="prepend_to", value=install_dir )
env_file_builder.append_line( name="PATH", action="prepend_to", value=os.path.join(install_dir, 'bin') )
return_code = env_file_builder.return_code
@@ -506,40 +508,16 @@
if return_code:
return
- # Perl libraries are installed to $INSTALL_DIR (install_dir), we now set the PERL5LIB path to that directory
- # TODO: That code is used a lot for the different environments and should be refactored, once the environments are integrated
- installed_env_dict = install_environment.environment_dict()
- perl5lib_path = installed_env_dict.get('PERL5LIB', False)
- perlbin_path = installed_env_dict.get('PATH', False)
+ env_file_builder = EnvFileBuilder( install_dir )
+ # Recursively add dependent PERL5LIB and PATH to env.sh & anything else needed.
+ handle_action_shell_file_paths( env_file_builder, action_dict ) # Pull in Perl dependencies (runtime).
- if not perl5lib_path or not perlbin_path:
- log.warning( 'Missing PERL5LIB or/and PATH environment variable. Please check if your specified Perl installation is valid.' )
-
- modify_env_command_dict = dict( name="PATH", action="set_to", value=perlbin_path )
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
+ env_file_builder.append_line( name="PERL5LIB", action="prepend_to", value=os.path.join(install_dir, 'lib', 'perl5') )
+ env_file_builder.append_line( name="PATH", action="prepend_to", value=os.path.join(install_dir, 'bin') )
+ return_code = env_file_builder.return_code
if return_code:
return
- modify_env_command_dict = dict( name="PERL5LIB", action="set_to", value=perl5lib_path )
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
- if return_code:
- return
-
- modify_env_command_dict = dict( name="PERL5LIB", action="prepend_to", value=os.path.join(install_dir, 'lib', 'perl5') )
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
- if return_code:
- return
-
- modify_env_command_dict = dict( name="PATH", action="prepend_to", value=os.path.join(install_dir, 'bin') )
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( install_dir, modify_env_command_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
- if return_code:
- return
-
-
else:
# We're handling a complex repository dependency where we only have a set_environment tag set.
# <action type="set_environment">
@@ -714,6 +692,13 @@
source=downloaded_filename,
destination=full_path_to_dir )
+
+def handle_action_shell_file_paths( env_file_builder, action_dict ):
+ shell_file_paths = action_dict.get( 'action_shell_file_paths', [])
+ for shell_file_path in shell_file_paths:
+ env_file_builder.append_line( action="source", value=shell_file_path )
+
+
def log_results( command, fabric_AttributeString, file_path ):
"""
Write attributes of fabric.operations._AttributeString (which is the output of executing command using fabric's local() method)
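Net effect for a setup_perl_environment action (an editor's illustration; all paths hypothetical): the dependency env.sh files recorded under action_shell_file_paths are sourced first, then the package's own PERL5LIB and PATH entries are prepended, so the generated env.sh would read roughly:

    . /hypothetical/deps/package_perl_5_18/env.sh
    PERL5LIB=/hypothetical/install_dir/lib/perl5:$PERL5LIB; export PERL5LIB
    PATH=/hypothetical/install_dir/bin:$PATH; export PATH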
https://bitbucket.org/galaxy/galaxy-central/commits/a545da034f0a/
Changeset: a545da034f0a
User: jmchilton
Date: 2013-11-14 17:39:49
Summary: Bring in an action fix from @BjoernGruening that was not included in the previous commit.
Affected #: 1 file
diff -r 27edf6bd91d1b9b2153680adec361674c1f9b113 -r a545da034f0a4ffe525df3cd5e0346838bffb723 lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -404,6 +404,8 @@
# <package>protk=1.2.4</package>
# <package>http://url-to-some-gem-file.de/protk.gem</package>
# </action>
+ filtered_actions = actions[ 1: ]
+
if action_dict.get( 'env_shell_file_paths', False ):
install_environment.add_env_shell_file_paths( action_dict[ 'env_shell_file_paths' ] )
else:
https://bitbucket.org/galaxy/galaxy-central/commits/ac744f96faa5/
Changeset: ac744f96faa5
User: jmchilton
Date: 2013-11-14 17:39:49
Summary: Outline some enhancements to InstallEnvironment, not used yet.
Affected #: 1 file
diff -r a545da034f0a4ffe525df3cd5e0346838bffb723 -r ac744f96faa5d4a61c73ca2e20655f818af3dddd lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -21,6 +21,7 @@
from fabric.api import lcd
from fabric.api import local
from fabric.api import settings
+from fabric.api import prefix
log = logging.getLogger( __name__ )
@@ -245,6 +246,17 @@
log.debug( 'Invalid file %s specified, ignoring %s action.', env_shell_file_path, action_type )
return cmds
+ def __call__( self, install_dir ):
+ with settings( warn_only=True, **td_common_util.get_env_var_values( install_dir ) ):
+ with prefix( self.__setup_environment() ):
+ yield
+
+ def __setup_environment(self):
+ return "&&".join( [". %s" % file for file in self.__valid_env_shell_file_paths() ] )
+
+ def __valid_env_shell_file_paths(self):
+ return [ file for file in self.env_shell_file_paths if os.path.exists( file ) ]
+
def environment_dict(self, action_type='template_command'):
env_vars = dict()
for env_shell_file_path in self.env_shell_file_paths:
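Since the outlined __call__ is not used yet, its intended shape can only be guessed at; below is a standalone sketch (an editor's assumption) of the same source-everything-as-a-prefix idea using contextlib, independent of fabric:

    from contextlib import contextmanager

    @contextmanager
    def sourced_environment( env_shell_file_paths ):
        # Join env.sh files into one "source a && source b" prefix command,
        # mirroring __setup_environment above (existence filtering omitted).
        yield "&&".join( ". %s" % path for path in env_shell_file_paths )

    with sourced_environment( [ "/hypothetical/R/env.sh" ] ) as prefix_command:
        print( prefix_command )  # -> . /hypothetical/R/env.sh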
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e54372d60f11/
Changeset: e54372d60f11
Branch: stable
User: jgoecks
Date: 2013-11-14 17:00:19
Summary: Trackster: only allow datasets with a proper dbkey to be added to a visualization. This fixes a regression: the summary data format (bigwig) requires a proper dbkey.
Affected #: 1 file
diff -r 88a08db3c73393ef4f64b0c2f50262d5aa03967b -r e54372d60f111f4be26868ba416649697b551a13 lib/galaxy/webapps/galaxy/controllers/visualization.py
--- a/lib/galaxy/webapps/galaxy/controllers/visualization.py
+++ b/lib/galaxy/webapps/galaxy/controllers/visualization.py
@@ -86,15 +86,19 @@
class DbKeyColumn( grids.GridColumn ):
""" Column for filtering by and displaying dataset dbkey. """
def filter( self, trans, user, query, dbkey ):
- """ Filter by dbkey; datasets without a dbkey are returned as well. """
- # use raw SQL b/c metadata is a BLOB
+ """ Filter by dbkey. """
+ # Use raw SQL b/c metadata is a BLOB.
dbkey_user, dbkey = decode_dbkey( dbkey )
dbkey = dbkey.replace("'", "\\'")
- return query.filter( or_( \
- or_( "metadata like '%%\"dbkey\": [\"%s\"]%%'" % dbkey, "metadata like '%%\"dbkey\": \"%s\"%%'" % dbkey ), \
- or_( "metadata like '%%\"dbkey\": [\"?\"]%%'", "metadata like '%%\"dbkey\": \"?\"%%'" ) \
- )
- )
+ return query.filter( or_( "metadata like '%%\"dbkey\": [\"%s\"]%%'" % dbkey, "metadata like '%%\"dbkey\": \"%s\"%%'" % dbkey ) )
+
+ #Use this query when datasets with matching dbkey *or* no dbkey can be added to the visualization.
+ #return query.filter( or_( \
+ # or_( "metadata like '%%\"dbkey\": [\"%s\"]%%'" % dbkey, "metadata like '%%\"dbkey\": \"%s\"%%'" % dbkey ), \
+ # or_( "metadata like '%%\"dbkey\": [\"?\"]%%'", "metadata like '%%\"dbkey\": \"?\"%%'" ) \
+ # )
+ # )
+
class HistoryColumn( grids.GridColumn ):
""" Column for filtering by history id. """
https://bitbucket.org/galaxy/galaxy-central/commits/516df888b97a/
Changeset: 516df888b97a
User: jgoecks
Date: 2013-11-14 17:00:58
Summary: Merge stable with default branch.
Affected #: 1 file
diff -r 7553213e0646fd4337ceacc78820990c6ce0c710 -r 516df888b97a164b7ff2d8235f5d4ea6462e185d lib/galaxy/webapps/galaxy/controllers/visualization.py
--- a/lib/galaxy/webapps/galaxy/controllers/visualization.py
+++ b/lib/galaxy/webapps/galaxy/controllers/visualization.py
@@ -86,15 +86,19 @@
class DbKeyColumn( grids.GridColumn ):
""" Column for filtering by and displaying dataset dbkey. """
def filter( self, trans, user, query, dbkey ):
- """ Filter by dbkey; datasets without a dbkey are returned as well. """
- # use raw SQL b/c metadata is a BLOB
+ """ Filter by dbkey. """
+ # Use raw SQL b/c metadata is a BLOB.
dbkey_user, dbkey = decode_dbkey( dbkey )
dbkey = dbkey.replace("'", "\\'")
- return query.filter( or_( \
- or_( "metadata like '%%\"dbkey\": [\"%s\"]%%'" % dbkey, "metadata like '%%\"dbkey\": \"%s\"%%'" % dbkey ), \
- or_( "metadata like '%%\"dbkey\": [\"?\"]%%'", "metadata like '%%\"dbkey\": \"?\"%%'" ) \
- )
- )
+ return query.filter( or_( "metadata like '%%\"dbkey\": [\"%s\"]%%'" % dbkey, "metadata like '%%\"dbkey\": \"%s\"%%'" % dbkey ) )
+
+ #Use this query when datasets with matching dbkey *or* no dbkey can be added to the visualization.
+ #return query.filter( or_( \
+ # or_( "metadata like '%%\"dbkey\": [\"%s\"]%%'" % dbkey, "metadata like '%%\"dbkey\": \"%s\"%%'" % dbkey ), \
+ # or_( "metadata like '%%\"dbkey\": [\"?\"]%%'", "metadata like '%%\"dbkey\": \"?\"%%'" ) \
+ # )
+ # )
+
class HistoryColumn( grids.GridColumn ):
""" Column for filtering by history id. """
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: guerler: UI: Fix async submit of grids
by commits-noreply@bitbucket.org 14 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7553213e0646/
Changeset: 7553213e0646
User: guerler
Date: 2013-11-14 08:31:07
Summary: UI: Fix async submit of grids
Affected #: 1 file
diff -r 1c4bb73d630a21bf2057bdbb8b604eb0dab4ea0e -r 7553213e0646fd4337ceacc78820990c6ce0c710 static/scripts/galaxy.grids.js
--- a/static/scripts/galaxy.grids.js
+++ b/static/scripts/galaxy.grids.js
@@ -133,27 +133,8 @@
// Code to handle grid operations: filtering, sorting, paging, and operations.
//
-// Init operation buttons.
-function init_operation_buttons() {
- // Initialize operation buttons.
- $('input[name=operation]:submit').each(function() {
- $(this).click( function() {
- var operation_name = $(this).val();
- // For some reason, $('input[name=id]:checked').val() does not return all ids for checked boxes.
- // The code below performs this function.
- var item_ids = [];
- $('input[name=id]:checked').each(function() {
- item_ids.push( $(this).val() );
- });
- do_operation(operation_name, item_ids);
- });
- });
-}
-
// Initialize grid controls
function init_grid_controls() {
- init_operation_buttons();
-
// Initialize submit image elements.
$('.submit-image').each( function() {
// On mousedown, add class to simulate click.
@@ -498,7 +479,6 @@
// Init grid.
init_grid_elements();
- init_operation_buttons();
make_popup_menus();
// Hide loading overlay.
@@ -558,11 +538,13 @@
if(!confirm(confirmation_text))
return false;
- // set up hidden field to parse the command/operation to controller
- $('#operation').val(selected_button.value);
-
- // submit form
- selected_button.form.submit();
+ // add ids
+ var operation_name = selected_button.value;
+ var item_ids = [];
+ $('input[name=id]:checked').each(function() {
+ item_ids.push( $(this).val() );
+ });
+ do_operation(operation_name, item_ids);
// return
return true;
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/88a08db3c733/
Changeset: 88a08db3c733
Branch: stable
User: jgoecks
Date: 2013-11-14 00:41:23
Summary: Implement a hack to avoid infinite recursion in custom builds.
Affected #: 1 file
diff -r 9bde6f2b9c59a29a35b85b7fe3c2e3bef3ae6a2e -r 88a08db3c73393ef4f64b0c2f50262d5aa03967b lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -206,12 +206,19 @@
db_datasets[ "chromInfo" ] = db_dataset
incoming[ "chromInfo" ] = db_dataset.file_name
else:
- # For custom builds, chrom info resides in converted dataset; for built-in builds, chrom info resides in tool-data/shared.
+ # -- Get chrom_info from either a custom or built-in build. --
+
chrom_info = None
if trans.user and ( 'dbkeys' in trans.user.preferences ) and ( input_dbkey in from_json_string( trans.user.preferences[ 'dbkeys' ] ) ):
# Custom build.
custom_build_dict = from_json_string( trans.user.preferences[ 'dbkeys' ] )[ input_dbkey ]
- if 'fasta' in custom_build_dict:
+ # HACK: the attempt to get chrom_info below will trigger the
+ # fasta-to-len converter if the dataset is not available,
+ # which will in turn create a recursive loop when
+ # running the fasta-to-len tool. So, use a hack in the second
+ # condition below to avoid getting chrom_info when running the
+ # fasta-to-len converter.
+ if 'fasta' in custom_build_dict and tool.id != 'CONVERTER_fasta_to_len':
build_fasta_dataset = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( custom_build_dict[ 'fasta' ] )
chrom_info = build_fasta_dataset.get_converted_dataset( trans, 'len' ).file_name
https://bitbucket.org/galaxy/galaxy-central/commits/1c4bb73d630a/
Changeset: 1c4bb73d630a
User: jgoecks
Date: 2013-11-14 00:42:54
Summary: Merge stable branch to default.
Affected #: 0 files
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Implement a hack to avoid infinite recursion in custom builds.
by commits-noreply@bitbucket.org 13 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/39256e03655f/
Changeset: 39256e03655f
User: jgoecks
Date: 2013-11-14 00:41:23
Summary: Implement a hack to avoid infinite recursion in custom builds.
Affected #: 1 file
diff -r a9d90d2d13e121ffda929baa6ffab6ee43e8892f -r 39256e03655f35b03b940d2b91fc2436106f4e88 lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -206,12 +206,19 @@
db_datasets[ "chromInfo" ] = db_dataset
incoming[ "chromInfo" ] = db_dataset.file_name
else:
- # For custom builds, chrom info resides in converted dataset; for built-in builds, chrom info resides in tool-data/shared.
+ # -- Get chrom_info from either a custom or built-in build. --
+
chrom_info = None
if trans.user and ( 'dbkeys' in trans.user.preferences ) and ( input_dbkey in from_json_string( trans.user.preferences[ 'dbkeys' ] ) ):
# Custom build.
custom_build_dict = from_json_string( trans.user.preferences[ 'dbkeys' ] )[ input_dbkey ]
- if 'fasta' in custom_build_dict:
+ # HACK: the attempt to get chrom_info below will trigger the
+ # fasta-to-len converter if the dataset is not available,
+ # which will in turn create a recursive loop when
+ # running the fasta-to-len tool. So, use a hack in the second
+ # condition below to avoid getting chrom_info when running the
+ # fasta-to-len converter.
+ if 'fasta' in custom_build_dict and tool.id != 'CONVERTER_fasta_to_len':
build_fasta_dataset = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( custom_build_dict[ 'fasta' ] )
chrom_info = build_fasta_dataset.get_converted_dataset( trans, 'len' ).file_name
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: jmchilton: LWR: Expand user properties (__user_name__, __user_email__, __user_id__) in job params before creating client.
by commits-noreply@bitbucket.org 13 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/a9d90d2d13e1/
Changeset: a9d90d2d13e1
User: jmchilton
Date: 2013-11-13 23:37:11
Summary: LWR: Expand user properties (__user_name__, __user_email__, __user_id__) in job params before creating client.
This was the final piece of the puzzle for running jobs as the real user through the LWR. Now one just needs to add <param id="submit_user">$__user_name__</param> to the LWR destination in job_conf.xml on the Galaxy side to pass this information to the LWR. Things are more complicated on the LWR side, obviously: one needs to configure DRMAA, set the job manager to 'queued_external_drmaa', and set up sudo rules. Some details will be added to the LWR documentation shortly.
Affected #: 1 file
diff -r 521b4ff9060b72f1f8c9f5977f8c4fa2b0bd7877 -r a9d90d2d13e121ffda929baa6ffab6ee43e8892f lib/galaxy/jobs/runners/lwr.py
--- a/lib/galaxy/jobs/runners/lwr.py
+++ b/lib/galaxy/jobs/runners/lwr.py
@@ -108,7 +108,10 @@
job_id = job_wrapper.job_id
if hasattr(job_wrapper, 'task_id'):
job_id = "%s_%s" % (job_id, job_wrapper.task_id)
- return self.get_client( job_wrapper.job_destination.params, job_id )
+ params = job_wrapper.job_destination.params.copy()
+ for key, value in params.iteritems():
+ params[key] = model.User.expand_user_properties( job_wrapper.get_job().user, value )
+ return self.get_client( params, job_id )
def get_client_from_state(self, job_state):
job_destination_params = job_state.job_destination.params
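The effect on a destination parameter (an editor's sketch; the exact implementation of expand_user_properties is not shown in this diff, but conceptually it substitutes templates such as $__user_name__):

    from string import Template

    # Standalone approximation of the expansion (hypothetical helper; Galaxy's actual
    # code is model.User.expand_user_properties, whose body is not shown above):
    def expand_user_properties_sketch( user_props, value ):
        return Template( str( value ) ).safe_substitute( user_props )

    params = { 'submit_user': '$__user_name__' }
    user_props = { '__user_name__': 'jdoe', '__user_email__': 'jdoe@example.org', '__user_id__': '42' }
    expanded = dict( ( key, expand_user_properties_sketch( user_props, value ) )
                     for key, value in params.items() )
    assert expanded[ 'submit_user' ] == 'jdoe'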
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Improved temporary directory management in the Tool Shed.
by commits-noreply@bitbucket.org 13 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/521b4ff9060b/
Changeset: 521b4ff9060b
User: greg
Date: 2013-11-13 22:17:33
Summary: Improved temporary directory management in the Tool Shed.
Affected #: 6 files
diff -r c70a6f517aa96506b3245db857025f567aac0511 -r 521b4ff9060b72f1f8c9f5977f8c4fa2b0bd7877 lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -1093,7 +1093,8 @@
str( repository.name ),
changeset_revision,
file_type,
- export_repository_dependencies )
+ export_repository_dependencies,
+ api=False )
repositories_archive_filename = os.path.basename( repositories_archive.name )
if error_message:
message = error_message
@@ -1104,6 +1105,8 @@
opened_archive = open( repositories_archive.name )
# Make sure the file is removed from disk after the contents have been downloaded.
os.unlink( repositories_archive.name )
+ repositories_archive_path, file_name = os.path.split( repositories_archive.name )
+ suc.remove_dir( repositories_archive_path )
return opened_archive
repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, repository_id, changeset_revision )
metadata = repository_metadata.metadata
@@ -3091,6 +3094,7 @@
work_dir )
if message:
status = 'error'
+ suc.remove_dir( work_dir )
break
if guid:
tool_lineage = self.get_versions_of_tool( trans, repository, repository_metadata, guid )
diff -r c70a6f517aa96506b3245db857025f567aac0511 -r 521b4ff9060b72f1f8c9f5977f8c4fa2b0bd7877 lib/galaxy/webapps/tool_shed/controllers/upload.py
--- a/lib/galaxy/webapps/tool_shed/controllers/upload.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/upload.py
@@ -248,6 +248,8 @@
status = 'error'
# Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
tool_util.reset_tool_data_tables( trans.app )
+ if uploaded_directory:
+ suc.remove_dir( uploaded_directory )
trans.response.send_redirect( web.url_for( controller='repository',
action='browse_repository',
id=repository_id,
@@ -255,6 +257,8 @@
message=message,
status=status ) )
else:
+ if uploaded_directory:
+ suc.remove_dir( uploaded_directory )
status = 'error'
# Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
tool_util.reset_tool_data_tables( trans.app )
diff -r c70a6f517aa96506b3245db857025f567aac0511 -r 521b4ff9060b72f1f8c9f5977f8c4fa2b0bd7877 lib/tool_shed/galaxy_install/install_manager.py
--- a/lib/tool_shed/galaxy_install/install_manager.py
+++ b/lib/tool_shed/galaxy_install/install_manager.py
@@ -464,10 +464,7 @@
if display_path:
# Load proprietary datatype display applications
self.app.datatypes_registry.load_display_applications( installed_repository_dict=repository_dict )
- try:
- shutil.rmtree( work_dir )
- except:
- pass
+ suc.remove_dir( work_dir )
def install_repository( self, repository_elem, tool_shed_repository, install_dependencies, is_repository_dependency=False ):
"""Install a single repository, loading contained tools into the tool panel."""
diff -r c70a6f517aa96506b3245db857025f567aac0511 -r 521b4ff9060b72f1f8c9f5977f8c4fa2b0bd7877 lib/tool_shed/galaxy_install/repository_util.py
--- a/lib/tool_shed/galaxy_install/repository_util.py
+++ b/lib/tool_shed/galaxy_install/repository_util.py
@@ -575,10 +575,7 @@
tool_shed_repository=tool_shed_repository,
tool_dependencies_config=tool_dependencies_config,
tool_dependencies=tool_shed_repository.tool_dependencies )
- try:
- shutil.rmtree( work_dir )
- except:
- pass
+ suc.remove_dir( work_dir )
suc.update_tool_shed_repository_status( trans.app, tool_shed_repository, trans.model.ToolShedRepository.installation_status.INSTALLED )
else:
# An error occurred while cloning the repository, so reset everything necessary to enable another attempt.
@@ -817,10 +814,7 @@
for installed_tool_dependency in installed_tool_dependencies:
if installed_tool_dependency.status in [ trans.model.ToolDependency.installation_status.ERROR ]:
repair_dict = add_repair_dict_entry( repository.name, installed_tool_dependency.error_message )
- try:
- shutil.rmtree( work_dir )
- except:
- pass
+ suc.remove_dir( work_dir )
suc.update_tool_shed_repository_status( trans.app, repository, trans.model.ToolShedRepository.installation_status.INSTALLED )
return repair_dict
diff -r c70a6f517aa96506b3245db857025f567aac0511 -r 521b4ff9060b72f1f8c9f5977f8c4fa2b0bd7877 lib/tool_shed/util/export_util.py
--- a/lib/tool_shed/util/export_util.py
+++ b/lib/tool_shed/util/export_util.py
@@ -103,7 +103,7 @@
attributes, sub_elements = get_repository_attributes_and_sub_elements( ordered_repository, archive_name )
elem = xml_util.create_element( 'repository', attributes=attributes, sub_elements=sub_elements )
exported_repository_registry.exported_repository_elems.append( elem )
- shutil.rmtree( work_dir )
+ suc.remove_dir( work_dir )
# Keep information about the export in a file name export_info.xml in the archive.
sub_elements = generate_export_elem( tool_shed_url, repository, changeset_revision, export_repository_dependencies, api )
export_elem = xml_util.create_element( 'export_info', attributes=None, sub_elements=sub_elements )
diff -r c70a6f517aa96506b3245db857025f567aac0511 -r 521b4ff9060b72f1f8c9f5977f8c4fa2b0bd7877 lib/tool_shed/util/metadata_util.py
--- a/lib/tool_shed/util/metadata_util.py
+++ b/lib/tool_shed/util/metadata_util.py
@@ -747,6 +747,7 @@
# Reset the value of the app's tool_data_path and tool_data_table_config_path to their respective original values.
app.config.tool_data_path = original_tool_data_path
app.config.tool_data_table_config_path = original_tool_data_table_config_path
+ suc.remove_dir( work_dir )
return metadata_dict, invalid_file_tups
def generate_package_dependency_metadata( app, elem, valid_tool_dependencies_dict, invalid_tool_dependencies_dict ):
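suc.remove_dir itself is not shown in this changeset; presumably (an editor's guess) it wraps the removed try/except pattern along these lines:

    import shutil

    def remove_dir_sketch( dir_path ):
        # Best-effort removal, mirroring the try/shutil.rmtree/except blocks deleted above.
        try:
            shutil.rmtree( dir_path )
        except Exception:
            pass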
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: Dave Bouvier: When running the install and test framework, explicitly uninstall any tool dependency that is not in the state Installed.
by commits-noreply@bitbucket.org 13 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c70a6f517aa9/
Changeset: c70a6f517aa9
User: Dave Bouvier
Date: 2013-11-13 20:46:25
Summary: When running the install and test framework, explicitly uninstall any tool dependency that is not in the state Installed.
Affected #: 2 files
diff -r e6b3faf87f026f03412ff7b1b133fa0872c8be45 -r c70a6f517aa96506b3245db857025f567aac0511 test/install_and_test_tool_shed_repositories/base/test_db_util.py
--- a/test/install_and_test_tool_shed_repositories/base/test_db_util.py
+++ b/test/install_and_test_tool_shed_repositories/base/test_db_util.py
@@ -36,12 +36,17 @@
return role
raise AssertionError( "Private role not found for user '%s'" % user.email )
-def get_tool_dependencies_for_installed_repository( repository_id, status=None ):
+def get_tool_dependencies_for_installed_repository( repository_id, status=None, exclude_status=None ):
if status is not None:
return sa_session.query( model.ToolDependency ) \
.filter( and_( model.ToolDependency.table.c.tool_shed_repository_id == repository_id,
model.ToolDependency.table.c.status == status ) ) \
.all()
+ elif exclude_status is not None:
+ return sa_session.query( model.ToolDependency ) \
+ .filter( and_( model.ToolDependency.table.c.tool_shed_repository_id == repository_id,
+ model.ToolDependency.table.c.status != exclude_status ) ) \
+ .all()
else:
return sa_session.query( model.ToolDependency ) \
.filter( model.ToolDependency.table.c.tool_shed_repository_id == repository_id ) \
diff -r e6b3faf87f026f03412ff7b1b133fa0872c8be45 -r c70a6f517aa96506b3245db857025f567aac0511 test/install_and_test_tool_shed_repositories/base/twilltestcase.py
--- a/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
+++ b/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
@@ -150,10 +150,10 @@
else:
strings_displayed.append( 'has been uninstalled' )
self.check_for_strings( strings_displayed, strings_not_displayed=[] )
- # Get all tool dependencies that are in an error state and uninstall them explicitly, so that the next installation attempt
+ # Get all tool dependencies that are not in an installed state and uninstall them explicitly, so that the next installation attempt
# may succeed.
- error_state = model.ToolDependency.installation_status.ERROR
- tool_dependencies = test_db_util.get_tool_dependencies_for_installed_repository( installed_repository.id, status=error_state )
+ installed_state = model.ToolDependency.installation_status.INSTALLED
+ tool_dependencies = test_db_util.get_tool_dependencies_for_installed_repository( installed_repository.id, exclude_status=installed_state )
if len( tool_dependencies ) > 0:
encoded_tool_dependency_ids = [ self.security.encode_id( tool_dependency.id ) for tool_dependency in tool_dependencies ]
self.uninstall_tool_dependencies( self.security.encode_id( installed_repository.id ), encoded_tool_dependency_ids )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.