galaxy-commits
November 2013
- 1 participant
- 208 discussions
commit/galaxy-central: Dave Bouvier: Apply the unicodify fix to repository dependencies' error messages as well.
by commits-noreply@bitbucket.org 21 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e982fcaaaab5/
Changeset: e982fcaaaab5
User: Dave Bouvier
Date: 2013-11-21 16:10:03
Summary: Apply the unicodify fix to repository dependencies' error messages as well.
Affected #: 1 file
diff -r 9a998cee3ea5acaf0f058b4743da37787c916594 -r e982fcaaaab558e6d4ecb3750d2ec721d5fd861d test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -1131,7 +1131,7 @@
repository_status[ 'installation_errors' ][ 'tool_dependencies' ].append( test_result )
for dependency in repository.missing_repository_dependencies:
log.error( 'Missing repository dependency %s changeset revision %s owned by %s: %s' % \
- ( str( dependency.name ), str( dependency.changeset_revision ), str( dependency.owner ), str( dependency.error_message ) ) )
+ ( str( dependency.name ), str( dependency.changeset_revision ), str( dependency.owner ), unicodify( dependency.error_message ) ) )
test_result = dict( tool_shed=dependency.tool_shed,
name=dependency.name,
owner=dependency.owner,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
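For context, the failure this fix addresses is easy to reproduce: under Python 2, str() raises UnicodeEncodeError on a unicode error message containing non-ASCII characters, while a unicodify-style helper passes unicode through and decodes byte strings safely. The sketch below approximates galaxy.util.unicodify rather than importing it, and the error message is invented for illustration:

    # -*- coding: utf-8 -*-
    # Approximation of galaxy.util.unicodify; the real helper covers more
    # edge cases than this sketch.
    def unicodify(value, encoding='utf-8'):
        if value is None:
            return None
        if not isinstance(value, unicode):
            value = unicode(str(value), encoding, 'replace')
        return value

    error_message = u'ung\xfcltige Option'  # hypothetical non-ASCII message

    try:
        # The pre-fix code path: str() on non-ASCII unicode blows up.
        print 'Missing repository dependency: %s' % str(error_message)
    except UnicodeEncodeError, e:
        print 'str() path failed: %s' % e

    # unicodify() leaves unicode untouched, so the log line formats cleanly.
    ok = u'Missing repository dependency: %s' % unicodify(error_message)
    print ok.encode('utf-8')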
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7117a2655ade/
Changeset: 7117a2655ade
User: jmchilton
Date: 2013-11-21 15:02:50
Summary: Fix model/check.py for app being None such as when using create_db.sh
Thanks to Aaron Petkau for reporting this issue.
Affected #: 1 file
diff -r e6e9349e8da45a058fb5811eb89813694aeef729 -r 7117a2655ade212cfee20c9499d12b8ed5dc729b lib/galaxy/model/migrate/check.py
--- a/lib/galaxy/model/migrate/check.py
+++ b/lib/galaxy/model/migrate/check.py
@@ -57,7 +57,7 @@
migrate_to_current_version( engine, db_schema )
meta = MetaData( bind=engine )
- if getattr( app.config, 'database_auto_migrate', False ):
+ if app and getattr( app.config, 'database_auto_migrate', False ):
migrate()
return
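The guard matters because scripts such as create_db.sh run the migration check without constructing a Galaxy application, so app is None. A minimal sketch of the effect; the class names are illustrative, not the actual check.py objects:

    class Config(object):
        database_auto_migrate = True

    class App(object):
        config = Config()

    def should_auto_migrate(app):
        # Before the fix, app.config raised AttributeError when app was None.
        return bool(app and getattr(app.config, 'database_auto_migrate', False))

    print should_auto_migrate(App())  # True
    print should_auto_migrate(None)   # False, rather than AttributeError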
https://bitbucket.org/galaxy/galaxy-central/commits/9a998cee3ea5/
Changeset: 9a998cee3ea5
User: jmchilton
Date: 2013-11-21 15:05:36
Summary: Merge.
Affected #: 1 file
diff -r 7117a2655ade212cfee20c9499d12b8ed5dc729b -r 9a998cee3ea5acaf0f058b4743da37787c916594 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -43,6 +43,7 @@
from functional_tests import generate_config_file
from galaxy import eggs
+from galaxy.util import unicodify
eggs.require( "nose" )
eggs.require( "NoseHTML" )
@@ -1122,7 +1123,7 @@
# In keeping with the standard display layout, add the error message to the dict for each tool individually.
for dependency in repository.missing_tool_dependencies:
log.error( 'Missing tool dependency %s of type %s version %s: %s' % \
- ( str( dependency.name ), str( dependency.type ), str( dependency.version ), str( dependency.error_message ) ) )
+ ( str( dependency.name ), str( dependency.type ), str( dependency.version ), unicodify( dependency.error_message ) ) )
test_result = dict( type=dependency.type,
name=dependency.name,
version=dependency.version,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: Dave Bouvier: Handle unicode characters when logging tool dependency installation errors.
by commits-noreply@bitbucket.org 21 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/f6d687147f65/
Changeset: f6d687147f65
User: Dave Bouvier
Date: 2013-11-21 14:59:22
Summary: Handle unicode characters when logging tool dependency installation errors.
Affected #: 1 file
diff -r e6e9349e8da45a058fb5811eb89813694aeef729 -r f6d687147f65098a82854f5c7d6aaaa2dc602c8a test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -43,6 +43,7 @@
from functional_tests import generate_config_file
from galaxy import eggs
+from galaxy.util import unicodify
eggs.require( "nose" )
eggs.require( "NoseHTML" )
@@ -1122,7 +1123,7 @@
# In keeping with the standard display layout, add the error message to the dict for each tool individually.
for dependency in repository.missing_tool_dependencies:
log.error( 'Missing tool dependency %s of type %s version %s: %s' % \
- ( str( dependency.name ), str( dependency.type ), str( dependency.version ), str( dependency.error_message ) ) )
+ ( str( dependency.name ), str( dependency.type ), str( dependency.version ), unicodify( dependency.error_message ) ) )
test_result = dict( type=dependency.type,
name=dependency.name,
version=dependency.version,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: dannon: Add object_store_conf.xml to .hgignore
by commits-noreply@bitbucket.org 20 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e6e9349e8da4/
Changeset: e6e9349e8da4
User: dannon
Date: 2013-11-21 02:39:51
Summary: Add object_store_conf.xml to .hgignore
Affected #: 1 file
diff -r 672987cdb4bf67ed14915d7997618bf008ce844e -r e6e9349e8da45a058fb5811eb89813694aeef729 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -60,6 +60,7 @@
job_conf.xml
data_manager_conf.xml
shed_data_manager_conf.xml
+object_store_conf.xml
config/*
static/welcome.html.*
static/welcome.html
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: dannon: Swap impersonate to select2 deferred load w/ filter. This adds a 'f_email' filter parameter to the Users api collection -- we should revisit this before the next stable release to decide how we want API filtering to actually work down the road.
by commits-noreply@bitbucket.org 20 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/672987cdb4bf/
Changeset: 672987cdb4bf
User: dannon
Date: 2013-11-21 01:59:01
Summary: Swap impersonate to select2 deferred load w/ filter. This adds a 'f_email' filter parameter to the Users api collection -- we should revisit this before the next stable release to decide how we want API filtering to actually work down the road.
Affected #: 2 files
diff -r 7a882c5fdd34636ae5ca98f63709c13a6648c3cf -r 672987cdb4bf67ed14915d7997618bf008ce844e lib/galaxy/webapps/galaxy/api/users.py
--- a/lib/galaxy/webapps/galaxy/api/users.py
+++ b/lib/galaxy/webapps/galaxy/api/users.py
@@ -14,7 +14,7 @@
class UserAPIController( BaseAPIController, UsesTagsMixin, CreatesUsersMixin, CreatesApiKeysMixin ):
@web.expose_api
- def index( self, trans, deleted='False', **kwd ):
+ def index( self, trans, deleted='False', f_email=None, **kwd ):
"""
GET /api/users
GET /api/users/deleted
@@ -23,6 +23,8 @@
rval = []
query = trans.sa_session.query( trans.app.model.User )
deleted = util.string_as_bool( deleted )
+ if f_email:
+ query = query.filter(trans.app.model.User.email.like("%%%s%%" % f_email))
if deleted:
query = query.filter( trans.app.model.User.table.c.deleted == True )
# only admins can see deleted users
diff -r 7a882c5fdd34636ae5ca98f63709c13a6648c3cf -r 672987cdb4bf67ed14915d7997618bf008ce844e templates/admin/impersonate.mako
--- a/templates/admin/impersonate.mako
+++ b/templates/admin/impersonate.mako
@@ -14,11 +14,8 @@
<label>
User to impersonate:
</label>
- <select name="email" class='text-and-autocomplete-select'>
- %for email in emails:
- <option>${email}</option>
- %endfor
- </select>
+ <input type="hidden" id="email_select" name="email">
+ </input></div><div class="form-row"><input type="submit" name="impersonate_button" value="Impersonate"/>
@@ -26,4 +23,35 @@
</form></div></div>
+ <script type="text/javascript">
+ /* This should be ripped out and made generic at some point for the
+ * various API bindings available, and once the API can filter list
+ * queries (term, below) */
+ $("#email_select").select2({
+ placeholder: "Select a user",
+ ajax: {
+ url: "/api/users/",
+ dataType: 'json',
+ quietMillis: 250,
+ matcher: function(term, text) { return text.toUpperCase().indexOf(term.toUpperCase())>=0; },
+ data: function (term) {
+ return {
+ f_email: term
+ };
+ },
+ results: function (data) {
+ var results = [];
+ $.each(data, function(index, item){
+ results.push({
+ id: item.email,
+ text: item.email
+ });
+ });
+ return {
+ results: results
+ };
+ }
+ }
+ });
+ </script>
%endif
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
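The new f_email parameter does a substring match (via the LIKE clause above), so it can be exercised directly against the users API. A sketch using requests; the URL and key are placeholders, and an admin key may be needed to list other users:

    import requests

    GALAXY_URL = 'http://localhost:8080'
    API_KEY = 'admin-api-key-here'  # placeholder

    resp = requests.get('%s/api/users' % GALAXY_URL,
                        params={'key': API_KEY, 'f_email': 'example.org'})
    for user in resp.json():
        print user['email']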
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/23afd47845fe/
Changeset: 23afd47845fe
User: jmchilton
Date: 2013-11-20 23:44:09
Summary: Add stderr and stdout to provenance information.
Affected #: 1 file
diff -r ef45d85b1f579c5bb4b07284dd96ad8403003c03 -r 23afd47845fec9b8169712d0e78ee8d2baf0d96d lib/galaxy/webapps/galaxy/api/provenance.py
--- a/lib/galaxy/webapps/galaxy/api/provenance.py
+++ b/lib/galaxy/webapps/galaxy/api/provenance.py
@@ -48,6 +48,8 @@
"uuid": ( lambda uuid: str( uuid ) if uuid else None )( item.dataset.uuid),
"tool_id": job.tool_id,
"parameters": self._get_job_record(trans, job, follow),
+ "stderr": job.stderr,
+ "stdout": job.stdout,
}
return None
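With stderr and stdout in the provenance record, a client can pull a job's output streams alongside its tool id and parameters. A sketch, assuming the standard history-contents provenance route backs this controller; the IDs and key are placeholders:

    import requests

    GALAXY_URL = 'http://localhost:8080'
    API_KEY = 'api-key-here'  # placeholder
    history_id = 'abc123'     # placeholder encoded ids
    hda_id = 'def456'

    resp = requests.get('%s/api/histories/%s/contents/%s/provenance'
                        % (GALAXY_URL, history_id, hda_id),
                        params={'key': API_KEY})
    prov = resp.json()
    print prov['tool_id']
    print prov['stderr']
    print prov['stdout']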
https://bitbucket.org/galaxy/galaxy-central/commits/bd8b3e2a9938/
Changeset: bd8b3e2a9938
User: jmchilton
Date: 2013-11-20 23:44:09
Summary: Allow raw display of composite files via API.
Affected #: 1 file
diff -r 23afd47845fec9b8169712d0e78ee8d2baf0d96d -r bd8b3e2a99389f0564a1ff0dbfb9e3d56e56f990 lib/galaxy/webapps/galaxy/api/datasets.py
--- a/lib/galaxy/webapps/galaxy/api/datasets.py
+++ b/lib/galaxy/webapps/galaxy/api/datasets.py
@@ -265,7 +265,12 @@
except KeyError:
pass
if raw:
- rval = open( hda.file_name )
+ if filename and filename != 'index':
+ file_path = trans.app.object_store.get_filename(hda.dataset, extra_dir='dataset_%s_files' % hda.dataset.id, alt_name=filename)
+ else:
+ file_path = hda.file_name
+ rval = open( file_path )
+
else:
rval = hda.datatype.display_data( trans, hda, preview, filename, to_ext, chunk, **display_kwd )
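This makes individual components of a composite dataset (its extra files) retrievable in raw form by passing a filename alongside raw. A sketch, assuming the dataset display route that the patched method serves; the component filename is invented:

    import requests

    GALAXY_URL = 'http://localhost:8080'
    API_KEY = 'api-key-here'  # placeholder
    history_id = 'abc123'     # placeholder encoded ids
    hda_id = 'def456'

    resp = requests.get('%s/api/histories/%s/contents/%s/display'
                        % (GALAXY_URL, history_id, hda_id),
                        params={'key': API_KEY, 'raw': 'True',
                                'filename': 'velvet_log.txt'})  # hypothetical component
    print resp.text[:200]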
https://bitbucket.org/galaxy/galaxy-central/commits/7a882c5fdd34/
Changeset: 7a882c5fdd34
User: jmchilton
Date: 2013-11-20 23:44:09
Summary: Allow simple use of multi-page tools via API.
In tools module, when process_state=='populate' (i.e. coming from tool API without existing tool_state specified), populate and validate state of all pages of tools.
Affected #: 1 file
diff -r bd8b3e2a99389f0564a1ff0dbfb9e3d56e56f990 -r 7a882c5fdd34636ae5ca98f63709c13a6648c3cf lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1811,7 +1811,8 @@
from API). May want an incremental version of the API also at some point,
that is why this is not just called for_api.
"""
- state, state_new = self.__fetch_state( trans, incoming, history )
+ all_pages = ( process_state == "populate" ) # If process_state = update, handle all pages at once.
+ state, state_new = self.__fetch_state( trans, incoming, history, all_pages=all_pages )
if state_new:
# This feels a bit like a hack. It allows forcing full processing
# of inputs even when there is no state in the incoming dictionary
@@ -1837,7 +1838,7 @@
error_message = "One or more errors were found in the input you provided. The specific errors are marked below."
return "tool_form.mako", dict( errors=errors, tool_state=state, incoming=incoming, error_message=error_message )
# If we've completed the last page we can execute the tool
- elif state.page == self.last_page:
+ elif all_pages or state.page == self.last_page:
return self.__handle_tool_execute( trans, incoming, params, history )
# Otherwise move on to the next page
else:
@@ -1891,7 +1892,7 @@
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
return 'tool_form.mako', dict( errors=errors, tool_state=state )
- def __fetch_state( self, trans, incoming, history ):
+ def __fetch_state( self, trans, incoming, history, all_pages ):
# Get the state or create if not found
if "tool_state" in incoming:
encoded_state = string_to_object( incoming["tool_state"] )
@@ -1899,7 +1900,7 @@
state.decode( encoded_state, self, trans.app )
new = False
else:
- state = self.new_state( trans, history=history )
+ state = self.new_state( trans, history=history, all_pages=all_pages )
new = True
return state, new
@@ -1917,15 +1918,17 @@
# Update state for all inputs on the current page taking new
# values from `incoming`.
if process_state == "update":
- errors = self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {}, source=source )
+ inputs = self.inputs_by_page[state.page]
+ errors = self.update_state( trans, inputs, state.inputs, incoming, old_errors=old_errors or {}, source=source )
elif process_state == "populate":
- errors = self.populate_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, history, source=source )
+ inputs = self.inputs
+ errors = self.populate_state( trans, inputs, state.inputs, incoming, history, source=source )
else:
raise Exception("Unknown process_state type %s" % process_state)
# If the tool provides a `validate_input` hook, call it.
validate_input = self.get_hook( 'validate_input' )
if validate_input:
- validate_input( trans, errors, state.inputs, self.inputs_by_page[state.page] )
+ validate_input( trans, errors, state.inputs, inputs )
params = state.inputs
return errors, params
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
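Taken together with the populate change above, one tools-API request can now drive a tool whose form spans several pages, since every page is populated and validated in a single pass. A sketch; the tool id and parameter names are invented, and the exact payload keys of this era's /api/tools should be treated as illustrative:

    import json
    import requests

    GALAXY_URL = 'http://localhost:8080'
    API_KEY = 'api-key-here'  # placeholder
    history_id = 'abc123'     # placeholder

    payload = {
        'key': API_KEY,
        'tool_id': 'multi_page_tool',        # hypothetical tool
        'history_id': history_id,
        'inputs': json.dumps({
            'first_page_param': '1.bed',     # parameters from any page,
            'second_page_param': 'tabular',  # submitted in one request
        }),
    }
    resp = requests.post('%s/api/tools' % GALAXY_URL, data=payload)
    print resp.json()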
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/98962174b0b1/
Changeset: 98962174b0b1
User: jmchilton
Date: 2013-11-19 20:07:50
Summary: Allow implicit use of tool default parameter values when using API.
Affected #: 1 file
diff -r f2186f4796ad4c6aa2c9191f9804d8f5d59eb15d -r 98962174b0b12e239eff913353d683e396d2b004 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2015,12 +2015,15 @@
test_param_key = prefix + input.test_param.name
else:
test_param_key = group_prefix + input.test_param.name
- test_param_error = None
- test_incoming = get_incoming_value( incoming, test_param_key, None )
+ # Get value of test param and determine current case
+ value, test_param_error = check_param_from_incoming( trans,
+ group_state,
+ input.test_param,
+ incoming,
+ test_param_key,
+ context,
+ source )
- # Get value of test param and determine current case
- value, test_param_error = \
- check_param( trans, input.test_param, test_incoming, context, source=source )
current_case = input.get_current_case( value, trans )
# Current case has changed, throw away old state
group_state = state[input.name] = {}
@@ -2083,8 +2086,7 @@
if any_group_errors:
errors[input.name] = group_errors
else:
- incoming_value = get_incoming_value( incoming, key, None )
- value, error = check_param( trans, input, incoming_value, context, source=source )
+ value, error = check_param_from_incoming( trans, state, input, incoming, key, context, source )
if error:
errors[ input.name ] = error
state[ input.name ] = value
@@ -3481,7 +3483,24 @@
else:
return val
+
+def check_param_from_incoming( trans, state, input, incoming, key, context, source ):
+ """
+ Unlike "update" state, this preserves default if no incoming value found.
+ This lets API user specify just a subset of params and allow defaults to be
+ used when available.
+ """
+ default_input_value = state.get( input.name, None )
+ incoming_value = get_incoming_value( incoming, key, default_input_value )
+ value, error = check_param( trans, input, incoming_value, context, source=source )
+ return value, error
+
+
def get_incoming_value( incoming, key, default ):
+ """
+ Fetch value from incoming dict directly or check special nginx upload
+ created variants of this key.
+ """
if "__" + key + "__is_composite" in incoming:
composite_keys = incoming["__" + key + "__keys"].split()
value = dict()
https://bitbucket.org/galaxy/galaxy-central/commits/ef45d85b1f57/
Changeset: ef45d85b1f57
User: jmchilton
Date: 2013-11-20 23:37:49
Summary: Merged in jmchilton/galaxy-central-fork-1 (pull request #265)
Allow implicit use of tool default parameter values when using API.
Affected #: 1 file
diff -r e507124fbe79903378c4b0fe150f2f3fd816ad32 -r ef45d85b1f579c5bb4b07284dd96ad8403003c03 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2015,12 +2015,15 @@
test_param_key = prefix + input.test_param.name
else:
test_param_key = group_prefix + input.test_param.name
- test_param_error = None
- test_incoming = get_incoming_value( incoming, test_param_key, None )
+ # Get value of test param and determine current case
+ value, test_param_error = check_param_from_incoming( trans,
+ group_state,
+ input.test_param,
+ incoming,
+ test_param_key,
+ context,
+ source )
- # Get value of test param and determine current case
- value, test_param_error = \
- check_param( trans, input.test_param, test_incoming, context, source=source )
current_case = input.get_current_case( value, trans )
# Current case has changed, throw away old state
group_state = state[input.name] = {}
@@ -2083,8 +2086,7 @@
if any_group_errors:
errors[input.name] = group_errors
else:
- incoming_value = get_incoming_value( incoming, key, None )
- value, error = check_param( trans, input, incoming_value, context, source=source )
+ value, error = check_param_from_incoming( trans, state, input, incoming, key, context, source )
if error:
errors[ input.name ] = error
state[ input.name ] = value
@@ -3481,7 +3483,24 @@
else:
return val
+
+def check_param_from_incoming( trans, state, input, incoming, key, context, source ):
+ """
+ Unlike "update" state, this preserves default if no incoming value found.
+ This lets API user specify just a subset of params and allow defaults to be
+ used when available.
+ """
+ default_input_value = state.get( input.name, None )
+ incoming_value = get_incoming_value( incoming, key, default_input_value )
+ value, error = check_param( trans, input, incoming_value, context, source=source )
+ return value, error
+
+
def get_incoming_value( incoming, key, default ):
+ """
+ Fetch value from incoming dict directly or check special nginx upload
+ created variants of this key.
+ """
if "__" + key + "__is_composite" in incoming:
composite_keys = incoming["__" + key + "__keys"].split()
value = dict()
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
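The practical effect of check_param_from_incoming() is that an API caller may submit only a subset of a tool's parameters and inherit tool defaults for the rest. The lookup pattern reduces to the sketch below (the composite-key handling from get_incoming_value() is omitted):

    def get_incoming_value(incoming, key, default):
        # Simplified: the real helper also checks nginx-upload composite keys.
        return incoming.get(key, default)

    state = {'threshold': 0.05}  # default already present in tool state
    incoming = {}                # caller omitted 'threshold'

    default_input_value = state.get('threshold', None)
    value = get_incoming_value(incoming, 'threshold', default_input_value)
    print value                  # 0.05 rather than None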
4 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/b7a7c9516429/
Changeset: b7a7c9516429
User: dannon
Date: 2013-11-16 23:36:17
Summary: Update S3ObjectStore init to supply config, config_xml
Affected #: 1 file
diff -r e057f2263f4bf8d2f5c277ae09360e36aa96cbbb -r b7a7c951642952129a05536c3e46cdae1c3000d1 lib/galaxy/objectstore/s3.py
--- a/lib/galaxy/objectstore/s3.py
+++ b/lib/galaxy/objectstore/s3.py
@@ -34,7 +34,7 @@
Galaxy and S3.
"""
def __init__(self, config, config_xml=None):
- super(S3ObjectStore, self).__init__()
+ super(S3ObjectStore, self).__init__(config, config_xml)
self.config = config
self.staging_path = self.config.file_path
self.s3_conn = get_OS_connection(self.config)
https://bitbucket.org/galaxy/galaxy-central/commits/95db640a4071/
Changeset: 95db640a4071
User: dannon
Date: 2013-11-18 17:01:42
Summary: Merge.
Affected #: 33 files
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1570,113 +1570,10 @@
store in `self.tests`.
"""
self.tests = []
- # Composite datasets need a unique name: each test occurs in a fresh
- # history, but we'll keep it unique per set of tests
- composite_data_names_counter = 0
for i, test_elem in enumerate( tests_elem.findall( 'test' ) ):
- name = test_elem.get( 'name', 'Test-%d' % (i+1) )
- maxseconds = int( test_elem.get( 'maxseconds', '120' ) )
- test = ToolTestBuilder( self, name, maxseconds )
- try:
- for param_elem in test_elem.findall( "param" ):
- attrib = dict( param_elem.attrib )
- if 'values' in attrib:
- value = attrib[ 'values' ].split( ',' )
- elif 'value' in attrib:
- value = attrib['value']
- else:
- value = None
- attrib['children'] = list( param_elem.getchildren() )
- if attrib['children']:
- # At this time, we can assume having children only
- # occurs on DataToolParameter test items but this could
- # change and would cause the below parsing to change
- # based upon differences in children items
- attrib['metadata'] = []
- attrib['composite_data'] = []
- attrib['edit_attributes'] = []
- # Composite datasets need to be renamed uniquely
- composite_data_name = None
- for child in attrib['children']:
- if child.tag == 'composite_data':
- attrib['composite_data'].append( child )
- if composite_data_name is None:
- # Generate a unique name; each test uses a
- # fresh history
- composite_data_name = '_COMPOSITE_RENAMED_%i_' \
- % ( composite_data_names_counter )
- composite_data_names_counter += 1
- elif child.tag == 'metadata':
- attrib['metadata'].append( child )
- elif child.tag == 'metadata':
- attrib['metadata'].append( child )
- elif child.tag == 'edit_attributes':
- attrib['edit_attributes'].append( child )
- if composite_data_name:
- # Composite datasets need implicit renaming;
- # inserted at front of list so explicit declarations
- # take precedence
- attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } )
- test.add_param( attrib.pop( 'name' ), value, attrib )
- for output_elem in test_elem.findall( "output" ):
- attrib = dict( output_elem.attrib )
- name = attrib.pop( 'name', None )
- if name is None:
- raise Exception( "Test output does not have a 'name'" )
- assert_elem = output_elem.find("assert_contents")
- assert_list = None
- # Trying to keep testing patch as localized as
- # possible, this function should be relocated
- # somewhere more conventional.
- def convert_elem(elem):
- """ Converts and XML element to a dictionary format, used by assertion checking code. """
- tag = elem.tag
- attributes = dict( elem.attrib )
- child_elems = list( elem.getchildren() )
- converted_children = []
- for child_elem in child_elems:
- converted_children.append( convert_elem(child_elem) )
- return {"tag" : tag, "attributes" : attributes, "children" : converted_children}
- if assert_elem is not None:
- assert_list = []
- for assert_child in list(assert_elem):
- assert_list.append(convert_elem(assert_child))
- file = attrib.pop( 'file', None )
- # File no longer required if an list of assertions was present.
- if assert_list is None and file is None:
- raise Exception( "Test output does not have a 'file'")
- attributes = {}
- # Method of comparison
- attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower()
- # Number of lines to allow to vary in logs (for dates, etc)
- attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) )
- # Allow a file size to vary if sim_size compare
- attributes['delta'] = int( attrib.pop( 'delta', '10000' ) )
- attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) )
- attributes['extra_files'] = []
- attributes['assert_list'] = assert_list
- if 'ftype' in attrib:
- attributes['ftype'] = attrib['ftype']
- for extra in output_elem.findall( 'extra_files' ):
- # File or directory, when directory, compare basename
- # by basename
- extra_type = extra.get( 'type', 'file' )
- extra_name = extra.get( 'name', None )
- assert extra_type == 'directory' or extra_name is not None, \
- 'extra_files type (%s) requires a name attribute' % extra_type
- extra_value = extra.get( 'value', None )
- assert extra_value is not None, 'extra_files requires a value attribute'
- extra_attributes = {}
- extra_attributes['compare'] = extra.get( 'compare', 'diff' ).lower()
- extra_attributes['delta'] = extra.get( 'delta', '0' )
- extra_attributes['lines_diff'] = int( extra.get( 'lines_diff', '0' ) )
- extra_attributes['sort'] = string_as_bool( extra.get( 'sort', False ) )
- attributes['extra_files'].append( ( extra_type, extra_value, extra_name, extra_attributes ) )
- test.add_output( name, file, attributes )
- except Exception, e:
- test.error = True
- test.exception = e
+ test = ToolTestBuilder( self, test_elem, i )
self.tests.append( test )
+
def parse_input_page( self, input_elem, enctypes ):
"""
Parse a page of inputs. This basically just calls 'parse_input_elem',
@@ -2155,11 +2052,19 @@
#remove extra files
while len( group_state ) > len( writable_files ):
del group_state[-1]
+
+ # Add new fileupload as needed
+ while len( writable_files ) > len( group_state ):
+ new_state = {}
+ new_state['__index__'] = len( group_state )
+ self.fill_in_new_state( trans, input.inputs, new_state, context )
+ group_state.append( new_state )
+ if any_group_errors:
+ group_errors.append( {} )
+
# Update state
- max_index = -1
for i, rep_state in enumerate( group_state ):
rep_index = rep_state['__index__']
- max_index = max( max_index, rep_index )
rep_prefix = "%s_%d|" % ( key, rep_index )
rep_errors = self.populate_state( trans,
input.inputs,
@@ -2174,16 +2079,6 @@
group_errors.append( rep_errors )
else:
group_errors.append( {} )
- # Add new fileupload as needed
- offset = 1
- while len( writable_files ) > len( group_state ):
- new_state = {}
- new_state['__index__'] = max_index + offset
- offset += 1
- self.fill_in_new_state( trans, input.inputs, new_state, context )
- group_state.append( new_state )
- if any_group_errors:
- group_errors.append( {} )
# Were there *any* errors for any repetition?
if any_group_errors:
errors[input.name] = group_errors
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d lib/galaxy/tools/test.py
--- a/lib/galaxy/tools/test.py
+++ b/lib/galaxy/tools/test.py
@@ -1,21 +1,23 @@
-import new, sys
import os.path
-import galaxy.util
-import parameters
from parameters import basic
from parameters import grouping
-from elementtree.ElementTree import XML
+from galaxy.util import string_as_bool
import logging
log = logging.getLogger( __name__ )
+
class ToolTestBuilder( object ):
"""
Encapsulates information about a tool test, and allows creation of a
dynamic TestCase class (the unittest framework is very class oriented,
doing dynamic tests in this way allows better integration)
"""
- def __init__( self, tool, name, maxseconds ):
+
+ def __init__( self, tool, test_elem, i ):
+ name = test_elem.get( 'name', 'Test-%d' % (i + 1) )
+ maxseconds = int( test_elem.get( 'maxseconds', '120' ) )
+
self.tool = tool
self.name = name
self.maxseconds = maxseconds
@@ -24,7 +26,125 @@
self.outputs = []
self.error = False
self.exception = None
- def add_param( self, name, value, extra ):
+
+ self.__parse_elem( test_elem, i )
+
+ def __parse_elem( self, test_elem, i ):
+ # Composite datasets need a unique name: each test occurs in a fresh
+ # history, but we'll keep it unique per set of tests - use i (test #)
+ # and composite_data_names_counter (instance per test #)
+ composite_data_names_counter = 0
+ try:
+ for param_elem in test_elem.findall( "param" ):
+ attrib = dict( param_elem.attrib )
+ if 'values' in attrib:
+ value = attrib[ 'values' ].split( ',' )
+ elif 'value' in attrib:
+ value = attrib['value']
+ else:
+ value = None
+ attrib['children'] = list( param_elem.getchildren() )
+ if attrib['children']:
+ # At this time, we can assume having children only
+ # occurs on DataToolParameter test items but this could
+ # change and would cause the below parsing to change
+ # based upon differences in children items
+ attrib['metadata'] = []
+ attrib['composite_data'] = []
+ attrib['edit_attributes'] = []
+ # Composite datasets need to be renamed uniquely
+ composite_data_name = None
+ for child in attrib['children']:
+ if child.tag == 'composite_data':
+ attrib['composite_data'].append( child )
+ if composite_data_name is None:
+ # Generate a unique name; each test uses a
+ # fresh history
+ composite_data_name = '_COMPOSITE_RENAMED_t%i_d%i' \
+ % ( i, composite_data_names_counter )
+ composite_data_names_counter += 1
+ elif child.tag == 'metadata':
+ attrib['metadata'].append( child )
+ elif child.tag == 'metadata':
+ attrib['metadata'].append( child )
+ elif child.tag == 'edit_attributes':
+ attrib['edit_attributes'].append( child )
+ if composite_data_name:
+ # Composite datasets need implicit renaming;
+ # inserted at front of list so explicit declarations
+ # take precedence
+ attrib['edit_attributes'].insert( 0, { 'type': 'name', 'value': composite_data_name } )
+ self.__add_param( attrib.pop( 'name' ), value, attrib )
+ for output_elem in test_elem.findall( "output" ):
+ attrib = dict( output_elem.attrib )
+ name = attrib.pop( 'name', None )
+ if name is None:
+ raise Exception( "Test output does not have a 'name'" )
+
+ assert_list = self.__parse_assert_list( output_elem )
+ file = attrib.pop( 'file', None )
+ # File no longer required if an list of assertions was present.
+ if not assert_list and file is None:
+ raise Exception( "Test output does not have a 'file' to compare with or list of assertions to check")
+ attributes = {}
+ # Method of comparison
+ attributes['compare'] = attrib.pop( 'compare', 'diff' ).lower()
+ # Number of lines to allow to vary in logs (for dates, etc)
+ attributes['lines_diff'] = int( attrib.pop( 'lines_diff', '0' ) )
+ # Allow a file size to vary if sim_size compare
+ attributes['delta'] = int( attrib.pop( 'delta', '10000' ) )
+ attributes['sort'] = string_as_bool( attrib.pop( 'sort', False ) )
+ attributes['extra_files'] = []
+ attributes['assert_list'] = assert_list
+ if 'ftype' in attrib:
+ attributes['ftype'] = attrib['ftype']
+ for extra in output_elem.findall( 'extra_files' ):
+ attributes['extra_files'].append( self.__parse_extra_files_elem( extra ) )
+ self.__add_output( name, file, attributes )
+ except Exception, e:
+ self.error = True
+ self.exception = e
+
+ def __parse_assert_list( self, output_elem ):
+ assert_elem = output_elem.find("assert_contents")
+ assert_list = None
+
+ # Trying to keep testing patch as localized as
+ # possible, this function should be relocated
+ # somewhere more conventional.
+ def convert_elem(elem):
+ """ Converts and XML element to a dictionary format, used by assertion checking code. """
+ tag = elem.tag
+ attributes = dict( elem.attrib )
+ child_elems = list( elem.getchildren() )
+ converted_children = []
+ for child_elem in child_elems:
+ converted_children.append( convert_elem(child_elem) )
+ return {"tag": tag, "attributes": attributes, "children": converted_children}
+ if assert_elem is not None:
+ assert_list = []
+ for assert_child in list(assert_elem):
+ assert_list.append(convert_elem(assert_child))
+
+ return assert_list
+
+ def __parse_extra_files_elem( self, extra ):
+ # File or directory, when directory, compare basename
+ # by basename
+ extra_type = extra.get( 'type', 'file' )
+ extra_name = extra.get( 'name', None )
+ assert extra_type == 'directory' or extra_name is not None, \
+ 'extra_files type (%s) requires a name attribute' % extra_type
+ extra_value = extra.get( 'value', None )
+ assert extra_value is not None, 'extra_files requires a value attribute'
+ extra_attributes = {}
+ extra_attributes['compare'] = extra.get( 'compare', 'diff' ).lower()
+ extra_attributes['delta'] = extra.get( 'delta', '0' )
+ extra_attributes['lines_diff'] = int( extra.get( 'lines_diff', '0' ) )
+ extra_attributes['sort'] = string_as_bool( extra.get( 'sort', False ) )
+ return extra_type, extra_value, extra_name, extra_attributes
+
+ def __add_param( self, name, value, extra ):
try:
if name not in self.tool.inputs:
found_parameter = False
@@ -43,8 +163,10 @@
except Exception, e:
log.debug( "Error for tool %s: could not add test parameter %s. %s" % ( self.tool.id, name, e ) )
self.inputs.append( ( name, value, extra ) )
- def add_output( self, name, file, extra ):
+
+ def __add_output( self, name, file, extra ):
self.outputs.append( ( name, file, extra ) )
+
def __expand_grouping_for_data_input( self, name, value, extra, grouping_name, grouping_value ):
# Currently handles grouping.Conditional and grouping.Repeat
if isinstance( grouping_value, grouping.Conditional ):
@@ -93,20 +215,21 @@
if found_parameter:
return True, new_value
return False, value
+
def __add_uploaded_dataset( self, name, value, extra, input_parameter ):
if value is None:
assert input_parameter.optional, '%s is not optional. You must provide a valid filename.' % name
return value
if ( value, extra ) not in self.required_files:
- self.required_files.append( ( value, extra ) ) #these files will be uploaded
+ self.required_files.append( ( value, extra ) ) # these files will be uploaded
name_change = [ att for att in extra.get( 'edit_attributes', [] ) if att.get( 'type' ) == 'name' ]
if name_change:
- name_change = name_change[-1].get( 'value' ) #only the last name change really matters
- value = name_change #change value for select to renamed uploaded file for e.g. composite dataset
+ name_change = name_change[-1].get( 'value' ) # only the last name change really matters
+ value = name_change # change value for select to renamed uploaded file for e.g. composite dataset
else:
for end in [ '.zip', '.gz' ]:
if value.endswith( end ):
value = value[ :-len( end ) ]
break
- value = os.path.basename( value ) #if uploading a file in a path other than root of test-data
+ value = os.path.basename( value ) # if uploading a file in a path other than root of test-data
return value
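For reference, the elements that __parse_elem() walks look like the hypothetical <test> block below; the tags and attributes (param value/values, output file/compare/lines_diff) are the ones read by the parsing code above, while the tool and file names are invented:

    from xml.etree.ElementTree import fromstring

    test_elem = fromstring('''
    <test name="Test-1" maxseconds="120">
        <param name="input" value="1.bed" ftype="bed"/>
        <param name="cutoffs" values="10,20"/>
        <output name="out_file1" file="expected.bed" compare="diff" lines_diff="2"/>
    </test>
    ''')

    # Mirrors the param-parsing branch of __parse_elem().
    for param_elem in test_elem.findall('param'):
        attrib = dict(param_elem.attrib)
        if 'values' in attrib:
            value = attrib['values'].split(',')
        elif 'value' in attrib:
            value = attrib['value']
        else:
            value = None
        print attrib['name'], value  # input 1.bed / cutoffs ['10', '20']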
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -568,6 +568,31 @@
error( "Please wait until this dataset finishes uploading before attempting to view it." )
return hda
+ def get_history_dataset_association_from_ids( self, trans, id, history_id ):
+ # Just to echo other TODOs, there seems to be some overlap here, still
+ # this block appears multiple places (dataset show, history_contents
+ # show, upcoming history job show) so I am consolodating it here.
+ # Someone smarter than me should determine if there is some redundancy here.
+
+ # for anon users:
+ #TODO: check login_required?
+ #TODO: this isn't actually most_recently_used (as defined in histories)
+ if( ( trans.user == None )
+ and ( history_id == trans.security.encode_id( trans.history.id ) ) ):
+ history = trans.history
+ #TODO: dataset/hda by id (from history) OR check_ownership for anon user
+ hda = self.get_history_dataset_association( trans, history, id,
+ check_ownership=False, check_accessible=True )
+ else:
+ #TODO: do we really need the history?
+ history = self.get_history( trans, history_id,
+ check_ownership=True, check_accessible=True, deleted=False )
+ hda = self.get_history_dataset_association( trans, history, id,
+ check_ownership=True, check_accessible=True )
+ return hda
+
+
+
def get_hda_list( self, trans, hda_ids, check_ownership=True, check_accessible=False, check_state=True ):
"""
Returns one or more datasets in a list.
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py
+++ b/lib/galaxy/web/framework/helpers/grids.py
@@ -321,7 +321,7 @@
class GridColumn( object ):
def __init__( self, label, key=None, model_class=None, method=None, format=None, \
- link=None, attach_popup=False, visible=True, ncells=1, nowrap=False, \
+ link=None, attach_popup=False, visible=True, nowrap=False, \
# Valid values for filterable are ['standard', 'advanced', None]
filterable=None, sortable=True, label_id_prefix=None ):
"""Create a grid column."""
@@ -334,7 +334,6 @@
self.nowrap = nowrap
self.attach_popup = attach_popup
self.visible = visible
- self.ncells = ncells
self.filterable = filterable
# Column must have a key to be sortable.
self.sortable = ( self.key is not None and sortable )
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d lib/galaxy/webapps/galaxy/api/datasets.py
--- a/lib/galaxy/webapps/galaxy/api/datasets.py
+++ b/lib/galaxy/webapps/galaxy/api/datasets.py
@@ -255,24 +255,9 @@
datatype prior to display (the defult if raw is unspecified or explicitly false.
"""
raw = string_as_bool_or_none( raw )
- # Huge amount of code overlap with lib/galaxy/webapps/galaxy/api/history_content:show here.
rval = ''
try:
- # for anon users:
- #TODO: check login_required?
- #TODO: this isn't actually most_recently_used (as defined in histories)
- if( ( trans.user == None )
- and ( history_id == trans.security.encode_id( trans.history.id ) ) ):
- history = trans.history
- #TODO: dataset/hda by id (from history) OR check_ownership for anon user
- hda = self.get_history_dataset_association( trans, history, history_content_id,
- check_ownership=False, check_accessible=True )
-
- else:
- history = self.get_history( trans, history_id,
- check_ownership=True, check_accessible=True, deleted=False )
- hda = self.get_history_dataset_association( trans, history, history_content_id,
- check_ownership=True, check_accessible=True )
+ hda = self.get_history_dataset_association_from_ids( trans, history_content_id, history_id )
display_kwd = kwd.copy()
try:
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d lib/galaxy/webapps/galaxy/api/history_contents.py
--- a/lib/galaxy/webapps/galaxy/api/history_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/history_contents.py
@@ -135,32 +135,17 @@
:returns: dictionary containing detailed HDA information
.. seealso:: :func:`galaxy.web.base.controller.UsesHistoryDatasetAssociationMixin.get_hda_dict`
"""
- hda_dict = {}
try:
- # for anon users:
- #TODO: check login_required?
- #TODO: this isn't actually most_recently_used (as defined in histories)
- if( ( trans.user == None )
- and ( history_id == trans.security.encode_id( trans.history.id ) ) ):
- history = trans.history
- #TODO: dataset/hda by id (from history) OR check_ownership for anon user
- hda = self.get_history_dataset_association( trans, history, id,
- check_ownership=False, check_accessible=True )
- else:
- #TODO: do we really need the history?
- history = self.get_history( trans, history_id,
- check_ownership=True, check_accessible=True, deleted=False )
- hda = self.get_history_dataset_association( trans, history, id,
- check_ownership=True, check_accessible=True )
+ hda = self.get_history_dataset_association_from_ids( trans, id, history_id )
hda_dict = self.get_hda_dict( trans, hda )
hda_dict[ 'display_types' ] = self.get_old_display_applications( trans, hda )
hda_dict[ 'display_apps' ] = self.get_display_apps( trans, hda )
+ return hda_dict
except Exception, e:
msg = "Error in history API at listing dataset: %s" % ( str(e) )
log.error( msg, exc_info=True )
trans.response.status = 500
return msg
- return hda_dict
@web.expose_api
def create( self, trans, history_id, payload, **kwd ):
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d lib/galaxy/webapps/galaxy/api/provenance.py
--- a/lib/galaxy/webapps/galaxy/api/provenance.py
+++ b/lib/galaxy/webapps/galaxy/api/provenance.py
@@ -42,11 +42,12 @@
if item is not None:
if item.copied_from_library_dataset_dataset_association:
item = item.copied_from_library_dataset_dataset_association
+ job = item.creating_job
return {
- "id" : trans.security.encode_id(item.id),
- "uuid" : ( lambda uuid: str( uuid ) if uuid else None )( item.dataset.uuid),
- "tool_id" : item.creating_job.tool_id,
- "parameters" : self._get_job_record(trans, item.creating_job, follow)
+ "id": trans.security.encode_id(item.id),
+ "uuid": ( lambda uuid: str( uuid ) if uuid else None )( item.dataset.uuid),
+ "tool_id": job.tool_id,
+ "parameters": self._get_job_record(trans, job, follow),
}
return None
@@ -59,8 +60,8 @@
out[in_d.name] = self._get_record(trans, in_d.dataset, follow)
else:
out[in_d.name] = {
- "id" : trans.security.encode_id(in_d.dataset.id),
- "uuid" : ( lambda uuid: str( uuid ) if uuid else None )( in_d.dataset.dataset.uuid )
+ "id": trans.security.encode_id(in_d.dataset.id),
+ "uuid": ( lambda uuid: str( uuid ) if uuid else None )( in_d.dataset.dataset.uuid ),
}
return out
@@ -75,5 +76,3 @@
controller_name = "ldda_provenance"
provenance_item_class = "LibraryDatasetDatasetAssociation"
provenance_item_id = "library_content_id"
-
-
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d lib/galaxy/webapps/galaxy/controllers/dataset.py
--- a/lib/galaxy/webapps/galaxy/controllers/dataset.py
+++ b/lib/galaxy/webapps/galaxy/controllers/dataset.py
@@ -100,7 +100,7 @@
grids.TextColumn( "Name", key="name",
# Link name to dataset's history.
link=( lambda item: iff( item.history.deleted, None, dict( operation="switch", id=item.id ) ) ), filterable="advanced", attach_popup=True ),
- HistoryColumn( "History", key="history",
+ HistoryColumn( "History", key="history", sortable=False,
link=( lambda item: iff( item.history.deleted, None, dict( operation="switch_history", id=item.id ) ) ) ),
grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.HistoryDatasetAssociationTagAssociation, filterable="advanced", grid_name="HistoryDatasetAssocationListGrid" ),
StatusColumn( "Status", key="deleted", attach_popup=False ),
@@ -113,11 +113,12 @@
key="free-text-search", visible=False, filterable="standard" )
)
operations = [
- grids.GridOperation( "Copy to current history", condition=( lambda item: not item.deleted ), async_compatible=False ),
+ grids.GridOperation( "Copy to current history", condition=( lambda item: not item.deleted ), async_compatible=True ),
]
standard_filters = []
default_filter = dict( name="All", deleted="False", tags="All" )
preserve_state = False
+ use_async = True
use_paging = True
num_rows_per_page = 50
def build_initial_query( self, trans, **kwargs ):
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d lib/galaxy/webapps/galaxy/controllers/history.py
--- a/lib/galaxy/webapps/galaxy/controllers/history.py
+++ b/lib/galaxy/webapps/galaxy/controllers/history.py
@@ -30,16 +30,13 @@
def get_value( self, trans, grid, history ):
state_count_dict = self.get_hda_state_counts( trans, history )
- rval = []
+ rval = ''
for state in ( 'ok', 'running', 'queued', 'error' ):
count = state_count_dict.get( state, 0 )
if count:
- rval.append( '<div class="count-box state-color-%s">%s</div>' % ( state, count ) )
- else:
- rval.append( '' )
+ rval += '<div class="count-box state-color-%s">%s</div>' % (state, count)
return rval
-
class HistoryListNameColumn( NameColumn ):
def get_link( self, trans, grid, history ):
link = None
@@ -72,7 +69,7 @@
default_sort_key = "-update_time"
columns = [
HistoryListNameColumn( "Name", key="name", attach_popup=True, filterable="advanced" ),
- DatasetsByStateColumn( "Datasets", key="datasets_by_state", ncells=4, sortable=False ),
+ DatasetsByStateColumn( "Datasets", key="datasets_by_state", sortable=False ),
grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.HistoryTagAssociation, \
filterable="advanced", grid_name="HistoryListGrid" ),
grids.SharingStatusColumn( "Sharing", key="sharing", filterable="advanced", sortable=False ),
@@ -118,13 +115,11 @@
# Custom column types
class DatasetsByStateColumn( grids.GridColumn ):
def get_value( self, trans, grid, history ):
- rval = []
+ rval = ''
for state in ( 'ok', 'running', 'queued', 'error' ):
total = sum( 1 for d in history.active_datasets if d.state == state )
if total:
- rval.append( '<div class="count-box state-color-%s">%s</div>' % ( state, total ) )
- else:
- rval.append( '' )
+ rval += '<div class="count-box state-color-%s">%s</div>' % ( state, total )
return rval
class SharedByColumn( grids.GridColumn ):
@@ -138,7 +133,7 @@
default_filter = {}
columns = [
grids.GridColumn( "Name", key="name", attach_popup=True ), # link=( lambda item: dict( operation="View", id=item.id ) ), attach_popup=True ),
- DatasetsByStateColumn( "Datasets", ncells=4, sortable=False ),
+ DatasetsByStateColumn( "Datasets", sortable=False ),
grids.GridColumn( "Created", key="create_time", format=time_ago ),
grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
SharedByColumn( "Shared by", key="user_id" )
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d lib/galaxy/webapps/galaxy/controllers/visualization.py
--- a/lib/galaxy/webapps/galaxy/controllers/visualization.py
+++ b/lib/galaxy/webapps/galaxy/controllers/visualization.py
@@ -147,11 +147,11 @@
# Grid definition.
title = "Insert into visualization"
template = "/tracks/add_to_viz.mako"
- async_template = "/page/select_items_grid_async.mako"
model_class = model.Visualization
default_sort_key = "-update_time"
use_async = True
use_paging = False
+ show_item_checkboxes = True
columns = [
grids.TextColumn( "Title", key="title", model_class=model.Visualization, filterable="standard" ),
grids.TextColumn( "Dbkey", key="dbkey", model_class=model.Visualization ),
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -217,9 +217,9 @@
database_auto_migrate = False
+ galaxy_test_proxy_port = None
+ psu_production = False
if start_server:
- psu_production = False
- galaxy_test_proxy_port = None
if 'GALAXY_TEST_PSU_PRODUCTION' in os.environ:
if not galaxy_test_port:
raise Exception( 'Please set GALAXY_TEST_PORT to the port to which the proxy server will proxy' )
diff -r b7a7c951642952129a05536c3e46cdae1c3000d1 -r 95db640a4071ba613814bfafe1a0448b93955e5d static/scripts/galaxy.grids.js
--- a/static/scripts/galaxy.grids.js
+++ b/static/scripts/galaxy.grids.js
@@ -119,433 +119,456 @@
}
// Add filter arguments to data, placing "f-" in front of all arguments.
- // FIXME: when underscore updated, use pairs function().
var self = this;
- _.each(_.keys(self.attributes.filters), function(k) {
- url_data['f-' + k] = self.attributes.filters[k];
+ _.each(_.pairs(self.attributes.filters), function(k) {
+ url_data['f-' + k[0]] = k[1];
});
return url_data;
}
});
-//
-// Code to handle grid operations: filtering, sorting, paging, and operations.
-//
+// grid view
+var GridView = Backbone.View.extend({
-// Initialize grid controls
-function init_grid_controls() {
- // Initialize submit image elements.
- $('.submit-image').each( function() {
- // On mousedown, add class to simulate click.
- $(this).mousedown( function() {
- $(this).addClass('gray-background');
+ // model
+ grid: null,
+
+ // Initialize
+ initialize: function(grid)
+ {
+ this.grid = grid;
+ this.init_grid_elements();
+ this.init_grid_controls();
+
+ // Initialize text filters to select text on click and use normal font when user is typing.
+ $('input[type=text]').each(function() {
+ $(this).click(function() { $(this).select(); } )
+ .keyup(function () { $(this).css("font-style", "normal"); });
+ });
+ },
+
+ // Initialize grid controls
+ init_grid_controls: function() {
+ // Initialize submit image elements.
+ $('.submit-image').each( function() {
+ // On mousedown, add class to simulate click.
+ $(this).mousedown( function() {
+ $(this).addClass('gray-background');
+ });
+
+ // On mouseup, add class to simulate click.
+ $(this).mouseup( function() {
+ $(this).removeClass('gray-background');
+ });
});
- // On mouseup, add class to simulate click.
- $(this).mouseup( function() {
- $(this).removeClass('gray-background');
+ // link
+ var self = this;
+
+ // Initialize sort links.
+ $('.sort-link').each( function() {
+ $(this).click( function() {
+ self.set_sort_condition( $(this).attr('sort_key') );
+ return false;
+ });
});
- });
+
+ // Initialize page links.
+ $('.page-link > a').each( function() {
+ $(this).click( function() {
+ self.set_page( $(this).attr('page_num') );
+ return false;
+ });
+ });
+
+ // Initialize categorical filters.
+ $('.categorical-filter > a').each( function() {
+ $(this).click( function() {
+ self.set_categorical_filter( $(this).attr('filter_key'), $(this).attr('filter_val') );
+ return false;
+ });
+ });
+
+ // Initialize text filters.
+ $('.text-filter-form').each( function() {
+ $(this).submit( function() {
+ var column_key = $(this).attr('column_key');
+ var text_input_obj = $('#input-' + column_key + '-filter');
+ var text_input = text_input_obj.val();
+ text_input_obj.val('');
+ self.add_filter_condition(column_key, text_input, true);
+ return false;
+ });
+ });
+
+ // Initialize autocomplete for text inputs in search UI.
+ var t = $("#input-tags-filter");
+ if (t.length) {
+ t.autocomplete(this.grid.history_tag_autocomplete_url,
+ { selectFirst: false, autoFill: false, highlight: false, mustMatch: false });
+ }
+
+ var t2 = $("#input-name-filter");
+ if (t2.length) {
+ t2.autocomplete(this.grid.history_name_autocomplete_url,
+ { selectFirst: false, autoFill: false, highlight: false, mustMatch: false });
+ }
+
+ // Initialize standard, advanced search toggles.
+ $('.advanced-search-toggle').each( function() {
+ $(this).click( function() {
+ $("#standard-search").slideToggle('fast');
+ $('#advanced-search').slideToggle('fast');
+ return false;
+ });
+ });
+ },
+
+ // Initialize grid elements.
+ init_grid_elements : function() {
- // Initialize sort links.
- $('.sort-link').each( function() {
- $(this).click( function() {
- set_sort_condition( $(this).attr('sort_key') );
- return false;
+ // Initialize grid selection checkboxes.
+ $(".grid").each( function() {
+ var checkboxes = $(this).find("input.grid-row-select-checkbox");
+ var check_count = $(this).find("span.grid-selected-count");
+ var update_checked = function() {
+ check_count.text( $(checkboxes).filter(":checked").length );
+ };
+
+ $(checkboxes).each( function() {
+ $(this).change(update_checked);
+ });
+ update_checked();
});
- });
-
- // Initialize page links.
- $('.page-link > a').each( function() {
- $(this).click( function() {
- set_page( $(this).attr('page_num') );
- return false;
+
+ // Initialize item labels.
+ var self = this;
+ $(".label").each( function() {
+ // If href has an operation in it, do operation when clicked. Otherwise do nothing.
+ var href = $(this).attr('href');
+ if ( href !== undefined && href.indexOf('operation=') != -1 ) {
+ $(this).click( function() {
+ self.do_operation_from_href( $(this).attr('href') );
+ return false;
+ });
+ }
});
- });
+
+ // Initialize ratings.
+ if ($('.community_rating_star').length !== 0)
+ $('.community_rating_star').rating({});
+
+ // Initialize item menu operations.
+ make_popup_menus();
+ },
- // Initialize categorical filters.
- $('.categorical-filter > a').each( function() {
- $(this).click( function() {
- set_categorical_filter( $(this).attr('filter_key'), $(this).attr('filter_val') );
+ // Go back to page one; this is useful when a filter is applied.
+ go_page_one: function () {
+ // Need to go back to page 1 if not showing all.
+ var cur_page = this.grid.get('cur_page');
+ if (cur_page !== null && cur_page !== undefined && cur_page !== 'all') {
+ this.grid.set('cur_page', 1);
+ }
+ },
+
+ // Add a condition to the grid filter; this adds the condition and refreshes the grid.
+ add_filter_condition: function (name, value, append) {
+ // Do nothing is value is empty.
+ if (value === "") {
return false;
+ }
+
+ // Add condition to grid.
+ this.grid.add_filter(name, value, append);
+
+ // Add button that displays filter and provides a button to delete it.
+ var t = $("<span>" + value + "<a href='javascript:void(0);'><span class='delete-search-icon' /></a></span>");
+ t.addClass('text-filter-val');
+ var self = this;
+ t.click(function() {
+ // Remove filter condition.
+ self.grid.remove_filter(name, value);
+
+ // Remove visible element.
+ $(this).remove();
+
+ self.go_page_one();
+ self.update_grid();
});
- });
-
- // Initialize text filters.
- $('.text-filter-form').each( function() {
- $(this).submit( function() {
- var column_key = $(this).attr('column_key');
- var text_input_obj = $('#input-' + column_key + '-filter');
- var text_input = text_input_obj.val();
- text_input_obj.val('');
- add_filter_condition(column_key, text_input, true);
+
+ var container = $('#' + name + "-filtering-criteria");
+ container.append(t);
+
+ this.go_page_one();
+ this.update_grid();
+ },
+
+ // Set sort condition for grid.
+ set_sort_condition: function (col_key) {
+ // Set new sort condition. New sort is col_key if sorting new column; if reversing sort on
+ // currently sorted column, sort is reversed.
+ var cur_sort = this.grid.get('sort_key');
+ var new_sort = col_key;
+ if (cur_sort.indexOf(col_key) !== -1) {
+ // Reverse sort.
+ if (cur_sort.substring(0,1) !== '-') {
+ new_sort = '-' + col_key;
+ } else {
+ // Sort reversed by using just col_key.
+ }
+ }
+
+ // Remove sort arrows elements.
+ $('.sort-arrow').remove();
+
+ // Add sort arrow element to new sort column.
+ var sort_arrow = (new_sort.substring(0,1) == '-') ? "↑" : "↓";
+ var t = $("<span>" + sort_arrow + "</span>").addClass('sort-arrow');
+ var th = $("#" + col_key + '-header');
+ th.append(t);
+
+ // Update grid.
+ this.grid.set('sort_key', new_sort);
+ this.go_page_one();
+ this.update_grid();
+ },
+
+ // Set new value for categorical filter.
+ set_categorical_filter: function (name, new_value) {
+ // Update filter hyperlinks to reflect new filter value.
+ var category_filter = this.grid.get('categorical_filters')[name],
+ cur_value = this.grid.get('filters')[name];
+ var self = this;
+ $("." + name + "-filter").each( function() {
+ var text = $.trim( $(this).text() );
+ var filter = category_filter[text];
+ var filter_value = filter[name];
+ if (filter_value == new_value) {
+ // Remove filter link since grid will be using this filter. It is assumed that
+ // this element has a single child, a hyperlink/anchor with text.
+ $(this).empty();
+ $(this).addClass("current-filter");
+ $(this).append(text);
+ } else if (filter_value == cur_value) {
+ // Add hyperlink for this filter since grid will no longer be using this filter. It is assumed that
+ // this element has a single child, a hyperlink/anchor.
+ $(this).empty();
+ var t = $("<a href='#'>" + text + "</a>");
+ t.click(function() {
+ self.set_categorical_filter( name, filter_value );
+ });
+ $(this).removeClass("current-filter");
+ $(this).append(t);
+ }
+ });
+
+ // Update grid.
+ this.grid.add_filter(name, new_value);
+ this.go_page_one();
+ this.update_grid();
+ },
+
+ // Set page to view.
+ set_page: function (new_page) {
+ // Update page hyperlink to reflect new page.
+ var self = this;
+ $(".page-link").each( function() {
+ var id = $(this).attr('id'),
+ page_num = parseInt( id.split("-")[2], 10 ), // Id has form 'page-link-<page_num>'
+ cur_page = self.grid.get('cur_page'),
+ text;
+ if (page_num === new_page) {
+ // Remove link to page since grid will be on this page. It is assumed that
+ // this element has a single child, a hyperlink/anchor with text.
+ text = $(this).children().text();
+ $(this).empty();
+ $(this).addClass("inactive-link");
+ $(this).text(text);
+ }
+ else if (page_num === cur_page) {
+ // Add hyperlink to this page since grid will no longer be on this page. It is assumed that
+ // this element has a single child, a hyperlink/anchor.
+ text = $(this).text();
+ $(this).empty();
+ $(this).removeClass("inactive-link");
+ var t = $("<a href='#'>" + text + "</a>");
+ t.click(function() {
+ set_page(page_num);
+ });
+ $(this).append(t);
+ }
+ });
+
+ var maintain_page_links = true;
+ if (new_page === "all") {
+ this.grid.set('cur_page', new_page);
+ maintain_page_links = false;
+ } else {
+ this.grid.set('cur_page', parseInt(new_page, 10));
+ }
+ this.update_grid(maintain_page_links);
+ },
+
+ // Perform a grid operation.
+ do_operation: function (operation, item_ids) {
+ operation = operation.toLowerCase();
+
+ // Update grid.
+ this.grid.set({
+ operation: operation,
+ item_ids: item_ids
+ });
+
+ // Do operation. If operation cannot be performed asynchronously, redirect to location.
+ if (this.grid.can_async_op(operation)) {
+ this.update_grid(true);
+ }
+ else {
+ this.go_to_URL();
+ }
+ },
+
+ // Perform a hyperlink click that initiates an operation. If there is no operation, ignore click.
+ do_operation_from_href: function (href) {
+ // Get operation, id in hyperlink's href.
+ var href_parts = href.split("?");
+ if (href_parts.length > 1) {
+ var href_parms_str = href_parts[1];
+ var href_parms = href_parms_str.split("&");
+ var operation = null;
+ var id = -1;
+ for (var index = 0; index < href_parms.length; index++) {
+ if (href_parms[index].indexOf('operation') != -1) {
+ // Found operation parm; get operation value.
+ operation = href_parms[index].split('=')[1];
+ } else if (href_parms[index].indexOf('id') != -1) {
+ // Found id parm; get id value.
+ id = href_parms[index].split('=')[1];
+ }
+ }
+ // Do operation.
+ this.do_operation(operation, id);
return false;
+ }
+ },
+
+ // Navigate window to the URL defined by the grid's url_base and current parameters. This method can be used to short-circuit grid AJAXing.
+ go_to_URL: function () {
+ // Not async request.
+ this.grid.set('async', false);
+
+ // Go.
+ window.location = this.grid.get('url_base') + "?" + $.param(this.grid.get_url_data());
+
+ // Clear grid of transient request attributes.
+ this.grid.set({
+ operation: undefined,
+ item_ids: undefined
});
- });
-
- // Initialize autocomplete for text inputs in search UI.
- var t = $("#input-tags-filter");
- if (t.length) {
- t.autocomplete(history_tag_autocomplete_url,
- { selectFirst: false, autoFill: false, highlight: false, mustMatch: false });
- }
-
- var t2 = $("#input-name-filter");
- if (t2.length) {
- t2.autocomplete(history_name_autocomplete_url,
- { selectFirst: false, autoFill: false, highlight: false, mustMatch: false });
- }
-
- // Initialize standard, advanced search toggles.
- $('.advanced-search-toggle').each( function() {
- $(this).click( function() {
- $("#standard-search").slideToggle('fast');
- $('#advanced-search').slideToggle('fast');
- return false;
- });
- });
-}
-
-// Initialize grid elements.
-function init_grid_elements() {
- // Initialize grid selection checkboxes.
- $(".grid").each( function() {
- var checkboxes = $(this).find("input.grid-row-select-checkbox");
- var check_count = $(this).find("span.grid-selected-count");
- var update_checked = function() {
- check_count.text( $(checkboxes).filter(":checked").length );
- };
-
- $(checkboxes).each( function() {
- $(this).change(update_checked);
- });
- update_checked();
- });
-
- // Initialize item labels.
- $(".label").each( function() {
- // If href has an operation in it, do operation when clicked. Otherwise do nothing.
- var href = $(this).attr('href');
- if ( href !== undefined && href.indexOf('operation=') != -1 ) {
- $(this).click( function() {
- do_operation_from_href( $(this).attr('href') );
- return false;
- });
- }
- });
-
- // Initialize ratings.
- if ($('.community_rating_star').length !== 0)
- $('.community_rating_star').rating({});
-
- // Initialize item menu operations.
- make_popup_menus();
-}
-
-// Go back to page one; this is useful when a filter is applied.
-function go_page_one() {
- // Need to go back to page 1 if not showing all.
- var cur_page = grid.get('cur_page');
- if (cur_page !== null && cur_page !== undefined && cur_page !== 'all') {
- grid.set('cur_page', 1);
- }
-}
-
-// Add a condition to the grid filter; this adds the condition and refreshes the grid.
-function add_filter_condition(name, value, append) {
- // Do nothing is value is empty.
- if (value === "") {
- return false;
- }
-
- // Add condition to grid.
- grid.add_filter(name, value, append);
-
- // Add button that displays filter and provides a button to delete it.
- var t = $("<span>" + value + "<a href='javascript:void(0);'><span class='delete-search-icon' /></a></span>");
- t.addClass('text-filter-val');
- t.click(function() {
- // Remove filter condition.
- grid.remove_filter(name, value);
-
- // Remove visible element.
- $(this).remove();
-
- go_page_one();
- update_grid();
- });
-
- var container = $('#' + name + "-filtering-criteria");
- container.append(t);
-
- go_page_one();
- update_grid();
-}
-
-// Add tag to grid filter.
-function add_tag_to_grid_filter(tag_name, tag_value) {
- // Put tag name and value together.
- var tag = tag_name + (tag_value !== undefined && tag_value !== "" ? ":" + tag_value : "");
- $('#advanced-search').show('fast');
- add_filter_condition("tags", tag, true);
-}
-
-// Set sort condition for grid.
-function set_sort_condition(col_key) {
- // Set new sort condition. New sort is col_key if sorting new column; if reversing sort on
- // currently sorted column, sort is reversed.
- var cur_sort = grid.get('sort_key');
- var new_sort = col_key;
- if (cur_sort.indexOf(col_key) !== -1) {
- // Reverse sort.
- if (cur_sort.substring(0,1) !== '-') {
- new_sort = '-' + col_key;
- } else {
- // Sort reversed by using just col_key.
- }
- }
-
- // Remove sort arrows elements.
- $('.sort-arrow').remove();
-
- // Add sort arrow element to new sort column.
- var sort_arrow = (new_sort.substring(0,1) == '-') ? "↑" : "↓";
- var t = $("<span>" + sort_arrow + "</span>").addClass('sort-arrow');
- var th = $("#" + col_key + '-header');
- th.append(t);
-
- // Update grid.
- grid.set('sort_key', new_sort);
- go_page_one();
- update_grid();
-}
-
-// Set new value for categorical filter.
-function set_categorical_filter(name, new_value) {
- // Update filter hyperlinks to reflect new filter value.
- var category_filter = grid.get('categorical_filters')[name],
- cur_value = grid.get('filters')[name];
- $("." + name + "-filter").each( function() {
- var text = $.trim( $(this).text() );
- var filter = category_filter[text];
- var filter_value = filter[name];
- if (filter_value == new_value) {
- // Remove filter link since grid will be using this filter. It is assumed that
- // this element has a single child, a hyperlink/anchor with text.
- $(this).empty();
- $(this).addClass("current-filter");
- $(this).append(text);
- } else if (filter_value == cur_value) {
- // Add hyperlink for this filter since grid will no longer be using this filter. It is assumed that
- // this element has a single child, a hyperlink/anchor.
- $(this).empty();
- var t = $("<a href='#'>" + text + "</a>");
- t.click(function() {
- set_categorical_filter( name, filter_value );
- });
- $(this).removeClass("current-filter");
- $(this).append(t);
- }
- });
-
- // Update grid.
- grid.add_filter(name, new_value);
- go_page_one();
- update_grid();
-}
-
-// Set page to view.
-function set_page(new_page) {
- // Update page hyperlink to reflect new page.
- $(".page-link").each( function() {
- var id = $(this).attr('id'),
- page_num = parseInt( id.split("-")[2], 10 ), // Id has form 'page-link-<page_num>
- cur_page = grid.get('cur_page'),
- text;
- if (page_num === new_page) {
- // Remove link to page since grid will be on this page. It is assumed that
- // this element has a single child, a hyperlink/anchor with text.
- text = $(this).children().text();
- $(this).empty();
- $(this).addClass("inactive-link");
- $(this).text(text);
- }
- else if (page_num === cur_page) {
- // Add hyperlink to this page since grid will no longer be on this page. It is assumed that
- // this element has a single child, a hyperlink/anchor.
- text = $(this).text();
- $(this).empty();
- $(this).removeClass("inactive-link");
- var t = $("<a href='#'>" + text + "</a>");
- t.click(function() {
- set_page(page_num);
- });
- $(this).append(t);
- }
- });
-
- var maintain_page_links = true;
- if (new_page === "all") {
- grid.set('cur_page', new_page);
- maintain_page_links = false;
- } else {
- grid.set('cur_page', parseInt(new_page, 10));
- }
- update_grid(maintain_page_links);
-}
-
-// Perform a grid operation.
-function do_operation(operation, item_ids) {
- operation = operation.toLowerCase();
+ },
// Update grid.
- grid.set({
- operation: operation,
- item_ids: item_ids
- });
-
- // Do operation. If operation cannot be performed asynchronously, redirect to location.
- if (grid.can_async_op(operation)) {
- update_grid(true);
- }
- else {
- go_to_URL();
- }
-}
+ update_grid: function (maintain_page_links) {
+ // If grid is not using async, then go to URL.
+ if (!this.grid.get('async')) {
+ this.go_to_URL();
+ return;
+ }
-// Perform a hyperlink click that initiates an operation. If there is no operation, ignore click.
-function do_operation_from_href(href) {
- // Get operation, id in hyperlink's href.
- var href_parts = href.split("?");
- if (href_parts.length > 1) {
- var href_parms_str = href_parts[1];
- var href_parms = href_parms_str.split("&");
- var operation = null;
- var id = -1;
- for (var index = 0; index < href_parms.length; index++) {
- if (href_parms[index].indexOf('operation') != -1) {
- // Found operation parm; get operation value.
- operation = href_parms[index].split('=')[1];
- } else if (href_parms[index].indexOf('id') != -1) {
- // Found id parm; get id value.
- id = href_parms[index].split('=')[1];
+ // If there's an operation, do POST; otherwise, do GET.
+ var method = (this.grid.get('operation') ? "POST" : "GET" );
+ $('.loading-elt-overlay').show(); // Show overlay to indicate loading and prevent user actions.
+ var self = this;
+ $.ajax({
+ type: method,
+ url: self.grid.get('url_base'),
+ data: self.grid.get_url_data(),
+ error: function() { alert( "Grid refresh failed" ); },
+ success: function(response_text) {
+ // HACK: use a simple string to separate the elements in the
+ // response: (1) table body; (2) number of pages in table; and (3) message.
+ var parsed_response_text = response_text.split("*****");
+
+ // Update grid body and footer.
+ $('#grid-table-body').html(parsed_response_text[0]);
+ // FIXME: this does not work at all; what's needed is a function
+ // that updates page links when number of pages changes.
+ $('#grid-table-footer').html(parsed_response_text[1]);
+
+ // Trigger custom event to indicate grid body has changed.
+ $('#grid-table-body').trigger('update');
+
+ // Init grid.
+ self.init_grid_elements();
+ make_popup_menus();
+
+ // Hide loading overlay.
+ $('.loading-elt-overlay').hide();
+
+ // Show message if there is one.
+ var message = $.trim( parsed_response_text[2] );
+ if (message !== "") {
+ $('#grid-message').html( message ).show();
+ setTimeout( function() { $('#grid-message').hide(); }, 5000);
+ }
+ },
+ complete: function() {
+ // Clear grid of transient request attributes.
+ self.grid.set({
+ operation: undefined,
+ item_ids: undefined
+ });
+ }
+ });
+ },
+
+ check_all_items: function () {
+ var chk_all = document.getElementById('check_all'),
+ checks = document.getElementsByTagName('input'),
+ total = 0,
+ i;
+ if ( chk_all.checked === true ) {
+ for ( i=0; i < checks.length; i++ ) {
+ if ( checks[i].name.indexOf( 'id' ) !== -1) {
+ checks[i].checked = true;
+ total++;
+ }
}
}
- // Do operation.
- do_operation(operation, id);
- return false;
- }
-}
+ else {
+ for ( i=0; i < checks.length; i++ ) {
+ if ( checks[i].name.indexOf( 'id' ) !== -1) {
+ checks[i].checked = false;
+ }
-// Navigate window to the URL defined by url_args. This method can be used to short-circuit grid AJAXing.
-function go_to_URL() {
- // Not async request.
- grid.set('async', false);
-
- // Go.
- window.location = grid.get('url_base') + "?" + $.param(grid.get_url_data());
-}
-
-// Update grid.
-function update_grid(maintain_page_links) {
- // If grid is not using async, then go to URL.
- if (!grid.get('async')) {
- go_to_URL();
- return;
- }
-
- // If there's an operation, do POST; otherwise, do GET.
- var method = (grid.get('operation') ? "POST" : "GET" );
- $('.loading-elt-overlay').show(); // Show overlay to indicate loading and prevent user actions.
- $.ajax({
- type: method,
- url: grid.get('url_base'),
- data: grid.get_url_data(),
- error: function() { alert( "Grid refresh failed" ); },
- success: function(response_text) {
- // HACK: use a simple string to separate the elements in the
- // response: (1) table body; (2) number of pages in table; and (3) message.
- var parsed_response_text = response_text.split("*****");
-
- // Update grid body and footer.
- $('#grid-table-body').html(parsed_response_text[0]);
- // FIXME: this does not work at all; what's needed is a function
- // that updates page links when number of pages changes.
- $('#grid-table-footer').html(parsed_response_text[1]);
-
- // Trigger custom event to indicate grid body has changed.
- $('#grid-table-body').trigger('update');
-
- // Init grid.
- init_grid_elements();
- make_popup_menus();
-
- // Hide loading overlay.
- $('.loading-elt-overlay').hide();
-
- // Show message if there is one.
- var message = $.trim( parsed_response_text[2] );
- if (message !== "") {
- $('#grid-message').html( message ).show();
- setTimeout( function() { $('#grid-message').hide(); }, 5000);
- }
- },
- complete: function() {
- // Clear grid of transient request attributes.
- grid.set({
- operation: undefined,
- item_ids: undefined
- });
- }
- });
-}
-
-function check_all_items() {
- var chk_all = document.getElementById('check_all'),
- checks = document.getElementsByTagName('input'),
- total = 0,
- i;
- if ( chk_all.checked === true ) {
- for ( i=0; i < checks.length; i++ ) {
- if ( checks[i].name.indexOf( 'id' ) !== -1) {
- checks[i].checked = true;
- total++;
}
}
+ this.init_grid_elements();
+ },
+
+ // confirmation/submission of an operation request
+ submit_operation: function (selected_button, confirmation_text) {
+ // verify at least one item is selected
+ var number_of_checked_ids = $('input[name="id"]:checked').length;
+ if (number_of_checked_ids === 0)
+ return false;
+
+ // show confirmation box
+ if (confirmation_text != 'None' && confirmation_text != '')
+ if(!confirm(confirmation_text))
+ return false;
+
+ // collect ids
+ var operation_name = $(selected_button).val();
+ var item_ids = [];
+ $('input[name=id]:checked').each(function() {
+ item_ids.push( $(this).val() );
+ });
+ this.do_operation(operation_name, item_ids);
+
+ // return
+ return true;
}
- else {
- for ( i=0; i < checks.length; i++ ) {
- if ( checks[i].name.indexOf( 'id' ) !== -1) {
- checks[i].checked = false;
- }
- }
- }
- init_grid_elements();
-}
-
-
-// confirmation/submission of operation request
-function submit_operation(selected_button, confirmation_text)
-{
- // verify in any item is selected
- var number_of_checked_ids = $('input[name="id"]:checked').length;
- if (!number_of_checked_ids > 0)
- return false;
-
- // show confirmation box
- if (confirmation_text != 'None' && confirmation_text != '')
- if(!confirm(confirmation_text))
- return false;
-
- // add ids
- var operation_name = selected_button.value;
- var item_ids = [];
- $('input[name=id]:checked').each(function() {
- item_ids.push( $(this).val() );
- });
- do_operation(operation_name, item_ids);
-
- // return
- return true;
-}
+});
This diff is so big that we needed to truncate the remainder.
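For readers tracing the refactored update_grid() above: its success handler splits the AJAX response on the literal "*****" sentinel into three parts (table body, footer/page info, message). Below is a minimal Python sketch of what a matching server-side responder might look like; the function name is invented for illustration, and only the separator and the part order come from the code above.

    # Hypothetical sketch of the "*****" response protocol update_grid()
    # expects; render_grid_response is not a real Galaxy function.
    def render_grid_response(table_body_html, footer_html, message=""):
        # update_grid() indexes the split result as:
        # [0] grid table body, [1] footer/page links, [2] message.
        return "*****".join([table_body_html, footer_html, message])

    response_text = render_grid_response("<tr>...</tr>", "<span>1 page</span>")
    parts = response_text.split("*****")
    # parts[0] -> table body, parts[1] -> footer, parts[2] -> message ("")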
https://bitbucket.org/galaxy/galaxy-central/commits/7b889ba6fd44/
Changeset: 7b889ba6fd44
User: dannon
Date: 2013-11-20 23:30:03
Summary: S3/Swift objectstore refactoring.
Affected #: 3 files
diff -r 95db640a4071ba613814bfafe1a0448b93955e5d -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b lib/galaxy/objectstore/__init__.py
--- a/lib/galaxy/objectstore/__init__.py
+++ b/lib/galaxy/objectstore/__init__.py
@@ -11,9 +11,9 @@
import threading
from galaxy import util
+from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.jobs import Sleeper
from galaxy.model import directory_hash_id
-from galaxy.exceptions import ObjectNotFound, ObjectInvalid
from galaxy.util.odict import odict
from sqlalchemy.orm import object_session
@@ -609,9 +609,12 @@
if store == 'disk':
return DiskObjectStore(config=config, config_xml=config_xml)
- elif store == 's3' or store == 'swift':
+ elif store == 's3':
from galaxy.objectstore.s3 import S3ObjectStore
return S3ObjectStore(config=config, config_xml=config_xml)
+ elif store == 'swift':
+ from galaxy.objectstore.s3 import SwiftObjectStore
+ return SwiftObjectStore(config=config, config_xml=config_xml)
elif store == 'distributed':
return DistributedObjectStore(config=config, fsmon=fsmon, config_xml=config_xml)
elif store == 'hierarchical':
diff -r 95db640a4071ba613814bfafe1a0448b93955e5d -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b lib/galaxy/objectstore/s3.py
--- a/lib/galaxy/objectstore/s3.py
+++ b/lib/galaxy/objectstore/s3.py
@@ -33,15 +33,13 @@
cache exists that is used as an intermediate location for files between
Galaxy and S3.
"""
- def __init__(self, config, config_xml=None):
+ def __init__(self, config, config_xml):
super(S3ObjectStore, self).__init__(config, config_xml)
self.config = config
self.staging_path = self.config.file_path
- self.s3_conn = get_OS_connection(self.config)
- self.bucket = self._get_bucket(self.config.os_bucket_name)
- self.use_rr = self.config.os_use_reduced_redundancy
- self.cache_size = self.config.object_store_cache_size
self.transfer_progress = 0
+ self._parse_config_xml(config_xml)
+ self._configure_connection()
# Clean cache only if value is set in universe_wsgi.ini
if self.cache_size != -1:
# Convert GBs to bytes for comparison
@@ -58,6 +56,31 @@
except OSError:
self.use_axel = False
+ def _configure_connection(self):
+ log.debug("Configuring S3 Connection")
+ self.conn = S3Connection(self.access_key, self.secret_key)
+
+ def _parse_config_xml(self, config_xml):
+ try:
+ a_xml = config_xml.findall('auth')[0]
+ self.access_key = a_xml.get('access_key')
+ self.secret_key = a_xml.get('secret_key')
+ b_xml = config_xml.findall('bucket')[0]
+ self.bucket = b_xml.get('name')
+ self.use_rr = b_xml.get('use_reduced_redundancy', False)
+ cn_xml = config_xml.findall('connection')[0]
+ self.host = cn_xml.get('host', None)
+ self.port = int(cn_xml.get('port', 6000))
+ self.is_secure = cn_xml.get('is_secure', True)
+ self.conn_path = cn_xml.get('conn_path', '/')
+ c_xml = config_xml.findall('cache')[0]
+ self.cache_size = float(c_xml.get('size', -1))
+ self.cache_path = c_xml.get('path')
+ except Exception:
+ # Toss it back up after logging, we can't continue loading at this point.
+ log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
+ raise
+
def __cache_monitor(self):
time.sleep(2) # Wait for things to load before starting the monitor
while self.running:
@@ -127,7 +150,7 @@
it a few times. Raise error if connection is not established. """
for i in range(5):
try:
- bucket = self.s3_conn.get_bucket(bucket_name)
+ bucket = self.conn.get_bucket(bucket_name)
log.debug("Using cloud object store with bucket '%s'" % bucket.name)
return bucket
except S3ResponseError:
@@ -290,7 +313,8 @@
# print "Pushing cache file '%s' of size %s bytes to key '%s'" % (source_file, os.path.getsize(source_file), rel_path)
# print "+ Push started at '%s'" % start_time
mb_size = os.path.getsize(source_file) / 1e6
- if mb_size < 60 or self.config.object_store == 'swift':
+ #DBTODO Hack, refactor this logic.
+ if mb_size < 60 or type(self) == SwiftObjectStore:
self.transfer_progress = 0 # Reset transfer progress counter
key.set_contents_from_filename(source_file, reduced_redundancy=self.use_rr,
cb=self._transfer_cb, num_cb=10)
@@ -512,24 +536,20 @@
return 0.0
-def get_OS_connection(config):
+class SwiftObjectStore(S3ObjectStore):
"""
- Get a connection object for a cloud Object Store specified in the config.
- Currently, this is a ``boto`` connection object.
+ Object store that stores objects as items in a Swift bucket. A local
+ cache exists that is used as an intermediate location for files between
+ Galaxy and Swift.
"""
- log.debug("Getting a connection object for '{0}' object store".format(config.object_store))
- a_key = config.os_access_key
- s_key = config.os_secret_key
- if config.object_store == 's3':
- return S3Connection(a_key, s_key)
- else:
- # Establish the connection now
- calling_format = boto.s3.connection.OrdinaryCallingFormat()
- s3_conn = boto.connect_s3(aws_access_key_id=a_key,
- aws_secret_access_key=s_key,
- is_secure=config.os_is_secure,
- host=config.os_host,
- port=int(config.os_port),
- calling_format=calling_format,
- path=config.os_conn_path)
- return s3_conn
+
+ def _configure_connection(self):
+ log.debug("Configuring Swift Connection")
+ self.conn = boto.connect_s3(aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key,
+ is_secure=self.is_secure,
+ host=self.host,
+ port=self.port,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ path=self.conn_path)
+
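The net effect of this refactoring is a template-method hook: S3ObjectStore.__init__ now calls self._configure_connection(), and SwiftObjectStore overrides only that hook while inheriting the rest of the S3 behavior. A stripped-down sketch of the pattern, using toy classes rather than Galaxy code:

    # Toy illustration of the hook-override structure introduced above;
    # these classes stand in for S3ObjectStore/SwiftObjectStore and do
    # not touch boto or open any real connection.
    class BaseStore(object):
        def __init__(self):
            # The base constructor defers connection setup to a hook...
            self._configure_connection()

        def _configure_connection(self):
            self.conn = "S3Connection(access_key, secret_key)"

    class SwiftStore(BaseStore):
        # ...so the Swift variant replaces only the hook, keeping all
        # other inherited behavior (caching, key handling, etc.).
        def _configure_connection(self):
            self.conn = "boto.connect_s3(host=..., port=..., path=...)"

    print(BaseStore().conn)
    print(SwiftStore().conn)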
diff -r 95db640a4071ba613814bfafe1a0448b93955e5d -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b object_store_conf.xml.sample
--- a/object_store_conf.xml.sample
+++ b/object_store_conf.xml.sample
@@ -20,5 +20,16 @@
<extra_dir type="temp" path="database/tmp3"/>
<extra_dir type="job_work" path="database/job_working_directory3"/>
</object_store>
+ <!-- Sample S3 Object Store
+
+ <object_store type="s3">
+ <auth access_key="...." secret_key="....." />
+ <bucket name="unique_bucket_name" use_reduced_redundancy="False" />
+ <connection host="" port="" is_secure="" conn_path="" />
+ <cache path="database/files/" size="100" />
+ </object_store>
+
+ -->
</backends></object_store>
+
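The sample above only covers type="s3". Judging from the _parse_config_xml() method added in this changeset, a swift entry would use the same child elements; the snippet below is a hypothetical illustration that exercises those keys with ElementTree, not a sample shipped in the changeset.

    from xml.etree import ElementTree

    # Hypothetical swift entry; the element and attribute names mirror
    # what _parse_config_xml() reads, they are not taken from the
    # shipped sample file.
    config_xml = ElementTree.fromstring("""
    <object_store type="swift">
        <auth access_key="..." secret_key="..." />
        <bucket name="unique_bucket_name" use_reduced_redundancy="False" />
        <connection host="swift.example.org" port="8080" is_secure="False" conn_path="/" />
        <cache path="database/files/" size="100" />
    </object_store>
    """)

    # The same lookups the changeset performs:
    print(config_xml.findall('auth')[0].get('access_key'))             # ...
    print(int(config_xml.findall('connection')[0].get('port', 6000)))  # 8080
    print(float(config_xml.findall('cache')[0].get('size', -1)))       # 100.0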
https://bitbucket.org/galaxy/galaxy-central/commits/e507124fbe79/
Changeset: e507124fbe79
User: dannon
Date: 2013-11-20 23:30:28
Summary: Merge.
Affected #: 55 files
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 config/plugins/visualizations/scatterplot/Gruntfile.js
--- a/config/plugins/visualizations/scatterplot/Gruntfile.js
+++ b/config/plugins/visualizations/scatterplot/Gruntfile.js
@@ -9,7 +9,7 @@
// compile all hb templates into a single file in the build dir
compile: {
options: {
- namespace: 'Templates',
+ namespace: 'scatterplot',
processName : function( filepath ){
return filepath.match( /\w*\.handlebars/ )[0].replace( '.handlebars', '' );
}
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 config/plugins/visualizations/scatterplot/src/scatterplot-config-editor.js
--- a/config/plugins/visualizations/scatterplot/src/scatterplot-config-editor.js
+++ b/config/plugins/visualizations/scatterplot/src/scatterplot-config-editor.js
@@ -37,14 +37,19 @@
/** initialize requires a configuration Object containing a dataset Object */
initialize : function( attributes ){
//console.log( this + '.initialize, attributes:', attributes );
- if( !attributes || !attributes.config || !attributes.config.dataset ){
+ if( !attributes || !attributes.config || !attributes.dataset ){
throw new Error( "ScatterplotView requires a configuration and dataset" );
}
- this.dataset = attributes.config.dataset;
+ //console.log( 'config:', attributes.config );
+
+ this.dataset = attributes.dataset;
//console.log( 'dataset:', this.dataset );
+//TODO: ScatterplotView -> ScatterplotDisplay, this.plotView -> this.display
this.plotView = new ScatterplotView({
+ dataset : attributes.dataset,
config : attributes.config
+//TODO: if data
});
},
@@ -197,8 +202,8 @@
// parse the column values for both indeces (for the data fetch) and names (for the chart)
var $dataControls = this.$el.find( '#data-control' );
var settings = {
- xColumn : $dataControls.find( '[name="xColumn"]' ).val(),
- yColumn : $dataControls.find( '[name="yColumn"]' ).val()
+ xColumn : Number( $dataControls.find( '[name="xColumn"]' ).val() ),
+ yColumn : Number( $dataControls.find( '[name="yColumn"]' ).val() )
};
if( $dataControls.find( '#include-id-checkbox' ).prop( 'checked' ) ){
settings.idColumn = $dataControls.find( '[name="idColumn"]' ).val();
@@ -229,9 +234,9 @@
});
ScatterplotConfigEditor.templates = {
- mainLayout : Templates.editor,
- dataControl : Templates.datacontrol,
- chartControl : Templates.chartcontrol
+ mainLayout : scatterplot.editor,
+ dataControl : scatterplot.datacontrol,
+ chartControl : scatterplot.chartcontrol
};
//==============================================================================
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 config/plugins/visualizations/scatterplot/src/scatterplot-display.js
--- a/config/plugins/visualizations/scatterplot/src/scatterplot-display.js
+++ b/config/plugins/visualizations/scatterplot/src/scatterplot-display.js
@@ -10,14 +10,10 @@
//TODO: should be a view on visualization(revision) model
defaults : {
- dataset : {
- },
metadata : {
dataLines : undefined
},
- ajaxFn : null,
-
pagination : {
currPage : 0,
perPage : 3000
@@ -48,6 +44,7 @@
initialize : function( attributes ){
this.config = _.extend( _.clone( this.defaults ), attributes.config || {});
+ this.dataset = attributes.dataset;
//console.debug( this + '.config:', this.config );
},
@@ -65,7 +62,7 @@
//console.debug( 'currPage', this.config.pagination.currPage );
var view = this;
//TODO: very tied to datasets - should be generalized eventually
- xhr = jQuery.getJSON( '/api/datasets/' + this.config.dataset.id, {
+ xhr = jQuery.getJSON( '/api/datasets/' + this.dataset.id, {
data_type : 'raw_data',
provider : 'dataset-column',
limit : this.config.pagination.perPage,
@@ -151,7 +148,7 @@
},
renderLineInfo : function( data ){
- var totalLines = this.config.dataset.metadata_data_lines || 'an unknown number of',
+ var totalLines = this.dataset.metadata_data_lines || 'an unknown number of',
lineStart = ( this.config.pagination.currPage * this.config.pagination.perPage ),
lineEnd = lineStart + data.length;
return $( '<p/>' ).addClass( 'scatterplot-data-info' )
@@ -168,9 +165,9 @@
}
//TODO: cache numPages/numLines in config
var view = this,
- dataLines = this.config.dataset.metadata_data_lines,
+ dataLines = this.dataset.metadata_data_lines,
numPages = ( dataLines )?( Math.ceil( dataLines / this.config.pagination.perPage ) ):( undefined );
- //console.debug( 'data:', this.config.dataset.metadata_data_lines, 'numPages:', numPages );
+ //console.debug( 'data:', this.dataset.metadata_data_lines, 'numPages:', numPages );
// prev next buttons
var $prev = makePage$Li( 'Prev' ).click( function(){
@@ -207,9 +204,9 @@
}
//TODO: cache numPages/numLines in config
var view = this,
- dataLines = this.config.dataset.metadata_data_lines,
+ dataLines = this.dataset.metadata_data_lines,
numPages = ( dataLines )?( Math.ceil( dataLines / this.config.pagination.perPage ) ):( undefined );
- //console.debug( 'data:', this.config.dataset.metadata_data_lines, 'numPages:', numPages );
+ //console.debug( 'data:', this.dataset.metadata_data_lines, 'numPages:', numPages );
// page numbers (as separate control)
//var $paginationContainer = $( '<div/>' ).addClass( 'pagination-container' ),
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 config/plugins/visualizations/scatterplot/src/visualization-templates.html
--- a/config/plugins/visualizations/scatterplot/src/visualization-templates.html
+++ /dev/null
@@ -1,197 +0,0 @@
-<script type="text/template" class="template-visualization" id="template-visualization-scatterplotControlForm">
-{{! main layout }}
-
-<h1>WHAAAAA?</h1>
-<div class="scatterplot-container chart-container tabbable tabs-left">
- {{! tab buttons/headers using Bootstrap }}
- <ul class="nav nav-tabs">
- {{! start with the data controls as the displayed tab }}
- <li class="active">
- <a title="Use this tab to change which data are used"
- href="#data-control" data-toggle="tab">Data Controls</a>
- </li>
- <li>
- <a title="Use this tab to change how the chart is drawn"
- href="#chart-control" data-toggle="tab" >Chart Controls</a>
- </li>
- <li>
- <a title="This tab will display overall statistics for your data"
- href="#stats-display" data-toggle="tab">Statistics</a>
- </li>
- <li>
- <a title="This tab will display the chart"
- href="#chart-display" data-toggle="tab">Chart</a>
-
- <div id="loading-indicator" style="display: none;">
- <img class="loading-img" src="{{loadingIndicatorImagePath}}" />
- <span class="loading-message">{{message}}</span>
- </div>
- </li>
- </ul>
-
- {{! data form, chart config form, stats, and chart all get their own tab }}
- <div class="tab-content">
- {{! ---------------------------- tab for data settings form }}
- <div id="data-control" class="tab-pane active">
- {{! rendered separately }}
- </div>
-
- {{! ---------------------------- tab for chart graphics control form }}
- <div id="chart-control" class="tab-pane">
- {{! rendered separately }}
- </div>
-
- {{! ---------------------------- tab for data statistics }}
- <div id="stats-display" class="tab-pane">
- <p class="help-text">By column:</p>
- <table id="chart-stats-table">
- <thead><th></th><th>X</th><th>Y</th></thead>
- {{#each stats}}
- <tr><td>{{name}}</td><td>{{xval}}</td><td>{{yval}}</td></tr>
- </tr>
- {{/each}}
- </table>
- </div>
-
- {{! ---------------------------- tab for actual chart }}
- <div id="chart-display" class="tab-pane">
- <svg width="{{width}}" height="{{height}}"></svg>
- </div>
-
- </div>{{! end .tab-content }}
-</div>{{! end .chart-control }}
-</script>
-
-<script type="text/template" class="template-visualization" id="template-visualization-dataControl">
-
- <p class="help-text">
- Use the following controls to change the data used by the chart.
- Use the 'Draw' button to render (or re-render) the chart with the current settings.
- </p>
-
- {{! column selector containers }}
- <div class="column-select">
- <label for="X-select">Data column for X: </label>
- <select name="X" id="X-select">
- {{#each numericColumns}}
- <option value="{{index}}">{{name}}</option>
- {{/each}}
- </select>
- </div>
- <div class="column-select">
- <label for="Y-select">Data column for Y: </label>
- <select name="Y" id="Y-select">
- {{#each numericColumns}}
- <option value="{{index}}">{{name}}</option>
- {{/each}}
- </select>
- </div>
-
- {{! optional id column }}
- <div id="include-id">
- <label for="include-id-checkbox">Include a third column as data point IDs?</label>
- <input type="checkbox" name="include-id" id="include-id-checkbox" />
- <p class="help-text-small">
- These will be displayed (along with the x and y values) when you hover over
- a data point.
- </p>
- </div>
- <div class="column-select" style="display: none">
- <label for="ID-select">Data column for IDs: </label>
- <select name="ID" id="ID-select">
- {{#each allColumns}}
- <option value="{{index}}">{{name}}</option>
- {{/each}}
- </select>
- </div>
-
- {{! if we're using generic column selection names ('column 1') - allow the user to use the first line }}
- <div id="first-line-header" style="display: none;">
- <p>Possible headers: {{ possibleHeaders }}
- </p>
- <label for="first-line-header-checkbox">Use the above as column headers?</label>
- <input type="checkbox" name="include-id" id="first-line-header-checkbox"
- {{#if usePossibleHeaders }}checked="true"{{/if}}/>
- <p class="help-text-small">
- It looks like Galaxy couldn't get proper column headers for this data.
- Would you like to use the column headers above as column names to select columns?
- </p>
- </div>
-
- <input id="render-button" type="button" value="Draw" />
- <div class="clear"></div>
-</script>
-
-<script type="text/template" class="template-visualization" id="template-visualization-chartControl">
- <p class="help-text">
- Use the following controls to how the chart is displayed.
- The slide controls can be moved by the mouse or, if the 'handle' is in focus, your keyboard's arrow keys.
- Move the focus between controls by using the tab or shift+tab keys on your keyboard.
- Use the 'Draw' button to render (or re-render) the chart with the current settings.
- </p>
-
- <div id="datapointSize" class="form-input numeric-slider-input">
- <label for="datapointSize">Size of data point: </label>
- <div class="slider-output">{{datapointSize}}</div>
- <div class="slider"></div>
- <p class="form-help help-text-small">
- Size of the graphic representation of each data point
- </p>
- </div>
-
- <div id="animDuration" class="form-input checkbox-input">
- <label for="animate-chart">Animate chart transitions?: </label>
- <input type="checkbox" id="animate-chart"
- class="checkbox control"{{#if animDuration}} checked="true"{{/if}} />
- <p class="form-help help-text-small">
- Uncheck this to disable the animations used on the chart
- </p>
- </div>
-
- <div id="width" class="form-input numeric-slider-input">
- <label for="width">Chart width: </label>
- <div class="slider-output">{{width}}</div>
- <div class="slider"></div>
- <p class="form-help help-text-small">
- (not including chart margins and axes)
- </p>
- </div>
-
- <div id="height" class="form-input numeric-slider-input">
- <label for="height">Chart height: </label>
- <div class="slider-output">{{height}}</div>
- <div class="slider"></div>
- <p class="form-help help-text-small">
- (not including chart margins and axes)
- </p>
- </div>
-
- <div id="X-axis-label"class="text-input form-input">
- <label for="X-axis-label">Re-label the X axis: </label>
- <input type="text" name="X-axis-label" id="X-axis-label" value="{{xLabel}}" />
- <p class="form-help help-text-small"></p>
- </div>
-
- <div id="Y-axis-label" class="text-input form-input">
- <label for="Y-axis-label">Re-label the Y axis: </label>
- <input type="text" name="Y-axis-label" id="Y-axis-label" value="{{yLabel}}" />
- <p class="form-help help-text-small"></p>
- </div>
-
- <input id="render-button" type="button" value="Draw" />
-</script>
-
-<script type="text/template" class="template-visualization" id="template-visualization-statsDisplay">
- <p class="help-text">By column:</p>
- <table id="chart-stats-table">
- <thead><th></th><th>X</th><th>Y</th></thead>
- {{#each stats}}
- <tr><td>{{name}}</td><td>{{xval}}</td><td>{{yval}}</td></tr>
- </tr>
- {{/each}}
- </table>
-</script>
-
-<script type="text/template" class="template-visualization" id="template-visualization-chartDisplay">
- <svg width="{{width}}" height="{{height}}"></svg>
-</script>
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 config/plugins/visualizations/scatterplot/static/scatterplot-edit.js
--- a/config/plugins/visualizations/scatterplot/static/scatterplot-edit.js
+++ b/config/plugins/visualizations/scatterplot/static/scatterplot-edit.js
@@ -1,1 +1,1 @@
-function scatterplot(a,b,c){function d(){var a={v:{},h:{}};return a.v.lines=p.selectAll("line.v-grid-line").data(m.x.ticks(q.x.fn.ticks()[0])),a.v.lines.enter().append("svg:line").classed("grid-line v-grid-line",!0),a.v.lines.attr("x1",m.x).attr("x2",m.x).attr("y1",0).attr("y2",b.height),a.v.lines.exit().remove(),a.h.lines=p.selectAll("line.h-grid-line").data(m.y.ticks(q.y.fn.ticks()[0])),a.h.lines.enter().append("svg:line").classed("grid-line h-grid-line",!0),a.h.lines.attr("x1",0).attr("x2",b.width).attr("y1",m.y).attr("y2",m.y),a.h.lines.exit().remove(),a}function e(){return t.attr("cx",function(a,b){return m.x(j(a,b))}).attr("cy",function(a,b){return m.y(k(a,b))}).style("display","block").filter(function(){var a=d3.select(this).attr("cx"),c=d3.select(this).attr("cy");return 0>a||a>b.width?!0:0>c||c>b.height?!0:!1}).style("display","none")}function f(){q.redraw(),e(),s=d(),$(".chart-info-box").remove(),$(o.node()).trigger("zoom.scatterplot",[])}function g(a,c,d){return c+=8,$(['<div class="chart-info-box" style="position: absolute">',b.idColumn?"<div>"+d[b.idColumn]+"</div>":"","<div>",j(d),"</div>","<div>",k(d),"</div>","</div>"].join("")).css({top:a,left:c,"z-index":2})}var h=function(a,b){return"translate("+a+","+b+")"},i=function(a,b,c){return"rotate("+a+","+b+","+c+")"},j=function(a){return a[b.xColumn]},k=function(a){return a[b.yColumn]},l={x:{extent:d3.extent(c,j)},y:{extent:d3.extent(c,k)}},m={x:d3.scale.linear().domain(l.x.extent).range([0,b.width]),y:d3.scale.linear().domain(l.y.extent).range([b.height,0])},n=d3.behavior.zoom().x(m.x).y(m.y).scaleExtent([1,10]),o=d3.select(a).attr("class","scatterplot").attr("width","100%").attr("height",b.height+(b.margin.top+b.margin.bottom)),p=o.append("g").attr("class","content").attr("transform",h(b.margin.left,b.margin.top)).call(n);p.append("rect").attr("class","zoom-rect").attr("width",b.width).attr("height",b.height).style("fill","transparent");var q={x:{},y:{}};q.x.fn=d3.svg.axis().orient("bottom").scale(m.x).ticks(b.x.ticks).tickFormat(d3.format("s")),q.y.fn=d3.svg.axis().orient("left").scale(m.y).ticks(b.y.ticks).tickFormat(d3.format("s")),q.x.g=p.append("g").attr("class","x axis").attr("transform",h(0,b.height)).call(q.x.fn),q.y.g=p.append("g").attr("class","y axis").call(q.y.fn);var r=4;q.x.label=o.append("text").attr("class","axis-label").text(b.x.label).attr("text-anchor","middle").attr("dominant-baseline","text-after-edge").attr("x",b.width/2+b.margin.left).attr("y",b.height+b.margin.bottom+b.margin.top-r),q.y.label=o.append("text").attr("class","axis-label").text(b.y.label).attr("text-anchor","middle").attr("dominant-baseline","text-before-edge").attr("x",r).attr("y",b.height/2).attr("transform",i(-90,r,b.height/2)),q.redraw=function(){o.select(".x.axis").call(q.x.fn),o.select(".y.axis").call(q.y.fn)};var s=d(),t=p.selectAll(".glyph").data(c).enter().append("svg:circle").classed("glyph",!0).attr("cx",function(a,b){return m.x(j(a,b))}).attr("cy",b.height).attr("r",0);t.transition().duration(b.animDuration).attr("cy",function(a,b){return m.y(k(a,b))}).attr("r",b.datapointSize),n.on("zoom",f),t.on("mouseover",function(a,c){var 
d=d3.select(this);d.style("fill","red").style("fill-opacity",1),p.append("line").attr("stroke","red").attr("stroke-width",1).attr("x1",d.attr("cx")-b.datapointSize).attr("y1",d.attr("cy")).attr("x2",0).attr("y2",d.attr("cy")).classed("hoverline",!0),d.attr("cy")<b.height&&p.append("line").attr("stroke","red").attr("stroke-width",1).attr("x1",d.attr("cx")).attr("y1",+d.attr("cy")+b.datapointSize).attr("x2",d.attr("cx")).attr("y2",b.height).classed("hoverline",!0);var e=this.getBoundingClientRect();$("body").append(g(e.top,e.right,a)),$(o.node()).trigger("mouseover-datapoint.scatterplot",[this,a,c])}),t.on("mouseout",function(){d3.select(this).style("fill","black").style("fill-opacity",.2),p.selectAll(".hoverline").remove(),$(".chart-info-box").remove()})}this.Templates=this.Templates||{},this.Templates.chartcontrol=Handlebars.template(function(a,b,c,d,e){this.compilerInfo=[4,">= 1.0.0"],c=this.merge(c,a.helpers),e=e||{};var f,g="",h="function",i=this.escapeExpression;return g+='<p class="help-text">\n Use the following controls to how the chart is displayed.\n The slide controls can be moved by the mouse or, if the \'handle\' is in focus, your keyboard\'s arrow keys.\n Move the focus between controls by using the tab or shift+tab keys on your keyboard.\n Use the \'Draw\' button to render (or re-render) the chart with the current settings.\n</p>\n\n<div data-config-key="datapointSize" class="form-input numeric-slider-input">\n <label for="datapointSize">Size of data point: </label>\n <div class="slider-output">',(f=c.datapointSize)?f=f.call(b,{hash:{},data:e}):(f=b.datapointSize,f=typeof f===h?f.apply(b):f),g+=i(f)+'</div>\n <div class="slider"></div>\n <p class="form-help help-text-small">\n Size of the graphic representation of each data point\n </p>\n</div>\n\n<div data-config-key="width" class="form-input numeric-slider-input">\n <label for="width">Chart width: </label>\n <div class="slider-output">',(f=c.width)?f=f.call(b,{hash:{},data:e}):(f=b.width,f=typeof f===h?f.apply(b):f),g+=i(f)+'</div>\n <div class="slider"></div>\n <p class="form-help help-text-small">\n (not including chart margins and axes)\n </p>\n</div>\n\n<div data-config-key="height" class="form-input numeric-slider-input">\n <label for="height">Chart height: </label>\n <div class="slider-output">',(f=c.height)?f=f.call(b,{hash:{},data:e}):(f=b.height,f=typeof f===h?f.apply(b):f),g+=i(f)+'</div>\n <div class="slider"></div>\n <p class="form-help help-text-small">\n (not including chart margins and axes)\n </p>\n</div>\n\n<div data-config-key="X-axis-label"class="text-input form-input">\n <label for="X-axis-label">Re-label the X axis: </label>\n <input type="text" name="X-axis-label" id="X-axis-label" value="'+i((f=b.x,f=null==f||f===!1?f:f.label,typeof f===h?f.apply(b):f))+'" />\n <p class="form-help help-text-small"></p>\n</div>\n\n<div data-config-key="Y-axis-label" class="text-input form-input">\n <label for="Y-axis-label">Re-label the Y axis: </label>\n <input type="text" name="Y-axis-label" id="Y-axis-label" value="'+i((f=b.y,f=null==f||f===!1?f:f.label,typeof f===h?f.apply(b):f))+'" />\n <p class="form-help help-text-small"></p>\n</div>\n\n<button class="render-button btn btn-primary active">Draw</button>\n'}),this.Templates.datacontrol=Handlebars.template(function(a,b,c,d,e){function f(a,b){var d,e="";return e+='\n <option value="',(d=c.index)?d=d.call(a,{hash:{},data:b}):(d=a.index,d=typeof d===j?d.apply(a):d),e+=k(d)+'">',(d=c.name)?d=d.call(a,{hash:{},data:b}):(d=a.name,d=typeof 
d===j?d.apply(a):d),e+=k(d)+"</option>\n "}function g(){return'checked="true"'}this.compilerInfo=[4,">= 1.0.0"],c=this.merge(c,a.helpers),e=e||{};var h,i="",j="function",k=this.escapeExpression,l=this;return i+='<p class="help-text">\n Use the following controls to change the data used by the chart.\n Use the \'Draw\' button to render (or re-render) the chart with the current settings.\n</p>\n\n\n<div class="column-select">\n <label>Data column for X: </label>\n <select name="xColumn">\n ',h=c.each.call(b,b.numericColumns,{hash:{},inverse:l.noop,fn:l.program(1,f,e),data:e}),(h||0===h)&&(i+=h),i+='\n </select>\n</div>\n<div class="column-select">\n <label>Data column for Y: </label>\n <select name="yColumn">\n ',h=c.each.call(b,b.numericColumns,{hash:{},inverse:l.noop,fn:l.program(1,f,e),data:e}),(h||0===h)&&(i+=h),i+='\n </select>\n</div>\n\n\n<div id="include-id">\n <label for="include-id-checkbox">Include a third column as data point IDs?</label>\n <input type="checkbox" name="include-id" id="include-id-checkbox" />\n <p class="help-text-small">\n These will be displayed (along with the x and y values) when you hover over\n a data point.\n </p>\n</div>\n<div class="column-select" style="display: none">\n <label for="ID-select">Data column for IDs: </label>\n <select name="idColumn">\n ',h=c.each.call(b,b.allColumns,{hash:{},inverse:l.noop,fn:l.program(1,f,e),data:e}),(h||0===h)&&(i+=h),i+='\n </select>\n</div>\n\n\n<div id="first-line-header" style="display: none;">\n <p>Possible headers: ',(h=c.possibleHeaders)?h=h.call(b,{hash:{},data:e}):(h=b.possibleHeaders,h=typeof h===j?h.apply(b):h),i+=k(h)+'\n </p>\n <label for="first-line-header-checkbox">Use the above as column headers?</label>\n <input type="checkbox" name="include-id" id="first-line-header-checkbox"\n ',h=c["if"].call(b,b.usePossibleHeaders,{hash:{},inverse:l.noop,fn:l.program(3,g,e),data:e}),(h||0===h)&&(i+=h),i+='/>\n <p class="help-text-small">\n It looks like Galaxy couldn\'t get proper column headers for this data.\n Would you like to use the column headers above as column names to select columns?\n </p>\n</div>\n\n<button class="render-button btn btn-primary active">Draw</button>\n'}),this.Templates.editor=Handlebars.template(function(a,b,c,d,e){this.compilerInfo=[4,">= 1.0.0"],c=this.merge(c,a.helpers),e=e||{};var f="";return f+='<div class="scatterplot-editor tabbable tabs-left">\n \n <ul class="nav nav-tabs">\n \n <li class="active">\n <a title="Use this tab to change which data are used"\n href="#data-control" data-toggle="tab">Data Controls</a>\n </li>\n <li>\n <a title="Use this tab to change how the chart is drawn"\n href="#chart-control" data-toggle="tab" >Chart Controls</a>\n </li>\n \n <li class="disabled">\n <a title="This tab will display the chart"\n href="#chart-display" data-toggle="tab">Chart</a>\n </li>\n </ul>\n\n \n <div class="tab-content">\n \n <div id="data-control" class="scatterplot-config-control tab-pane active">\n \n </div>\n \n \n <div id="chart-control" class="scatterplot-config-control tab-pane">\n \n </div>\n\n \n <div id="chart-display" class="scatterplot-display tab-pane"></div>\n\n </div>\n</div>\n'});var ScatterplotConfigEditor=BaseView.extend(LoggableMixin).extend({className:"scatterplot-control-form",initialize:function(a){if(!a||!a.config||!a.config.dataset)throw new Error("ScatterplotView requires a configuration and dataset");this.dataset=a.config.dataset,this.plotView=new ScatterplotView({config:a.config})},render:function(){return 
this.$el.append(ScatterplotConfigEditor.templates.mainLayout({})),this.$el.find("#data-control").append(this._render_dataControl()),this._render_chartControls(this.$el.find("#chart-control")),this._render_chartDisplay(),this.$el.find("[title]").tooltip(),this},_render_dataControl:function(){var a=this.dataset,b=_.map(a.metadata_column_types,function(b,c){var d={index:c,type:b,name:"column "+(c+1)};return a.metadata_column_names&&a.metadata_column_names[c]&&(d.name=a.metadata_column_names[c]),d}),c=_.filter(b,function(a){return"int"===a.type||"float"===a.type});2>c&&(c=b);var d=this.$el.find(".tab-pane#data-control");return d.html(ScatterplotConfigEditor.templates.dataControl({allColumns:b,numericColumns:c})),d.find('[name="xColumn"]').val(this.plotView.config.xColumn||c[0].index),d.find('[name="yColumn"]').val(this.plotView.config.yColumn||c[1].index),void 0!==this.plotView.config.idColumn&&(d.find("#include-id-checkbox").prop("checked",!0).trigger("change"),d.find('select[name="idColumn"]').val(this.plotView.config.idColumn)),d},_render_chartControls:function(a){function b(){var a=$(this);a.siblings(".slider-output").text(a.slider("value"))}a.html(ScatterplotConfigEditor.templates.chartControl(this.plotView.config));var c=this,d={datapointSize:{min:2,max:10,step:1},width:{min:200,max:800,step:20},height:{min:200,max:800,step:20}};return a.find(".numeric-slider-input").each(function(){var a=$(this),e=a.attr("data-config-key"),f=_.extend(d[e],{value:c.plotView.config[e],change:b,slide:b});a.find(".slider").slider(f)}),this.dataset.metadata_column_names,a},_render_chartDisplay:function(){var a=this.$el.find(".tab-pane#chart-display");return this.plotView.setElement(a),this.plotView.render(),a},events:{"change #include-id-checkbox":"toggleThirdColumnSelector","click #data-control .render-button":"renderChart","click #chart-control .render-button":"renderChart"},toggleThirdColumnSelector:function(){this.$el.find('select[name="idColumn"]').parent().toggle()},renderChart:function(){this.$el.find(".nav li.disabled").removeClass("disabled"),this.updateConfigWithDataSettings(),this.updateConfigWithChartSettings(),this.$el.find("ul.nav").find('a[href="#chart-display"]').tab("show"),this.plotView.fetchData()},updateConfigWithDataSettings:function(){var a=this.$el.find("#data-control"),b={xColumn:a.find('[name="xColumn"]').val(),yColumn:a.find('[name="yColumn"]').val()};return a.find("#include-id-checkbox").prop("checked")&&(b.idColumn=a.find('[name="idColumn"]').val()),_.extend(this.plotView.config,b)},updateConfigWithChartSettings:function(){var a=this.plotView,b=this.$el.find("#chart-control");return["datapointSize","width","height"].forEach(function(c){a.config[c]=b.find('.numeric-slider-input[data-config-key="'+c+'"]').find(".slider").slider("value")}),a.config.x.label=b.find('input[name="X-axis-label"]').val(),a.config.y.label=b.find('input[name="Y-axis-label"]').val(),a.config},toString:function(){return"ScatterplotConfigEditor("+(this.dataset?this.dataset.id:"")+")"}});ScatterplotConfigEditor.templates={mainLayout:Templates.editor,dataControl:Templates.datacontrol,chartControl:Templates.chartcontrol};var ScatterplotView=Backbone.View.extend({defaults:{dataset:{},metadata:{dataLines:void 
0},ajaxFn:null,pagination:{currPage:0,perPage:3e3},width:400,height:400,margin:{top:16,right:16,bottom:40,left:54},x:{ticks:10,label:"X"},y:{ticks:10,label:"Y"},datapointSize:4,animDuration:500},initialize:function(a){this.config=_.extend(_.clone(this.defaults),a.config||{})},updateConfig:function(a){this.config=this.config||{},_.extend(this.config,a)},fetchData:function(){this.showLoadingIndicator("getting data");var a=this;return xhr=jQuery.getJSON("/api/datasets/"+this.config.dataset.id,{data_type:"raw_data",provider:"dataset-column",limit:this.config.pagination.perPage,offset:this.config.pagination.currPage*this.config.pagination.perPage}),xhr.done(function(b){a.renderData(b.data)}),xhr.fail(function(a,b,c){alert("Error loading data:\n"+a.responseText),console.error(a,b,c)}),xhr.always(function(){a.hideLoadingIndicator()}),xhr},render:function(a){return this.$el.addClass("scatterplot-display").html(['<div class="controls clear"></div>','<div class="loading-indicator">','<span class="fa fa-spinner fa-spin"></span>','<span class="loading-indicator-message"></span>',"</div>","<svg/>",'<div class="stats-display"></div>'].join("")),this.$el.children().hide(),a&&this.renderData(a),this},showLoadingIndicator:function(a,b){a=a||"",b=b||"fast";var c=this.$el.find(".loading-indicator");a&&c.find(".loading-indicator-message").text(a),c.is(":visible")||(this.toggleStats(!1),c.css({left:this.config.width/2,top:this.config.height/2}).show())},hideLoadingIndicator:function(a){a=a||"fast",this.$el.find(".loading-indicator").hide()},renderData:function(a){this.$el.find(".controls").empty().append(this.renderControls(a)).show(),this.renderPlot(a),this.getStats(a)},renderControls:function(a){var b=this,c=$('<div class="left"></div>'),d=$('<div class="right"></div>');return c.append([this.renderPrevNext(a),this.renderPagination(a)]),d.append([this.renderLineInfo(a),$("<button>Stats</button>").addClass("stats-toggle-btn").click(function(){b.toggleStats()}),$("<button>Redraw</button>").addClass("rerender-btn").click(function(){b.renderPlot(a)})]),[c,d]},renderLineInfo:function(a){var b=this.config.dataset.metadata_data_lines||"an unknown number of",c=this.config.pagination.currPage*this.config.pagination.perPage,d=c+a.length;return $("<p/>").addClass("scatterplot-data-info").text(["Displaying lines",c+1,"to",d,"of",b,"lines"].join(" "))},renderPrevNext:function(a){function b(a){return $(['<li><a href="javascript:void(0);">',a,"</a></li>"].join(""))}if(!a||0===this.config.pagination.currPage&&a.length<this.config.pagination.perPage)return null;var c=this,d=this.config.dataset.metadata_data_lines,e=d?Math.ceil(d/this.config.pagination.perPage):void 0,f=b("Prev").click(function(){c.config.pagination.currPage>0&&(c.config.pagination.currPage-=1,c.fetchData())}),g=b("Next").click(function(){(!e||c.config.pagination.currPage<e-1)&&(c.config.pagination.currPage+=1,c.fetchData())}),h=$("<ul/>").addClass("pagination data-prev-next").append([f,g]);return 0===c.config.pagination.currPage&&f.addClass("disabled"),e&&c.config.pagination.currPage===e-1&&g.addClass("disabled"),h},renderPagination:function(a){function b(a){return $(['<li><a href="javascript:void(0);">',a,"</a></li>"].join(""))}function c(){d.config.pagination.currPage=$(this).data("page"),d.fetchData()}if(!a||0===this.config.pagination.currPage&&a.length<this.config.pagination.perPage)return null;for(var d=this,e=this.config.dataset.metadata_data_lines,f=e?Math.ceil(e/this.config.pagination.perPage):void 0,g=$("<ul/>").addClass("pagination 
data-pages"),h=0;f>h;h+=1){var i=b(h+1).attr("data-page",h).click(c);h===this.config.pagination.currPage&&i.addClass("active"),g.append(i)}return g},renderPlot:function(a){this.toggleStats(!1);var b=this.$el.find("svg");b.off().empty().show(),scatterplot(b.get(0),this.config,a)},getStats:function(a){var b=this;meanWorker=new Worker("/plugins/visualizations/scatterplot/static/worker-stats.js"),meanWorker.postMessage({data:a,keys:[this.config.xColumn,this.config.yColumn]}),meanWorker.onerror=function(){meanWorker.terminate()},meanWorker.onmessage=function(a){b.renderStats(a.data)}},renderStats:function(a){var b=this.$el.find(".stats-display"),c=this.config.x.label,d=this.config.y.label,e=$("<table/>").addClass("table").append(["<thead><th></th><th>",c,"</th><th>",d,"</th></thead>"].join("")).append(_.map(a,function(a,b){return $(["<tr><td>",b,"</td><td>",a[0],"</td><td>",a[1],"</td></tr>"].join(""))}));b.empty().append(e)},toggleStats:function(a){var b=this.$el.find(".stats-display");a=void 0===a?b.is(":hidden"):a,a?(this.$el.find("svg").hide(),b.show(),this.$el.find(".controls .stats-toggle-btn").text("Plot")):(b.hide(),this.$el.find("svg").show(),this.$el.find(".controls .stats-toggle-btn").text("Stats"))},toString:function(){return"ScatterplotView()"}});
\ No newline at end of file
+function scatterplot(a,b,c){function d(){var a={v:{},h:{}};return a.v.lines=p.selectAll("line.v-grid-line").data(m.x.ticks(q.x.fn.ticks()[0])),a.v.lines.enter().append("svg:line").classed("grid-line v-grid-line",!0),a.v.lines.attr("x1",m.x).attr("x2",m.x).attr("y1",0).attr("y2",b.height),a.v.lines.exit().remove(),a.h.lines=p.selectAll("line.h-grid-line").data(m.y.ticks(q.y.fn.ticks()[0])),a.h.lines.enter().append("svg:line").classed("grid-line h-grid-line",!0),a.h.lines.attr("x1",0).attr("x2",b.width).attr("y1",m.y).attr("y2",m.y),a.h.lines.exit().remove(),a}function e(){return t.attr("cx",function(a,b){return m.x(j(a,b))}).attr("cy",function(a,b){return m.y(k(a,b))}).style("display","block").filter(function(){var a=d3.select(this).attr("cx"),c=d3.select(this).attr("cy");return 0>a||a>b.width?!0:0>c||c>b.height?!0:!1}).style("display","none")}function f(){q.redraw(),e(),s=d(),$(".chart-info-box").remove(),$(o.node()).trigger("zoom.scatterplot",[])}function g(a,c,d){return c+=8,$(['<div class="chart-info-box" style="position: absolute">',b.idColumn?"<div>"+d[b.idColumn]+"</div>":"","<div>",j(d),"</div>","<div>",k(d),"</div>","</div>"].join("")).css({top:a,left:c,"z-index":2})}var h=function(a,b){return"translate("+a+","+b+")"},i=function(a,b,c){return"rotate("+a+","+b+","+c+")"},j=function(a){return a[b.xColumn]},k=function(a){return a[b.yColumn]},l={x:{extent:d3.extent(c,j)},y:{extent:d3.extent(c,k)}},m={x:d3.scale.linear().domain(l.x.extent).range([0,b.width]),y:d3.scale.linear().domain(l.y.extent).range([b.height,0])},n=d3.behavior.zoom().x(m.x).y(m.y).scaleExtent([1,10]),o=d3.select(a).attr("class","scatterplot").attr("width","100%").attr("height",b.height+(b.margin.top+b.margin.bottom)),p=o.append("g").attr("class","content").attr("transform",h(b.margin.left,b.margin.top)).call(n);p.append("rect").attr("class","zoom-rect").attr("width",b.width).attr("height",b.height).style("fill","transparent");var q={x:{},y:{}};q.x.fn=d3.svg.axis().orient("bottom").scale(m.x).ticks(b.x.ticks).tickFormat(d3.format("s")),q.y.fn=d3.svg.axis().orient("left").scale(m.y).ticks(b.y.ticks).tickFormat(d3.format("s")),q.x.g=p.append("g").attr("class","x axis").attr("transform",h(0,b.height)).call(q.x.fn),q.y.g=p.append("g").attr("class","y axis").call(q.y.fn);var r=4;q.x.label=o.append("text").attr("class","axis-label").text(b.x.label).attr("text-anchor","middle").attr("dominant-baseline","text-after-edge").attr("x",b.width/2+b.margin.left).attr("y",b.height+b.margin.bottom+b.margin.top-r),q.y.label=o.append("text").attr("class","axis-label").text(b.y.label).attr("text-anchor","middle").attr("dominant-baseline","text-before-edge").attr("x",r).attr("y",b.height/2).attr("transform",i(-90,r,b.height/2)),q.redraw=function(){o.select(".x.axis").call(q.x.fn),o.select(".y.axis").call(q.y.fn)};var s=d(),t=p.selectAll(".glyph").data(c).enter().append("svg:circle").classed("glyph",!0).attr("cx",function(a,b){return m.x(j(a,b))}).attr("cy",b.height).attr("r",0);t.transition().duration(b.animDuration).attr("cy",function(a,b){return m.y(k(a,b))}).attr("r",b.datapointSize),n.on("zoom",f),t.on("mouseover",function(a,c){var 
d=d3.select(this);d.style("fill","red").style("fill-opacity",1),p.append("line").attr("stroke","red").attr("stroke-width",1).attr("x1",d.attr("cx")-b.datapointSize).attr("y1",d.attr("cy")).attr("x2",0).attr("y2",d.attr("cy")).classed("hoverline",!0),d.attr("cy")<b.height&&p.append("line").attr("stroke","red").attr("stroke-width",1).attr("x1",d.attr("cx")).attr("y1",+d.attr("cy")+b.datapointSize).attr("x2",d.attr("cx")).attr("y2",b.height).classed("hoverline",!0);var e=this.getBoundingClientRect();$("body").append(g(e.top,e.right,a)),$(o.node()).trigger("mouseover-datapoint.scatterplot",[this,a,c])}),t.on("mouseout",function(){d3.select(this).style("fill","black").style("fill-opacity",.2),p.selectAll(".hoverline").remove(),$(".chart-info-box").remove()})}this.Templates=this.Templates||{},this.Templates.chartcontrol=Handlebars.template(function(a,b,c,d,e){this.compilerInfo=[4,">= 1.0.0"],c=this.merge(c,a.helpers),e=e||{};var f,g="",h="function",i=this.escapeExpression;return g+='<p class="help-text">\n Use the following controls to how the chart is displayed.\n The slide controls can be moved by the mouse or, if the \'handle\' is in focus, your keyboard\'s arrow keys.\n Move the focus between controls by using the tab or shift+tab keys on your keyboard.\n Use the \'Draw\' button to render (or re-render) the chart with the current settings.\n</p>\n\n<div data-config-key="datapointSize" class="form-input numeric-slider-input">\n <label for="datapointSize">Size of data point: </label>\n <div class="slider-output">',(f=c.datapointSize)?f=f.call(b,{hash:{},data:e}):(f=b.datapointSize,f=typeof f===h?f.apply(b):f),g+=i(f)+'</div>\n <div class="slider"></div>\n <p class="form-help help-text-small">\n Size of the graphic representation of each data point\n </p>\n</div>\n\n<div data-config-key="width" class="form-input numeric-slider-input">\n <label for="width">Chart width: </label>\n <div class="slider-output">',(f=c.width)?f=f.call(b,{hash:{},data:e}):(f=b.width,f=typeof f===h?f.apply(b):f),g+=i(f)+'</div>\n <div class="slider"></div>\n <p class="form-help help-text-small">\n (not including chart margins and axes)\n </p>\n</div>\n\n<div data-config-key="height" class="form-input numeric-slider-input">\n <label for="height">Chart height: </label>\n <div class="slider-output">',(f=c.height)?f=f.call(b,{hash:{},data:e}):(f=b.height,f=typeof f===h?f.apply(b):f),g+=i(f)+'</div>\n <div class="slider"></div>\n <p class="form-help help-text-small">\n (not including chart margins and axes)\n </p>\n</div>\n\n<div data-config-key="X-axis-label"class="text-input form-input">\n <label for="X-axis-label">Re-label the X axis: </label>\n <input type="text" name="X-axis-label" id="X-axis-label" value="'+i((f=b.x,f=null==f||f===!1?f:f.label,typeof f===h?f.apply(b):f))+'" />\n <p class="form-help help-text-small"></p>\n</div>\n\n<div data-config-key="Y-axis-label" class="text-input form-input">\n <label for="Y-axis-label">Re-label the Y axis: </label>\n <input type="text" name="Y-axis-label" id="Y-axis-label" value="'+i((f=b.y,f=null==f||f===!1?f:f.label,typeof f===h?f.apply(b):f))+'" />\n <p class="form-help help-text-small"></p>\n</div>\n\n<button class="render-button btn btn-primary active">Draw</button>\n'}),this.Templates.datacontrol=Handlebars.template(function(a,b,c,d,e){function f(a,b){var d,e="";return e+='\n <option value="',(d=c.index)?d=d.call(a,{hash:{},data:b}):(d=a.index,d=typeof d===j?d.apply(a):d),e+=k(d)+'">',(d=c.name)?d=d.call(a,{hash:{},data:b}):(d=a.name,d=typeof 
d===j?d.apply(a):d),e+=k(d)+"</option>\n "}function g(){return'checked="true"'}this.compilerInfo=[4,">= 1.0.0"],c=this.merge(c,a.helpers),e=e||{};var h,i="",j="function",k=this.escapeExpression,l=this;return i+='<p class="help-text">\n Use the following controls to change the data used by the chart.\n Use the \'Draw\' button to render (or re-render) the chart with the current settings.\n</p>\n\n\n<div class="column-select">\n <label>Data column for X: </label>\n <select name="xColumn">\n ',h=c.each.call(b,b.numericColumns,{hash:{},inverse:l.noop,fn:l.program(1,f,e),data:e}),(h||0===h)&&(i+=h),i+='\n </select>\n</div>\n<div class="column-select">\n <label>Data column for Y: </label>\n <select name="yColumn">\n ',h=c.each.call(b,b.numericColumns,{hash:{},inverse:l.noop,fn:l.program(1,f,e),data:e}),(h||0===h)&&(i+=h),i+='\n </select>\n</div>\n\n\n<div id="include-id">\n <label for="include-id-checkbox">Include a third column as data point IDs?</label>\n <input type="checkbox" name="include-id" id="include-id-checkbox" />\n <p class="help-text-small">\n These will be displayed (along with the x and y values) when you hover over\n a data point.\n </p>\n</div>\n<div class="column-select" style="display: none">\n <label for="ID-select">Data column for IDs: </label>\n <select name="idColumn">\n ',h=c.each.call(b,b.allColumns,{hash:{},inverse:l.noop,fn:l.program(1,f,e),data:e}),(h||0===h)&&(i+=h),i+='\n </select>\n</div>\n\n\n<div id="first-line-header" style="display: none;">\n <p>Possible headers: ',(h=c.possibleHeaders)?h=h.call(b,{hash:{},data:e}):(h=b.possibleHeaders,h=typeof h===j?h.apply(b):h),i+=k(h)+'\n </p>\n <label for="first-line-header-checkbox">Use the above as column headers?</label>\n <input type="checkbox" name="include-id" id="first-line-header-checkbox"\n ',h=c["if"].call(b,b.usePossibleHeaders,{hash:{},inverse:l.noop,fn:l.program(3,g,e),data:e}),(h||0===h)&&(i+=h),i+='/>\n <p class="help-text-small">\n It looks like Galaxy couldn\'t get proper column headers for this data.\n Would you like to use the column headers above as column names to select columns?\n </p>\n</div>\n\n<button class="render-button btn btn-primary active">Draw</button>\n'}),this.Templates.editor=Handlebars.template(function(a,b,c,d,e){this.compilerInfo=[4,">= 1.0.0"],c=this.merge(c,a.helpers),e=e||{};var f="";return f+='<div class="scatterplot-editor tabbable tabs-left">\n \n <ul class="nav nav-tabs">\n \n <li class="active">\n <a title="Use this tab to change which data are used"\n href="#data-control" data-toggle="tab">Data Controls</a>\n </li>\n <li>\n <a title="Use this tab to change how the chart is drawn"\n href="#chart-control" data-toggle="tab" >Chart Controls</a>\n </li>\n \n <li class="disabled">\n <a title="This tab will display the chart"\n href="#chart-display" data-toggle="tab">Chart</a>\n </li>\n </ul>\n\n \n <div class="tab-content">\n \n <div id="data-control" class="scatterplot-config-control tab-pane active">\n \n </div>\n \n \n <div id="chart-control" class="scatterplot-config-control tab-pane">\n \n </div>\n\n \n <div id="chart-display" class="scatterplot-display tab-pane"></div>\n\n </div>\n</div>\n'});var ScatterplotConfigEditor=BaseView.extend(LoggableMixin).extend({className:"scatterplot-control-form",initialize:function(a){if(!a||!a.config||!a.dataset)throw new Error("ScatterplotView requires a configuration and dataset");this.dataset=a.dataset,this.plotView=new ScatterplotView({dataset:a.dataset,config:a.config})},render:function(){return 
this.$el.append(ScatterplotConfigEditor.templates.mainLayout({})),this.$el.find("#data-control").append(this._render_dataControl()),this._render_chartControls(this.$el.find("#chart-control")),this._render_chartDisplay(),this.$el.find("[title]").tooltip(),this},_render_dataControl:function(){var a=this.dataset,b=_.map(a.metadata_column_types,function(b,c){var d={index:c,type:b,name:"column "+(c+1)};return a.metadata_column_names&&a.metadata_column_names[c]&&(d.name=a.metadata_column_names[c]),d}),c=_.filter(b,function(a){return"int"===a.type||"float"===a.type});2>c&&(c=b);var d=this.$el.find(".tab-pane#data-control");return d.html(ScatterplotConfigEditor.templates.dataControl({allColumns:b,numericColumns:c})),d.find('[name="xColumn"]').val(this.plotView.config.xColumn||c[0].index),d.find('[name="yColumn"]').val(this.plotView.config.yColumn||c[1].index),void 0!==this.plotView.config.idColumn&&(d.find("#include-id-checkbox").prop("checked",!0).trigger("change"),d.find('select[name="idColumn"]').val(this.plotView.config.idColumn)),d},_render_chartControls:function(a){function b(){var a=$(this);a.siblings(".slider-output").text(a.slider("value"))}a.html(ScatterplotConfigEditor.templates.chartControl(this.plotView.config));var c=this,d={datapointSize:{min:2,max:10,step:1},width:{min:200,max:800,step:20},height:{min:200,max:800,step:20}};return a.find(".numeric-slider-input").each(function(){var a=$(this),e=a.attr("data-config-key"),f=_.extend(d[e],{value:c.plotView.config[e],change:b,slide:b});a.find(".slider").slider(f)}),this.dataset.metadata_column_names,a},_render_chartDisplay:function(){var a=this.$el.find(".tab-pane#chart-display");return this.plotView.setElement(a),this.plotView.render(),a},events:{"change #include-id-checkbox":"toggleThirdColumnSelector","click #data-control .render-button":"renderChart","click #chart-control .render-button":"renderChart"},toggleThirdColumnSelector:function(){this.$el.find('select[name="idColumn"]').parent().toggle()},renderChart:function(){this.$el.find(".nav li.disabled").removeClass("disabled"),this.updateConfigWithDataSettings(),this.updateConfigWithChartSettings(),this.$el.find("ul.nav").find('a[href="#chart-display"]').tab("show"),this.plotView.fetchData()},updateConfigWithDataSettings:function(){var a=this.$el.find("#data-control"),b={xColumn:Number(a.find('[name="xColumn"]').val()),yColumn:Number(a.find('[name="yColumn"]').val())};return a.find("#include-id-checkbox").prop("checked")&&(b.idColumn=a.find('[name="idColumn"]').val()),_.extend(this.plotView.config,b)},updateConfigWithChartSettings:function(){var a=this.plotView,b=this.$el.find("#chart-control");return["datapointSize","width","height"].forEach(function(c){a.config[c]=b.find('.numeric-slider-input[data-config-key="'+c+'"]').find(".slider").slider("value")}),a.config.x.label=b.find('input[name="X-axis-label"]').val(),a.config.y.label=b.find('input[name="Y-axis-label"]').val(),a.config},toString:function(){return"ScatterplotConfigEditor("+(this.dataset?this.dataset.id:"")+")"}});ScatterplotConfigEditor.templates={mainLayout:Templates.editor,dataControl:Templates.datacontrol,chartControl:Templates.chartcontrol};var ScatterplotView=Backbone.View.extend({defaults:{metadata:{dataLines:void 
0},pagination:{currPage:0,perPage:3e3},width:400,height:400,margin:{top:16,right:16,bottom:40,left:54},x:{ticks:10,label:"X"},y:{ticks:10,label:"Y"},datapointSize:4,animDuration:500},initialize:function(a){this.config=_.extend(_.clone(this.defaults),a.config||{}),this.dataset=a.dataset},updateConfig:function(a){this.config=this.config||{},_.extend(this.config,a)},fetchData:function(){this.showLoadingIndicator("getting data");var a=this;return xhr=jQuery.getJSON("/api/datasets/"+this.dataset.id,{data_type:"raw_data",provider:"dataset-column",limit:this.config.pagination.perPage,offset:this.config.pagination.currPage*this.config.pagination.perPage}),xhr.done(function(b){a.renderData(b.data)}),xhr.fail(function(a,b,c){alert("Error loading data:\n"+a.responseText),console.error(a,b,c)}),xhr.always(function(){a.hideLoadingIndicator()}),xhr},render:function(a){return this.$el.addClass("scatterplot-display").html(['<div class="controls clear"></div>','<div class="loading-indicator">','<span class="fa fa-spinner fa-spin"></span>','<span class="loading-indicator-message"></span>',"</div>","<svg/>",'<div class="stats-display"></div>'].join("")),this.$el.children().hide(),a&&this.renderData(a),this},showLoadingIndicator:function(a,b){a=a||"",b=b||"fast";var c=this.$el.find(".loading-indicator");a&&c.find(".loading-indicator-message").text(a),c.is(":visible")||(this.toggleStats(!1),c.css({left:this.config.width/2,top:this.config.height/2}).show())},hideLoadingIndicator:function(a){a=a||"fast",this.$el.find(".loading-indicator").hide()},renderData:function(a){this.$el.find(".controls").empty().append(this.renderControls(a)).show(),this.renderPlot(a),this.getStats(a)},renderControls:function(a){var b=this,c=$('<div class="left"></div>'),d=$('<div class="right"></div>');return c.append([this.renderPrevNext(a),this.renderPagination(a)]),d.append([this.renderLineInfo(a),$("<button>Stats</button>").addClass("stats-toggle-btn").click(function(){b.toggleStats()}),$("<button>Redraw</button>").addClass("rerender-btn").click(function(){b.renderPlot(a)})]),[c,d]},renderLineInfo:function(a){var b=this.dataset.metadata_data_lines||"an unknown number of",c=this.config.pagination.currPage*this.config.pagination.perPage,d=c+a.length;return $("<p/>").addClass("scatterplot-data-info").text(["Displaying lines",c+1,"to",d,"of",b,"lines"].join(" "))},renderPrevNext:function(a){function b(a){return $(['<li><a href="javascript:void(0);">',a,"</a></li>"].join(""))}if(!a||0===this.config.pagination.currPage&&a.length<this.config.pagination.perPage)return null;var c=this,d=this.dataset.metadata_data_lines,e=d?Math.ceil(d/this.config.pagination.perPage):void 0,f=b("Prev").click(function(){c.config.pagination.currPage>0&&(c.config.pagination.currPage-=1,c.fetchData())}),g=b("Next").click(function(){(!e||c.config.pagination.currPage<e-1)&&(c.config.pagination.currPage+=1,c.fetchData())}),h=$("<ul/>").addClass("pagination data-prev-next").append([f,g]);return 0===c.config.pagination.currPage&&f.addClass("disabled"),e&&c.config.pagination.currPage===e-1&&g.addClass("disabled"),h},renderPagination:function(a){function b(a){return $(['<li><a href="javascript:void(0);">',a,"</a></li>"].join(""))}function c(){d.config.pagination.currPage=$(this).data("page"),d.fetchData()}if(!a||0===this.config.pagination.currPage&&a.length<this.config.pagination.perPage)return null;for(var d=this,e=this.dataset.metadata_data_lines,f=e?Math.ceil(e/this.config.pagination.perPage):void 0,g=$("<ul/>").addClass("pagination data-pages"),h=0;f>h;h+=1){var 
i=b(h+1).attr("data-page",h).click(c);h===this.config.pagination.currPage&&i.addClass("active"),g.append(i)}return g},renderPlot:function(a){this.toggleStats(!1);var b=this.$el.find("svg");b.off().empty().show(),scatterplot(b.get(0),this.config,a)},getStats:function(a){var b=this;meanWorker=new Worker("/plugins/visualizations/scatterplot/static/worker-stats.js"),meanWorker.postMessage({data:a,keys:[this.config.xColumn,this.config.yColumn]}),meanWorker.onerror=function(){meanWorker.terminate()},meanWorker.onmessage=function(a){b.renderStats(a.data)}},renderStats:function(a){var b=this.$el.find(".stats-display"),c=this.config.x.label,d=this.config.y.label,e=$("<table/>").addClass("table").append(["<thead><th></th><th>",c,"</th><th>",d,"</th></thead>"].join("")).append(_.map(a,function(a,b){return $(["<tr><td>",b,"</td><td>",a[0],"</td><td>",a[1],"</td></tr>"].join(""))}));b.empty().append(e)},toggleStats:function(a){var b=this.$el.find(".stats-display");a=void 0===a?b.is(":hidden"):a,a?(this.$el.find("svg").hide(),b.show(),this.$el.find(".controls .stats-toggle-btn").text("Plot")):(b.hide(),this.$el.find("svg").show(),this.$el.find(".controls .stats-toggle-btn").text("Stats"))},toString:function(){return"ScatterplotView()"}});
\ No newline at end of file
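For context on the packed ScatterplotView above: it pages data through the datasets API, deriving limit/offset from its pagination config (perPage, currPage). A rough Python 2 equivalent of the request it issues, with the Galaxy URL and dataset id as placeholders:

    import json, urllib, urllib2

    base_url = 'http://localhost:8080'            # placeholder Galaxy instance
    dataset_id = 'ebfb8f50c6abde6d'               # placeholder encoded dataset id
    curr_page, per_page = 0, 3000                 # mirrors pagination.currPage / pagination.perPage
    params = urllib.urlencode( dict( data_type='raw_data',
                                     provider='dataset-column',
                                     limit=per_page,
                                     offset=curr_page * per_page ) )
    response = urllib2.urlopen( '%s/api/datasets/%s?%s' % ( base_url, dataset_id, params ) )
    rows = json.load( response )[ 'data' ]        # one page of column data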
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 config/plugins/visualizations/scatterplot/templates/scatterplot.mako
--- a/config/plugins/visualizations/scatterplot/templates/scatterplot.mako
+++ b/config/plugins/visualizations/scatterplot/templates/scatterplot.mako
@@ -43,22 +43,18 @@
data = None
##data = list( hda.datatype.dataset_column_dataprovider( hda, limit=10000 ) )
%>
- var hda = ${h.to_json_string( trans.security.encode_dict_ids( hda.to_dict() ) )},
- data = ${h.to_json_string( data )},
- querySettings = ${h.to_json_string( query_args )},
- config = _.extend( querySettings, {
- containerSelector : '#chart',
- dataset : hda,
- });
- //console.debug( querySettings );
+ var hda = ${h.to_json_string( trans.security.encode_dict_ids( hda.to_dict() ) )};
var editor = new ScatterplotConfigEditor({
el : $( '.scatterplot-editor' ).attr( 'id', 'scatterplot-editor-hda-' + hda.id ),
- config : config
+ config : ${h.to_json_string( query_args )},
+ dataset : ${h.to_json_string( trans.security.encode_dict_ids( hda.to_dict() ) )}
}).render();
+ window.editor = editor;
// uncomment to auto render for development
//$( '.render-button:visible' ).click();
});
+
</script>
%endif
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/galaxy/datatypes/interval.py
--- a/lib/galaxy/datatypes/interval.py
+++ b/lib/galaxy/datatypes/interval.py
@@ -389,7 +389,7 @@
MetadataElement( name="endCol", default=3, desc="End column", param=metadata.ColumnParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
- MetadataElement( name="viz_filter_cols", desc="Score column for visualization", default=[4], param=metadata.ColumnParameter, multiple=True )
+ MetadataElement( name="viz_filter_cols", desc="Score column for visualization", default=[4], param=metadata.ColumnParameter, optional=True, multiple=True )
    ### do we need to repeat these? they are the same and should be inherited from the interval type
def set_meta( self, dataset, overwrite = True, **kwd ):
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/galaxy/datatypes/tabular.py
--- a/lib/galaxy/datatypes/tabular.py
+++ b/lib/galaxy/datatypes/tabular.py
@@ -652,7 +652,7 @@
MetadataElement( name="columns", default=10, desc="Number of columns", readonly=True, visible=False )
MetadataElement( name="column_types", default=['str','int','str','str','str','int','str','list','str','str'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
- MetadataElement( name="viz_filter_cols", desc="Score column for visualization", default=[5], param=metadata.ColumnParameter, multiple=True, visible=False )
+ MetadataElement( name="viz_filter_cols", desc="Score column for visualization", default=[5], param=metadata.ColumnParameter, optional=True, multiple=True, visible=False )
MetadataElement( name="sample_names", default=[], desc="Sample names", readonly=True, visible=False, optional=True, no_value=[] )
def sniff( self, filename ):
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/galaxy/model/search.py
--- a/lib/galaxy/model/search.py
+++ b/lib/galaxy/model/search.py
@@ -297,10 +297,14 @@
FIELDS = {
'name' : ViewField('name', sqlalchemy_field=HistoryDatasetAssociation.name),
'id' : ViewField('id',sqlalchemy_field=HistoryDatasetAssociation.id, id_decode=True),
+ 'history_id' : ViewField('history_id',sqlalchemy_field=HistoryDatasetAssociation.history_id, id_decode=True),
'tag' : ViewField("tag", handler=history_dataset_handle_tag),
'copied_from_ldda_id' : ViewField("copied_from_ldda_id",
sqlalchemy_field=HistoryDatasetAssociation.copied_from_library_dataset_dataset_association_id,
id_decode=True),
+ 'copied_from_hda_id' : ViewField("copied_from_hda_id",
+ sqlalchemy_field=HistoryDatasetAssociation.copied_from_history_dataset_association_id,
+ id_decode=True),
'deleted' : ViewField('deleted', sqlalchemy_field=HistoryDatasetAssociation.deleted)
}
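The two new fields make an HDA's parent history and copy provenance queryable through the model search view. A sketch of exercising them over the search API; the endpoint shape, view name, and the encoded id here are assumptions for illustration, not taken from this diff:

    import json, urllib2

    query = "select * from history_dataset where history_id = 'f2db41e1fa331b3e'"   # hypothetical encoded id
    request = urllib2.Request( 'http://localhost:8080/api/search?key=MY_API_KEY',   # hypothetical URL/key
                               data=json.dumps( dict( query=query ) ),
                               headers={ 'Content-Type': 'application/json' } )
    print json.load( urllib2.urlopen( request ) )

Because both fields are declared with id_decode=True, the query takes the encoded ids the API hands out, not raw database ids.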
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2083,19 +2083,11 @@
if any_group_errors:
errors[input.name] = group_errors
else:
- if key not in incoming \
- and "__force_update__" + key not in incoming:
- # No new value provided, and we are only updating, so keep
- # the old value (which should already be in the state) and
- # preserve the old error message.
- pass
- else:
- incoming_value = get_incoming_value( incoming, key, None )
- value, error = check_param( trans, input, incoming_value, context, source=source )
- # If a callback was provided, allow it to process the value
- if error:
- errors[ input.name ] = error
- state[ input.name ] = value
+ incoming_value = get_incoming_value( incoming, key, None )
+ value, error = check_param( trans, input, incoming_value, context, source=source )
+ if error:
+ errors[ input.name ] = error
+ state[ input.name ] = value
return errors
def update_state( self, trans, inputs, state, incoming, source='html', prefix="", context=None,
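The hunk above drops the "__force_update__" short-circuit, so every visited parameter is re-validated with check_param on each pass; note that state is assigned even when validation fails, so the page can redisplay the bad value next to its error. A toy illustration of that flow, with a hypothetical stand-in for check_param:

    # check_param here is a hypothetical stand-in returning ( value, error_or_None ).
    def check_param( value ):
        try:
            return int( value ), None
        except ( TypeError, ValueError ):
            return None, 'an integer is required'

    incoming = dict( threshold='42', window='abc' )
    state, errors = {}, {}
    for key in ( 'threshold', 'window' ):
        value, error = check_param( incoming.get( key ) )
        if error:
            errors[ key ] = error
        state[ key ] = value            # updated even on error, as in the hunk above
    print state, errors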
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/galaxy/visualization/registry.py
--- a/lib/galaxy/visualization/registry.py
+++ b/lib/galaxy/visualization/registry.py
@@ -477,7 +477,7 @@
return lambda o: getattr( o, next_attr_name )
# recursive case
- return lambda o: getattr( self._build_getattr_lambda( attr_name_list[:-1] ), next_attr_name )
+ return lambda o: getattr( self._build_getattr_lambda( attr_name_list[:-1] )( o ), next_attr_name )
def parse_tests( self, xml_tree_list ):
"""
@@ -493,7 +493,7 @@
return tests
for test_elem in xml_tree_list:
- test_type = test_elem.get( 'type' )
+ test_type = test_elem.get( 'type', 'eq' )
test_result = test_elem.text
if not test_type or not test_result:
log.warn( 'Skipping test. Needs both type attribute and text node to be parsed: '
@@ -509,9 +509,10 @@
getter = self._build_getattr_lambda( test_attr )
# result type should tell the registry how to convert the result before the test
- test_result_type = test_elem.get( 'result_type' ) or 'string'
+ test_result_type = test_elem.get( 'result_type', 'string' )
# test functions should be sent an object to test, and the parsed result expected from the test
+
# is test_attr attribute an instance of result
if test_type == 'isinstance':
#TODO: wish we could take this further but it would mean passing in the datatypes_registry
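With the defaults introduced above, a test element only strictly needs its text node: type falls back to 'eq' and result_type to 'string'. A minimal sketch of that defaulting on a bare element (the test_attr value is illustrative):

    from xml.etree import ElementTree

    test_elem = ElementTree.fromstring( '<test test_attr="datatype">bed</test>' )
    print test_elem.get( 'type', 'eq' )             # -> 'eq'
    print test_elem.get( 'result_type', 'string' )  # -> 'string'
    print test_elem.text                            # -> 'bed'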
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/galaxy/webapps/galaxy/api/visualizations.py
--- a/lib/galaxy/webapps/galaxy/api/visualizations.py
+++ b/lib/galaxy/webapps/galaxy/api/visualizations.py
@@ -10,7 +10,7 @@
from sqlalchemy import or_
from galaxy import web, util
-from galaxy.web.base.controller import BaseAPIController, UsesVisualizationMixin
+from galaxy.web.base.controller import BaseAPIController, UsesVisualizationMixin, SharableMixin
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.exceptions import ( ItemAccessibilityException, ItemDeletionException, ItemOwnershipException,
MessageException )
@@ -20,7 +20,7 @@
import logging
log = logging.getLogger( __name__ )
-class VisualizationsController( BaseAPIController, UsesVisualizationMixin, UsesAnnotations ):
+class VisualizationsController( BaseAPIController, UsesVisualizationMixin, SharableMixin, UsesAnnotations ):
"""
RESTful controller for interactions with visualizations.
"""
@@ -122,9 +122,10 @@
else:
payload = self._validate_and_parse_payload( payload )
+ vis_type = payload.pop( 'type', False )
payload[ 'save' ] = True
- # create needs defaults like wizard needs food - generate defaults - this will err if given a weird key?
- visualization = self.create_visualization( trans, **payload )
+ # generate defaults - this will err if given a weird key?
+ visualization = self.create_visualization( trans, vis_type, **payload )
rval = { 'id' : trans.security.encode_id( visualization.id ) }
@@ -217,11 +218,20 @@
#TODO: deleted
#TODO: importable
+ # must have a type (I've taken this to be the visualization name)
+ if 'type' not in payload:
+ raise ValueError( "key/value 'type' is required" )
+
validated_payload = {}
for key, val in payload.items():
- if key == 'config':
+ #TODO: validate types in VALID_TYPES/registry names at the mixin/model level?
+ if key == 'type':
+ if not ( isinstance( val, str ) or isinstance( val, unicode ) ):
+ raise ValueError( '%s must be a string or unicode: %s' %( key, str( type( val ) ) ) )
+ val = util.sanitize_html.sanitize_html( val, 'utf-8' )
+ elif key == 'config':
if not isinstance( val, dict ):
- raise ValueError( '%s must be a dictionary (JSON): %s' %( key, str( type( val ) ) ) )
+ raise ValueError( '%s must be a dictionary: %s' %( key, str( type( val ) ) ) )
elif key == 'annotation':
if not ( isinstance( val, str ) or isinstance( val, unicode ) ):
@@ -235,21 +245,16 @@
raise ValueError( '%s must be a string or unicode: %s' %( key, str( type( val ) ) ) )
val = util.sanitize_html.sanitize_html( val, 'utf-8' )
elif key == 'slug':
- if not isinstance( val, str ):
+ if not ( isinstance( val, str ) or isinstance( val, unicode ) ):
raise ValueError( '%s must be a string: %s' %( key, str( type( val ) ) ) )
val = util.sanitize_html.sanitize_html( val, 'utf-8' )
- elif key == 'type':
- if not isinstance( val, str ):
- raise ValueError( '%s must be a string: %s' %( key, str( type( val ) ) ) )
- val = util.sanitize_html.sanitize_html( val, 'utf-8' )
- #TODO: validate types in VALID_TYPES/registry names at the mixin/model level?
elif key == 'dbkey':
if not ( isinstance( val, str ) or isinstance( val, unicode ) ):
raise ValueError( '%s must be a string or unicode: %s' %( key, str( type( val ) ) ) )
val = util.sanitize_html.sanitize_html( val, 'utf-8' )
- elif key not in valid_but_uneditable_keys:
- raise AttributeError( 'unknown key: %s' %( str( key ) ) )
+ #elif key not in valid_but_uneditable_keys:
+ # raise AttributeError( 'unknown key: %s' %( str( key ) ) )
validated_payload[ key ] = val
return validated_payload
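The net effect on the create path: 'type' is now required, validated as a string, and popped out of the payload so it can be passed positionally to create_visualization. A sketch of a conforming request; the URL, key, and every field other than 'type' and 'config' are placeholders rather than a documented contract:

    import json, urllib2

    payload = dict( type='scatterplot',                    # required by the new validation
                    title='Coverage vs. position',         # hypothetical extra field
                    config=dict( xColumn=0, yColumn=1 ) )
    request = urllib2.Request( 'http://localhost:8080/api/visualizations?key=MY_API_KEY',
                               data=json.dumps( payload ),
                               headers={ 'Content-Type': 'application/json' } )
    print json.load( urllib2.urlopen( request ) )          # e.g. { 'id': <encoded id> }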
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/galaxy/webapps/galaxy/api/workflows.py
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -73,7 +73,10 @@
inputs = {}
for step in latest_workflow.steps:
if step.type == 'data_input':
- inputs[step.id] = {'label':step.tool_inputs['name'], 'value':""}
+ if step.tool_inputs and "name" in step.tool_inputs:
+ inputs[step.id] = {'label':step.tool_inputs['name'], 'value':""}
+ else:
+ inputs[step.id] = {'label':"Input Dataset", 'value':""}
else:
pass
# Eventually, allow regular tool parameters to be inserted and modified at runtime.
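The guard above keeps workflow input listing from raising when a data_input step was saved without tool_inputs. The same fallback in a compact form, shown on a toy step object:

    class Step( object ):
        def __init__( self, id, tool_inputs ):
            self.id, self.tool_inputs = id, tool_inputs

    inputs = {}
    for step in ( Step( 1, dict( name='Reads' ) ), Step( 2, None ) ):
        label = ( step.tool_inputs or {} ).get( 'name', 'Input Dataset' )
        inputs[ step.id ] = dict( label=label, value='' )
    print inputs    # step 2 falls back to 'Input Dataset'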
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
--- a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
+++ b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
@@ -436,7 +436,9 @@
tool_dependencies=tool_dependencies )
for installed_tool_dependency in installed_tool_dependencies:
if installed_tool_dependency.status == trans.app.model.ToolDependency.installation_status.ERROR:
- message += ' %s' % str( installed_tool_dependency.error_message )
+ text = util.unicodify( installed_tool_dependency.error_message )
+ if text is not None:
+ message += ' %s' % text
tool_dependency_ids = [ trans.security.encode_id( td.id ) for td in tool_dependencies ]
if message:
status = 'error'
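Tool dependency error messages can arrive as non-ASCII byte strings, and mixing one into a unicode message raises UnicodeDecodeError; decoding first avoids that. A minimal Python 2 sketch of a safe-decode helper in the spirit of the util.unicodify used above (the real implementation lives in galaxy.util and differs in detail):

    def unicodify( value, encoding='utf-8', error='replace' ):
        if value is None:
            return None
        if not isinstance( value, unicode ):
            value = unicode( str( value ), encoding, error )
        return value

    message = u'Installation failed: '
    print message + unicodify( 'caf\xc3\xa9' )   # safe; message + 'caf\xc3\xa9' would raise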
@@ -681,8 +683,8 @@
action='uninstall_tool_dependencies',
**kwd ) )
else:
- kwd[ 'message' ] = 'All selected tool dependencies are already uninstalled.'
- kwd[ 'status' ] = 'error'
+ message = 'No selected tool dependencies can be uninstalled; you may need to use the <b>Repair repository</b> feature.'
+ status = 'error'
elif operation == "install":
if trans.app.config.tool_dependency_dir:
tool_dependencies_for_installation = []
@@ -694,13 +696,12 @@
if tool_dependencies_for_installation:
self.initiate_tool_dependency_installation( trans, tool_dependencies_for_installation )
else:
- kwd[ 'message' ] = 'All selected tool dependencies are already installed.'
- kwd[ 'status' ] = 'error'
+ message = 'All selected tool dependencies are already installed.'
+ status = 'error'
else:
message = 'Set the value of your <b>tool_dependency_dir</b> setting in your Galaxy config file (universe_wsgi.ini) '
message += ' and restart your Galaxy server to install tool dependencies.'
- kwd[ 'message' ] = message
- kwd[ 'status' ] = 'error'
+ status = 'error'
installed_tool_dependencies_select_field = suc.build_tool_dependencies_select_field( trans,
tool_shed_repository=tool_shed_repository,
name='inst_td_ids',
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/galaxy/webapps/galaxy/controllers/workflow.py
--- a/lib/galaxy/webapps/galaxy/controllers/workflow.py
+++ b/lib/galaxy/webapps/galaxy/controllers/workflow.py
@@ -1223,6 +1223,7 @@
for hid in dataset_ids:
step = model.WorkflowStep()
step.type = 'data_input'
+ step.tool_inputs = dict( name="Input Dataset" )
hid_to_output_pair[ hid ] = ( step, 'output' )
steps.append( step )
# Tool steps
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/tool_shed/galaxy_install/repository_util.py
--- a/lib/tool_shed/galaxy_install/repository_util.py
+++ b/lib/tool_shed/galaxy_install/repository_util.py
@@ -795,12 +795,11 @@
for tool_dependency in repository.missing_tool_dependencies:
if tool_dependency.status in [ trans.model.ToolDependency.installation_status.ERROR,
trans.model.ToolDependency.installation_status.INSTALLING ]:
- tool_dependency_util.set_tool_dependency_attributes( trans,
- tool_dependency,
- trans.model.ToolDependency.installation_status.UNINSTALLED,
- None,
- remove_from_disk=True )
- trans.sa_session.refresh( tool_dependency )
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( trans.app,
+ tool_dependency=tool_dependency,
+ status=trans.model.ToolDependency.installation_status.UNINSTALLED,
+ error_message=None,
+ remove_from_disk=True )
# Install tool dependencies.
suc.update_tool_shed_repository_status( trans.app,
repository,
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -28,6 +28,73 @@
INSTALLATION_LOG = 'INSTALLATION.log'
VIRTUALENV_URL = 'https://pypi.python.org/packages/source/v/virtualenv/virtualenv-1.9.1.tar.gz'
+
+class EnvFileBuilder( object ):
+
+ def __init__( self, install_dir ):
+ self.install_dir = install_dir
+ self.return_code = 0
+
+ def append_line( self, skip_if_contained=True, make_executable=True, **kwd ):
+ env_var_dict = dict( **kwd )
+ env_entry, env_file = td_common_util.create_or_update_env_shell_file( self.install_dir, env_var_dict )
+ return_code = file_append( env_entry, env_file, skip_if_contained=skip_if_contained, make_executable=make_executable )
+ self.return_code = self.return_code or return_code
+ return self.return_code
+
+
+class InstallEnvironment( object ):
+ """Object describing the environment built up as part of the process of building and installing a package."""
+
+ def add_env_shell_file_paths( self, paths ):
+ for path in paths:
+ self.env_shell_file_paths.append( str( path ) )
+
+ def build_command( self, command, action_type='shell_command' ):
+ """
+ Build a command line for execution from a simple command,
+ configuring the environment described by this object.
+ """
+ env_cmds = self.environment_commands( action_type )
+ return '\n'.join( env_cmds + [ command ] )
+
+ def __call__( self, install_dir ):
+ with settings( warn_only=True, **td_common_util.get_env_var_values( install_dir ) ):
+ with prefix( self.__setup_environment() ):
+ yield
+
+ def environment_commands( self, action_type ):
+ """Build a list of commands used to construct the environment described by this object."""
+ cmds = []
+ for env_shell_file_path in self.env_shell_file_paths:
+ if os.path.exists( env_shell_file_path ):
+ for env_setting in open( env_shell_file_path ):
+ cmds.append( env_setting.strip( '\n' ) )
+ else:
+ log.debug( 'Invalid file %s specified, ignoring %s action.' % ( str( env_shell_file_path ), str( action_type ) ) )
+ return cmds
+
+ def environment_dict( self, action_type='template_command' ):
+ env_vars = dict()
+ for env_shell_file_path in self.env_shell_file_paths:
+ if os.path.exists( env_shell_file_path ):
+ for env_setting in open( env_shell_file_path ):
+ env_string = env_setting.split( ';' )[ 0 ]
+ env_name, env_path = env_string.split( '=' )
+ env_vars[ env_name ] = env_path
+ else:
+ log.debug( 'Invalid file %s specified, ignoring template_command action.' % str( env_shell_file_path ) )
+ return env_vars
+
+ def __init__( self ):
+ self.env_shell_file_paths = []
+
+ def __setup_environment( self ):
+ return "&&".join( [ ". %s" % file for file in self.__valid_env_shell_file_paths() ] )
+
+ def __valid_env_shell_file_paths( self ):
+ return [ file for file in self.env_shell_file_paths if os.path.exists( file ) ]
+
def check_fabric_version():
version = env.version
if int( version.split( "." )[ 0 ] ) < 1:
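Taken together: EnvFileBuilder appends settings to a package's env.sh and folds each append's return code into a sticky return_code, while InstallEnvironment gathers env.sh paths and prefixes commands with their contents. A usage sketch against the classes above; the install and env.sh paths are placeholders:

    from tool_shed.galaxy_install.tool_dependencies.fabric_util import EnvFileBuilder, InstallEnvironment

    env_file_builder = EnvFileBuilder( '/deps/samtools/0.1.19' )    # placeholder install dir
    env_file_builder.append_line( name='PATH', action='prepend_to', value='/deps/samtools/0.1.19/bin' )
    if env_file_builder.return_code:          # stays non-zero once any append fails
        raise Exception( 'failed to update env.sh' )

    install_environment = InstallEnvironment()
    install_environment.add_env_shell_file_paths( [ '/deps/R/3.0.1/env.sh' ] )    # placeholder path
    cmd = install_environment.build_command( 'Rscript -e "sessionInfo()"' )
    # cmd is the collected env.sh lines joined, newline-separated, ahead of the command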
@@ -76,6 +143,11 @@
filtered_actions.append( action )
return filtered_actions
+def handle_action_shell_file_paths( env_file_builder, action_dict ):
+ shell_file_paths = action_dict.get( 'action_shell_file_paths', [] )
+ for shell_file_path in shell_file_paths:
+ env_file_builder.append_line( action="source", value=shell_file_path )
+
def handle_command( app, tool_dependency, install_dir, cmd, return_output=False ):
sa_session = app.model.context.current
with settings( warn_only=True ):
@@ -200,79 +272,6 @@
shutil.move( full_path_to_dir, venv_dir )
return True
-
-class EnvFileBuilder( object ):
-
- def __init__( self, install_dir ):
- self.install_dir = install_dir
- self.return_code = 0
-
- def append_line( self, skip_if_contained=True, make_executable=True, **kwds ):
- env_var_dict = dict(**kwds)
- env_entry, env_file = td_common_util.create_or_update_env_shell_file( self.install_dir, env_var_dict )
- return_code = file_append( env_entry, env_file, skip_if_contained=skip_if_contained, make_executable=make_executable )
- self.return_code = self.return_code or return_code
- return self.return_code
-
-
-class InstallEnvironment( object ):
- """
- Object describing the environment built up as part of the process of building
- and installing a package.
- """
-
- def __init__( self ):
- self.env_shell_file_paths = []
-
- def build_command( self, command, action_type='shell_command' ):
- """
- Build command line for execution from simple command, but
- configuring environment described by this object.
- """
- env_cmds = self.environment_commands(action_type)
- return '\n'.join(env_cmds + [command])
-
- def environment_commands(self, action_type):
- """
- Build a list of commands used to construct the environment described by
- this object.
- """
- cmds = []
- for env_shell_file_path in self.env_shell_file_paths:
- if os.path.exists( env_shell_file_path ):
- for env_setting in open( env_shell_file_path ):
- cmds.append( env_setting.strip( '\n' ) )
- else:
- log.debug( 'Invalid file %s specified, ignoring %s action.', env_shell_file_path, action_type )
- return cmds
-
- def __call__( self, install_dir ):
- with settings( warn_only=True, **td_common_util.get_env_var_values( install_dir ) ):
- with prefix( self.__setup_environment() ):
- yield
-
- def __setup_environment(self):
- return "&&".join( [". %s" % file for file in self.__valid_env_shell_file_paths() ] )
-
- def __valid_env_shell_file_paths(self):
- return [ file for file in self.env_shell_file_paths if os.path.exists( file ) ]
-
- def environment_dict(self, action_type='template_command'):
- env_vars = dict()
- for env_shell_file_path in self.env_shell_file_paths:
- if os.path.exists( env_shell_file_path ):
- for env_setting in open( env_shell_file_path ):
- env_string = env_setting.split( ';' )[ 0 ]
- env_name, env_path = env_string.split( '=' )
- env_vars[ env_name ] = env_path
- else:
- log.debug( 'Invalid file %s specified, ignoring template_command action.', env_shell_file_path )
- return env_vars
-
- def add_env_shell_file_paths(self, paths):
- self.env_shell_file_paths.extend(paths)
-
-
def install_and_build_package( app, tool_dependency, actions_dict ):
"""Install a Galaxy tool dependency package either via a url or a mercurial or git clone command."""
sa_session = app.model.context.current
@@ -335,7 +334,6 @@
dir = td_common_util.url_download( work_dir, downloaded_filename, url, extract=True )
if is_binary:
log_file = os.path.join( install_dir, INSTALLATION_LOG )
- log.debug( 'log_file: %s' % log_file )
if os.path.exists( log_file ):
logfile = open( log_file, 'ab' )
else:
@@ -349,7 +347,7 @@
filtered_actions = actions[ 1: ]
return_code = handle_command( app, tool_dependency, install_dir, action_dict[ 'command' ] )
if return_code:
- return
+ return tool_dependency
dir = package_name
elif action_type == 'download_file':
# <action type="download_file">http://effectors.org/download/version/TTSS_GUI-1.0.1.jar</action>
@@ -374,13 +372,15 @@
# <package>https://github.com/bgruening/download_store/raw/master/DESeq2-1_0_18/BiocGe…</package>
# </action>
filtered_actions = actions[ 1: ]
-
- if action_dict.get( 'env_shell_file_paths', False ):
- install_environment.add_env_shell_file_paths( action_dict[ 'env_shell_file_paths' ] )
+ env_shell_file_paths = action_dict.get( 'env_shell_file_paths', None )
+ if env_shell_file_paths is None:
+ log.debug( 'Missing R environment. Please check that your specified R installation exists.' )
+ return tool_dependency
else:
- log.warning( 'Missing R environment. Please check your specified R installation exists.' )
- return
- tarball_names = list()
+ install_environment.add_env_shell_file_paths( env_shell_file_paths )
+ log.debug( 'Handling setup_r_environment for tool dependency %s with install_environment.env_shell_file_paths:\n%s' % \
+ ( str( tool_dependency.name ), str( install_environment.env_shell_file_paths ) ) )
+ tarball_names = []
for url in action_dict[ 'r_packages' ]:
filename = url.split( '/' )[ -1 ]
tarball_names.append( filename )
@@ -391,20 +391,18 @@
with settings( warn_only=True ):
for tarball_name in tarball_names:
cmd = '''export PATH=$PATH:$R_HOME/bin && export R_LIBS=$INSTALL_DIR &&
- Rscript -e "install.packages(c('%s'),lib='$INSTALL_DIR', repos=NULL, dependencies=FALSE)"''' % (tarball_name)
-
+ Rscript -e "install.packages(c('%s'),lib='$INSTALL_DIR', repos=NULL, dependencies=FALSE)"''' % ( str( tarball_name ) )
cmd = install_environment.build_command( td_common_util.evaluate_template( cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
if return_code:
- return
-
+ return tool_dependency
# R libraries are installed to $INSTALL_DIR (install_dir), we now set the R_LIBS path to that directory
env_file_builder = EnvFileBuilder( install_dir )
handle_action_shell_file_paths( env_file_builder, action_dict ) # Pull in R environment (runtime).
env_file_builder.append_line( name="R_LIBS", action="prepend_to", value=install_dir )
return_code = env_file_builder.return_code
if return_code:
- return
+ return tool_dependency
elif action_type == 'setup_ruby_environment':
# setup an Ruby environment
# <action type="setup_ruby_environment">
@@ -417,24 +415,27 @@
# <package>http://url-to-some-gem-file.de/protk.gem</package>
# </action>
filtered_actions = actions[ 1: ]
-
- if action_dict.get( 'env_shell_file_paths', False ):
- install_environment.add_env_shell_file_paths( action_dict[ 'env_shell_file_paths' ] )
+ env_shell_file_paths = action_dict.get( 'env_shell_file_paths', None )
+ if env_shell_file_paths is None:
+ log.debug( 'Missing Ruby environment; make sure that your specified Ruby installation exists.' )
+ return tool_dependency
else:
- log.warning( 'Missing Ruby environment. Please check if your specified Ruby installation exists.' )
- return
-
+ install_environment.add_env_shell_file_paths( env_shell_file_paths )
+ log.debug( 'Handling setup_ruby_environment for tool dependency %s with install_environment.env_shell_file_paths:\n%s' % \
+ ( str( tool_dependency.name ), str( install_environment.env_shell_file_paths ) ) )
dir = os.path.curdir
current_dir = os.path.abspath( os.path.join( work_dir, dir ) )
with lcd( current_dir ):
with settings( warn_only=True ):
- for (gem, gem_version) in action_dict[ 'ruby_packages' ]:
+ ruby_package_tups = action_dict.get( 'ruby_package_tups', [] )
+ for ruby_package_tup in ruby_package_tups:
+ gem, gem_version = ruby_package_tup
if os.path.isfile( gem ):
# we assume a local shipped gem file
cmd = '''export PATH=$PATH:$RUBY_HOME/bin && export GEM_HOME=$INSTALL_DIR &&
gem install --local %s''' % ( gem )
- elif gem.find('://') != -1:
- # we assume a URL to a gem file
+ elif gem.find( '://' ) != -1:
+ # We assume a URL to a gem file.
url = gem
gem_name = url.split( '/' )[ -1 ]
td_common_util.url_download( work_dir, gem_name, url, extract=False )
@@ -453,15 +454,15 @@
cmd = install_environment.build_command( td_common_util.evaluate_template( cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
if return_code:
- return
-
+ return tool_dependency
env_file_builder = EnvFileBuilder( install_dir )
- handle_action_shell_file_paths( env_file_builder, action_dict ) # Pull in ruby dependencies (runtime).
+ # Pull in ruby dependencies (runtime).
+ handle_action_shell_file_paths( env_file_builder, action_dict )
env_file_builder.append_line( name="GEM_PATH", action="prepend_to", value=install_dir )
env_file_builder.append_line( name="PATH", action="prepend_to", value=os.path.join(install_dir, 'bin') )
return_code = env_file_builder.return_code
if return_code:
- return
+ return tool_dependency
elif action_type == 'setup_perl_environment':
# setup a Perl environment
# <action type="setup_perl_environment">
@@ -473,65 +474,59 @@
# <package>http://search.cpan.org/CPAN/authors/id/C/CJ/CJFIELDS/BioPerl-1.6.922.tar.gz</package>
# </action>
filtered_actions = actions[ 1: ]
-
- if action_dict.get( 'env_shell_file_paths', False ):
- install_environment.add_env_shell_file_paths( action_dict[ 'env_shell_file_paths' ] )
+ env_shell_file_paths = action_dict.get( 'env_shell_file_paths', None )
+ if env_shell_file_paths is None:
+ log.debug( 'Missing Perl environment; make sure that your specified Perl installation exists.' )
+ return tool_dependency
else:
- log.warning( 'Missing Rerl environment. Please check if your specified Rerl installation exists.' )
- return
-
+ install_environment.add_env_shell_file_paths( env_shell_file_paths )
+ log.debug( 'Handling setup_perl_environment for tool dependency %s with install_environment.env_shell_file_paths:\n%s' % \
+ ( str( tool_dependency.name ), str( install_environment.env_shell_file_paths ) ) )
dir = os.path.curdir
current_dir = os.path.abspath( os.path.join( work_dir, dir ) )
with lcd( current_dir ):
with settings( warn_only=True ):
-
- for package in action_dict[ 'perl_packages' ]:
- """
- If set to a true value then MakeMaker's prompt function will always return the default without waiting for user input.
- """
+ perl_packages = action_dict.get( 'perl_packages', [] )
+ for perl_package in perl_packages:
+ # If set to a true value then MakeMaker's prompt function will always
+ # return the default without waiting for user input.
cmd = '''export PERL_MM_USE_DEFAULT=1 && '''
-
- if package.find('://') != -1:
- # we assume a URL to a gem file
- url = package
- package_name = url.split( '/' )[ -1 ]
- dir = td_common_util.url_download( work_dir, package_name, url, extract=True )
- # search for Build.PL or Makefile.PL (ExtUtils::MakeMaker vs. Module::Build)
-
- tmp_work_dir = os.path.join( work_dir, dir)
+ if perl_package.find( '://' ) != -1:
+ # We assume a URL to a Perl package file.
+ url = perl_package
+ perl_package_name = url.split( '/' )[ -1 ]
+ dir = td_common_util.url_download( work_dir, perl_package_name, url, extract=True )
+ # Search for Build.PL or Makefile.PL (ExtUtils::MakeMaker vs. Module::Build).
+ tmp_work_dir = os.path.join( work_dir, dir )
if os.path.exists( os.path.join( tmp_work_dir, 'Makefile.PL' ) ):
-
cmd += '''perl Makefile.PL INSTALL_BASE=$INSTALL_DIR && make && make install'''
elif os.path.exists( os.path.join( tmp_work_dir, 'Build.PL' ) ):
cmd += '''perl Build.PL --install_base $INSTALL_DIR && perl Build && perl Build install'''
else:
- log.warning( 'No Makefile.PL or Build.PL file found in %s. Skip installation of %s.' % ( url, package_name ) )
- return
+ log.debug( 'No Makefile.PL or Build.PL file found in %s. Skipping installation of %s.' % ( url, perl_package_name ) )
+ return tool_dependency
with lcd( tmp_work_dir ):
cmd = install_environment.build_command( td_common_util.evaluate_template( cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
if return_code:
- return
+ return tool_dependency
else:
- # perl package from CPAN without version number
- # cpanm should be installed with the parent perl distribution, otherwise this will not work
- cmd += '''cpanm --local-lib=$INSTALL_DIR %s''' % ( package )
-
+ # perl package from CPAN without version number.
+ # cpanm should be installed with the parent perl distribution, otherwise this will not work.
+ cmd += '''cpanm --local-lib=$INSTALL_DIR %s''' % ( perl_package )
cmd = install_environment.build_command( td_common_util.evaluate_template( cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
if return_code:
- return
-
+ return tool_dependency
env_file_builder = EnvFileBuilder( install_dir )
+ # Pull in perl dependencies (runtime).
+ handle_action_shell_file_paths( env_file_builder, action_dict )
# Recursively add dependent PERL5LIB and PATH to env.sh & anything else needed.
- handle_action_shell_file_paths( env_file_builder, action_dict ) # Pull in ruby dependencies (runtime).
-
- env_file_builder.append_line( name="PERL5LIB", action="prepend_to", value=os.path.join(install_dir, 'lib', 'perl5') )
- env_file_builder.append_line( name="PATH", action="prepend_to", value=os.path.join(install_dir, 'bin') )
+ env_file_builder.append_line( name="PERL5LIB", action="prepend_to", value=os.path.join( install_dir, 'lib', 'perl5' ) )
+ env_file_builder.append_line( name="PATH", action="prepend_to", value=os.path.join( install_dir, 'bin' ) )
return_code = env_file_builder.return_code
if return_code:
- return
-
+ return tool_dependency
else:
# We're handling a complex repository dependency where we only have a set_environment tag set.
# <action type="set_environment">
@@ -577,7 +572,7 @@
env_file_builder.append_line( **env_var_dict )
return_code = env_file_builder.return_code
if return_code:
- return
+ return tool_dependency
elif action_type == 'set_environment_for_install':
# Currently the only action supported in this category is a list of paths to one or more tool dependency env.sh files,
# the environment setting in each of which will be injected into the environment for all <action type="shell_command">
@@ -588,7 +583,7 @@
venv_src_directory = os.path.abspath( os.path.join( app.config.tool_dependency_dir, '__virtualenv_src' ) )
if not install_virtualenv( app, venv_src_directory ):
log.error( 'Unable to install virtualenv' )
- return
+ return tool_dependency
requirements = action_dict[ 'requirements' ]
if os.path.exists( os.path.join( dir, requirements ) ):
# requirements specified as path to a file
@@ -608,26 +603,26 @@
full_setup_command = "%s; %s; %s" % ( setup_command, activate_command, install_command )
return_code = handle_command( app, tool_dependency, install_dir, full_setup_command )
if return_code:
- return
+ return tool_dependency
site_packages_command = "%s -c 'import os, sys; print os.path.join(sys.prefix, \"lib\", \"python\" + sys.version[:3], \"site-packages\")'" % os.path.join( venv_directory, "bin", "python" )
output = handle_command( app, tool_dependency, install_dir, site_packages_command, return_output=True )
if output.return_code:
- return
+ return tool_dependency
if not os.path.exists( output.stdout ):
- log.error( "virtualenv's site-packages directory '%s' does not exist", output.stdout )
- return
+ log.debug( "virtualenv's site-packages directory '%s' does not exist", output.stdout )
+ return tool_dependency
env_file_builder = EnvFileBuilder( install_dir )
env_file_builder.append_line( name="PYTHONPATH", action="prepend_to", value=output.stdout )
env_file_builder.append_line( name="PATH", action="prepend_to", value=os.path.join( venv_directory, "bin" ) )
return_code = env_file_builder.return_code
if return_code:
- return
+ return tool_dependency
elif action_type == 'shell_command':
with settings( warn_only=True ):
cmd = install_environment.build_command( action_dict[ 'command' ] )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
if return_code:
- return
+ return tool_dependency
elif action_type == 'template_command':
env_vars = dict()
env_vars = install_environment.environment_dict()
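The setup_virtualenv branch above locates the new virtualenv's site-packages directory by running a one-liner under the venv's own python and capturing stdout. Run standalone, the probe is just:

    # Prints the site-packages path of whichever Python 2 interpreter runs it.
    import os, sys
    print os.path.join( sys.prefix, 'lib', 'python' + sys.version[ :3 ], 'site-packages' )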
@@ -639,7 +634,7 @@
cmd = fill_template( '#from fabric.api import env\n%s' % action_dict[ 'command' ], context=env_vars )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
if return_code:
- return
+ return tool_dependency
elif action_type == 'make_install':
# make; make install; allow providing make options
with settings( warn_only=True ):
@@ -647,7 +642,7 @@
cmd = install_environment.build_command( 'make %s && make install' % make_opts )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
if return_code:
- return
+ return tool_dependency
elif action_type == 'autoconf':
# Handle configure, make and make install allow providing configuration options
with settings( warn_only=True ):
@@ -659,7 +654,7 @@
cmd = install_environment.build_command( td_common_util.evaluate_template( pre_cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
if return_code:
- return
+ return tool_dependency
elif action_type == 'download_file':
# Download a single file to the current working directory.
url = action_dict[ 'url' ]
@@ -705,13 +700,7 @@
td_common_util.move_file( current_dir=work_dir,
source=downloaded_filename,
destination=full_path_to_dir )
-
-
-def handle_action_shell_file_paths( env_file_builder, action_dict ):
- shell_file_paths = action_dict.get( 'action_shell_file_paths', [])
- for shell_file_path in shell_file_paths:
- env_file_builder.append_line( action="source", value=shell_file_path )
-
+ return tool_dependency
def log_results( command, fabric_AttributeString, file_path ):
"""
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/install_util.py
@@ -92,7 +92,7 @@
return text
def handle_complex_repository_dependency_for_package( app, elem, package_name, package_version, tool_shed_repository ):
- tool_dependency = None
+ handled_tool_dependencies = []
tool_shed = elem.attrib[ 'toolshed' ]
required_repository_name = elem.attrib[ 'name' ]
required_repository_owner = elem.attrib[ 'owner' ]
@@ -156,26 +156,28 @@
required_repository_owner,
required_repository_changeset_revision )
config_to_use = tmp_filename
- tool_dependency, actions_dict = populate_actions_dict( app=app,
- dependent_install_dir=dependent_install_dir,
- required_install_dir=required_repository_package_install_dir,
- tool_shed_repository=tool_shed_repository,
- required_repository=required_repository,
- package_name=package_name,
- package_version=package_version,
- tool_dependencies_config=config_to_use )
+ tool_dependencies, actions_dict = populate_actions_dict( app=app,
+ dependent_install_dir=dependent_install_dir,
+ required_install_dir=required_repository_package_install_dir,
+ tool_shed_repository=tool_shed_repository,
+ required_repository=required_repository,
+ package_name=package_name,
+ package_version=package_version,
+ tool_dependencies_config=config_to_use )
if tmp_filename:
try:
os.remove( tmp_filename )
except:
pass
- # Install and build the package via fabric.
- install_and_build_package_via_fabric( app, tool_dependency, actions_dict )
+ for tool_dependency in tool_dependencies:
+ # Install and build the package via fabric and update the tool_dependency record accordingly.
+ tool_dependency = install_and_build_package_via_fabric( app, tool_dependency, actions_dict )
+ handled_tool_dependencies.append( tool_dependency )
else:
message = "Unable to locate required tool shed repository named %s owned by %s with revision %s." % \
( str( required_repository_name ), str( required_repository_owner ), str( default_required_repository_changeset_revision ) )
raise Exception( message )
- return tool_dependency
+ return handled_tool_dependencies
def handle_set_environment_entry_for_package( app, install_dir, tool_shed_repository, package_name, package_version, elem, required_repository ):
"""
@@ -184,19 +186,24 @@
"""
action_dict = {}
actions = []
+ tool_dependencies = []
for package_elem in elem:
if package_elem.tag == 'install':
- # Create the tool_dependency record in the database.
+ # Create the new tool_dependency record in the database.
tool_dependency = tool_dependency_util.create_or_update_tool_dependency( app=app,
tool_shed_repository=tool_shed_repository,
name=package_name,
version=package_version,
type='package',
- status=app.model.ToolDependency.installation_status.INSTALLING,
+ status=app.model.ToolDependency.installation_status.NEVER_INSTALLED,
set_status=True )
# Get the installation method version from a tag like: <install version="1.0">
package_install_version = package_elem.get( 'version', '1.0' )
if package_install_version == '1.0':
+ # Update the tool dependency's status.
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.INSTALLING )
# Since the required tool dependency is installed for a repository dependency, we first need to inspect the <actions> tag set to find
# the <action type="set_environment"> tag.
env_var_dicts = []
@@ -219,9 +226,10 @@
site_packages_command = "%s -c 'import os, sys; print os.path.join(sys.prefix, \"lib\", \"python\" + sys.version[:3], \"site-packages\")'" % os.path.join( install_dir, "venv", "bin", "python" )
output = fabric_util.handle_command( app, tool_dependency, install_dir, site_packages_command, return_output=True )
if output.return_code:
- log.error( 'Dependency includes a setup_virtualenv action but venv python is broken:', output.stderr )
+ log.error( 'Tool dependency %s includes a setup_virtualenv action but venv python is broken: %s' % \
+ ( str( tool_dependency.name ), str( output.stderr ) ) )
elif not os.path.exists( output.stdout ):
- log.error( "virtualenv's site-packages directory '%s' does not exist", output.stdout )
+ log.error( "virtualenv's site-packages directory '%s' does not exist", str( output.stdout ) )
else:
env_var_dicts.append( dict( name="PYTHONPATH", action="prepend_to", value=output.stdout ) )
env_var_dicts.append( dict( name="PATH", action="prepend_to", value=os.path.join( install_dir, 'venv', 'bin' ) ) )
@@ -234,8 +242,8 @@
# we will replace the current "value" entries in each env_var_dict with the actual path taken from the env.sh
# file generated for the installed required repository. Each env_var_dict currently looks something like this:
# {'action': 'append_to', 'name': 'LD_LIBRARY_PATH', 'value': '$BOOST_ROOT_DIR/lib/'}
- # We'll read the contents of the received required_repository's env.sh file and replace the 'value' entry of each env_var_dict
- # with the associated value in the env.sh file.
+ # We'll read the contents of the received required_repository's env.sh file and replace the 'value' entry of
+ # each env_var_dict with the associated value in the env.sh file.
new_env_var_dicts = []
env_sh_file_dir = tool_dependency_util.get_tool_dependency_install_dir( app=app,
repository_name=required_repository.name,
@@ -265,16 +273,25 @@
else:
action_dict[ 'environment_variable' ] = env_var_dicts
actions.append( ( 'set_environment', action_dict ) )
+ if tool_dependency.status not in [ app.model.ToolDependency.installation_status.ERROR,
+ app.model.ToolDependency.installation_status.INSTALLED ]:
+ # Update the tool dependency's status.
+ tool_dependency = \
+ tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.INSTALLED )
+ # Accumulate processed tool dependencies to return to the caller.
+ tool_dependencies.append( tool_dependency )
else:
raise NotImplementedError( 'Only install version 1.0 is currently supported (i.e., change your tag to be <install version="1.0">).' )
- return tool_dependency, actions
- return None, actions
+ return tool_dependencies, actions
+ return tool_dependencies, actions
def install_and_build_package_via_fabric( app, tool_dependency, actions_dict ):
sa_session = app.model.context.current
try:
# There is currently only one fabric method.
- fabric_util.install_and_build_package( app, tool_dependency, actions_dict )
+ tool_dependency = fabric_util.install_and_build_package( app, tool_dependency, actions_dict )
except Exception, e:
log.exception( 'Error installing tool dependency %s version %s.', str( tool_dependency.name ), str( tool_dependency.version ) )
# Since there was an installation error, update the tool dependency status to Error. The remove_installation_path option must
@@ -284,10 +301,16 @@
tool_dependency,
error_message,
remove_installation_path=False )
- if tool_dependency.status != app.model.ToolDependency.installation_status.ERROR:
- tool_dependency.status = app.model.ToolDependency.installation_status.INSTALLED
- sa_session.add( tool_dependency )
- sa_session.flush()
+ if tool_dependency.status not in [ app.model.ToolDependency.installation_status.ERROR,
+ app.model.ToolDependency.installation_status.INSTALLED ]:
+ log.debug( 'Changing status for tool dependency %s from %s to %s.' % \
+ ( str( tool_dependency.name ), str( tool_dependency.status ), str( app.model.ToolDependency.installation_status.INSTALLED ) ) )
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.INSTALLED,
+ error_message=None,
+ remove_from_disk=False )
+ return tool_dependency
def install_package( app, elem, tool_shed_repository, tool_dependencies=None ):
# The value of tool_dependencies is a partial or full list of ToolDependency records associated with the tool_shed_repository.
@@ -300,9 +323,15 @@
for package_elem in elem:
if package_elem.tag == 'repository':
# We have a complex repository dependency definition.
- rd_tool_dependency = handle_complex_repository_dependency_for_package( app, package_elem, package_name, package_version, tool_shed_repository )
- if rd_tool_dependency and rd_tool_dependency.status == app.model.ToolDependency.installation_status.ERROR:
- print "Error installing tool dependency for required repository: %s" % str( rd_tool_dependency.error_message )
+ rd_tool_dependencies = handle_complex_repository_dependency_for_package( app,
+ package_elem,
+ package_name,
+ package_version,
+ tool_shed_repository )
+ for rd_tool_dependency in rd_tool_dependencies:
+ if rd_tool_dependency.status == app.model.ToolDependency.installation_status.ERROR:
+ # We'll log the error here, but continue installing packages since some may not require this dependency.
+ print "Error installing tool dependency for required repository: %s" % str( rd_tool_dependency.error_message )
elif package_elem.tag == 'install':
# <install version="1.0">
# Get the installation directory for tool dependencies that will be installed for the received tool_shed_repository.
@@ -324,24 +353,27 @@
package_version,
'package' )
if tool_dependency.status == app.model.ToolDependency.installation_status.INSTALLING:
- # The tool dependency is in an Installing state, so we don't want to do anything to it.
+ # The tool dependency is in an Installing state, so we don't want to do anything to it. If the tool
+ # dependency is being installed by someone else, we don't want to interfere with that. This assumes
+ # the installation by "someone else" is not hung in an Installing state, which is a weakness if that
+ # "someone else" never repaired it.
log.debug( 'Skipping installation of tool dependency %s version %s because it has a status of %s' % \
- ( tool_dependency.name, tool_dependency.version, tool_dependency.status ) )
+ ( str( tool_dependency.name ), str( tool_dependency.version ), str( tool_dependency.status ) ) )
can_install_tool_dependency = False
else:
- # If the tool dependency is being installed by someone else, we don't want to interfere with that.
- # This assumes the installation by "someone else" is not hung in an Installing state, which is a
- # weakness if "someone else" never repaired it.
tool_dependency_installation_directory_contents = os.listdir( install_dir )
if fabric_util.INSTALLATION_LOG in tool_dependency_installation_directory_contents:
- print '\nSkipping installation of tool dependency', package_name, 'version', package_version, \
- 'since it is installed in', install_dir, '\n'
+ # Since this tool dependency's installation directory contains an installation log, we consider it to be
+ # installed. In some cases the record may be missing from the database due to some activity outside of
+ # the control of the Tool Shed. Since a new record was created for it and we don't know the state of the
+ # files on disk, we will set it to an error state (unless we are running Tool Shed functional tests - see
+ # below).
+ log.debug( 'Skipping installation of tool dependency %s version %s because it is installed in %s' % \
+ ( str( tool_dependency.name ), str( tool_dependency.version ), str( install_dir ) ) )
can_install_tool_dependency = False
- # This tool dependency was previously installed, but the record was missing from the database due to some
- # activity outside of the control of the tool shed. Since a new record was created for it and we don't know
- # the state of the files on disk, we will set it to an error state. If we are running functional tests, the
- # state will be set to Installed, because previously compiled tool dependencies are not deleted by default.
if app.config.running_functional_tests:
+ # If we are running functional tests, the state will be set to Installed because previously compiled
+ # tool dependencies are not deleted by default by the "install and test" framework.
tool_dependency.status = app.model.ToolDependency.installation_status.INSTALLED
else:
error_message = 'The installation directory for this tool dependency had contents, but the database had no record. '
@@ -351,9 +383,9 @@
tool_dependency.error_message = error_message
else:
error_message = '\nInstallation path %s for tool dependency %s version %s exists, but the expected file %s' % \
- ( install_dir, package_name, package_version, fabric_util.INSTALLATION_LOG )
- error_message += ' is missing. This indicates an installation error, so the tool dependency is being'
- error_message += ' prepared for reinstallation.'
+ ( str( install_dir ), str( package_name ), str( package_version ), str( fabric_util.INSTALLATION_LOG ) )
+ error_message += ' is missing. This indicates an installation error, so the tool dependency is being'
+ error_message += ' prepared for re-installation.'
print error_message
tool_dependency.status = app.model.ToolDependency.installation_status.NEVER_INSTALLED
try:
@@ -373,7 +405,7 @@
version=package_version,
type='package',
status=app.model.ToolDependency.installation_status.INSTALLING,
- set_status=False )
+ set_status=True )
# Get the information about the current platform in case the tool dependency definition includes tag sets for installing
# compiled binaries.
platform_info_dict = tool_dependency_util.get_platform_info_dict()
@@ -404,13 +436,12 @@
if binary_installed:
continue
# No platform-specific <actions> recipe has yet resulted in a successful installation.
- install_via_fabric( app,
- tool_dependency,
- install_dir,
- package_name=package_name,
- actions_elem=actions_elem,
- action_elem=None )
- sa_session.refresh( tool_dependency )
+ tool_dependency = install_via_fabric( app,
+ tool_dependency,
+ install_dir,
+ package_name=package_name,
+ actions_elem=actions_elem,
+ action_elem=None )
if tool_dependency.status == app.model.ToolDependency.installation_status.INSTALLED:
# If an <actions> tag was found that matches the current platform, and the install_via_fabric method
# did not result in an error state, set binary_installed to True in order to skip any remaining
@@ -419,47 +450,53 @@
else:
# Process the next matching <actions> tag, or any defined <actions> tags that do not contain platform
# dependent recipes.
- print 'Error downloading binary for %s version %s: %s' % \
- ( package_name, package_version, tool_dependency.error_message )
+ log.debug( 'Error downloading binary for tool dependency %s version %s: %s' % \
+ ( str( package_name ), str( package_version ), str( tool_dependency.error_message ) ) )
else:
# If no <actions> tags have been defined that match our current platform, or none of the matching
# <actions> tags resulted in a successful tool dependency status, proceed with one and only one
# <actions> tag that is not defined to be platform-specific.
if not binary_installed:
- print 'Binary installation did not occur, so proceeding with install and compile recipe.'
+ log.debug( 'Proceeding with install and compile recipe for tool dependency %s.' % str( tool_dependency.name ) )
# Make sure to reset for installation if attempt at binary installation resulted in an error.
+ can_install = True
if tool_dependency.status != app.model.ToolDependency.installation_status.NEVER_INSTALLED:
removed, error_message = tool_dependency_util.remove_tool_dependency( app, tool_dependency )
- install_via_fabric( app,
- tool_dependency,
- install_dir,
- package_name=package_name,
- actions_elem=actions_elem,
- action_elem=None )
+ if not removed:
+ log.debug( 'Error removing old files from installation directory %s: %s' % \
+ ( str( tool_dependency.installation_directory( app ) ), str( error_message ) ) )
+ can_install = False
+ if can_install:
+ tool_dependency = install_via_fabric( app,
+ tool_dependency,
+ install_dir,
+ package_name=package_name,
+ actions_elem=actions_elem,
+ action_elem=None )
# Perform any final actions that have been defined within the actions_group tag set, but outside of
# an <actions> tag, such as a set_environment entry, or a download_file or download_by_url command to
# retrieve extra data for this tool dependency. Only do this if the tool dependency is not in an error
# state, otherwise skip this action.
if actions_elem.tag == 'action' and tool_dependency.status != app.model.ToolDependency.installation_status.ERROR:
- install_via_fabric( app,
- tool_dependency,
- install_dir,
- package_name=package_name,
- actions_elem=None,
- action_elem=actions_elem )
+ tool_dependency = install_via_fabric( app,
+ tool_dependency,
+ install_dir,
+ package_name=package_name,
+ actions_elem=None,
+ action_elem=actions_elem )
else:
# <actions> tags outside of an <actions_group> tag shall not check os or architecture, and if the attributes are
# defined, they will be ignored. All <actions> tags outside of an <actions_group> tag set shall always be processed.
# This is the default and original behavior of the install_package method.
- install_via_fabric( app,
- tool_dependency,
- install_dir,
- package_name=package_name,
- actions_elem=actions_elems,
- action_elem=None )
- sa_session.refresh( tool_dependency )
+ tool_dependency = install_via_fabric( app,
+ tool_dependency,
+ install_dir,
+ package_name=package_name,
+ actions_elem=actions_elems,
+ action_elem=None )
if tool_dependency.status != app.model.ToolDependency.installation_status.ERROR:
- print package_name, 'version', package_version, 'installed in', install_dir
+ log.debug( 'Tool dependency %s version %s has been installed in %s.' % \
+ ( str( package_name ), str( package_version ), str( install_dir ) ) )
else:
error_message = 'Version %s of the %s package cannot be installed because ' % ( str( package_version ), str( package_name ) )
error_message += 'the recipe for installing the package is missing either an <actions> tag set or an <actions_group> '
@@ -489,7 +526,6 @@
def install_via_fabric( app, tool_dependency, install_dir, package_name=None, proprietary_fabfile_path=None, actions_elem=None, action_elem=None, **kwd ):
"""Parse a tool_dependency.xml file's <actions> tag set to gather information for the installation via fabric."""
-
sa_session = app.model.context.current
if not os.path.exists( install_dir ):
os.makedirs( install_dir )
@@ -657,7 +693,7 @@
configure_opts = td_common_util.evaluate_template( action_elem.text, install_dir )
action_dict[ 'configure_opts' ] = configure_opts
elif action_type == 'setup_r_environment':
- # setup an R environment
+ # setup an R environment.
# <action type="setup_r_environment">
# <repository name="package_r_3_0_1" owner="bgruening">
# <package name="R" version="3.0.1" />
@@ -665,18 +701,19 @@
# <!-- allow installing an R packages -->
# <package>https://github.com/bgruening/download_store/raw/master/DESeq2-1_0_18/BiocGe…</package>
# </action>
- td_common_util.parse_setup_environment_repositories( app, all_env_shell_file_paths, action_elem, action_dict )
+ # Discover all child repository dependency tags and define the path to an env.sh file associated with each repository.
+ # This will potentially update the value of the 'env_shell_file_paths' entry in action_dict.
+ action_dict = td_common_util.get_env_shell_file_paths_from_setup_environment_elem( app, all_env_shell_file_paths, action_elem, action_dict )
r_packages = list()
for env_elem in action_elem:
if env_elem.tag == 'package':
r_packages.append( env_elem.text.strip() )
-
if r_packages:
action_dict[ 'r_packages' ] = r_packages
else:
continue
elif action_type == 'setup_ruby_environment':
- # setup an Ruby environment
+ # setup a Ruby environment.
# <action type="setup_ruby_environment">
# <repository name="package_ruby_2_0" owner="bgruening">
# <package name="ruby" version="2.0" />
@@ -686,33 +723,32 @@
# <package>protk=1.2.4</package>
# <package>http://url-to-some-gem-file.de/protk.gem</package>
# </action>
- td_common_util.parse_setup_environment_repositories( app, all_env_shell_file_paths, action_elem, action_dict )
- ruby_packages = list()
+ # Discover all child repository dependency tags and define the path to an env.sh file associated with each repository.
+ # This will potentially update the value of the 'env_shell_file_paths' entry in action_dict.
+ action_dict = td_common_util.get_env_shell_file_paths_from_setup_environment_elem( app, all_env_shell_file_paths, action_elem, action_dict )
+ ruby_package_tups = []
for env_elem in action_elem:
if env_elem.tag == 'package':
- """
- A valid gem definition can be:
- protk=1.2.4
- protk
- ftp://ftp.gruening.de/protk.gem
- """
- gem_token = env_elem.text.strip().split('=')
- if len(gem_token) == 2:
+ # A valid gem definition can be:
+ # protk=1.2.4
+ # protk
+ # ftp://ftp.gruening.de/protk.gem
+ gem_token = env_elem.text.strip().split( '=' )
+ if len( gem_token ) == 2:
# version string
- gem_name = gem_token[0]
- gem_version = gem_token[1]
- ruby_packages.append( [gem_name, gem_version] )
+ gem_name = gem_token[ 0 ]
+ gem_version = gem_token[ 1 ]
+ ruby_package_tups.append( ( gem_name, gem_version ) )
else:
# gem name for rubygems.org without version number
gem = env_elem.text.strip()
- ruby_packages.append( [gem, None] )
-
- if ruby_packages:
- action_dict[ 'ruby_packages' ] = ruby_packages
+ ruby_package_tups.append( ( gem, None ) )
+ if ruby_package_tups:
+ action_dict[ 'ruby_package_tups' ] = ruby_package_tups
else:
continue
elif action_type == 'setup_perl_environment':
- # setup an Perl environment
+ # setup a Perl environment.
# <action type="setup_perl_environment">
# <repository name="package_perl_5_18" owner="bgruening">
# <package name="perl" version="5.18.1" />
@@ -721,17 +757,17 @@
# <package>XML::Parser</package>
# <package>http://search.cpan.org/CPAN/authors/id/C/CJ/CJFIELDS/BioPerl-1.6.922.tar.gz</package>
# </action>
- td_common_util.parse_setup_environment_repositories( app, all_env_shell_file_paths, action_elem, action_dict )
- perl_packages = list()
+ # Discover all child repository dependency tags and define the path to an env.sh file associated with each repository.
+ # This will potentially update the value of the 'env_shell_file_paths' entry in action_dict.
+ action_dict = td_common_util.get_env_shell_file_paths_from_setup_environment_elem( app, all_env_shell_file_paths, action_elem, action_dict )
+ perl_packages = []
for env_elem in action_elem:
if env_elem.tag == 'package':
- """
- A valid package definition can be:
- XML::Parser
- http://search.cpan.org/CPAN/authors/id/C/CJ/CJFIELDS/BioPerl-1.6.922.tar.gz
- Unfortunately, CPAN does not support versioning. If you want real Reproducibility,
- you need to specify the tarball path and the right order of different tarballs manually.
- """
+ # A valid package definition can be:
+ # XML::Parser
+ # http://search.cpan.org/CPAN/authors/id/C/CJ/CJFIELDS/BioPerl-1.6.922.tar.gz
+ # Unfortunately CPAN does not support versioning, so if you want real reproducibility you need to specify
+ # the tarball path and the right order of different tarballs manually.
perl_packages.append( env_elem.text.strip() )
if perl_packages:
action_dict[ 'perl_packages' ] = perl_packages
@@ -782,7 +818,8 @@
# run_proprietary_fabric_method( app, elem, proprietary_fabfile_path, install_dir, package_name=package_name )
raise Exception( 'Tool dependency installation using proprietary fabric scripts is not yet supported.' )
else:
- install_and_build_package_via_fabric( app, tool_dependency, actions_dict )
+ tool_dependency = install_and_build_package_via_fabric( app, tool_dependency, actions_dict )
+ return tool_dependency
def parse_env_shell_entry( action, name, value, line ):
new_value = value
@@ -814,7 +851,7 @@
actions_dict = dict( install_dir=dependent_install_dir )
if package_name:
actions_dict[ 'package_name' ] = package_name
- tool_dependency = None
+ tool_dependencies = []
action_dict = {}
if tool_dependencies_config:
required_td_tree, error_message = xml_util.parse_xml( tool_dependencies_config )
@@ -827,17 +864,17 @@
required_td_package_name = required_td_elem.get( 'name', None )
required_td_package_version = required_td_elem.get( 'version', None )
if required_td_package_name==package_name and required_td_package_version==package_version:
- tool_dependency, actions = handle_set_environment_entry_for_package( app=app,
- install_dir=required_install_dir,
- tool_shed_repository=tool_shed_repository,
- package_name=package_name,
- package_version=package_version,
- elem=required_td_elem,
- required_repository=required_repository )
+ tool_dependencies, actions = handle_set_environment_entry_for_package( app=app,
+ install_dir=required_install_dir,
+ tool_shed_repository=tool_shed_repository,
+ package_name=package_name,
+ package_version=package_version,
+ elem=required_td_elem,
+ required_repository=required_repository )
if actions:
actions_dict[ 'actions' ] = actions
break
- return tool_dependency, actions_dict
+ return tool_dependencies, actions_dict
def run_proprietary_fabric_method( app, elem, proprietary_fabfile_path, install_dir, package_name=None, **kwd ):
"""
@@ -950,13 +987,22 @@
# Handle setting environment variables using a fabric method.
fabric_util.file_append( env_entry, env_file, skip_if_contained=True, make_executable=True )
sa_session.refresh( tool_dependency )
- if tool_dependency.status != app.model.ToolDependency.installation_status.ERROR:
- tool_dependency.status = app.model.ToolDependency.installation_status.INSTALLED
- sa_session.add( tool_dependency )
- sa_session.flush()
- print 'Environment variable ', env_var_name, 'set in', install_dir
+ if tool_dependency.status not in [ app.model.ToolDependency.installation_status.ERROR,
+ app.model.ToolDependency.installation_status.INSTALLED ]:
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.INSTALLED,
+ error_message=None,
+ remove_from_disk=False )
+ log.debug( 'Environment variable %s set in %s for tool dependency %s.' % \
+ ( str( env_var_name ), str( install_dir ), str( tool_dependency.name ) ) )
else:
- raise NotImplementedError( 'Only set_environment version 1.0 is currently supported (i.e., change your tag to be <set_environment version="1.0">).' )
+ error_message = 'Only set_environment version 1.0 is currently supported (i.e., change your tag to be <set_environment version="1.0">).'
+ tool_dependency = tool_dependency_util.set_tool_dependency_attributes( app,
+ tool_dependency=tool_dependency,
+ status=app.model.ToolDependency.installation_status.ERROR,
+ error_message=error_message,
+ remove_from_disk=False )
return tool_dependency
def strip_path( fpath ):
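A pattern recurring throughout this changeset is to advance a tool dependency's status only when it is not already in a terminal (Error or Installed) state. The following minimal sketch isolates that guard; the class and function names are illustrative stand-ins, not part of the Galaxy codebase:

    class ToolDependencyRecord( object ):

        def __init__( self, status ):
            self.status = status

    # Terminal states, mirroring app.model.ToolDependency.installation_status.
    TERMINAL_STATES = ( 'Error', 'Installed' )

    def set_status_if_unfinished( tool_dependency, new_status ):
        # Only advance the status when the dependency has not already
        # reached a terminal state, as in the guarded updates above.
        if tool_dependency.status not in TERMINAL_STATES:
            tool_dependency.status = new_status
        return tool_dependency

    td = set_status_if_unfinished( ToolDependencyRecord( 'Installing' ), 'Installed' )
    print td.status  # Installed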
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
@@ -257,6 +257,33 @@
log.debug( error_message )
return env_shell_file_paths
+def get_env_shell_file_paths_from_setup_environment_elem( app, all_env_shell_file_paths, elem, action_dict ):
+ """
+ Parse an XML tag set to discover all child repository dependency tags and define the path to an env.sh file associated
+ with the repository (this requires the repository dependency to be in an installed state). The received action_dict
+ will be updated with these discovered paths and returned to the caller. This method handles tool dependency definition
+ tag sets <setup_r_environment>, <setup_ruby_environment> and <setup_perl_environment>.
+ """
+ # An example elem is:
+ # <action type="setup_perl_environment">
+ # <repository name="package_perl_5_18" owner="iuc">
+ # <package name="perl" version="5.18.1" />
+ # </repository>
+ # <repository name="package_expat_2_1" owner="iuc" prior_installation_required="True">
+ # <package name="expat" version="2.1.0" />
+ # </repository>
+ # <package>http://search.cpan.org/CPAN/authors/id/T/TO/TODDR/XML-Parser-2.41.tar.gz</package>
+ # <package>http://search.cpan.org/CPAN/authors/id/L/LD/LDS/CGI.pm-3.43.tar.gz</package>
+ # </action>
+ env_shell_file_paths = []
+ for action_elem in elem:
+ if action_elem.tag == 'repository':
+ env_shell_file_paths = get_env_shell_file_paths( app, action_elem )
+ all_env_shell_file_paths.extend( env_shell_file_paths )
+ if all_env_shell_file_paths:
+ action_dict[ 'env_shell_file_paths' ] = all_env_shell_file_paths
+ action_dict[ 'action_shell_file_paths' ] = env_shell_file_paths
+ return action_dict
+
def get_env_var_values( install_dir ):
env_var_dict = {}
env_var_dict[ 'INSTALL_DIR' ] = install_dir
@@ -415,16 +442,6 @@
continue
return actions_elem_tuples
-
-def parse_setup_environment_repositories( app, all_env_shell_file_paths, action_elem, action_dict ):
- env_shell_file_paths = get_env_shell_file_paths( app, action_elem.find('repository') )
-
- all_env_shell_file_paths.extend( env_shell_file_paths )
- if all_env_shell_file_paths:
- action_dict[ 'env_shell_file_paths' ] = all_env_shell_file_paths
- action_dict[ 'action_shell_file_paths' ] = env_shell_file_paths
-
-
def url_download( install_dir, downloaded_file_name, download_url, extract=True ):
file_path = os.path.join( install_dir, downloaded_file_name )
src = None
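The new get_env_shell_file_paths_from_setup_environment_elem() method walks every child <repository> tag of a setup_*_environment action, where the old parse_setup_environment_repositories() inspected only the first one. A simplified, self-contained sketch of that traversal follows; the path layout returned here is invented for illustration, since Galaxy resolves the real env.sh location from the installed repository record:

    from xml.etree import ElementTree

    ACTION_XML = '''<action type="setup_perl_environment">
        <repository name="package_perl_5_18" owner="iuc">
            <package name="perl" version="5.18.1" />
        </repository>
        <package>XML::Parser</package>
    </action>'''

    def collect_env_shell_file_paths( elem ):
        paths = []
        for child in elem:
            if child.tag == 'repository':
                # Hypothetical layout standing in for the installed repository's env.sh path.
                paths.append( '%s/%s/env.sh' % ( child.get( 'owner' ), child.get( 'name' ) ) )
        return paths

    elem = ElementTree.fromstring( ACTION_XML )
    print collect_env_shell_file_paths( elem )  # ['iuc/package_perl_5_18/env.sh']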
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -284,18 +284,38 @@
# If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
# not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
# automated functional test framework produces.
- tool_has_tests = True
- if 'tests' not in tool_metadata or not tool_metadata[ 'tests' ]:
- tool_has_tests = False
- if verbosity >= 2:
- print '# No functional tests defined for %s.' % tool_id
- no_tests += 1
- else:
- tool_has_tests = True
+ tool_has_tests = False
+ defined_test_dicts = tool_metadata.get( 'tests', None )
+ if defined_test_dicts is not None:
+ # We need to inspect the <test> tags because the following tags...
+ # <tests>
+ # </tests>
+ # ...will produce the following metadata:
+ # "tests": []
+ # And the following tags...
+ # <tests>
+ # <test>
+ # </test>
+ # </tests>
+ # ...will produce the following metadata:
+ # "tests":
+ # [{"inputs": [], "name": "Test-1", "outputs": [], "required_files": []}]
+ for defined_test_dict in defined_test_dicts:
+ inputs = defined_test_dict.get( 'inputs', [] )
+ outputs = defined_test_dict.get( 'outputs', [] )
+ if inputs and outputs:
+ # At least one tool within the repository has a valid <test> tag.
+ tool_has_tests = True
+ break
+ if tool_has_tests:
if verbosity >= 2:
print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
( tool_id, changeset_revision, name )
has_tests += 1
+ else:
+ if verbosity >= 2:
+ print '# No functional tests defined for %s.' % tool_id
+ no_tests += 1
failure_reason = ''
problem_found = False
missing_test_files = []
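The check added above distinguishes an empty <tests> tag set, which produces "tests": [], from one that defines at least one test with both inputs and outputs. Condensed into a standalone function, assuming metadata dictionaries shaped like the examples in the comments:

    def tool_has_usable_tests( tool_metadata ):
        # A tool is testable only if at least one defined test declares
        # both inputs and outputs; empty lists do not count.
        for test_dict in tool_metadata.get( 'tests', [] ) or []:
            if test_dict.get( 'inputs', [] ) and test_dict.get( 'outputs', [] ):
                return True
        return False

    print tool_has_usable_tests( { 'tests': [] } )  # False
    print tool_has_usable_tests( { 'tests': [ { 'name': 'Test-1', 'inputs': [ 'in' ], 'outputs': [ 'out' ], 'required_files': [] } ] } )  # True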
diff -r 7b889ba6fd441c4ef41172e90a7342e77637fb4b -r e507124fbe79903378c4b0fe150f2f3fd816ad32 lib/tool_shed/scripts/check_s3_for_empty_tool_dependency_installation_paths.py
--- /dev/null
+++ b/lib/tool_shed/scripts/check_s3_for_empty_tool_dependency_installation_paths.py
@@ -0,0 +1,155 @@
+import argparse
+import os
+import sys
+
+new_path = [ os.path.join( os.getcwd(), "lib" ) ]
+new_path.extend( sys.path[1:] )
+sys.path = new_path
+
+from galaxy.util import asbool
+from galaxy import eggs
+eggs.require( 'boto' )
+
+import boto
+
+from tool_shed.galaxy_install.tool_dependencies.fabric_util import INSTALLATION_LOG
+
+
+class BucketList( object ):
+
+ def __init__( self, amazon_id, amazon_secret, bucket ):
+ # Connect to S3 using the provided Amazon access key and secret identifier.
+ self.s3 = boto.connect_s3( amazon_id, amazon_secret )
+ self.bucket_name = bucket
+ # Connect to S3 using the received bucket name.
+ self.bucket = boto.s3.bucket.Bucket( self.s3, bucket )
+ self.install_dirs = self.get_tool_dependency_install_paths()
+ self.empty_installation_paths = self.check_for_empty_tool_dependency_installation_paths()
+
+ def display_empty_installation_paths( self ):
+ for empty_installation_path in self.empty_installation_paths:
+ print empty_installation_path
+
+ def delete_empty_installation_paths( self ):
+ print 'Deleting empty installation paths.'
+ for empty_installation_path in self.empty_installation_paths:
+ # Get all keys in the S3 bucket that start with the installation path, and delete each one.
+ for path_to_delete in self.bucket.list( prefix=empty_installation_path ):
+ self.bucket.delete_key( path_to_delete.key )
+ print 'Deleted empty path %s' % str( empty_installation_path )
+
+ def get_tool_dependency_install_paths( self ):
+ found_paths = []
+ for item in self.bucket.list():
+ name = str( item.name )
+ # Skip environment_settings and __virtualenv_src, since these directories do not contain package tool dependencies.
+ if name.startswith( 'environment_settings' ) or name.startswith( '__virtualenv_src' ):
+ continue
+ paths = name.rstrip( '/' ).split( '/' )
+ # Paths are in the format name/version/owner/repository/changeset_revision. If the changeset revision is
+ # present, we need to check the contents of that path. If not, then the tool dependency was completely
+ # uninstalled.
+ if len( paths ) >= 5:
+ td_install_dir = '/'.join( paths[ :5 ] ) + '/'
+ if td_install_dir not in found_paths:
+ found_paths.append( td_install_dir )
+ return found_paths
+
+ def check_for_empty_tool_dependency_installation_paths( self ):
+ empty_directories = []
+ for item in self.install_dirs:
+ # Get all entries under the path for this tool dependency.
+ contents = self.bucket.list( prefix=item )
+ tool_dependency_path_contents = []
+ # Find out if there are two or fewer items in the path. The first entry will be the installation path itself.
+ # If only one other item exists, and the full path ends with the installation log, this is an incorrectly installed
+ # tool dependency.
+ for path_entry in contents:
+ tool_dependency_path_contents.append( path_entry )
+ # If there are more than two items in the path, we cannot safely assume that the dependency failed to
+ # install correctly.
+ if len( tool_dependency_path_contents ) > 2:
+ break
+ # If the root directory is the only entry in the path, we have an empty tool dependency installation path.
+ if len( tool_dependency_path_contents ) == 1:
+ empty_directories.append( tool_dependency_path_contents[ 0 ] )
+ # Otherwise, if the only other entry is the installation log, we have an installation path that should be deleted.
+ # This would not be the case in a Galaxy instance, since the Galaxy admin will need to verify the contents of
+ # the installation path in order to determine which action should be taken.
+ elif len( tool_dependency_path_contents ) == 2 and tool_dependency_path_contents[ 1 ].name.endswith( INSTALLATION_LOG ):
+ empty_directories.append( tool_dependency_path_contents[ 0 ] )
+ return [ item.name for item in empty_directories ]
+
+def main( args ):
+ '''
+ Amazon credentials can be provided in one of three ways:
+ 1. By specifying them on the command line with the --id and --secret arguments.
+ 2. By specifying a path to a file that contains the credentials in the form ACCESS_KEY:SECRET_KEY
+ using the --s3passwd argument.
+ 3. By specifying the above path in the 's3passwd' environment variable.
+ Each listed option will override the ones below it, if present.
+ '''
+ if None in [ args.id, args.secret ]:
+ if args.s3passwd is None:
+ args.s3passwd = os.environ.get( 's3passwd', None )
+ if args.s3passwd is not None and os.path.exists( args.s3passwd ):
+ awsid, secret = file( args.s3passwd, 'r' ).read().rstrip( '\n' ).split( ':' )
+ else:
+ print 'Amazon ID and secret not provided, and no s3passwd file found.'
+ return 1
+ else:
+ awsid = args.id
+ secret = args.secret
+ dependency_cleaner = BucketList( awsid, secret, args.bucket )
+ if len( dependency_cleaner.empty_installation_paths ) == 0:
+ print 'No empty installation paths found, exiting.'
+ return 0
+ print 'The following %d tool dependency installation paths were found to be empty or contain only the file %s.' % \
+ ( len( dependency_cleaner.empty_installation_paths ), INSTALLATION_LOG )
+ if asbool( args.delete ):
+ dependency_cleaner.delete_empty_installation_paths()
+ else:
+ for empty_installation_path in dependency_cleaner.empty_installation_paths:
+ print empty_installation_path
+ return 0
+
+if __name__ == '__main__':
+ description = 'Determine if there are any tool dependency installation paths that should be removed. Remove them if '
+ description += 'the --delete command line argument is provided with a true value.'
+ parser = argparse.ArgumentParser( description=description )
+ parser.add_argument( '--delete',
+ dest='delete',
+ required=True,
+ action='store',
+ default=False,
+ type=asbool,
+ help='Whether to delete empty folders or list them on exit.' )
+ parser.add_argument( '--bucket',
+ dest='bucket',
+ required=True,
+ action='store',
+ metavar='name',
+ help='The S3 bucket where tool dependencies are installed.' )
+ parser.add_argument( '--id',
+ dest='id',
+ required=False,
+ action='store',
+ default=None,
+ metavar='ACCESS_KEY',
+ help='The identifier for an Amazon account that has read access to the bucket.' )
+ parser.add_argument( '--secret',
+ dest='secret',
+ required=False,
+ action='store',
+ default=None,
+ metavar='SECRET_KEY',
+ help='The secret key for an Amazon account that has upload/delete access to the bucket.' )
+ parser.add_argument( '--s3passwd',
+ dest='s3passwd',
+ required=False,
+ action='store',
+ default=None,
+ metavar='path/file',
+ help='The path to a file containing Amazon access credentials, in the format KEY:SECRET.' )
+ args = parser.parse_args()
+ sys.exit( main( args ) )
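The emptiness rule applied by check_for_empty_tool_dependency_installation_paths() reduces to: a prefix holding only its own placeholder key, or the placeholder plus the installation log, is treated as empty. A rough sketch using the boto 2.x API; the credentials, bucket name and prefix are placeholders, and like the script it assumes the listing yields the directory placeholder key first:

    import boto

    def prefix_is_effectively_empty( bucket, prefix, installation_log='INSTALLATION.log' ):
        keys = []
        for key in bucket.list( prefix=prefix ):
            keys.append( key )
            if len( keys ) > 2:
                # More than a placeholder plus one file: assume a real installation.
                return False
        if len( keys ) == 1:
            return True
        return len( keys ) == 2 and keys[ 1 ].name.endswith( installation_log )

    connection = boto.connect_s3( 'ACCESS_KEY', 'SECRET_KEY' )
    bucket = connection.get_bucket( 'tool-dependency-bucket' )
    print prefix_is_effectively_empty( bucket, 'libpng/1.2.5/iuc/package_libpng_1_2/abc123def456/' )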
This diff is so big that we needed to truncate the remainder.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: Dave Bouvier: Fix string interpolation in a log message.
by commits-noreply@bitbucket.org 20 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/30d30843b57f/
Changeset: 30d30843b57f
User: Dave Bouvier
Date: 2013-11-20 22:51:02
Summary: Fix string interpolation in a log message.
Affected #: 1 file
diff -r d0fe08c597a40b32a5d6c9a57b4be07f610fdfc7 -r 30d30843b57f5b8fe809ed2c226cf41e82b8465e lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
--- a/lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
+++ b/lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
@@ -86,7 +86,7 @@
now = strftime( "%Y-%m-%d %H:%M:%S" )
print "#############################################################################"
- print "# %s - Validating repositories of type %s on %s..." % ( TOOL_DEPENDENCY_DEFINITION, now )
+ print "# %s - Validating repositories of type %s on %s..." % ( TOOL_DEPENDENCY_DEFINITION, now, config_parser.get( config_section, 'host' ) )
print "# This tool shed is configured to listen on %s:%s" % ( config_parser.get( config_section, 'host' ), config_parser.get( config_section, 'port' ) )
app = RepositoriesApplication( config )
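For context, the bug class here is a %-interpolation arity mismatch: the format string declares three %s placeholders but only two arguments were supplied, which fails at runtime rather than at parse time. A minimal demonstration:

    template = "# %s - Validating repositories of type %s on %s..."
    try:
        print template % ( 'tool_dependency_definition', '2013-11-20' )
    except TypeError, e:
        print 'Interpolation failed: %s' % e  # not enough arguments for format string
    print template % ( '2013-11-20 22:51:02', 'tool_dependency_definition', 'localhost:9009' )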
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Add a script to check repositories of type tool dependency definition in the tool shed to prep for installation testing.
by commits-noreply@bitbucket.org 20 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d0fe08c597a4/
Changeset: d0fe08c597a4
User: greg
Date: 2013-11-20 22:41:33
Summary: Add a script to check repositories of type tool dependency definition in the tool shed to prep for installation testing.
Affected #: 3 files
diff -r f8bebd9e12f53fca874739df55a34b73be224c0b -r d0fe08c597a40b32a5d6c9a57b4be07f610fdfc7 lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
--- /dev/null
+++ b/lib/tool_shed/scripts/check_tool_dependency_definition_repositories.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+
+from datetime import datetime
+from optparse import OptionParser
+from time import strftime
+
+import ConfigParser
+import logging
+import os
+import shutil
+import sys
+import time
+import tempfile
+
+new_path = [ os.path.join( os.getcwd(), "lib" ), os.path.join( os.getcwd(), "test" ) ]
+new_path.extend( sys.path[1:] )
+sys.path = new_path
+
+from galaxy import eggs
+eggs.require( "SQLAlchemy >= 0.4" )
+eggs.require( 'mercurial' )
+
+from mercurial import __version__
+
+import galaxy.webapps.tool_shed.config as tool_shed_config
+import galaxy.webapps.tool_shed.model.mapping
+
+from base.util import get_database_version
+from base.util import get_repository_current_revision
+from base.util import get_test_environment
+from galaxy.model.orm import and_, not_, select
+from galaxy.web import url_for
+from tool_shed.repository_types.util import TOOL_DEPENDENCY_DEFINITION
+
+log = logging.getLogger()
+log.setLevel( 10 )
+log.addHandler( logging.StreamHandler( sys.stdout ) )
+
+assert sys.version_info[ :2 ] >= ( 2, 6 )
+
+class RepositoriesApplication( object ):
+ """Encapsulates the state of a Universe application"""
+ def __init__( self, config ):
+ if config.database_connection is False:
+ config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % config.database
+ # Setup the database engine and ORM
+ self.model = galaxy.webapps.tool_shed.model.mapping.init( config.file_path, config.database_connection, engine_options={}, create_tables=False )
+ self.hgweb_config_manager = self.model.hgweb_config_manager
+ self.hgweb_config_manager.hgweb_config_dir = config.hgweb_config_dir
+ print "# Using configured hgweb.config file: ", self.hgweb_config_manager.hgweb_config
+
+ @property
+ def sa_session( self ):
+ """
+ Returns a SQLAlchemy session -- currently just gets the current
+ session from the threadlocal session context, but this is provided
+ to allow migration toward a more SQLAlchemy 0.4 style of use.
+ """
+ return self.model.context.current
+
+ def shutdown( self ):
+ pass
+
+def main():
+ '''Script that validates all repositories of type tool_dependency_definition.'''
+ parser = OptionParser()
+ parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
+ parser.add_option( "-s", "--section", action="store", dest="section", default='server:main',
+ help=".ini file section from which to extract the host and port" )
+ parser.add_option( "-v", "--verbose", action="count", dest="verbosity", default=1, help="Control the amount of detail in the log output." )
+ parser.add_option( "--verbosity", action="store", dest="verbosity", metavar='VERBOSITY', type="int",
+ help="Control the amount of detail in the log output, --verbosity=1 is the same as -v" )
+ ( options, args ) = parser.parse_args()
+ try:
+ ini_file = args[ 0 ]
+ except IndexError:
+ print "Usage: python %s <tool shed .ini file> [options]" % sys.argv[ 0 ]
+ exit( 127 )
+ config_parser = ConfigParser.ConfigParser( { 'here':os.getcwd() } )
+ config_parser.read( ini_file )
+ config_dict = {}
+ for key, value in config_parser.items( "app:main" ):
+ config_dict[ key ] = value
+ config = tool_shed_config.Configuration( **config_dict )
+ config_section = options.section
+
+ now = strftime( "%Y-%m-%d %H:%M:%S" )
+ print "#############################################################################"
+ print "# %s - Validating repositories of type %s on %s..." % ( TOOL_DEPENDENCY_DEFINITION, now )
+ print "# This tool shed is configured to listen on %s:%s" % ( config_parser.get( config_section, 'host' ), config_parser.get( config_section, 'port' ) )
+
+ app = RepositoriesApplication( config )
+
+ if options.info_only:
+ print "# Displaying info only ( --info_only )"
+ if options.verbosity:
+ print "# Displaying extra information ( --verbosity = %d )" % options.verbosity
+ validate_repositories( app, info_only=options.info_only, verbosity=options.verbosity )
+
+def validate_repositories( app, info_only=False, verbosity=1 ):
+ """
+ Inspect records in the repository_metadata table that are associated with repositories of type TOOL_DEPENDENCY_DEFINITION
+ to ensure they are valid and set the repository_metadata.do_not_test column value to True if the metadata is invalid.
+ Each repository's metadata should look something like:
+ "{"tool_dependencies":
+ {"libpng/1.2.5": {"name": "libpng",
+ "readme": "README content",
+ "type": "package",
+ "version": "1.2.5"}}}"
+ or:
+ "{"repository_dependencies":
+ {"description": null,
+ "repository_dependencies":
+ [["http://localhost:9009", "package_libpng_1_2", "iuc", "5788512d4c0a", "True", "False"]]},
+ "tool_dependencies":
+ {"libgd/2.1.0":
+ {"name": "libgd", "readme": "text"},
+ "libpng/1.2.5":
+ {"name": "libpng", "type": "package", "version": "1.2.5"}}}"
+ """
+ invalid_metadata = 0
+ records_checked = 0
+ skip_metadata_ids = []
+ start = time.time()
+ valid_metadata = 0
+ # Restrict testing to repositories of type TOOL_DEPENDENCY_DEFINITION
+ tool_dependency_definition_repository_ids = []
+ for repository in app.sa_session.query( app.model.Repository ) \
+ .filter( and_( app.model.Repository.table.c.deleted == False,
+ app.model.Repository.table.c.type == TOOL_DEPENDENCY_DEFINITION ) ):
+ tool_dependency_definition_repository_ids.append( repository.id )
+ # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway.
+ skip_metadata_ids = select( [ app.model.SkipToolTest.table.c.repository_metadata_id ] )
+ # Get the list of metadata records to check, restricting it to records that have not been flagged do_not_test.
+ for repository_metadata in app.sa_session.query( app.model.RepositoryMetadata ) \
+ .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
+ app.model.RepositoryMetadata.table.c.do_not_test == False,
+ app.model.RepositoryMetadata.table.c.repository_id.in_( tool_dependency_definition_repository_ids ),
+ not_( app.model.RepositoryMetadata.table.c.id.in_( skip_metadata_ids ) ) ) ):
+ records_checked += 1
+ # Create the repository_status dictionary, using the dictionary from the previous test run if available.
+ if repository_metadata.tool_test_results:
+ repository_status = repository_metadata.tool_test_results
+ else:
+ repository_status = {}
+ # Initialize the repository_status dictionary with the information about the current test environment.
+ last_test_environment = repository_status.get( 'test_environment', None )
+ if last_test_environment is None:
+ test_environment = get_test_environment()
+ else:
+ test_environment = get_test_environment( last_test_environment )
+ test_environment[ 'tool_shed_database_version' ] = get_database_version( app )
+ test_environment[ 'tool_shed_mercurial_version' ] = __version__.version
+ test_environment[ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
+ repository_status[ 'test_environment' ] = test_environment
+ # Check the next repository revision.
+ changeset_revision = str( repository_metadata.changeset_revision )
+ repository = repository_metadata.repository
+ name = repository.name
+ owner = repository.user.username
+ metadata = repository_metadata.metadata
+ if verbosity >= 1:
+ print '# -------------------------------------------------------------------------------------------'
+ print '# Checking revision %s of %s owned by %s.' % ( changeset_revision, name, owner )
+ if metadata:
+ # Valid metadata will undoubtedly have a tool_dependencies entry or repository_dependencies entry.
+ repository_dependencies = metadata.get( 'repository_dependencies', None )
+ tool_dependencies = metadata.get( 'tool_dependencies', None )
+ if repository_dependencies or tool_dependencies:
+ print 'Revision %s of %s owned by %s has valid metadata.' % ( changeset_revision, name, owner )
+ valid_metadata += 1
+ else:
+ if verbosity >= 1:
+ print 'Revision %s of %s owned by %s has invalid metadata.' % ( changeset_revision, name, owner )
+ invalid_metadata += 1
+ if not info_only:
+ repository_metadata.tool_test_results = repository_status
+ repository_metadata.time_last_tested = datetime.utcnow()
+ app.sa_session.add( repository_metadata )
+ app.sa_session.flush()
+ stop = time.time()
+ print '# -------------------------------------------------------------------------------------------'
+ print '# Checked %d repository revisions.' % records_checked
+ print '# %d revisions found with valid tool dependency definition metadata.' % valid_metadata
+ print '# %d revisions found with invalid tool dependency definition metadata.' % invalid_metadata
+ if info_only:
+ print '# Database not updated with any information from this run.'
+ print "# Elapsed time: ", stop - start
+ print "#############################################################################"
+
+if __name__ == "__main__": main()
diff -r f8bebd9e12f53fca874739df55a34b73be224c0b -r d0fe08c597a40b32a5d6c9a57b4be07f610fdfc7 templates/webapps/tool_shed/repository/manage_repository.mako
--- a/templates/webapps/tool_shed/repository/manage_repository.mako
+++ b/templates/webapps/tool_shed/repository/manage_repository.mako
@@ -203,7 +203,11 @@
%if includes_tools or repository.type == TOOL_DEPENDENCY_DEFINITION:
<p/><div class="toolForm">
- <div class="toolFormTitle">Automated tool tests</div>
+ %if repository.type == TOOL_DEPENDENCY_DEFINITION:
+ <div class="toolFormTitle">Automated tool dependency test</div>
+ %else:
+ <div class="toolFormTitle">Automated tool tests</div>
+ %endif
<div class="toolFormBody"><form name="skip_tool_tests" id="skip_tool_tests" action="${h.url_for( controller='repository', action='manage_repository', id=trans.security.encode_id( repository.id ), changeset_revision=repository_metadata.changeset_revision )}" method="post" ><div class="form-row">
diff -r f8bebd9e12f53fca874739df55a34b73be224c0b -r d0fe08c597a40b32a5d6c9a57b4be07f610fdfc7 test/base/util.py
--- a/test/base/util.py
+++ b/test/base/util.py
@@ -1,6 +1,7 @@
-import os, sys, logging, platform
-
-log = logging.getLogger(__name__)
+import logging
+import os
+import platform
+import sys
cwd = os.getcwd()
if cwd not in sys.path:
@@ -16,23 +17,10 @@
eggs.require( 'mercurial' )
-from mercurial import hg, ui, commands
+from mercurial import hg
+from mercurial import ui
-def get_repository_current_revision( repo_path ):
- '''
- This method uses the python mercurial API to get the current working directory's mercurial changeset hash. Note that if the author of mercurial
- changes the API, this method will have to be updated or replaced.
- '''
- # Initialize a mercurial repo object from the provided path.
- repo = hg.repository( ui.ui(), repo_path )
- # Get the working directory's change context.
- ctx = repo[ None ]
- # Extract the changeset hash of the first parent of that change context (the most recent changeset to which the working directory was updated).
- changectx = ctx.parents()[ 0 ]
- # Also get the numeric revision, so we can return the customary id:hash changeset identifiers.
- ctx_rev = changectx.rev()
- hg_id = '%d:%s' % ( ctx_rev, str( changectx ) )
- return hg_id
+log = logging.getLogger(__name__)
def get_database_version( app ):
'''
@@ -74,7 +62,25 @@
return None, repository_name, changeset_revision
return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision
-def get_test_environment( current_environment={} ):
+def get_repository_current_revision( repo_path ):
+ '''
+ This method uses the python mercurial API to get the current working directory's mercurial changeset hash. Note that if the author of mercurial
+ changes the API, this method will have to be updated or replaced.
+ '''
+ # Initialize a mercurial repo object from the provided path.
+ repo = hg.repository( ui.ui(), repo_path )
+ # Get the working directory's change context.
+ ctx = repo[ None ]
+ # Extract the changeset hash of the first parent of that change context (the most recent changeset to which the working directory was updated).
+ changectx = ctx.parents()[ 0 ]
+ # Also get the numeric revision, so we can return the customary id:hash changeset identifiers.
+ ctx_rev = changectx.rev()
+ hg_id = '%d:%s' % ( ctx_rev, str( changectx ) )
+ return hg_id
+
+def get_test_environment( current_environment=None ):
+ if current_environment is None:
+ current_environment = {}
rval = current_environment
rval[ 'python_version' ] = platform.python_version()
rval[ 'architecture' ] = platform.machine()
@@ -132,4 +138,3 @@
shed_tools_dict[ guid ] = galaxy_test_file_dir
last_galaxy_test_file_dir = galaxy_test_file_dir
return has_test_data, shed_tools_dict
-
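The signature change from current_environment={} to current_environment=None fixes Python's mutable default argument pitfall: a default dict is created once at function definition time and then shared by every call that omits the argument. A small demonstration of the difference:

    def bad( env={} ):
        env[ 'calls' ] = env.get( 'calls', 0 ) + 1
        return env

    def good( env=None ):
        if env is None:
            env = {}
        env[ 'calls' ] = env.get( 'calls', 0 ) + 1
        return env

    print bad()           # {'calls': 1}
    print bad()           # {'calls': 2}, the same shared dict mutated again
    print good(), good()  # {'calls': 1} {'calls': 1}, a fresh dict each call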
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.