galaxy-commits
January 2014
- 1 participant
- 280 discussions
commit/galaxy-central: greg: Improved logging in the tool shed's install and test framework.
by commits-noreply@bitbucket.org 04 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/1d57caac677c/
Changeset: 1d57caac677c
User: greg
Date: 2014-01-05 03:25:53
Summary: Improved logging in the tool shed's install and test framework.
Affected #: 3 files
diff -r cd8ca90e93a1f896ee6ff3ac56665e64373ace00 -r 1d57caac677cbbd8465c149b6a091684b21af1f8 test/install_and_test_tool_shed_repositories/base/util.py
--- a/test/install_and_test_tool_shed_repositories/base/util.py
+++ b/test/install_and_test_tool_shed_repositories/base/util.py
@@ -547,13 +547,18 @@
tool_test_results_dict[ 'installation_errors' ][ 'repository_dependencies' ]\
.append( missing_repository_dependency_info_dict )
# Record the status of this repository in the tool shed.
- # TODO: do something useful with response_dict
+ log.debug('=============================================================' )
+ log.debug( 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( str( repository.changeset_revision ), str( repository.name ), str( repository.owner ), str( tool_test_results_dict ) ) )
response_dict = register_test_result( galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
+ log.debug( 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( str( repository.changeset_revision ), str( repository.name ), str( repository.owner ), str( response_dict ) ) )
+ log.debug('=============================================================' )
def initialize_install_and_test_statistics_dict( test_framework ):
# Initialize a dictionary for the summary that will be printed to stdout.
@@ -818,6 +823,7 @@
if can_update_tool_shed:
metadata_revision_id = repository_dict.get( 'id', None )
if metadata_revision_id is not None:
+ log.debug( 'Updating tool_test_results for repository_metadata id %s.' % str( metadata_revision_id ) )
tool_test_results_dicts.insert( 0, tool_test_results_dict )
params[ 'tool_test_results' ] = tool_test_results_dicts
# Set the time_last_tested entry so that the repository_metadata.time_last_tested will be set in the tool shed.
@@ -826,7 +832,8 @@
try:
return update( tool_shed_api_key, url, params, return_formatted=False )
except Exception, e:
- log.exception( 'Error attempting to register test results: %s' % str( e ) )
+ log.exception( 'Error updating tool_test_results for repository_metadata id %s:\n%s' % \
+ ( str( metadata_revision_id ), str( e ) ) )
return {}
else:
return {}
diff -r cd8ca90e93a1f896ee6ff3ac56665e64373ace00 -r 1d57caac677cbbd8465c149b6a091684b21af1f8 test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -189,18 +189,23 @@
changeset_revision,
encoded_repository_metadata_id )
if is_excluded:
+ log.debug( "Not testing revision %s of repository %s owned by %s because it is in the exclude list for this test run." % \
+ ( changeset_revision, name, owner ) )
# If this repository is being skipped, register the reason.
tool_test_results_dict[ 'not_tested' ] = dict( reason=reason )
params = dict( do_not_test=False )
- # TODO: do something useful with response_dict
+ log.debug('=============================================================' )
+ log.debug( 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( tool_test_results_dict ) ) )
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
- log.debug( "Not testing revision %s of repository %s owned by %s because it is in the exclude list for this test run." % \
- ( changeset_revision, name, owner ) )
+ log.debug( 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( response_dict ) ) )
+ log.debug('=============================================================' )
else:
# See if the repository was installed in a previous test.
repository = install_and_test_base_util.get_repository( name, owner, changeset_revision )
@@ -215,13 +220,18 @@
install_and_test_statistics_dict[ 'repositories_with_installation_error' ].append( repository_identifier_dict )
tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = error_message
params = dict( test_install_error=True )
- # TODO: do something useful with response_dict
+ log.debug('=============================================================' )
+ log.debug( 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( tool_test_results_dict ) ) )
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
+ log.debug( 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( response_dict ) ) )
+ log.debug('=============================================================' )
else:
# The repository was successfully installed.
log.debug( 'Installation succeeded for revision %s of repository %s owned by %s.' % \
@@ -277,7 +287,9 @@
install_and_test_statistics_dict[ 'at_least_one_test_failed' ].append( repository_identifier_dict )
# Record the status of this repository in the tool shed.
params[ 'tools_functionally_correct' ] = False
- # TODO: do something useful with response_dict
+ log.debug('=============================================================' )
+ log.debug( 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( tool_test_results_dict ) ) )
response_dict = \
install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
@@ -285,6 +297,9 @@
repository_dict,
params,
can_update_tool_shed )
+ log.debug( 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( response_dict ) ) )
+ log.debug('=============================================================' )
else:
log.debug( 'Skipped attempt to install revision %s of repository %s owned by %s because ' % \
( changeset_revision, name, owner ) )
@@ -627,6 +642,8 @@
if result.wasSuccessful():
# This repository's tools passed all functional tests. Use the ReportResults nose plugin to get a list
# of tests that passed.
+ log.debug( 'Revision %s of repository %s owned by %s installed and passed functional tests.' % \
+ ( changeset_revision, name, owner ) )
for plugin in test_plugins:
if hasattr( plugin, 'getTestStatus' ):
test_identifier = '%s/%s' % ( owner, name )
@@ -647,17 +664,22 @@
# Call the register_test_result() method to execute a PUT request to the repository_revisions API
# controller with the status of the test. This also sets the do_not_test and tools_functionally
# correct flags and updates the time_last_tested field to today's date.
- # TODO: do something useful with response_dict
+ log.debug('=============================================================' )
+ log.debug( 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( tool_test_results_dict ) ) )
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
- log.debug( 'Revision %s of repository %s owned by %s installed and passed functional tests.' % \
- ( changeset_revision, name, owner ) )
+ log.debug( 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( response_dict ) ) )
+ log.debug('=============================================================' )
else:
# The get_failed_test_dicts() method returns a list.
+ log.debug( 'Revision %s of repository %s owned by %s installed successfully but did not pass functional tests.' % \
+ ( changeset_revision, name, owner ) )
failed_test_dicts = get_failed_test_dicts( result, from_tool_test=True )
tool_test_results_dict[ 'failed_tests' ] = failed_test_dicts
failed_repository_dict = repository_identifier_dict
@@ -668,15 +690,18 @@
params = dict( tools_functionally_correct=False,
test_install_error=False,
do_not_test=str( set_do_not_test ) )
- # TODO: do something useful with response_dict
+ log.debug('=============================================================' )
+ log.debug( 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( tool_test_results_dict ) ) )
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
- log.debug( 'Revision %s of repository %s owned by %s installed successfully but did not pass functional tests.' % \
- ( changeset_revision, name, owner ) )
+ log.debug( 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( response_dict ) ) )
+ log.debug('=============================================================' )
# Remove the just-executed tests so twill will not find and re-test them along with the tools
# contained in the next repository.
remove_tests( app )
diff -r cd8ca90e93a1f896ee6ff3ac56665e64373ace00 -r 1d57caac677cbbd8465c149b6a091684b21af1f8 test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py
@@ -124,17 +124,22 @@
encoded_repository_metadata_id )
if is_excluded:
# If this repository is being skipped, register the reason.
+ log.debug( "Not testing revision %s of repository %s owned by %s because it is in the exclude list for this test run." % \
+ ( changeset_revision, name, owner ) )
tool_test_results_dict[ 'not_tested' ] = dict( reason=reason )
params = dict( do_not_test=False )
- # TODO: do something useful with response_dict
+ log.debug('=============================================================' )
+ log.debug( 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( tool_test_results_dict ) ) )
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
- log.debug( "Not testing revision %s of repository %s owned by %s because it is in the exclude list for this test run." % \
- ( changeset_revision, name, owner ) )
+ log.debug( 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( response_dict ) ) )
+ log.debug('=============================================================' )
else:
# See if the repository was installed in a previous test.
repository = install_and_test_base_util.get_repository( name, owner, changeset_revision )
@@ -149,13 +154,18 @@
install_and_test_statistics_dict[ 'repositories_with_installation_error' ].append( repository_identifier_dict )
tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ] = error_message
params = dict( test_install_error=True )
- # TODO: do something useful with response_dict
+ log.debug('=============================================================' )
+ log.debug( 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( tool_test_results_dict ) ) )
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
+ log.debug( 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( response_dict ) ) )
+ log.debug('=============================================================' )
else:
# The repository was successfully installed.
log.debug( 'Installation succeeded for revision %s of repository %s owned by %s.' % \
@@ -166,13 +176,18 @@
repository_identifier_dict,
install_and_test_statistics_dict,
tool_test_results_dict )
- # TODO: do something useful with response_dict
+ log.debug('=============================================================' )
+ log.debug( 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( tool_test_results_dict ) ) )
response_dict = install_and_test_base_util.register_test_result( install_and_test_base_util.galaxy_tool_shed_url,
tool_test_results_dicts,
tool_test_results_dict,
repository_dict,
params,
can_update_tool_shed )
+ log.debug( 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s' % \
+ ( changeset_revision, name, owner, str( response_dict ) ) )
+ log.debug('=============================================================' )
else:
log.debug( 'Skipped attempt to install revision %s of repository %s owned by %s because ' % \
( changeset_revision, name, owner ) )
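The pattern added at every call site above is the same: log the payload about to be inserted, call register_test_result(), then log the response. A minimal standalone sketch of that pattern, using a hypothetical wrapper name log_and_register() and treating register_test_result as an opaque callable with the argument order shown in the diff (the wrapper itself is not part of this changeset, which repeats the statements inline):

import logging

log = logging.getLogger( __name__ )

def log_and_register( register_test_result, galaxy_tool_shed_url, tool_test_results_dicts,
                      tool_test_results_dict, repository_dict, params, can_update_tool_shed,
                      changeset_revision, name, owner ):
    # Hypothetical helper illustrating the logging added in this changeset.
    log.debug( '=' * 61 )
    log.debug( 'Inserting the following into tool_test_results for revision %s of repository %s owned by %s:\n%s',
               changeset_revision, name, owner, tool_test_results_dict )
    response_dict = register_test_result( galaxy_tool_shed_url,
                                          tool_test_results_dicts,
                                          tool_test_results_dict,
                                          repository_dict,
                                          params,
                                          can_update_tool_shed )
    log.debug( 'Result of inserting tool_test_results for revision %s of repository %s owned by %s:\n%s',
               changeset_revision, name, owner, response_dict )
    log.debug( '=' * 61 )
    return response_dict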
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Set a repository and its dependent repositories created from a capsule to be defined as installable only if its creation resulted in no errors.
by commits-noreply@bitbucket.org 04 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/cd8ca90e93a1/
Changeset: cd8ca90e93a1
User: greg
Date: 2014-01-05 01:25:43
Summary: Set a repository and its dependent repositories created from a capsule to be defined as installable only if its creation resulted in no errors.
Affected #: 7 files
diff -r f11a729d42cb66753b8b1616ad625c1b2a306b3d -r cd8ca90e93a1f896ee6ff3ac56665e64373ace00 lib/galaxy/webapps/tool_shed/api/repositories.py
--- a/lib/galaxy/webapps/tool_shed/api/repositories.py
+++ b/lib/galaxy/webapps/tool_shed/api/repositories.py
@@ -219,11 +219,12 @@
import_results_tups = repository_maintenance_util.create_repository_and_import_archive( trans,
repository_status_info_dict,
import_results_tups )
+ import_util.check_status_and_reset_downloadable( trans, import_results_tups )
suc.remove_dir( file_path )
# NOTE: the order of installation is defined in import_results_tups, but order will be lost when transferred to return_dict.
return_dict = {}
for import_results_tup in import_results_tups:
- name_owner, message = import_results_tup
+ ok, name_owner, message = import_results_tup
name, owner = name_owner
key = 'Archive of repository "%s" owned by "%s"' % ( str( name ), str( owner ) )
val = message.replace( '<b>', '"' ).replace( '</b>', '"' )
diff -r f11a729d42cb66753b8b1616ad625c1b2a306b3d -r cd8ca90e93a1f896ee6ff3ac56665e64373ace00 lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -1883,9 +1883,11 @@
# Add the capsule_file_name and encoded_file_path to the repository_status_info_dict.
repository_status_info_dict[ 'capsule_file_name' ] = capsule_file_name
repository_status_info_dict[ 'encoded_file_path' ] = encoded_file_path
- import_results_tups = repository_maintenance_util.create_repository_and_import_archive( trans,
- repository_status_info_dict,
- import_results_tups )
+ import_results_tups = \
+ repository_maintenance_util.create_repository_and_import_archive( trans,
+ repository_status_info_dict,
+ import_results_tups )
+ import_util.check_status_and_reset_downloadable( trans, import_results_tups )
suc.remove_dir( file_path )
return trans.fill_template( '/webapps/tool_shed/repository/import_capsule_results.mako',
export_info_dict=export_info_dict,
diff -r f11a729d42cb66753b8b1616ad625c1b2a306b3d -r cd8ca90e93a1f896ee6ff3ac56665e64373ace00 lib/tool_shed/util/import_util.py
--- a/lib/tool_shed/util/import_util.py
+++ b/lib/tool_shed/util/import_util.py
@@ -21,6 +21,37 @@
log = logging.getLogger( __name__ )
+def check_status_and_reset_downloadable( trans, import_results_tups ):
+ """Check the status of each imported repository and set downloadable to False if errors."""
+ flush = False
+ for import_results_tup in import_results_tups:
+ ok, name_owner, message = import_results_tup
+ name, owner = name_owner
+ if not ok:
+ repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
+ # Do not allow the repository to be automatically installed if population resulted in errors.
+ tip_changeset_revision = repository.tip( trans.app )
+ repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans,
+ trans.security.encode_id( repository.id ),
+ tip_changeset_revision )
+ if repository_metadata:
+ if repository_metadata.downloadable:
+ repository_metadata.downloadable = False
+ trans.sa_session.add( repository_metadata )
+ if not flush:
+ flush = True
+ # Do not allow dependent repository revisions to be automatically installed if population
+ # resulted in errors.
+ dependent_downloadable_revisions = suc.get_dependent_downloadable_revisions( trans, repository_metadata )
+ for dependent_downloadable_revision in dependent_downloadable_revisions:
+ if dependent_downloadable_revision.downloadable:
+ dependent_downloadable_revision.downloadable = False
+ trans.sa_session.add( dependent_downloadable_revision )
+ if not flush:
+ flush = True
+ if flush:
+ trans.sa_session.flush()
+
def extract_capsule_files( trans, **kwd ):
"""Extract the uploaded capsule archive into a temporary location for inspection, validation and potential import."""
return_dict = {}
@@ -243,13 +274,16 @@
commit_message,
undesirable_dirs_removed,
undesirable_files_removed )
+ if error_message:
+ results_dict[ 'ok' ] = False
+ results_dict[ 'error_message' ] += error_message
try:
- metadata_util.set_repository_metadata_due_to_new_tip( trans, repository, content_alert_str=content_alert_str )
+ metadata_util.set_repository_metadata_due_to_new_tip( trans,
+ repository,
+ content_alert_str=content_alert_str )
except Exception, e:
log.debug( "Error setting metadata on repository %s created from imported archive %s: %s" % \
( str( repository.name ), str( archive_file_name ), str( e ) ) )
- results_dict[ 'ok' ] = ok
- results_dict[ 'error_message' ] += error_message
else:
archive.close()
results_dict[ 'ok' ] = False
diff -r f11a729d42cb66753b8b1616ad625c1b2a306b3d -r cd8ca90e93a1f896ee6ff3ac56665e64373ace00 lib/tool_shed/util/metadata_util.py
--- a/lib/tool_shed/util/metadata_util.py
+++ b/lib/tool_shed/util/metadata_util.py
@@ -296,14 +296,19 @@
includes_tool_dependencies = True
if 'workflows' in metadata_dict:
includes_workflows = True
- if has_repository_dependencies or has_repository_dependencies_only_if_compiling_contained_td or includes_datatypes or \
- includes_tools or includes_tool_dependencies or includes_workflows:
+ if has_repository_dependencies or \
+ has_repository_dependencies_only_if_compiling_contained_td or \
+ includes_datatypes or \
+ includes_tools or \
+ includes_tool_dependencies or \
+ includes_workflows:
downloadable = True
else:
downloadable = False
repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans, id, changeset_revision )
if repository_metadata:
- # A repository metadata record already exists with the received changeset_revision, so we don't need to check the skip_tool_test table.
+ # A repository metadata record already exists with the received changeset_revision, so we don't need to
+ # check the skip_tool_test table.
check_skip_tool_test = False
repository_metadata.metadata = metadata_dict
repository_metadata.downloadable = downloadable
@@ -313,7 +318,8 @@
repository_metadata.includes_tool_dependencies = includes_tool_dependencies
repository_metadata.includes_workflows = includes_workflows
else:
- # No repository_metadata record exists for the received changeset_revision, so we may need to update the skip_tool_test table.
+ # No repository_metadata record exists for the received changeset_revision, so we may need to update the
+ # skip_tool_test table.
check_skip_tool_test = True
repository_metadata = trans.model.RepositoryMetadata( repository_id=repository.id,
changeset_revision=changeset_revision,
@@ -324,7 +330,8 @@
includes_tools=includes_tools,
includes_tool_dependencies=includes_tool_dependencies,
includes_workflows=includes_workflows )
- # Always set the default values for the following columns. When resetting all metadata on a repository, this will reset the values.
+ # Always set the default values for the following columns. When resetting all metadata on a repository
+ # this will reset the values.
repository_metadata.tools_functionally_correct = False
repository_metadata.missing_test_components = False
repository_metadata.test_install_error = False
@@ -1738,7 +1745,8 @@
# NO_METADATA - no metadata for either ancestor or current, so continue from current
# EQUAL - ancestor metadata is equivalent to current metadata, so continue from current
# SUBSET - ancestor metadata is a subset of current metadata, so continue from current
- # NOT_EQUAL_AND_NOT_SUBSET - ancestor metadata is neither equal to nor a subset of current metadata, so persist ancestor metadata.
+ # NOT_EQUAL_AND_NOT_SUBSET - ancestor metadata is neither equal to nor a subset of current
+ # metadata, so persist ancestor metadata.
comparison = compare_changeset_revisions( trans,
ancestor_changeset_revision,
ancestor_metadata_dict,
@@ -1750,7 +1758,11 @@
elif comparison == NOT_EQUAL_AND_NOT_SUBSET:
metadata_changeset_revision = ancestor_changeset_revision
metadata_dict = ancestor_metadata_dict
- repository_metadata = create_or_update_repository_metadata( trans, id, repository, metadata_changeset_revision, metadata_dict )
+ repository_metadata = create_or_update_repository_metadata( trans,
+ id,
+ repository,
+ metadata_changeset_revision,
+ metadata_dict )
changeset_revisions.append( metadata_changeset_revision )
ancestor_changeset_revision = current_changeset_revision
ancestor_metadata_dict = current_metadata_dict
@@ -1762,7 +1774,11 @@
metadata_changeset_revision = current_changeset_revision
metadata_dict = current_metadata_dict
# We're at the end of the change log.
- repository_metadata = create_or_update_repository_metadata( trans, id, repository, metadata_changeset_revision, metadata_dict )
+ repository_metadata = create_or_update_repository_metadata( trans,
+ id,
+ repository,
+ metadata_changeset_revision,
+ metadata_dict )
changeset_revisions.append( metadata_changeset_revision )
ancestor_changeset_revision = None
ancestor_metadata_dict = None
@@ -1770,14 +1786,20 @@
# We reach here only if current_metadata_dict is empty and ancestor_metadata_dict is not.
if not ctx.children():
# We're at the end of the change log.
- repository_metadata = create_or_update_repository_metadata( trans, id, repository, metadata_changeset_revision, metadata_dict )
+ repository_metadata = create_or_update_repository_metadata( trans,
+ id,
+ repository,
+ metadata_changeset_revision,
+ metadata_dict )
changeset_revisions.append( metadata_changeset_revision )
ancestor_changeset_revision = None
ancestor_metadata_dict = None
suc.remove_dir( work_dir )
- # Delete all repository_metadata records for this repository that do not have a changeset_revision value in changeset_revisions.
+ # Delete all repository_metadata records for this repository that do not have a changeset_revision
+ # value in changeset_revisions.
clean_repository_metadata( trans, id, changeset_revisions )
- # Set tool version information for all downloadable changeset revisions. Get the list of changeset revisions from the changelog.
+ # Set tool version information for all downloadable changeset revisions. Get the list of changeset
+ # revisions from the changelog.
reset_all_tool_versions( trans, id, repo )
# Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file.
tool_util.reset_tool_data_tables( trans.app )
@@ -1785,8 +1807,8 @@
def reset_metadata_on_selected_repositories( trans, **kwd ):
"""
- Inspect the repository changelog to reset metadata for all appropriate changeset revisions. This method is called from both Galaxy and the
- Tool Shed.
+ Inspect the repository changelog to reset metadata for all appropriate changeset revisions.
+ This method is called from both Galaxy and the Tool Shed.
"""
repository_ids = util.listify( kwd.get( 'repository_ids', None ) )
message = ''
@@ -1879,7 +1901,11 @@
tip_only = isinstance( repository_type_class, TipOnly )
if not tip_only and new_metadata_required_for_utilities( trans, repository, metadata_dict ):
# Create a new repository_metadata table row.
- repository_metadata = create_or_update_repository_metadata( trans, encoded_id, repository, repository.tip( trans.app ), metadata_dict )
+ repository_metadata = create_or_update_repository_metadata( trans,
+ encoded_id,
+ repository,
+ repository.tip( trans.app ),
+ metadata_dict )
# If this is the first record stored for this repository, see if we need to send any email alerts.
if len( repository.downloadable_revisions ) == 1:
suc.handle_email_alerts( trans, repository, content_alert_str='', new_repo_alert=True, admin_only=False )
@@ -1896,8 +1922,8 @@
repository_metadata.includes_datatypes = True
else:
repository_metadata.includes_datatypes = False
- # We don't store information about the special type of repository dependency that is needed only for compiling a tool dependency
- # defined for the dependent repository.
+ # We don't store information about the special type of repository dependency that is needed only for
+ # compiling a tool dependency defined for the dependent repository.
repository_dependencies_dict = metadata_dict.get( 'repository_dependencies', {} )
repository_dependencies = repository_dependencies_dict.get( 'repository_dependencies', [] )
has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td = \
@@ -1924,9 +1950,14 @@
trans.sa_session.flush()
else:
# There are no metadata records associated with the repository.
- repository_metadata = create_or_update_repository_metadata( trans, encoded_id, repository, repository.tip( trans.app ), metadata_dict )
+ repository_metadata = create_or_update_repository_metadata( trans,
+ encoded_id,
+ repository,
+ repository.tip( trans.app ),
+ metadata_dict )
if 'tools' in metadata_dict and repository_metadata and status != 'error':
- # Set tool versions on the new downloadable change set. The order of the list of changesets is critical, so we use the repo's changelog.
+ # Set tool versions on the new downloadable change set. The order of the list of changesets is
+ # critical, so we use the repo's changelog.
changeset_revisions = []
for changeset in repo.changelog:
changeset_revision = str( repo.changectx( changeset ) )
@@ -1945,8 +1976,12 @@
return message, status
def set_repository_metadata_due_to_new_tip( trans, repository, content_alert_str=None, **kwd ):
- """Set metadata on the repository tip in the tool shed - this method is not called from Galaxy."""
- error_message, status = set_repository_metadata( trans, repository, content_alert_str=content_alert_str, **kwd )
+ """Set metadata on the repository tip in the tool shed."""
+ # This method is not called from Galaxy.
+ error_message, status = set_repository_metadata( trans,
+ repository,
+ content_alert_str=content_alert_str,
+ **kwd )
if error_message:
# FIXME: This probably should not redirect since this method is called from the upload controller as well as the repository controller.
# If there is an error, display it.
@@ -1994,8 +2029,12 @@
break
if new_dependency_name and new_dependency_type and new_dependency_version:
# Update all attributes of the tool_dependency record in the database.
- log.debug( "Updating tool dependency '%s' with type '%s' and version '%s' to have new type '%s' and version '%s'." % \
- ( str( tool_dependency.name ), str( tool_dependency.type ), str( tool_dependency.version ), str( new_dependency_type ), str( new_dependency_version ) ) )
+ log.debug( "Updating version %s of tool dependency %s %s to have new version %s and type %s." % \
+ ( str( tool_dependency.version ),
+ str( tool_dependency.type ),
+ str( tool_dependency.name ),
+ str( new_dependency_version ),
+ str( new_dependency_type ) ) )
tool_dependency.type = new_dependency_type
tool_dependency.version = new_dependency_version
tool_dependency.status = app.install_model.ToolDependency.installation_status.UNINSTALLED
@@ -2004,9 +2043,10 @@
context.flush()
new_tool_dependency = tool_dependency
else:
- # We have no new tool dependency definition based on a matching dependency name, so remove the existing tool dependency record from the database.
- log.debug( "Deleting tool dependency with name '%s', type '%s' and version '%s' from the database since it is no longer defined." % \
- ( str( tool_dependency.name ), str( tool_dependency.type ), str( tool_dependency.version ) ) )
+ # We have no new tool dependency definition based on a matching dependency name, so remove
+ # the existing tool dependency record from the database.
+ log.debug( "Deleting version %s of tool dependency %s %s from the database since it is no longer defined." % \
+ ( str( tool_dependency.version ), str( tool_dependency.type ), str( tool_dependency.name ) ) )
context.delete( tool_dependency )
context.flush()
return new_tool_dependency
diff -r f11a729d42cb66753b8b1616ad625c1b2a306b3d -r cd8ca90e93a1f896ee6ff3ac56665e64373ace00 lib/tool_shed/util/repository_maintenance_util.py
--- a/lib/tool_shed/util/repository_maintenance_util.py
+++ b/lib/tool_shed/util/repository_maintenance_util.py
@@ -102,15 +102,17 @@
def create_repository_and_import_archive( trans, repository_archive_dict, import_results_tups ):
"""
- Create a new repository in the tool shed and populate it with the contents of a gzip compressed tar archive that was exported
- as part or all of the contents of a capsule.
+ Create a new repository in the tool shed and populate it with the contents of a gzip compressed tar archive
+ that was exported as part or all of the contents of a capsule.
"""
results_message = ''
name = repository_archive_dict.get( 'name', None )
username = repository_archive_dict.get( 'owner', None )
if name is None or username is None:
- results_message += 'Import failed: required repository name <b>%s</b> or owner <b>%s</b> is missing.' % ( str( name ), str( username ))
- import_results_tups.append( ( ( str( name ), str( username ) ), results_message ) )
+ ok = False
+ results_message += 'Import failed: required repository name <b>%s</b> or owner <b>%s</b> is missing.' % \
+ ( str( name ), str( username ))
+ import_results_tups.append( ( ok, ( str( name ), str( username ) ), results_message ) )
else:
if repository_archive_dict[ 'status' ] is None:
# The repository does not yet exist in this Tool Shed and the current user is authorized to import
@@ -122,8 +124,9 @@
# the exported repository archive.
user = suc.get_user_by_username( trans.app, username )
if user is None:
+ ok = False
results_message += 'Import failed: repository owner <b>%s</b> does not have an account in this Tool Shed.' % str( username )
- import_results_tups.append( ( ( str( name ), str( username ) ), results_message ) )
+ import_results_tups.append( ( ok, ( str( name ), str( username ) ), results_message ) )
else:
user_id = user.id
# The categories entry in the repository_archive_dict is a list of category names. If a name does not
@@ -134,8 +137,8 @@
for category_name in category_names:
category = suc.get_category_by_name( trans, category_name )
if category is None:
- results_message += 'This Tool Shed does not have the category <b>%s</b> so it will not be associated with this repository.' % \
- str( category_name )
+ results_message += 'This Tool Shed does not have the category <b>%s</b> so it ' % str( category_name )
+ results_message += 'will not be associated with this repository.'
else:
category_ids.append( trans.security.encode_id( category.id ) )
# Create the repository record in the database.
@@ -150,11 +153,14 @@
results_message += create_message
# Populate the new repository with the contents of exported repository archive.
results_dict = import_util.import_repository_archive( trans, repository, repository_archive_dict )
- import_results_tups.append( ( ( str( name ), str( username ) ), results_message ) )
+ ok = results_dict.get( 'ok', False )
+ import_results_tups.append( ( ok, ( str( name ), str( username ) ), results_message ) )
else:
# The repository either already exists in this Tool Shed or the current user is not authorized to create it.
- results_message += 'Import not necessary: repository status for this Tool Shed is: %s.' % str( repository_archive_dict[ 'status' ] )
- import_results_tups.append( ( ( str( name ), str( username ) ), results_message ) )
+ ok = True
+ results_message += 'Import not necessary: repository status for this Tool Shed is: %s.' % \
+ str( repository_archive_dict[ 'status' ] )
+ import_results_tups.append( ( ok, ( str( name ), str( username ) ), results_message ) )
return import_results_tups
def validate_repository_name( app, name, user ):
diff -r f11a729d42cb66753b8b1616ad625c1b2a306b3d -r cd8ca90e93a1f896ee6ff3ac56665e64373ace00 lib/tool_shed/util/shed_util_common.py
--- a/lib/tool_shed/util/shed_util_common.py
+++ b/lib/tool_shed/util/shed_util_common.py
@@ -559,12 +559,65 @@
return repository_metadata
return None
+def get_dependent_downloadable_revisions( trans, repository_metadata ):
+ """
+ Return all repository_metadata records that are downloadable and that depend upon the received
+ repository_metadata record.
+ """
+ # This method is called only from the tool shed.
+ rm_changeset_revision = repository_metadata.changeset_revision
+ rm_repository = repository_metadata.repository
+ rm_repository_name = str( rm_repository.name )
+ rm_repository_owner = str( rm_repository.user.username )
+ dependent_downloadable_revisions = []
+ for repository in trans.sa_session.query( trans.model.Repository ) \
+ .filter( and_( trans.model.Repository.table.c.id != rm_repository.id,
+ trans.model.Repository.table.c.deleted == False,
+ trans.model.Repository.table.c.deprecated == False ) ):
+ downloadable_revisions = repository.downloadable_revisions
+ if downloadable_revisions:
+ for downloadable_revision in downloadable_revisions:
+ if downloadable_revision.has_repository_dependencies:
+ metadata = downloadable_revision.metadata
+ if metadata:
+ repository_dependencies_dict = metadata.get( 'repository_dependencies', {} )
+ repository_dependencies_tups = repository_dependencies_dict.get( 'repository_dependencies', [] )
+ for repository_dependencies_tup in repository_dependencies_tups:
+ tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
+ common_util.parse_repository_dependency_tuple( repository_dependencies_tup )
+ if name == rm_repository_name and owner == rm_repository_owner:
+ # We've discovered a repository revision that depends upon the repository associated
+ # with the received repository_metadata record, but we need to make sure it depends
+ # upon the revision.
+ if changeset_revision == rm_changeset_revision:
+ dependent_downloadable_revisions.append( downloadable_revision )
+ else:
+ # Make sure the defined changeset_revision is current.
+ defined_repository_metadata = \
+ trans.sa_session.query( trans.model.RepositoryMetadata ) \
+ .filter( trans.model.RepositoryMetadata.table.c.changeset_revision == changeset_revision ) \
+ .first()
+ if defined_repository_metadata is None:
+ # The defined changeset_revision is not associated with a repository_metadata
+ # record, so updates must be necessary.
+ defined_repository = get_repository_by_name_and_owner( trans.app, name, owner )
+ defined_repo_dir = defined_repository.repo_path( trans.app )
+ defined_repo = hg.repository( get_configured_ui(), defined_repo_dir )
+ updated_changeset_revision = \
+ get_next_downloadable_changeset_revision( defined_repository,
+ defined_repo,
+ changeset_revision )
+ if updated_changeset_revision == rm_changeset_revision:
+ dependent_downloadable_revisions.append( downloadable_revision )
+ return dependent_downloadable_revisions
+
def get_file_context_from_ctx( ctx, filename ):
"""Return the mercurial file context for a specified file."""
- # We have to be careful in determining if we found the correct file because multiple files with the same name may be in different directories
- # within ctx if the files were moved within the change set. For example, in the following ctx.files() list, the former may have been moved to
- # the latter: ['tmap_wrapper_0.0.19/tool_data_table_conf.xml.sample', 'tmap_wrapper_0.3.3/tool_data_table_conf.xml.sample']. Another scenario
- # is that the file has been deleted.
+ # We have to be careful in determining if we found the correct file because multiple files with
+ # the same name may be in different directories within ctx if the files were moved within the change
+ # set. For example, in the following ctx.files() list, the former may have been moved to the latter:
+ # ['tmap_wrapper_0.0.19/tool_data_table_conf.xml.sample', 'tmap_wrapper_0.3.3/tool_data_table_conf.xml.sample'].
+ # Another scenario is that the file has been deleted.
deleted = False
filename = strip_path( filename )
for ctx_file in ctx.files():
diff -r f11a729d42cb66753b8b1616ad625c1b2a306b3d -r cd8ca90e93a1f896ee6ff3ac56665e64373ace00 templates/webapps/tool_shed/repository/import_capsule_results.mako
--- a/templates/webapps/tool_shed/repository/import_capsule_results.mako
+++ b/templates/webapps/tool_shed/repository/import_capsule_results.mako
@@ -70,7 +70,7 @@
<table class="grid">
%for import_results_tup in import_results_tups:
<%
- name_owner_tup, results_message = import_results_tup
+ ok, name_owner_tup, results_message = import_results_tup
name, owner = name_owner_tup
%><tr><td>Archive of repository <b>${name}</b> owned by <b>${owner}</b><br/>${results_message}</td></tr>
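Each entry in import_results_tups now carries a leading ok flag in addition to the (name, owner) pair and the results message, and the new check_status_and_reset_downloadable() resets the downloadable flag for any entry whose import produced errors. A minimal sketch of the new tuple shape and how a consumer unpacks it; the repository names and messages below are illustrative only:

# Old shape: ( ( name, owner ), message )
# New shape: ( ok, ( name, owner ), message )
import_results_tups = [
    ( True, ( 'bwa_wrapper', 'devteam' ), 'Import completed.' ),
    ( False, ( 'example_repo', 'example_owner' ), 'Import failed: archive was invalid.' ),
]

for import_results_tup in import_results_tups:
    ok, name_owner, message = import_results_tup
    name, owner = name_owner
    if not ok:
        # In the changeset, this is where repository_metadata.downloadable (and that of
        # dependent downloadable revisions) would be reset to False and flushed.
        print( 'Revisions of %s owned by %s would be marked not downloadable: %s' % ( name, owner, message ) )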
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: martenson: fix for bug with hid starting at 2 instead of 1, introduced by myself at 2bb7df0
by commits-noreply@bitbucket.org 03 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/f11a729d42cb/
Changeset: f11a729d42cb
User: martenson
Date: 2014-01-03 22:01:54
Summary: fix for bug with hid starting at 2 instead of 1, introduced by myself at 2bb7df0
Affected #: 1 file
diff -r 720ad6d7b18324740eaf8727e2ed41ee24c68b45 -r f11a729d42cb66753b8b1616ad625c1b2a306b3d lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py
+++ b/lib/galaxy/model/mapping.py
@@ -1852,16 +1852,21 @@
def db_next_hid( self ):
"""
- Override __next_hid to generate from the database in a concurrency
- safe way.
+ db_next_hid( self )
+
+ Override __next_hid to generate from the database in a concurrency safe way.
+ Loads the next history ID from the DB and returns it.
+ It also saves the future next_id into the DB.
+
+ :rtype: int
+ :returns: the next history id
"""
conn = object_session( self ).connection()
table = self.table
trans = conn.begin()
try:
- current_hid = select( [table.c.hid_counter], table.c.id == self.id, for_update=True ).scalar()
- next_hid = current_hid + 1
- table.update( table.c.id == self.id ).execute( hid_counter = ( next_hid ) )
+ next_hid = select( [table.c.hid_counter], table.c.id == self.id, for_update=True ).scalar()
+ table.update( table.c.id == self.id ).execute( hid_counter = ( next_hid + 1 ) )
trans.commit()
return next_hid
except:
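The underlying problem was an off-by-one in which value is handed out: the old code read hid_counter, computed current_hid + 1, stored it, and returned it, so the first dataset in a history received hid 2. The fix returns the value that was read and stores next_hid + 1 for the following call. A minimal in-memory sketch of the two behaviours, assuming hid_counter starts at 1 as it does for a new Galaxy history (no database, transaction, or row locking here):

class HistoryCounterSketch( object ):
    def __init__( self ):
        self.hid_counter = 1  # a new history starts with hid_counter = 1

    def next_hid_old( self ):
        # Pre-fix behaviour: return current + 1, so the first call yields 2.
        current_hid = self.hid_counter
        next_hid = current_hid + 1
        self.hid_counter = next_hid
        return next_hid

    def next_hid_fixed( self ):
        # Post-fix behaviour: return the stored value and advance the counter,
        # so the first call yields 1.
        next_hid = self.hid_counter
        self.hid_counter = next_hid + 1
        return next_hid

print( HistoryCounterSketch().next_hid_old() )    # 2
print( HistoryCounterSketch().next_hid_fixed() )  # 1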
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: martenson: library - stop event propagation if the folder row was clicked
by commits-noreply@bitbucket.org 03 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/720ad6d7b183/
Changeset: 720ad6d7b183
User: martenson
Date: 2014-01-03 20:41:59
Summary: library - stop event propagation if the folder row was clicked
Affected #: 2 files
diff -r c4a97053aca397ca783079a1e05c0cd2f921fb88 -r 720ad6d7b18324740eaf8727e2ed41ee24c68b45 static/scripts/galaxy.library.js
--- a/static/scripts/galaxy.library.js
+++ b/static/scripts/galaxy.library.js
@@ -506,6 +506,7 @@
source = 'td';
}
if (checkbox === '') {event.stopPropagation(); return;} // button in row was clicked
+ if (checkbox === undefined) {event.stopPropagation(); return;} // folder row was clicked
if (checkbox.checked){
if (source==='td'){
@@ -578,7 +579,7 @@
// import all selected datasets into history
importAllIntoHistory : function (){
- //disable the button to rprevent multiple submission
+ //disable the button to prevent multiple submission
this.modal.disableButton('Import');
var history_id = $("select[name=dataset_import_bulk] option:selected").val();
diff -r c4a97053aca397ca783079a1e05c0cd2f921fb88 -r 720ad6d7b18324740eaf8727e2ed41ee24c68b45 static/scripts/packed/galaxy.library.js
--- a/static/scripts/packed/galaxy.library.js
+++ b/static/scripts/packed/galaxy.library.js
@@ -1,1 +1,1 @@
(The single-line minified content of static/scripts/packed/galaxy.library.js was regenerated to match the change above; the packed one-line diff is truncated in this archive and omitted here.)
p();library_router.on("route:libraries",function(){galaxyLibraryview.render()});library_router.on("route:folder_content",function(q){folderContentView.render({id:q})});library_router.on("route:download",function(q,r){if($("#center").find(":checked").length===0){library_router.navigate("folders/"+q,{trigger:true,replace:true})}else{folderContentView.download(q,r);library_router.navigate("folders/"+q,{trigger:false,replace:true})}});Backbone.history.start();return this}});return{GalaxyApp:f}});
\ No newline at end of file
+var view=null;var library_router=null;var responses=[];define(["galaxy.modal","galaxy.masthead","utils/galaxy.utils","libs/toastr"],function(j,k,g,m){var e=Backbone.Model.extend({urlRoot:"/api/libraries"});var n=Backbone.Collection.extend({url:"/api/libraries",model:e});var h=Backbone.Model.extend({urlRoot:"/api/libraries/datasets"});var c=Backbone.Collection.extend({model:h});var d=Backbone.Model.extend({defaults:{folder:new c(),full_path:"unknown",urlRoot:"/api/folders/",id:"unknown"},parse:function(q){this.full_path=q[0].full_path;this.get("folder").reset(q[1].folder_contents);return q}});var b=Backbone.Model.extend({urlRoot:"/api/histories/"});var i=Backbone.Model.extend({url:"/api/histories/"});var o=Backbone.Collection.extend({url:"/api/histories",model:i});var p=Backbone.Router.extend({routes:{"":"libraries","folders/:id":"folder_content","folders/:folder_id/download/:format":"download"}});var l=Backbone.View.extend({el:"#center",progress:0,progressStep:1,lastSelectedHistory:"",modal:null,folders:null,initialize:function(){this.folders=[];this.queue=jQuery.Deferred();this.queue.resolve()},templateFolder:function(){var q=[];q.push('<div id="library_container" style="width: 90%; margin: auto; margin-top: 2em; ">');q.push('<h3>New Data Libraries. This is work in progress. Report problems & ideas to <a href="mailto:marten@bx.psu.edu?Subject=DataLibraries_Feedback" target="_blank">Marten</a>.</h3>');q.push('<div id="library_folder_toolbar" >');q.push(' <button title="Create New Folder" id="toolbtn_create_folder" class="btn btn-primary" type="button"><span class="fa fa-plus"></span><span class="fa fa-folder-close"></span> folder</button>');q.push(' <button id="toolbtn_bulk_import" class="btn btn-primary" style="display: none; margin-left: 0.5em;" type="button"><span class="fa fa-external-link"></span> to history</button>');q.push(' <div id="toolbtn_dl" class="btn-group" style="margin-left: 0.5em; display: none; ">');q.push(' <button id="drop_toggle" type="button" class="btn btn-primary dropdown-toggle" data-toggle="dropdown">');q.push(' <span class="fa fa-download"></span> download <span class="caret"></span>');q.push(" </button>");q.push(' <ul class="dropdown-menu" role="menu">');q.push(' <li><a href="#/folders/<%= id %>/download/tgz">.tar.gz</a></li>');q.push(' <li><a href="#/folders/<%= id %>/download/tbz">.tar.bz</a></li>');q.push(' <li><a href="#/folders/<%= id %>/download/zip">.zip</a></li>');q.push(" </ul>");q.push(" </div>");q.push("</div>");q.push('<div class="library_breadcrumb">');q.push('<a title="Return to the list of libraries" href="#">Libraries</a><b>|</b> ');q.push("<% _.each(path, function(path_item) { %>");q.push("<% if (path_item[0] != id) { %>");q.push('<a title="Return to this folder" href="#/folders/<%- path_item[0] %>"><%- path_item[1] %></a><b>|</b> ');q.push("<% } else { %>");q.push('<span title="You are in this folder"><%- path_item[1] %></span>');q.push("<% } %>");q.push("<% }); %>");q.push("</div>");q.push('<table id="folder_table" class="table table-condensed">');q.push(" <thead>");q.push(' <th style="text-align: center; width: 20px; "><input id="select-all-checkboxes" style="margin: 0;" type="checkbox"></th>');q.push(' <th class="button_heading">view</th>');q.push(" <th>name</th>");q.push(" <th>data type</th>");q.push(" <th>size</th>");q.push(" <th>date</th>");q.push(" </thead>");q.push(" <tbody>");q.push(" <td></td>");q.push(' <td><button title="Go to parent folder" type="button" data-id="<%- upper_folder_id %>" class="btn_open_folder btn btn-default 
btn-xs">');q.push(' <span class="fa fa-arrow-up"></span> .. go up</td>');q.push(" <td></td>");q.push(" <td></td>");q.push(" <td></td>");q.push(" <td></td>");q.push(" </tr>");q.push(" <% _.each(items, function(content_item) { %>");q.push(' <tr class="folder_row light" id="<%- content_item.id %>">');q.push(' <% if (content_item.get("type") === "folder") { %>');q.push(" <td></td>");q.push(' <td><button title="Open this folder" type="button" data-id="<%- content_item.id %>" class="btn_open_folder btn btn-default btn-xs">');q.push(' <span class="fa fa-folder-open"></span> browse</td>');q.push(' <td><%- content_item.get("name") %>');q.push(' <% if (content_item.get("item_count") === 0) { %>');q.push(' <span class="muted">(empty folder)</span>');q.push(" <% } %>");q.push(" </td>");q.push(" <td>folder</td>");q.push(' <td><%= _.escape(content_item.get("item_count")) %> item(s)</td>');q.push(" <% } else { %>");q.push(' <td style="text-align: center; "><input style="margin: 0;" type="checkbox"></td>');q.push(" <td>");q.push(' <button title="See details of this dataset" type="button" class="library-dataset btn btn-default btn-xs">');q.push(' <span class="fa fa-eye"></span> details');q.push(" </button>");q.push(" </td>");q.push(' <td><%- content_item.get("name") %></td>');q.push(' <td><%= _.escape(content_item.get("data_type")) %></td>');q.push(' <td><%= _.escape(content_item.get("readable_size")) %></td>');q.push(" <% } %> ");q.push(' <td><%= _.escape(content_item.get("time_updated")) %></td>');q.push(" </tr>");q.push(" <% }); %>");q.push(" ");q.push(" </tbody>");q.push("</table>");q.push("</div>");return q.join("")},templateDatasetModal:function(){var q=[];q.push('<div id="dataset_info_modal">');q.push(' <table class="table table-striped table-condensed">');q.push(" <tr>");q.push(' <th scope="row" id="id_row" data-id="<%= _.escape(item.get("ldda_id")) %>">Name</th>');q.push(' <td><%= _.escape(item.get("name")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Data type</th>');q.push(' <td><%= _.escape(item.get("data_type")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Genome build</th>');q.push(' <td><%= _.escape(item.get("genome_build")) %></td>');q.push(" </tr>");q.push(' <th scope="row">Size</th>');q.push(" <td><%= _.escape(size) %></td>");q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Date uploaded</th>');q.push(' <td><%= _.escape(item.get("date_uploaded")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Uploaded by</th>');q.push(' <td><%= _.escape(item.get("uploaded_by")) %></td>');q.push(" </tr>");q.push(' <tr scope="row">');q.push(' <th scope="row">Data Lines</th>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_data_lines")) %></td>');q.push(" </tr>");q.push(' <th scope="row">Comment Lines</th>');q.push(' <% if (item.get("metadata_comment_lines") === "") { %>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_comment_lines")) %></td>');q.push(" <% } else { %>");q.push(' <td scope="row">unknown</td>');q.push(" <% } %>");q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Number of Columns</th>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_columns")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Column Types</th>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_column_types")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Miscellaneous information</th>');q.push(' <td scope="row"><%= _.escape(item.get("misc_blurb")) %></td>');q.push(" 
</tr>");q.push(" </table>");q.push(' <pre class="peek">');q.push(" </pre>");q.push("</div>");return q.join("")},templateHistorySelectInModal:function(){var q=[];q.push('<span id="history_modal_combo" style="width:90%; margin-left: 1em; margin-right: 1em; ">');q.push("Select history: ");q.push('<select id="dataset_import_single" name="dataset_import_single" style="width:50%; margin-bottom: 1em; "> ');q.push(" <% _.each(histories, function(history) { %>");q.push(' <option value="<%= _.escape(history.get("id")) %>"><%= _.escape(history.get("name")) %></option>');q.push(" <% }); %>");q.push("</select>");q.push("</span>");return q.join("")},templateBulkImportInModal:function(){var q=[];q.push('<span id="history_modal_combo_bulk" style="width:90%; margin-left: 1em; margin-right: 1em; ">');q.push("Select history: ");q.push('<select id="dataset_import_bulk" name="dataset_import_bulk" style="width:50%; margin-bottom: 1em; "> ');q.push(" <% _.each(histories, function(history) { %>");q.push(' <option value="<%= _.escape(history.get("id")) %>"><%= _.escape(history.get("name")) %></option>');q.push(" <% }); %>");q.push("</select>");q.push("</span>");return q.join("")},size_to_string:function(q){var r="";if(q>=100000000000){q=q/100000000000;r="TB"}else{if(q>=100000000){q=q/100000000;r="GB"}else{if(q>=100000){q=q/100000;r="MB"}else{if(q>=100){q=q/100;r="KB"}else{q=q*10;r="b"}}}}return(Math.round(q)/10)+r},events:{"click #select-all-checkboxes":"selectAll","click .folder_row":"selectClickedRow","click #toolbtn_bulk_import":"modalBulkImport","click #toolbtn_dl":"bulkDownload","click .library-dataset":"showDatasetDetails","click #toolbtn_create_folder":"createFolderModal","click .btn_open_folder":"navigateToFolder"},render:function(q){$("#center").css("overflow","auto");view=this;var s=this;var r=new d({id:q.id});r.url=r.attributes.urlRoot+q.id+"/contents";r.fetch({success:function(t){for(var v=0;v<r.attributes.folder.models.length;v++){var u=r.attributes.folder.models[v];if(u.get("type")==="file"){u.set("readable_size",s.size_to_string(u.get("file_size")))}}var x=r.full_path;var y;if(x.length===1){y=0}else{y=x[x.length-2][0]}var w=_.template(s.templateFolder(),{path:r.full_path,items:r.attributes.folder.models,id:q.id,upper_folder_id:y});s.$el.html(w)}})},navigateToFolder:function(r){var q=$(r.target).attr("data-id");if(typeof q==="undefined"){return false}else{if(q==="0"){library_router.navigate("#",{trigger:true,replace:true})}else{library_router.navigate("folders/"+q,{trigger:true,replace:true})}}},showDatasetDetails:function(t){t.preventDefault();var u=$(t.target).parent().parent().attr("id");var s=new h();var r=new o();s.id=u;var q=this;s.fetch({success:function(v){r.fetch({success:function(w){q.renderModalAfterFetch(v,w)}})}})},renderModalAfterFetch:function(v,s){var t=this.size_to_string(v.get("file_size"));var u=_.template(this.templateDatasetModal(),{item:v,size:t});this.modal=null;var r=this;this.modal=new j.GalaxyModal({title:"Dataset Details",body:u,buttons:{Import:function(){r.importCurrentIntoHistory()},Download:function(){r.downloadCurrent()},Close:function(){r.modal.hide();$(".modal").remove();r.modal=null}}});this.modal.bindEvents(event);$(".peek").html(v.get("peek"));var 
q=_.template(this.templateHistorySelectInModal(),{histories:s.models});$(this.modal.elMain).find(".buttons").prepend(q);if(r.lastSelectedHistory.length>0){$(this.modal.elMain).find("#dataset_import_single").val(r.lastSelectedHistory)}this.modal.show()},downloadCurrent:function(){this.modal.disableButton("Import");this.modal.disableButton("Download");var q=[];q.push($("#id_row").attr("data-id"));var r="/api/libraries/datasets/download/uncompressed";var s={ldda_ids:q};folderContentView.processDownload(r,s);this.modal.enableButton("Import");this.modal.enableButton("Download")},importCurrentIntoHistory:function(){this.modal.disableButton("Import");this.modal.disableButton("Download");var s=$(this.modal.elMain).find("select[name=dataset_import_single] option:selected").val();this.lastSelectedHistory=s;var q=$("#id_row").attr("data-id");var t=new b();var r=this;t.url=t.urlRoot+s+"/contents";t.save({content:q,source:"library"},{success:function(){m.success("Dataset imported");r.modal.enableButton("Import");r.modal.enableButton("Download")},error:function(){m.error("An error occured! Dataset not imported. Please try again.");r.modal.enableButton("Import");r.modal.enableButton("Download")}})},selectAll:function(r){var q=r.target.checked;that=this;$(":checkbox").each(function(){this.checked=q;$row=$(this.parentElement.parentElement);(q)?that.makeDarkRow($row):that.makeWhiteRow($row)});this.checkTools()},selectClickedRow:function(r){var t="";var q;var s;if(r.target.localName==="input"){t=r.target;q=$(r.target.parentElement.parentElement);s="input"}else{if(r.target.localName==="td"){t=$("#"+r.target.parentElement.id).find(":checkbox")[0];q=$(r.target.parentElement);s="td"}}if(t===""){r.stopPropagation();return}if(t===undefined){r.stopPropagation();return}if(t.checked){if(s==="td"){t.checked="";this.makeWhiteRow(q)}else{if(s==="input"){this.makeDarkRow(q)}}}else{if(s==="td"){t.checked="selected";this.makeDarkRow(q)}else{if(s==="input"){this.makeWhiteRow(q)}}}this.checkTools()},makeDarkRow:function(q){q.removeClass("light");q.find("a").removeClass("light");q.addClass("dark");q.find("a").addClass("dark")},makeWhiteRow:function(q){q.removeClass("dark");q.find("a").removeClass("dark");q.addClass("light");q.find("a").addClass("light")},checkTools:function(){var q=$("#folder_table").find(":checked");if(q.length>0){$("#toolbtn_bulk_import").show();$("#toolbtn_dl").show()}else{$("#toolbtn_bulk_import").hide();$("#toolbtn_dl").hide()}},modalBulkImport:function(){var r=this;var q=new o();q.fetch({success:function(s){var t=_.template(r.templateBulkImportInModal(),{histories:s.models});r.modal=new j.GalaxyModal({title:"Import into History",body:t,buttons:{Import:function(){r.importAllIntoHistory()},Close:function(){r.modal.hide();$(".modal").remove();r.modal=null}}});r.modal.bindEvents(event);r.modal.show()}})},importAllIntoHistory:function(){this.modal.disableButton("Import");var s=$("select[name=dataset_import_bulk] option:selected").val();var w=$("select[name=dataset_import_bulk] option:selected").text();var y=[];$("#folder_table").find(":checked").each(function(){if(this.parentElement.parentElement.id!=""){y.push(this.parentElement.parentElement.id)}});var x=_.template(this.templateProgressBar(),{history_name:w});$(this.modal.elMain).find(".modal-body").html(x);var t=100/y.length;this.initProgress(t);var q=[];for(var r=y.length-1;r>=0;r--){library_dataset_id=y[r];var u=new b();var 
v=this;u.url=u.urlRoot+s+"/contents";u.content=library_dataset_id;u.source="library";q.push(u)}this.chainCall(q)},chainCall:function(r){var q=this;var s=r.pop();if(typeof s==="undefined"){m.success("All datasets imported");this.modal.destroy();return}var t=$.when(s.save({content:s.content,source:s.source})).done(function(u){q.updateProgress();responses.push(u);q.chainCall(r)})},initProgress:function(q){this.progress=0;this.progressStep=q},updateProgress:function(){this.progress+=this.progressStep;$(".progress-bar-import").width(Math.round(this.progress)+"%");txt_representation=Math.round(this.progress)+"% Complete";$(".completion_span").text(txt_representation)},templateProgressBar:function(){var q=[];q.push('<div class="import_text">');q.push("Importing selected datasets to history <b><%= _.escape(history_name) %></b>");q.push("</div>");q.push('<div class="progress">');q.push(' <div class="progress-bar progress-bar-import" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100" style="width: 00%;">');q.push(' <span class="completion_span">0% Complete</span>');q.push(" </div>");q.push("</div>");q.push("");return q.join("")},download:function(q,u){var s=[];$("#folder_table").find(":checked").each(function(){if(this.parentElement.parentElement.id!=""){s.push(this.parentElement.parentElement.id)}});var r="/api/libraries/datasets/download/"+u;var t={ldda_ids:s};this.processDownload(r,t,"get")},processDownload:function(r,s,t){if(r&&s){s=typeof s=="string"?s:$.param(s);var q="";$.each(s.split("&"),function(){var u=this.split("=");q+='<input type="hidden" name="'+u[0]+'" value="'+u[1]+'" />'});$('<form action="'+r+'" method="'+(t||"post")+'">'+q+"</form>").appendTo("body").submit().remove();m.info("Your download will begin soon")}},createFolderModal:function(){m.info("This will create folder...in the future")}});var a=Backbone.View.extend({el:"#center",events:{"click #create_new_library_btn":"show_library_modal"},initialize:function(){},template_library_list:function(){tmpl_array=[];tmpl_array.push('<div id="library_container" style="width: 90%; margin: auto; margin-top: 2em; overflow: auto !important; ">');tmpl_array.push("");tmpl_array.push('<h3>New Data Libraries. This is work in progress. 
Report problems & ideas to <a href="mailto:marten@bx.psu.edu?Subject=DataLibraries_Feedback" target="_blank">Marten</a>.</h3>');tmpl_array.push('<a href="" id="create_new_library_btn" class="btn btn-primary file ">New Library</a>');tmpl_array.push('<table class="table table-condensed">');tmpl_array.push(" <thead>");tmpl_array.push(' <th class="button_heading"></th>');tmpl_array.push(" <th>name</th>");tmpl_array.push(" <th>description</th>");tmpl_array.push(" <th>synopsis</th> ");tmpl_array.push(" <th>model type</th> ");tmpl_array.push(" </thead>");tmpl_array.push(" <tbody>");tmpl_array.push(" <% _.each(libraries, function(library) { %>");tmpl_array.push(" <tr>");tmpl_array.push(' <td><button title="Open this library" type="button" data-id="<%- library.get("root_folder_id") %>" class="btn_open_folder btn btn-default btn-xs">');tmpl_array.push(' <span class="fa fa-folder-open"></span> browse</td>');tmpl_array.push(' <td><%- library.get("name") %></td>');tmpl_array.push(' <td><%= _.escape(library.get("description")) %></td>');tmpl_array.push(' <td><%= _.escape(library.get("synopsis")) %></td>');tmpl_array.push(' <td><%= _.escape(library.get("model_class")) %></td>');tmpl_array.push(" </tr>");tmpl_array.push(" <% }); %>");tmpl_array.push(" </tbody>");tmpl_array.push("</table>");tmpl_array.push("</div>");return tmpl_array.join("")},render:function(){$("#center").css("overflow","auto");var q=this;libraries=new n();libraries.fetch({success:function(r){var s=_.template(q.template_library_list(),{libraries:r.models});q.$el.html(s)},error:function(s,r){if(r.statusCode().status===403){m.error("Please log in first. Redirecting to login page in 3s.");setTimeout(q.redirectToLogin,3000)}else{m.error("An error occured. Please try again.")}}})},redirectToHome:function(){window.location="../"},redirectToLogin:function(){window.location="/user/login"},modal:null,show_library_modal:function(r){r.preventDefault();r.stopPropagation();var q=this;this.modal=new j.GalaxyModal({title:"Create New Library",body:this.template_new_library(),buttons:{Create:function(){q.create_new_library_event()},Close:function(){q.modal.hide()}}});this.modal.show()},create_new_library_event:function(){var s=this.serialize_new_library();if(this.validate_new_library(s)){var r=new e();var q=this;r.save(s,{success:function(t){q.modal.hide();q.clear_library_modal();q.render();m.success("Library created")},error:function(){m.error("An error occured :(")}})}else{m.error("Library's name is missing")}return false},clear_library_modal:function(){$("input[name='Name']").val("");$("input[name='Description']").val("");$("input[name='Synopsis']").val("")},serialize_new_library:function(){return{name:$("input[name='Name']").val(),description:$("input[name='Description']").val(),synopsis:$("input[name='Synopsis']").val()}},validate_new_library:function(q){return q.name!==""},template_new_library:function(){tmpl_array=[];tmpl_array.push('<div id="new_library_modal">');tmpl_array.push("<form>");tmpl_array.push('<input type="text" name="Name" value="" placeholder="Name">');tmpl_array.push('<input type="text" name="Description" value="" placeholder="Description">');tmpl_array.push('<input type="text" name="Synopsis" value="" placeholder="Synopsis">');tmpl_array.push("</form>");tmpl_array.push("</div>");return tmpl_array.join("")}});var f=Backbone.View.extend({folderContentView:null,galaxyLibraryview:null,initialize:function(){folderContentView=new l();galaxyLibraryview=new a();library_router=new 
p();library_router.on("route:libraries",function(){galaxyLibraryview.render()});library_router.on("route:folder_content",function(q){folderContentView.render({id:q})});library_router.on("route:download",function(q,r){if($("#center").find(":checked").length===0){library_router.navigate("folders/"+q,{trigger:true,replace:true})}else{folderContentView.download(q,r);library_router.navigate("folders/"+q,{trigger:false,replace:true})}});Backbone.history.start();return this}});return{GalaxyApp:f}});
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Raise an exception if attempting to install a repository into Galaxy that contains an invalid repository dependency definition (implying a bug in the Tool Shed framework).
by commits-noreply@bitbucket.org 03 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c4a97053aca3/
Changeset: c4a97053aca3
User: greg
Date: 2014-01-03 20:21:29
Summary: Raise an exception if attempting to install a repository into Galaxy that contains an invalid repository dependency definition (implying a bug in the Tool Shed framework).
Affected #: 5 files
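Before the per-file hunks, here is a minimal standalone sketch of the guard this changeset adds in handle_repository_elem() (lib/tool_shed/util/metadata_util.py, in the last hunk below): when installing into Galaxy, a <repository> dependency definition that is missing any required attribute now halts the installation with an exception instead of returning an error message. The sketch uses xml.etree.ElementTree and a made-up repository element purely for illustration; the real code works with Galaxy's xml_util and the element parsed from repository_dependencies.xml.

import xml.etree.ElementTree as ET

REQUIRED_ATTRIBUTES = ( 'toolshed', 'name', 'owner', 'changeset_revision' )

def validate_repository_elem( repository_elem ):
    """Halt if a <repository> dependency definition is missing a required attribute."""
    missing = [ attr for attr in REQUIRED_ATTRIBUTES if repository_elem.get( attr ) is None ]
    if missing:
        # Reaching here implies a bug in the Tool Shed framework, so raise an exception
        # to keep the installation from proceeding (mirroring the new behavior below).
        error_message = 'Installation halted because the following repository dependency definition is invalid:\n'
        error_message += ET.tostring( repository_elem, encoding='unicode' )
        raise Exception( error_message )

# Hypothetical, well-formed definition: passes silently.  Dropping any attribute raises.
elem = ET.fromstring( '<repository toolshed="https://toolshed.g2.bx.psu.edu" name="package_example_1_0" '
                      'owner="devteam" changeset_revision="0123456789ab" />' )
validate_repository_elem( elem )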
diff -r 10b7c04f919ff6d7fc1d03ba88ebb282645fcbba -r c4a97053aca397ca783079a1e05c0cd2f921fb88 lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
--- a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
+++ b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
@@ -1440,16 +1440,17 @@
tool_path, relative_install_dir = repository.get_tool_relative_path( trans.app )
if relative_install_dir:
original_metadata_dict = repository.metadata
- metadata_dict, invalid_file_tups = metadata_util.generate_metadata_for_changeset_revision( app=trans.app,
- repository=repository,
- changeset_revision=repository.changeset_revision,
- repository_clone_url=repository_clone_url,
- shed_config_dict = repository.get_shed_config_dict( trans.app ),
- relative_install_dir=relative_install_dir,
- repository_files_dir=None,
- resetting_all_metadata_on_repository=False,
- updating_installed_repository=False,
- persist=False )
+ metadata_dict, invalid_file_tups = \
+ metadata_util.generate_metadata_for_changeset_revision( app=trans.app,
+ repository=repository,
+ changeset_revision=repository.changeset_revision,
+ repository_clone_url=repository_clone_url,
+ shed_config_dict = repository.get_shed_config_dict( trans.app ),
+ relative_install_dir=relative_install_dir,
+ repository_files_dir=None,
+ resetting_all_metadata_on_repository=False,
+ updating_installed_repository=False,
+ persist=False )
repository.metadata = metadata_dict
if metadata_dict != original_metadata_dict:
suc.update_in_shed_tool_config( trans.app, repository )
@@ -1607,7 +1608,11 @@
changeset_revision = kwd.get( 'changeset_revision', None )
latest_changeset_revision = kwd.get( 'latest_changeset_revision', None )
latest_ctx_rev = kwd.get( 'latest_ctx_rev', None )
- repository = suc.get_tool_shed_repository_by_shed_name_owner_changeset_revision( trans.app, tool_shed_url, name, owner, changeset_revision )
+ repository = suc.get_tool_shed_repository_by_shed_name_owner_changeset_revision( trans.app,
+ tool_shed_url,
+ name,
+ owner,
+ changeset_revision )
if changeset_revision and latest_changeset_revision and latest_ctx_rev:
if changeset_revision == latest_changeset_revision:
message = "The installed repository named '%s' is current, there are no updates available. " % name
@@ -1627,16 +1632,17 @@
if repository.includes_data_managers:
data_manager_util.remove_from_data_manager( trans.app, repository )
# Update the repository metadata.
- metadata_dict, invalid_file_tups = metadata_util.generate_metadata_for_changeset_revision( app=trans.app,
- repository=repository,
- changeset_revision=latest_changeset_revision,
- repository_clone_url=repository_clone_url,
- shed_config_dict = repository.get_shed_config_dict( trans.app ),
- relative_install_dir=relative_install_dir,
- repository_files_dir=None,
- resetting_all_metadata_on_repository=False,
- updating_installed_repository=True,
- persist=True )
+ metadata_dict, invalid_file_tups = \
+ metadata_util.generate_metadata_for_changeset_revision( app=trans.app,
+ repository=repository,
+ changeset_revision=latest_changeset_revision,
+ repository_clone_url=repository_clone_url,
+ shed_config_dict=repository.get_shed_config_dict( trans.app ),
+ relative_install_dir=relative_install_dir,
+ repository_files_dir=None,
+ resetting_all_metadata_on_repository=False,
+ updating_installed_repository=True,
+ persist=True )
repository.metadata = metadata_dict
# Update the repository.changeset_revision column in the database.
repository.changeset_revision = latest_changeset_revision
@@ -1674,10 +1680,15 @@
repository_tools_tups )
# Create tool_dependency records if necessary.
if 'tool_dependencies' in metadata_dict:
- tool_dependencies = tool_dependency_util.create_tool_dependency_objects( trans.app, repository, relative_install_dir, set_status=False )
- message = "The installed repository named '%s' has been updated to change set revision '%s'. " % ( name, latest_changeset_revision )
+ tool_dependencies = tool_dependency_util.create_tool_dependency_objects( trans.app,
+ repository,
+ relative_install_dir,
+ set_status=False )
+ message = "The installed repository named '%s' has been updated to change set revision '%s'. " % \
+ ( name, latest_changeset_revision )
# See if any tool dependencies can be installed.
- shed_tool_conf, tool_path, relative_install_dir = suc.get_tool_panel_config_tool_path_install_dir( trans.app, repository )
+ shed_tool_conf, tool_path, relative_install_dir = \
+ suc.get_tool_panel_config_tool_path_install_dir( trans.app, repository )
if repository.missing_tool_dependencies:
message += "Click the name of one of the missing tool dependencies listed below to install tool dependencies."
else:
diff -r 10b7c04f919ff6d7fc1d03ba88ebb282645fcbba -r c4a97053aca397ca783079a1e05c0cd2f921fb88 lib/tool_shed/galaxy_install/install_manager.py
--- a/lib/tool_shed/galaxy_install/install_manager.py
+++ b/lib/tool_shed/galaxy_install/install_manager.py
@@ -330,12 +330,13 @@
tool_panel_elems.append( elem )
return tool_panel_elems
- def handle_repository_contents( self, tool_shed_repository, repository_clone_url, relative_install_dir, repository_elem, install_dependencies, is_repository_dependency=False ):
+ def handle_repository_contents( self, tool_shed_repository, repository_clone_url, relative_install_dir, repository_elem,
+ install_dependencies, is_repository_dependency=False ):
"""
- Generate the metadata for the installed tool shed repository, among other things. If the installed tool_shed_repository contains tools
- that are loaded into the Galaxy tool panel, this method will automatically eliminate all entries for each of the tools defined in the
- received repository_elem from all non-shed-related tool panel configuration files since the entries are automatically added to the reserved
- migrated_tools_conf.xml file as part of the migration process.
+ Generate the metadata for the installed tool shed repository, among other things. If the installed tool_shed_repository
+ contains tools that are loaded into the Galaxy tool panel, this method will automatically eliminate all entries for each
+ of the tools defined in the received repository_elem from all non-shed-related tool panel configuration files since the
+ entries are automatically added to the reserved migrated_tools_conf.xml file as part of the migration process.
"""
tool_configs_to_filter = []
tool_panel_dict_for_display = odict()
@@ -351,14 +352,16 @@
# See if tool_config is defined inside of a section in self.proprietary_tool_panel_elems.
is_displayed, tool_sections = self.get_containing_tool_sections( tool_config )
if is_displayed:
- tool_panel_dict_for_tool_config = tool_util.generate_tool_panel_dict_for_tool_config( guid, tool_config, tool_sections=tool_sections )
+ tool_panel_dict_for_tool_config = \
+ tool_util.generate_tool_panel_dict_for_tool_config( guid, tool_config, tool_sections=tool_sections )
# The tool-panel_dict has the following structure.
- # {<Tool guid> : [{ tool_config : <tool_config_file>, id: <ToolSection id>, version : <ToolSection version>, name : <TooSection name>}]}
+ # {<Tool guid> : [{ tool_config : <tool_config_file>, id: <ToolSection id>, version : <ToolSection version>,
+ # name : <ToolSection name>}]}
for k, v in tool_panel_dict_for_tool_config.items():
tool_panel_dict_for_display[ k ] = v
for tool_panel_dict in v:
- # Keep track of tool config file names associated with entries that have been made to the migrated_tools_conf.xml file so
- # they can be eliminated from all non-shed-related tool panel configs.
+ # Keep track of tool config file names associated with entries that have been made to the
+ # migrated_tools_conf.xml file so they can be eliminated from all non-shed-related tool panel configs.
tool_config_file = tool_panel_dict.get( 'tool_config', None )
if tool_config_file:
if tool_config_file not in tool_configs_to_filter:
@@ -375,23 +378,28 @@
log.exception( "Exception attempting to filter and persist non-shed-related tool panel configs:\n%s" % str( e ) )
finally:
lock.release()
- metadata_dict, invalid_file_tups = metadata_util.generate_metadata_for_changeset_revision( app=self.app,
- repository=tool_shed_repository,
- changeset_revision=tool_shed_repository.changeset_revision,
- repository_clone_url=repository_clone_url,
- shed_config_dict = self.shed_config_dict,
- relative_install_dir=relative_install_dir,
- repository_files_dir=None,
- resetting_all_metadata_on_repository=False,
- updating_installed_repository=False,
- persist=True )
+ metadata_dict, invalid_file_tups = \
+ metadata_util.generate_metadata_for_changeset_revision( app=self.app,
+ repository=tool_shed_repository,
+ changeset_revision=tool_shed_repository.changeset_revision,
+ repository_clone_url=repository_clone_url,
+ shed_config_dict = self.shed_config_dict,
+ relative_install_dir=relative_install_dir,
+ repository_files_dir=None,
+ resetting_all_metadata_on_repository=False,
+ updating_installed_repository=False,
+ persist=True )
tool_shed_repository.metadata = metadata_dict
self.app.install_model.context.add( tool_shed_repository )
self.app.install_model.context.flush()
has_tool_dependencies = self.__has_tool_dependencies( metadata_dict )
if has_tool_dependencies:
- # All tool_dependency objects must be created before the tools are processed even if no tool dependencies will be installed.
- tool_dependencies = tool_dependency_util.create_tool_dependency_objects( self.app, tool_shed_repository, relative_install_dir, set_status=True )
+ # All tool_dependency objects must be created before the tools are processed even if no
+ # tool dependencies will be installed.
+ tool_dependencies = tool_dependency_util.create_tool_dependency_objects( self.app,
+ tool_shed_repository,
+ relative_install_dir,
+ set_status=True )
else:
tool_dependencies = None
if 'tools' in metadata_dict:
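For reference, the tool_panel_dict_for_tool_config structure described in the comment in the hunk above ({<Tool guid> : [{ tool_config, id, version, name }]}) might look roughly like the following; every value here is hypothetical and is only meant to make the shape concrete.

# Hypothetical example of the per-tool panel dict keyed by tool guid.
tool_panel_dict_for_tool_config = {
    'toolshed.g2.bx.psu.edu/repos/devteam/example_repo/example_tool/1.0.0': [
        {
            'tool_config': 'example_tool.xml',  # tool config file within the repository
            'id': 'getext',                     # ToolSection id in the tool panel
            'version': '',                      # ToolSection version (usually empty)
            'name': 'Get Data',                 # ToolSection name shown in the UI
        },
    ],
}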
diff -r 10b7c04f919ff6d7fc1d03ba88ebb282645fcbba -r c4a97053aca397ca783079a1e05c0cd2f921fb88 lib/tool_shed/galaxy_install/repository_util.py
--- a/lib/tool_shed/galaxy_install/repository_util.py
+++ b/lib/tool_shed/galaxy_install/repository_util.py
@@ -339,23 +339,24 @@
changeset_revision_dict[ 'ctx_rev' ] = None
return changeset_revision_dict
-def handle_repository_contents( trans, tool_shed_repository, tool_path, repository_clone_url, relative_install_dir, tool_shed=None, tool_section=None, shed_tool_conf=None,
- reinstalling=False ):
+def handle_repository_contents( trans, tool_shed_repository, tool_path, repository_clone_url, relative_install_dir,
+ tool_shed=None, tool_section=None, shed_tool_conf=None, reinstalling=False ):
"""
- Generate the metadata for the installed tool shed repository, among other things. This method is called from Galaxy (never the tool shed)
- when an administrator is installing a new repository or reinstalling an uninstalled repository.
+ Generate the metadata for the installed tool shed repository, among other things. This method is called from Galaxy
+ (never the tool shed) when an administrator is installing a new repository or reinstalling an uninstalled repository.
"""
shed_config_dict = trans.app.toolbox.get_shed_config_dict_by_filename( shed_tool_conf )
- metadata_dict, invalid_file_tups = metadata_util.generate_metadata_for_changeset_revision( app=trans.app,
- repository=tool_shed_repository,
- changeset_revision=tool_shed_repository.changeset_revision,
- repository_clone_url=repository_clone_url,
- shed_config_dict=shed_config_dict,
- relative_install_dir=relative_install_dir,
- repository_files_dir=None,
- resetting_all_metadata_on_repository=False,
- updating_installed_repository=False,
- persist=True )
+ metadata_dict, invalid_file_tups = \
+ metadata_util.generate_metadata_for_changeset_revision( app=trans.app,
+ repository=tool_shed_repository,
+ changeset_revision=tool_shed_repository.changeset_revision,
+ repository_clone_url=repository_clone_url,
+ shed_config_dict=shed_config_dict,
+ relative_install_dir=relative_install_dir,
+ repository_files_dir=None,
+ resetting_all_metadata_on_repository=False,
+ updating_installed_repository=False,
+ persist=True )
tool_shed_repository.metadata = metadata_dict
# Update the tool_shed_repository.tool_shed_status column in the database.
tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( trans.app, tool_shed_repository )
@@ -364,13 +365,20 @@
trans.install_model.context.add( tool_shed_repository )
trans.install_model.context.flush()
if 'tool_dependencies' in metadata_dict and not reinstalling:
- tool_dependencies = tool_dependency_util.create_tool_dependency_objects( trans.app, tool_shed_repository, relative_install_dir, set_status=True )
+ tool_dependencies = tool_dependency_util.create_tool_dependency_objects( trans.app,
+ tool_shed_repository,
+ relative_install_dir,
+ set_status=True )
if 'sample_files' in metadata_dict:
sample_files = metadata_dict.get( 'sample_files', [] )
tool_index_sample_files = tool_util.get_tool_index_sample_files( sample_files )
- tool_data_table_conf_filename, tool_data_table_elems = tool_util.install_tool_data_tables( trans.app, tool_shed_repository, tool_index_sample_files )
+ tool_data_table_conf_filename, tool_data_table_elems = \
+ tool_util.install_tool_data_tables( trans.app, tool_shed_repository, tool_index_sample_files )
if tool_data_table_elems:
- trans.app.tool_data_tables.add_new_entries_from_config_file( tool_data_table_conf_filename, None, trans.app.config.shed_tool_data_table_config, persist=True )
+ trans.app.tool_data_tables.add_new_entries_from_config_file( tool_data_table_conf_filename,
+ None,
+ trans.app.config.shed_tool_data_table_config,
+ persist=True )
if 'tools' in metadata_dict:
tool_panel_dict = tool_util.generate_tool_panel_dict_for_new_install( metadata_dict[ 'tools' ], tool_section )
sample_files = metadata_dict.get( 'sample_files', [] )
@@ -380,10 +388,19 @@
repository_tools_tups = suc.get_repository_tools_tups( trans.app, metadata_dict )
if repository_tools_tups:
# Handle missing data table entries for tool parameters that are dynamically generated select lists.
- repository_tools_tups = tool_util.handle_missing_data_table_entry( trans.app, relative_install_dir, tool_path, repository_tools_tups )
+ repository_tools_tups = tool_util.handle_missing_data_table_entry( trans.app,
+ relative_install_dir,
+ tool_path,
+ repository_tools_tups )
# Handle missing index files for tool parameters that are dynamically generated select lists.
- repository_tools_tups, sample_files_copied = tool_util.handle_missing_index_file( trans.app, tool_path, sample_files, repository_tools_tups, sample_files_copied )
- # Copy remaining sample files included in the repository to the ~/tool-data directory of the local Galaxy instance.
+ repository_tools_tups, sample_files_copied = \
+ tool_util.handle_missing_index_file( trans.app,
+ tool_path,
+ sample_files,
+ repository_tools_tups,
+ sample_files_copied )
+ # Copy remaining sample files included in the repository to the ~/tool-data directory of the
+ # local Galaxy instance.
tool_util.copy_sample_files( trans.app, sample_files, tool_path=tool_path, sample_files_copied=sample_files_copied )
tool_util.add_to_tool_panel( app=trans.app,
repository_name=tool_shed_repository.name,
@@ -413,16 +430,18 @@
files_dir = os.path.join( shed_config_dict[ 'tool_path' ], files_dir )
datatypes_config = suc.get_config_from_disk( suc.DATATYPES_CONFIG_FILENAME, files_dir )
# Load data types required by tools.
- converter_path, display_path = datatype_util.alter_config_and_load_prorietary_datatypes( trans.app, datatypes_config, files_dir, override=False )
+ converter_path, display_path = \
+ datatype_util.alter_config_and_load_prorietary_datatypes( trans.app, datatypes_config, files_dir, override=False )
if converter_path or display_path:
# Create a dictionary of tool shed repository related information.
- repository_dict = datatype_util.create_repository_dict_for_proprietary_datatypes( tool_shed=tool_shed,
- name=tool_shed_repository.name,
- owner=tool_shed_repository.owner,
- installed_changeset_revision=tool_shed_repository.installed_changeset_revision,
- tool_dicts=metadata_dict.get( 'tools', [] ),
- converter_path=converter_path,
- display_path=display_path )
+ repository_dict = \
+ datatype_util.create_repository_dict_for_proprietary_datatypes( tool_shed=tool_shed,
+ name=tool_shed_repository.name,
+ owner=tool_shed_repository.owner,
+ installed_changeset_revision=tool_shed_repository.installed_changeset_revision,
+ tool_dicts=metadata_dict.get( 'tools', [] ),
+ converter_path=converter_path,
+ display_path=display_path )
if converter_path:
# Load proprietary datatype converters
trans.app.datatypes_registry.load_datatype_converters( trans.app.toolbox, installed_repository_dict=repository_dict )
diff -r 10b7c04f919ff6d7fc1d03ba88ebb282645fcbba -r c4a97053aca397ca783079a1e05c0cd2f921fb88 lib/tool_shed/util/metadata_util.py
--- a/lib/tool_shed/util/metadata_util.py
+++ b/lib/tool_shed/util/metadata_util.py
@@ -568,23 +568,25 @@
tmp_url = suc.clean_repository_clone_url( repository_clone_url )
return '%s/%s/%s/%s' % ( tmp_url, guid_type, obj_id, version )
-def generate_metadata_for_changeset_revision( app, repository, changeset_revision, repository_clone_url, shed_config_dict=None, relative_install_dir=None,
- repository_files_dir=None, resetting_all_metadata_on_repository=False, updating_installed_repository=False,
+def generate_metadata_for_changeset_revision( app, repository, changeset_revision, repository_clone_url,
+ shed_config_dict=None, relative_install_dir=None, repository_files_dir=None,
+ resetting_all_metadata_on_repository=False, updating_installed_repository=False,
persist=False ):
"""
- Generate metadata for a repository using it's files on disk. To generate metadata for changeset revisions older than the repository tip,
- the repository will have been cloned to a temporary location and updated to a specified changeset revision to access that changeset revision's
- disk files, so the value of repository_files_dir will not always be repository.repo_path( app ) (it could be an absolute path to a temporary
- directory containing a clone). If it is an absolute path, the value of relative_install_dir must contain repository.repo_path( app ).
+ Generate metadata for a repository using its files on disk. To generate metadata for changeset revisions older than
+ the repository tip, the repository will have been cloned to a temporary location and updated to a specified changeset
+ revision to access that changeset revision's disk files, so the value of repository_files_dir will not always be
+ repository.repo_path( app ) (it could be an absolute path to a temporary directory containing a clone). If it is an
+ absolute path, the value of relative_install_dir must contain repository.repo_path( app ).
- The value of persist will be True when the installed repository contains a valid tool_data_table_conf.xml.sample file, in which case the entries
- should ultimately be persisted to the file referred to by app.config.shed_tool_data_table_config.
+ The value of persist will be True when the installed repository contains a valid tool_data_table_conf.xml.sample file,
+ in which case the entries should ultimately be persisted to the file referred to by app.config.shed_tool_data_table_config.
"""
if shed_config_dict is None:
shed_config_dict = {}
if updating_installed_repository:
- # Keep the original tool shed repository metadata if setting metadata on a repository installed into a local Galaxy instance for which
- # we have pulled updates.
+ # Keep the original tool shed repository metadata if setting metadata on a repository installed into a local Galaxy
+ # instance for which we have pulled updates.
original_repository_metadata = repository.metadata
else:
original_repository_metadata = None
@@ -603,11 +605,12 @@
if resetting_all_metadata_on_repository:
if not relative_install_dir:
raise Exception( "The value of repository.repo_path( app ) must be sent when resetting all metadata on a repository." )
- # Keep track of the location where the repository is temporarily cloned so that we can strip the path when setting metadata. The value of
- # repository_files_dir is the full path to the temporary directory to which the repository was cloned.
+ # Keep track of the location where the repository is temporarily cloned so that we can strip the path when setting metadata.
+ # The value of repository_files_dir is the full path to the temporary directory to which the repository was cloned.
work_dir = repository_files_dir
files_dir = repository_files_dir
- # Since we're working from a temporary directory, we can safely copy sample files included in the repository to the repository root.
+ # Since we're working from a temporary directory, we can safely copy sample files included in the repository to the repository
+ # root.
app.config.tool_data_path = repository_files_dir
app.config.tool_data_table_config_path = repository_files_dir
else:
@@ -624,10 +627,11 @@
if datatypes_config:
metadata_dict = generate_datatypes_metadata( app, repository, repository_clone_url, files_dir, datatypes_config, metadata_dict )
# Get the relative path to all sample files included in the repository for storage in the repository's metadata.
- sample_file_metadata_paths, sample_file_copy_paths = get_sample_files_from_disk( repository_files_dir=files_dir,
- tool_path=shed_config_dict.get( 'tool_path' ),
- relative_install_dir=relative_install_dir,
- resetting_all_metadata_on_repository=resetting_all_metadata_on_repository )
+ sample_file_metadata_paths, sample_file_copy_paths = \
+ get_sample_files_from_disk( repository_files_dir=files_dir,
+ tool_path=shed_config_dict.get( 'tool_path' ),
+ relative_install_dir=relative_install_dir,
+ resetting_all_metadata_on_repository=resetting_all_metadata_on_repository )
if sample_file_metadata_paths:
metadata_dict[ 'sample_files' ] = sample_file_metadata_paths
# Copy all sample files included in the repository to a single directory location so we can load tools that depend on them.
@@ -636,10 +640,11 @@
            # If the list of sample files includes a tool_data_table_conf.xml.sample file, load its table elements into memory.
relative_path, filename = os.path.split( sample_file )
if filename == 'tool_data_table_conf.xml.sample':
- new_table_elems, error_message = app.tool_data_tables.add_new_entries_from_config_file( config_filename=sample_file,
- tool_data_path=app.config.tool_data_path,
- shed_tool_data_table_config=app.config.shed_tool_data_table_config,
- persist=False )
+ new_table_elems, error_message = \
+ app.tool_data_tables.add_new_entries_from_config_file( config_filename=sample_file,
+ tool_data_path=app.config.tool_data_path,
+ shed_tool_data_table_config=app.config.shed_tool_data_table_config,
+ persist=False )
if error_message:
invalid_file_tups.append( ( filename, error_message ) )
for root, dirs, files in os.walk( files_dir ):
@@ -650,7 +655,9 @@
# See if we have a repository dependencies defined.
if name == suc.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME:
path_to_repository_dependencies_config = os.path.join( root, name )
- metadata_dict, error_message = generate_repository_dependency_metadata( app, path_to_repository_dependencies_config, metadata_dict )
+ metadata_dict, error_message = generate_repository_dependency_metadata( app,
+ path_to_repository_dependencies_config,
+ metadata_dict )
if error_message:
invalid_file_tups.append( ( name, error_message ) )
# See if we have one or more READ_ME files.
@@ -666,8 +673,11 @@
elif name not in NOT_TOOL_CONFIGS and name.endswith( '.xml' ):
full_path = str( os.path.abspath( os.path.join( root, name ) ) )
if os.path.getsize( full_path ) > 0:
- if not ( checkers.check_binary( full_path ) or checkers.check_image( full_path ) or checkers.check_gzip( full_path )[ 0 ]
- or checkers.check_bz2( full_path )[ 0 ] or checkers.check_zip( full_path ) ):
+ if not ( checkers.check_binary( full_path ) or
+ checkers.check_image( full_path ) or
+ checkers.check_gzip( full_path )[ 0 ] or
+ checkers.check_bz2( full_path )[ 0 ] or
+ checkers.check_zip( full_path ) ):
# Make sure we're looking at a tool config and not a display application config or something else.
element_tree, error_message = xml_util.parse_xml( full_path )
if element_tree is None:
@@ -676,13 +686,15 @@
element_tree_root = element_tree.getroot()
is_tool = element_tree_root.tag == 'tool'
if is_tool:
- tool, valid, error_message = tool_util.load_tool_from_config( app, app.security.encode_id( repository.id ), full_path )
+ tool, valid, error_message = \
+ tool_util.load_tool_from_config( app, app.security.encode_id( repository.id ), full_path )
if tool is None:
if not valid:
invalid_tool_configs.append( name )
invalid_file_tups.append( ( name, error_message ) )
else:
- invalid_files_and_errors_tups = tool_util.check_tool_input_params( app, files_dir, name, tool, sample_file_copy_paths )
+ invalid_files_and_errors_tups = \
+ tool_util.check_tool_input_params( app, files_dir, name, tool, sample_file_copy_paths )
can_set_metadata = True
for tup in invalid_files_and_errors_tups:
if name in tup:
@@ -690,15 +702,17 @@
invalid_tool_configs.append( name )
break
if can_set_metadata:
- relative_path_to_tool_config = get_relative_path_to_repository_file( root,
- name,
- relative_install_dir,
- work_dir,
- shed_config_dict,
- resetting_all_metadata_on_repository )
-
-
- metadata_dict = generate_tool_metadata( relative_path_to_tool_config, tool, repository_clone_url, metadata_dict )
+ relative_path_to_tool_config = \
+ get_relative_path_to_repository_file( root,
+ name,
+ relative_install_dir,
+ work_dir,
+ shed_config_dict,
+ resetting_all_metadata_on_repository )
+ metadata_dict = generate_tool_metadata( relative_path_to_tool_config,
+ tool,
+ repository_clone_url,
+ metadata_dict )
else:
for tup in invalid_files_and_errors_tups:
invalid_file_tups.append( tup )
@@ -752,8 +766,9 @@
def generate_package_dependency_metadata( app, elem, valid_tool_dependencies_dict, invalid_tool_dependencies_dict ):
"""
- Generate the metadata for a tool dependencies package defined for a repository. The value of package_name must match the value of the "package"
- type in the tool config's <requirements> tag set. This method is called from both Galaxy and the tool shed.
+ Generate the metadata for a tool dependencies package defined for a repository. The value of package_name must
+ match the value of the "package" type in the tool config's <requirements> tag set. This method is called from
+ both Galaxy and the tool shed.
"""
repository_dependency_is_valid = True
repository_dependency_tup = []
@@ -833,22 +848,23 @@
valid_repository_dependencies_dict = dict( description=root.get( 'description' ) )
valid_repository_dependency_tups = []
for repository_elem in root.findall( 'repository' ):
- repository_dependency_tup, repository_dependency_is_valid, error_message = handle_repository_elem( app,
- repository_elem,
- only_if_compiling_contained_td=False )
+ repository_dependency_tup, repository_dependency_is_valid, err_msg = \
+ handle_repository_elem( app, repository_elem, only_if_compiling_contained_td=False )
if repository_dependency_is_valid:
valid_repository_dependency_tups.append( repository_dependency_tup )
else:
# Append the error_message to the repository dependencies tuple.
- toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = repository_dependency_tup
+ toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
+ repository_dependency_tup
repository_dependency_tup = ( toolshed,
name,
owner,
changeset_revision,
prior_installation_required,
only_if_compiling_contained_td,
- error_message )
+ err_msg )
invalid_repository_dependency_tups.append( repository_dependency_tup )
+ error_message += err_msg
if invalid_repository_dependency_tups:
invalid_repository_dependencies_dict[ 'repository_dependencies' ] = invalid_repository_dependency_tups
metadata_dict[ 'invalid_repository_dependencies' ] = invalid_repository_dependencies_dict
@@ -1142,50 +1158,72 @@
def handle_repository_elem( app, repository_elem, only_if_compiling_contained_td=False ):
"""
- Process the received repository_elem which is a <repository> tag either from a repository_dependencies.xml file or a tool_dependencies.xml file.
- If the former, we're generating repository dependencies metadata for a repository in the tool shed. If the latter, we're generating package
- dependency metadata within Galaxy or the tool shed.
+ Process the received repository_elem which is a <repository> tag either from a repository_dependencies.xml
+ file or a tool_dependencies.xml file. If the former, we're generating repository dependencies metadata for
+ a repository in the tool shed. If the latter, we're generating package dependency metadata within Galaxy or
+ the tool shed.
"""
sa_session = app.model.context.current
is_valid = True
error_message = ''
- toolshed = repository_elem.get( 'toolshed' )
+ toolshed = repository_elem.get( 'toolshed', None )
+ name = repository_elem.get( 'name', None )
+ owner = repository_elem.get( 'owner', None )
+ changeset_revision = repository_elem.get( 'changeset_revision', None )
+ prior_installation_required = str( repository_elem.get( 'prior_installation_required', False ) )
+ if app.name == 'galaxy':
+ # We're installing a repository into Galaxy, so make sure its contained repository dependency definition
+ # is valid.
+ if toolshed is None or name is None or owner is None or changeset_revision is None:
+ # Raise an exception here instead of returning an error_message to keep the installation from
+ # proceeding. Reaching here implies a bug in the Tool Shed framework.
+ error_message = 'Installation halted because the following repository dependency definition is invalid:\n'
+ error_message += xml_util.xml_to_string( repository_elem, use_indent=True )
+ raise Exception( error_message )
if not toolshed:
# Default to the current tool shed.
toolshed = str( url_for( '/', qualified=True ) ).rstrip( '/' )
+ repository_dependency_tup = [ toolshed,
+ name,
+ owner,
+ changeset_revision,
+ prior_installation_required,
+ str( only_if_compiling_contained_td ) ]
cleaned_toolshed = td_common_util.clean_tool_shed_url( toolshed )
- name = repository_elem.get( 'name' )
- owner = repository_elem.get( 'owner' )
- changeset_revision = repository_elem.get( 'changeset_revision' )
- prior_installation_required = str( repository_elem.get( 'prior_installation_required', False ) )
- repository_dependency_tup = [ toolshed, name, owner, changeset_revision, prior_installation_required, str( only_if_compiling_contained_td ) ]
user = None
repository = None
if app.name == 'galaxy':
- # We're in Galaxy. We reach here when we're generating the metadata for a tool dependencies package defined for a repository or when we're
- # generating metadata for an installed repository. See if we can locate the installed repository via the changeset_revision defined in the
- # repository_elem (it may be outdated). If we're successful in locating an installed repository with the attributes defined in the
- # repository_elem, we know it is valid.
+ # We're in Galaxy. We reach here when we're generating the metadata for a tool dependencies package defined
+ # for a repository or when we're generating metadata for an installed repository. See if we can locate the
+ # installed repository via the changeset_revision defined in the repository_elem (it may be outdated). If we're
+ # successful in locating an installed repository with the attributes defined in the repository_elem, we know it
+ # is valid.
repository = suc.get_repository_for_dependency_relationship( app, cleaned_toolshed, name, owner, changeset_revision )
if repository:
return repository_dependency_tup, is_valid, error_message
else:
- # Send a request to the tool shed to retrieve appropriate additional changeset revisions with which the repository may have been installed.
+ # Send a request to the tool shed to retrieve appropriate additional changeset revisions with which the repository
+ # may have been installed.
text = install_util.get_updated_changeset_revisions_from_tool_shed( app, toolshed, name, owner, changeset_revision )
if text:
updated_changeset_revisions = util.listify( text )
for updated_changeset_revision in updated_changeset_revisions:
- repository = suc.get_repository_for_dependency_relationship( app, cleaned_toolshed, name, owner, updated_changeset_revision )
+ repository = suc.get_repository_for_dependency_relationship( app,
+ cleaned_toolshed,
+ name,
+ owner,
+ updated_changeset_revision )
if repository:
return repository_dependency_tup, is_valid, error_message
- # Don't generate an error message for missing repository dependencies that are required only if compiling the dependent repository's
- # tool dependency.
+ # Don't generate an error message for missing repository dependencies that are required only if compiling the
+ # dependent repository's tool dependency.
if not only_if_compiling_contained_td:
- # We'll currently default to setting the repository dependency definition as invalid if an installed repository cannot be found.
- # This may not be ideal because the tool shed may have simply been inaccessible when metadata was being generated for the installed
- # tool shed repository.
- error_message = "Ignoring invalid repository dependency definition for tool shed %s, name %s, owner %s, changeset revision %s "% \
- ( toolshed, name, owner, changeset_revision )
+ # We'll currently default to setting the repository dependency definition as invalid if an installed repository
+ # cannot be found. This may not be ideal because the tool shed may have simply been inaccessible when metadata
+ # was being generated for the installed tool shed repository.
+ error_message = "Ignoring invalid repository dependency definition for tool shed %s, name %s, owner %s, " % \
+ ( toolshed, name, owner )
+ error_message += "changeset revision %s." % changeset_revision
log.debug( error_message )
is_valid = False
return repository_dependency_tup, is_valid, error_message
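[Editor's note] A minimal, hypothetical sketch of the Galaxy-side check described in the comments near the top of this hunk, not the Tool Shed code itself: when installing into Galaxy, a <repository> tag missing any of the four required attributes halts the installation by raising an exception instead of merely logging an error.

import xml.etree.ElementTree as ET

# Hypothetical definition missing the 'toolshed' and 'changeset_revision' attributes.
elem = ET.fromstring('<repository name="package_zlib_1_2_8" owner="iuc"/>')

required = ('toolshed', 'name', 'owner', 'changeset_revision')
missing = [attr for attr in required if elem.get(attr) is None]
if missing:
    raise Exception('Installation halted because the repository dependency definition '
                    'is missing attributes: %s' % ', '.join(missing))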
@@ -1197,9 +1235,9 @@
.filter( app.model.User.table.c.username == owner ) \
.one()
except Exception, e:
- error_message = "Ignoring repository dependency definition for tool shed %s, name %s, owner %s, changeset revision %s "% \
- ( toolshed, name, owner, changeset_revision )
- error_message += "because the owner is invalid. "
+ error_message = "Ignoring repository dependency definition for tool shed %s, name %s, owner %s, " % \
+ ( toolshed, name, owner )
+ error_message += "changeset revision %s because the owner is invalid. " % changeset_revision
log.debug( error_message )
is_valid = False
return repository_dependency_tup, is_valid, error_message
@@ -1209,21 +1247,26 @@
app.model.Repository.table.c.user_id == user.id ) ) \
.one()
except:
- error_message = "Ignoring repository dependency definition for tool shed %s, name %s, owner %s, changeset revision %s "% \
- ( toolshed, name, owner, changeset_revision )
- error_message += "because the name is invalid. "
+ error_message = "Ignoring repository dependency definition for tool shed %s, name %s, owner %s, " % \
+ ( toolshed, name, owner )
+ error_message += "changeset revision %s because the name is invalid. " % changeset_revision
log.debug( error_message )
is_valid = False
return repository_dependency_tup, is_valid, error_message
repo = hg.repository( suc.get_configured_ui(), repository.repo_path( app ) )
- # The received changeset_revision may be None since defining it in the dependency definition is optional. If this is the case,
- # the default will be to set it's value to the repository dependency tip revision. This probably occurs only when handling
- # circular dependency definitions.
+ # The received changeset_revision may be None since defining it in the dependency definition is optional.
+    # If this is the case, the default will be to set its value to the repository dependency tip revision.
+ # This probably occurs only when handling circular dependency definitions.
tip_ctx = repo.changectx( repo.changelog.tip() )
# Make sure the repo.changelog includes at least 1 revision.
if changeset_revision is None and tip_ctx.rev() >= 0:
changeset_revision = str( tip_ctx )
- repository_dependency_tup = [ toolshed, name, owner, changeset_revision, prior_installation_required, str( only_if_compiling_contained_td ) ]
+ repository_dependency_tup = [ toolshed,
+ name,
+ owner,
+ changeset_revision,
+ prior_installation_required,
+ str( only_if_compiling_contained_td ) ]
return repository_dependency_tup, is_valid, error_message
else:
# Find the specified changeset revision in the repository's changelog to see if it's valid.
@@ -1234,16 +1277,17 @@
found = True
break
if not found:
- error_message = "Ignoring repository dependency definition for tool shed %s, name %s, owner %s, changeset revision %s "% \
- ( toolshed, name, owner, changeset_revision )
- error_message += "because the changeset revision is invalid. "
+ error_message = "Ignoring repository dependency definition for tool shed %s, name %s, owner %s, " % \
+ ( toolshed, name, owner )
+ error_message += "changeset revision %s because the changeset revision is invalid. " % changeset_revision
log.debug( error_message )
is_valid = False
return repository_dependency_tup, is_valid, error_message
else:
# Repository dependencies are currently supported within a single tool shed.
- error_message = "Repository dependencies are currently supported only within the same tool shed. Ignoring repository dependency definition "
- error_message += "for tool shed %s, name %s, owner %s, changeset revision %s. " % ( toolshed, name, owner, changeset_revision )
+ error_message = "Repository dependencies are currently supported only within the same tool shed. Ignoring "
+ error_message += "repository dependency definition for tool shed %s, name %s, owner %s, changeset revision %s. " % \
+ ( toolshed, name, owner, changeset_revision )
log.debug( error_message )
is_valid = False
return repository_dependency_tup, is_valid, error_message
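[Editor's note] As a plain illustration of the two changelog rules in the hunks above, here is a sketch with an ordinary list standing in for the mercurial changelog; the revision hashes are invented. A missing changeset_revision defaults to the dependency repository's tip, and an explicitly supplied revision is valid only if it appears in the changelog.

# Hypothetical revision hashes; the real code walks repo.changelog via mercurial.
changelog = ['0123abcd', '4567ef01', '89ab2345']

def resolve_changeset_revision(changeset_revision, changelog):
    if changeset_revision is None:
        # Defining the revision is optional; default to the tip (the last revision), if any.
        return (changelog[-1], True) if changelog else (None, False)
    # An explicitly supplied revision is valid only if it appears in the changelog.
    return changeset_revision, changeset_revision in changelog

print(resolve_changeset_revision(None, changelog))        # -> ('89ab2345', True)
print(resolve_changeset_revision('deadbeef', changelog))  # -> ('deadbeef', False)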
@@ -1811,8 +1855,8 @@
def set_repository_metadata( trans, repository, content_alert_str='', **kwd ):
"""
- Set metadata using the repository's current disk files, returning specific error messages (if any) to alert the repository owner that the changeset
- has problems.
+ Set metadata using the repository's current disk files, returning specific error messages (if any) to alert the
+ repository owner that the changeset has problems.
"""
message = ''
status = 'done'
diff -r 10b7c04f919ff6d7fc1d03ba88ebb282645fcbba -r c4a97053aca397ca783079a1e05c0cd2f921fb88 lib/tool_shed/util/shed_util_common.py
--- a/lib/tool_shed/util/shed_util_common.py
+++ b/lib/tool_shed/util/shed_util_common.py
@@ -945,7 +945,10 @@
return has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td
def get_repository_for_dependency_relationship( app, tool_shed, name, owner, changeset_revision ):
- """Return an installed tool_shed_repository database record that is defined by either the current changeset revision or the installed_changeset_revision."""
+ """
+ Return an installed tool_shed_repository database record that is defined by either the current changeset
+ revision or the installed_changeset_revision.
+ """
# This method is used only in Galaxy, not the tool shed.
if tool_shed.endswith( '/' ):
tool_shed = tool_shed.rstrip( '/' )
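[Editor's note] A minimal sketch of the lookup described in the docstring above, with plain dicts standing in for installed tool_shed_repository records; all values are invented and the real function queries the Galaxy database rather than scanning a list. A record matches when either its current changeset_revision or its installed_changeset_revision equals the requested revision.

installed_repositories = [
    {'tool_shed': 'toolshed.g2.bx.psu.edu',
     'name': 'package_samtools_0_1_18',
     'owner': 'iuc',
     'changeset_revision': 'bbbb2222',
     'installed_changeset_revision': 'aaaa1111'},
]

def find_installed_repository(tool_shed, name, owner, changeset_revision):
    # The real code strips a trailing '/' and issues a database query; this just scans dicts.
    tool_shed = tool_shed.rstrip('/')
    for record in installed_repositories:
        if (record['tool_shed'] == tool_shed and record['name'] == name
                and record['owner'] == owner
                and changeset_revision in (record['changeset_revision'],
                                           record['installed_changeset_revision'])):
            return record
    return None

print(find_installed_repository('toolshed.g2.bx.psu.edu/',
                                'package_samtools_0_1_18', 'iuc', 'aaaa1111'))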
@@ -1330,8 +1333,8 @@
def get_updated_changeset_revisions( trans, name, owner, changeset_revision ):
"""
- Return a string of comma-separated changeset revision hashes for all available updates to the received changeset revision for the repository
- defined by the received name and owner.
+ Return a string of comma-separated changeset revision hashes for all available updates to the received changeset
+ revision for the repository defined by the received name and owner.
"""
repository = get_repository_by_name_and_owner( trans.app, name, owner )
repo_dir = repository.repo_path( trans.app )
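[Editor's note] On the Galaxy side the comma-separated string returned by this method is split into individual revisions so the lookup can be retried, roughly as in this sketch; the hashes are hypothetical and util.listify performs a similar split.

# Hypothetical response text from the tool shed.
text = '4567ef01,89ab2345,cdef6789'
updated_changeset_revisions = [rev.strip() for rev in text.split(',') if rev.strip()]
print(updated_changeset_revisions)   # -> ['4567ef01', '89ab2345', 'cdef6789']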
@@ -1351,8 +1354,8 @@
def get_url_from_tool_shed( app, tool_shed ):
"""
- The value of tool_shed is something like: toolshed.g2.bx.psu.edu. We need the URL to this tool shed, which is something like:
- http://toolshed.g2.bx.psu.edu/
+ The value of tool_shed is something like: toolshed.g2.bx.psu.edu. We need the URL to this tool shed, which is
+ something like: http://toolshed.g2.bx.psu.edu/
"""
for shed_name, shed_url in app.tool_shed_registry.tool_sheds.items():
if shed_url.find( tool_shed ) >= 0:
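[Editor's note] A minimal sketch of the registry lookup described above, with a hypothetical tool_sheds mapping in place of app.tool_shed_registry.tool_sheds: the configured URL whose value contains the bare hostname is returned.

# Hypothetical registry contents; the real lookup walks app.tool_shed_registry.tool_sheds.
tool_sheds = {
    'main tool shed': 'http://toolshed.g2.bx.psu.edu/',
    'local shed': 'http://localhost:9009/',
}

def url_for_tool_shed(tool_shed):
    for shed_name, shed_url in tool_sheds.items():
        if shed_url.find(tool_shed) >= 0:
            return shed_url
    return None

print(url_for_tool_shed('toolshed.g2.bx.psu.edu'))   # -> 'http://toolshed.g2.bx.psu.edu/'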
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: martenson: better handling the JS modal window event binding
by commits-noreply@bitbucket.org 03 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/10b7c04f919f/
Changeset: 10b7c04f919f
User: martenson
Date: 2014-01-03 19:40:08
Summary: better handling the JS modal window event binding
Affected #: 4 files
diff -r e92e13e9c103cc1f36dff65e1523479bf5cb17ed -r 10b7c04f919ff6d7fc1d03ba88ebb282645fcbba static/scripts/galaxy.library.js
--- a/static/scripts/galaxy.library.js
+++ b/static/scripts/galaxy.library.js
@@ -569,6 +569,7 @@
'Close' : function() {self.modal.hide(); $('.modal').remove(); self.modal = null;}
}
});
+ self.modal.bindEvents(event);
// show the prepared modal
self.modal.show();
}
@@ -577,7 +578,7 @@
// import all selected datasets into history
importAllIntoHistory : function (){
- //disable the button
+ //disable the button to prevent multiple submissions
this.modal.disableButton('Import');
var history_id = $("select[name=dataset_import_bulk] option:selected").val();
@@ -617,9 +618,9 @@
var popped_item = history_item_set.pop();
if (typeof popped_item === "undefined") {
mod_toastr.success('All datasets imported');
- this.modal.hide();
- // enable button again
- self.modal.enableButton('Import');
+
+ //this will destroy other modals too - including those hidden!!!
+ this.modal.destroy();
return
}
var promise = $.when(popped_item.save({content: popped_item.content, source: popped_item.source})).done(function(a1){
diff -r e92e13e9c103cc1f36dff65e1523479bf5cb17ed -r 10b7c04f919ff6d7fc1d03ba88ebb282645fcbba static/scripts/galaxy.modal.js
--- a/static/scripts/galaxy.modal.js
+++ b/static/scripts/galaxy.modal.js
@@ -25,20 +25,18 @@
self = this;
if (options)
this.create(options);
-
- // Bind the hiding events
- // this.bindEvents(event, self);
},
// bind the click-to-hide function
- bindEvents: function(event, that) {
+ bindEvents: function(event) {
// bind the ESC key to hide() function
$(document).on('keyup', function(event){
- if (event.keyCode == 27) { self.hide(); }
+ if (event.keyCode == 27) { self.hide(); $('.modal').remove();}
})
// bind the 'click anywhere' to hide() function...
$('html').on('click', function(event){
self.hide();
+ $('.modal').remove();
})
// ...but don't hide if the click is on modal content
$('.modal-content').on('click', function(event){
@@ -50,11 +48,12 @@
unbindEvents: function(event){
// unbind the ESC key from the hide() function
$(document).off('keyup', function(event){
- if (event.keyCode == 27) { self.hide(); }
+ if (event.keyCode == 27) { self.hide(); $('.modal').remove();}
})
// unbind the 'click anywhere' to hide() function...
$('html').off('click', function(event){
self.hide();
+ $('.modal').remove();
})
$('.modal-content').off('click', function(event){
event.stopPropagation();
diff -r e92e13e9c103cc1f36dff65e1523479bf5cb17ed -r 10b7c04f919ff6d7fc1d03ba88ebb282645fcbba static/scripts/packed/galaxy.library.js
--- a/static/scripts/packed/galaxy.library.js
+++ b/static/scripts/packed/galaxy.library.js
@@ -1,1 +1,1 @@
-var view=null;var library_router=null;var responses=[];define(["galaxy.modal","galaxy.masthead","utils/galaxy.utils","libs/toastr"],function(j,k,g,m){var e=Backbone.Model.extend({urlRoot:"/api/libraries"});var n=Backbone.Collection.extend({url:"/api/libraries",model:e});var h=Backbone.Model.extend({urlRoot:"/api/libraries/datasets"});var c=Backbone.Collection.extend({model:h});var d=Backbone.Model.extend({defaults:{folder:new c(),full_path:"unknown",urlRoot:"/api/folders/",id:"unknown"},parse:function(q){this.full_path=q[0].full_path;this.get("folder").reset(q[1].folder_contents);return q}});var b=Backbone.Model.extend({urlRoot:"/api/histories/"});var i=Backbone.Model.extend({url:"/api/histories/"});var o=Backbone.Collection.extend({url:"/api/histories",model:i});var p=Backbone.Router.extend({routes:{"":"libraries","folders/:id":"folder_content","folders/:folder_id/download/:format":"download"}});var l=Backbone.View.extend({el:"#center",progress:0,progressStep:1,lastSelectedHistory:"",modal:null,folders:null,initialize:function(){this.folders=[];this.queue=jQuery.Deferred();this.queue.resolve()},templateFolder:function(){var q=[];q.push('<div id="library_container" style="width: 90%; margin: auto; margin-top: 2em; ">');q.push('<h3>New Data Libraries. This is work in progress. Report problems & ideas to <a href="mailto:marten@bx.psu.edu?Subject=DataLibraries_Feedback" target="_blank">Marten</a>.</h3>');q.push('<div id="library_folder_toolbar" >');q.push(' <button title="Create New Folder" id="toolbtn_create_folder" class="btn btn-primary" type="button"><span class="fa fa-plus"></span><span class="fa fa-folder-close"></span> folder</button>');q.push(' <button id="toolbtn_bulk_import" class="btn btn-primary" style="display: none; margin-left: 0.5em;" type="button"><span class="fa fa-external-link"></span> to history</button>');q.push(' <div id="toolbtn_dl" class="btn-group" style="margin-left: 0.5em; display: none; ">');q.push(' <button id="drop_toggle" type="button" class="btn btn-primary dropdown-toggle" data-toggle="dropdown">');q.push(' <span class="fa fa-download"></span> download <span class="caret"></span>');q.push(" </button>");q.push(' <ul class="dropdown-menu" role="menu">');q.push(' <li><a href="#/folders/<%= id %>/download/tgz">.tar.gz</a></li>');q.push(' <li><a href="#/folders/<%= id %>/download/tbz">.tar.bz</a></li>');q.push(' <li><a href="#/folders/<%= id %>/download/zip">.zip</a></li>');q.push(" </ul>");q.push(" </div>");q.push("</div>");q.push('<div class="library_breadcrumb">');q.push('<a title="Return to the list of libraries" href="#">Libraries</a><b>|</b> ');q.push("<% _.each(path, function(path_item) { %>");q.push("<% if (path_item[0] != id) { %>");q.push('<a title="Return to this folder" href="#/folders/<%- path_item[0] %>"><%- path_item[1] %></a><b>|</b> ');q.push("<% } else { %>");q.push('<span title="You are in this folder"><%- path_item[1] %></span>');q.push("<% } %>");q.push("<% }); %>");q.push("</div>");q.push('<table id="folder_table" class="table table-condensed">');q.push(" <thead>");q.push(' <th style="text-align: center; width: 20px; "><input id="select-all-checkboxes" style="margin: 0;" type="checkbox"></th>');q.push(' <th class="button_heading">view</th>');q.push(" <th>name</th>");q.push(" <th>data type</th>");q.push(" <th>size</th>");q.push(" <th>date</th>");q.push(" </thead>");q.push(" <tbody>");q.push(" <td></td>");q.push(' <td><button title="Go to parent folder" type="button" data-id="<%- upper_folder_id %>" class="btn_open_folder btn btn-default 
btn-xs">');q.push(' <span class="fa fa-arrow-up"></span> .. go up</td>');q.push(" <td></td>");q.push(" <td></td>");q.push(" <td></td>");q.push(" <td></td>");q.push(" </tr>");q.push(" <% _.each(items, function(content_item) { %>");q.push(' <tr class="folder_row light" id="<%- content_item.id %>">');q.push(' <% if (content_item.get("type") === "folder") { %>');q.push(" <td></td>");q.push(' <td><button title="Open this folder" type="button" data-id="<%- content_item.id %>" class="btn_open_folder btn btn-default btn-xs">');q.push(' <span class="fa fa-folder-open"></span> browse</td>');q.push(' <td><%- content_item.get("name") %>');q.push(' <% if (content_item.get("item_count") === 0) { %>');q.push(' <span class="muted">(empty folder)</span>');q.push(" <% } %>");q.push(" </td>");q.push(" <td>folder</td>");q.push(' <td><%= _.escape(content_item.get("item_count")) %> item(s)</td>');q.push(" <% } else { %>");q.push(' <td style="text-align: center; "><input style="margin: 0;" type="checkbox"></td>');q.push(" <td>");q.push(' <button title="See details of this dataset" type="button" class="library-dataset btn btn-default btn-xs">');q.push(' <span class="fa fa-eye"></span> details');q.push(" </button>");q.push(" </td>");q.push(' <td><%- content_item.get("name") %></td>');q.push(' <td><%= _.escape(content_item.get("data_type")) %></td>');q.push(' <td><%= _.escape(content_item.get("readable_size")) %></td>');q.push(" <% } %> ");q.push(' <td><%= _.escape(content_item.get("time_updated")) %></td>');q.push(" </tr>");q.push(" <% }); %>");q.push(" ");q.push(" </tbody>");q.push("</table>");q.push("</div>");return q.join("")},templateDatasetModal:function(){var q=[];q.push('<div id="dataset_info_modal">');q.push(' <table class="table table-striped table-condensed">');q.push(" <tr>");q.push(' <th scope="row" id="id_row" data-id="<%= _.escape(item.get("ldda_id")) %>">Name</th>');q.push(' <td><%= _.escape(item.get("name")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Data type</th>');q.push(' <td><%= _.escape(item.get("data_type")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Genome build</th>');q.push(' <td><%= _.escape(item.get("genome_build")) %></td>');q.push(" </tr>");q.push(' <th scope="row">Size</th>');q.push(" <td><%= _.escape(size) %></td>");q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Date uploaded</th>');q.push(' <td><%= _.escape(item.get("date_uploaded")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Uploaded by</th>');q.push(' <td><%= _.escape(item.get("uploaded_by")) %></td>');q.push(" </tr>");q.push(' <tr scope="row">');q.push(' <th scope="row">Data Lines</th>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_data_lines")) %></td>');q.push(" </tr>");q.push(' <th scope="row">Comment Lines</th>');q.push(' <% if (item.get("metadata_comment_lines") === "") { %>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_comment_lines")) %></td>');q.push(" <% } else { %>");q.push(' <td scope="row">unknown</td>');q.push(" <% } %>");q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Number of Columns</th>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_columns")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Column Types</th>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_column_types")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Miscellaneous information</th>');q.push(' <td scope="row"><%= _.escape(item.get("misc_blurb")) %></td>');q.push(" 
</tr>");q.push(" </table>");q.push(' <pre class="peek">');q.push(" </pre>");q.push("</div>");return q.join("")},templateHistorySelectInModal:function(){var q=[];q.push('<span id="history_modal_combo" style="width:90%; margin-left: 1em; margin-right: 1em; ">');q.push("Select history: ");q.push('<select id="dataset_import_single" name="dataset_import_single" style="width:50%; margin-bottom: 1em; "> ');q.push(" <% _.each(histories, function(history) { %>");q.push(' <option value="<%= _.escape(history.get("id")) %>"><%= _.escape(history.get("name")) %></option>');q.push(" <% }); %>");q.push("</select>");q.push("</span>");return q.join("")},templateBulkImportInModal:function(){var q=[];q.push('<span id="history_modal_combo_bulk" style="width:90%; margin-left: 1em; margin-right: 1em; ">');q.push("Select history: ");q.push('<select id="dataset_import_bulk" name="dataset_import_bulk" style="width:50%; margin-bottom: 1em; "> ');q.push(" <% _.each(histories, function(history) { %>");q.push(' <option value="<%= _.escape(history.get("id")) %>"><%= _.escape(history.get("name")) %></option>');q.push(" <% }); %>");q.push("</select>");q.push("</span>");return q.join("")},size_to_string:function(q){var r="";if(q>=100000000000){q=q/100000000000;r="TB"}else{if(q>=100000000){q=q/100000000;r="GB"}else{if(q>=100000){q=q/100000;r="MB"}else{if(q>=100){q=q/100;r="KB"}else{q=q*10;r="b"}}}}return(Math.round(q)/10)+r},events:{"click #select-all-checkboxes":"selectAll","click .folder_row":"selectClickedRow","click #toolbtn_bulk_import":"modalBulkImport","click #toolbtn_dl":"bulkDownload","click .library-dataset":"showDatasetDetails","click #toolbtn_create_folder":"createFolderModal","click .btn_open_folder":"navigateToFolder"},render:function(q){$("#center").css("overflow","auto");view=this;var s=this;var r=new d({id:q.id});r.url=r.attributes.urlRoot+q.id+"/contents";r.fetch({success:function(t){for(var v=0;v<r.attributes.folder.models.length;v++){var u=r.attributes.folder.models[v];if(u.get("type")==="file"){u.set("readable_size",s.size_to_string(u.get("file_size")))}}var x=r.full_path;var y;if(x.length===1){y=0}else{y=x[x.length-2][0]}var w=_.template(s.templateFolder(),{path:r.full_path,items:r.attributes.folder.models,id:q.id,upper_folder_id:y});s.$el.html(w)}})},navigateToFolder:function(r){var q=$(r.target).attr("data-id");if(typeof q==="undefined"){return false}else{if(q==="0"){library_router.navigate("#",{trigger:true,replace:true})}else{library_router.navigate("folders/"+q,{trigger:true,replace:true})}}},showDatasetDetails:function(t){t.preventDefault();var u=$(t.target).parent().parent().attr("id");var s=new h();var r=new o();s.id=u;var q=this;s.fetch({success:function(v){r.fetch({success:function(w){q.renderModalAfterFetch(v,w)}})}})},renderModalAfterFetch:function(v,s){var t=this.size_to_string(v.get("file_size"));var u=_.template(this.templateDatasetModal(),{item:v,size:t});this.modal=null;var r=this;this.modal=new j.GalaxyModal({title:"Dataset Details",body:u,buttons:{Import:function(){r.importCurrentIntoHistory()},Download:function(){r.downloadCurrent()},Close:function(){r.modal.hide();$(".modal").remove();r.modal=null}}});this.modal.bindEvents(event);$(".peek").html(v.get("peek"));var 
q=_.template(this.templateHistorySelectInModal(),{histories:s.models});$(this.modal.elMain).find(".buttons").prepend(q);if(r.lastSelectedHistory.length>0){$(this.modal.elMain).find("#dataset_import_single").val(r.lastSelectedHistory)}this.modal.show()},downloadCurrent:function(){this.modal.disableButton("Import");this.modal.disableButton("Download");var q=[];q.push($("#id_row").attr("data-id"));var r="/api/libraries/datasets/download/uncompressed";var s={ldda_ids:q};folderContentView.processDownload(r,s);this.modal.enableButton("Import");this.modal.enableButton("Download")},importCurrentIntoHistory:function(){this.modal.disableButton("Import");this.modal.disableButton("Download");var s=$(this.modal.elMain).find("select[name=dataset_import_single] option:selected").val();this.lastSelectedHistory=s;var q=$("#id_row").attr("data-id");var t=new b();var r=this;t.url=t.urlRoot+s+"/contents";t.save({content:q,source:"library"},{success:function(){m.success("Dataset imported");r.modal.enableButton("Import");r.modal.enableButton("Download")},error:function(){m.error("An error occured! Dataset not imported. Please try again.");r.modal.enableButton("Import");r.modal.enableButton("Download")}})},selectAll:function(r){var q=r.target.checked;that=this;$(":checkbox").each(function(){this.checked=q;$row=$(this.parentElement.parentElement);(q)?that.makeDarkRow($row):that.makeWhiteRow($row)});this.checkTools()},selectClickedRow:function(r){var t="";var q;var s;if(r.target.localName==="input"){t=r.target;q=$(r.target.parentElement.parentElement);s="input"}else{if(r.target.localName==="td"){t=$("#"+r.target.parentElement.id).find(":checkbox")[0];q=$(r.target.parentElement);s="td"}}if(t===""){r.stopPropagation();return}if(t.checked){if(s==="td"){t.checked="";this.makeWhiteRow(q)}else{if(s==="input"){this.makeDarkRow(q)}}}else{if(s==="td"){t.checked="selected";this.makeDarkRow(q)}else{if(s==="input"){this.makeWhiteRow(q)}}}this.checkTools()},makeDarkRow:function(q){q.removeClass("light");q.find("a").removeClass("light");q.addClass("dark");q.find("a").addClass("dark")},makeWhiteRow:function(q){q.removeClass("dark");q.find("a").removeClass("dark");q.addClass("light");q.find("a").addClass("light")},checkTools:function(){var q=$("#folder_table").find(":checked");if(q.length>0){$("#toolbtn_bulk_import").show();$("#toolbtn_dl").show()}else{$("#toolbtn_bulk_import").hide();$("#toolbtn_dl").hide()}},modalBulkImport:function(){var r=this;var q=new o();q.fetch({success:function(s){var t=_.template(r.templateBulkImportInModal(),{histories:s.models});r.modal=new j.GalaxyModal({title:"Import into History",body:t,buttons:{Import:function(){r.importAllIntoHistory()},Close:function(){r.modal.hide();$(".modal").remove();r.modal=null}}});r.modal.show()}})},importAllIntoHistory:function(){this.modal.disableButton("Import");var s=$("select[name=dataset_import_bulk] option:selected").val();var w=$("select[name=dataset_import_bulk] option:selected").text();var y=[];$("#folder_table").find(":checked").each(function(){if(this.parentElement.parentElement.id!=""){y.push(this.parentElement.parentElement.id)}});var x=_.template(this.templateProgressBar(),{history_name:w});$(this.modal.elMain).find(".modal-body").html(x);var t=100/y.length;this.initProgress(t);var q=[];for(var r=y.length-1;r>=0;r--){library_dataset_id=y[r];var u=new b();var v=this;u.url=u.urlRoot+s+"/contents";u.content=library_dataset_id;u.source="library";q.push(u)}this.chainCall(q)},chainCall:function(r){var q=this;var s=r.pop();if(typeof s==="undefined"){m.success("All 
datasets imported");this.modal.hide();q.modal.enableButton("Import");return}var t=$.when(s.save({content:s.content,source:s.source})).done(function(u){q.updateProgress();responses.push(u);q.chainCall(r)})},initProgress:function(q){this.progress=0;this.progressStep=q},updateProgress:function(){this.progress+=this.progressStep;$(".progress-bar-import").width(Math.round(this.progress)+"%");txt_representation=Math.round(this.progress)+"% Complete";$(".completion_span").text(txt_representation)},templateProgressBar:function(){var q=[];q.push('<div class="import_text">');q.push("Importing selected datasets to history <b><%= _.escape(history_name) %></b>");q.push("</div>");q.push('<div class="progress">');q.push(' <div class="progress-bar progress-bar-import" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100" style="width: 00%;">');q.push(' <span class="completion_span">0% Complete</span>');q.push(" </div>");q.push("</div>");q.push("");return q.join("")},download:function(q,u){var s=[];$("#folder_table").find(":checked").each(function(){if(this.parentElement.parentElement.id!=""){s.push(this.parentElement.parentElement.id)}});var r="/api/libraries/datasets/download/"+u;var t={ldda_ids:s};this.processDownload(r,t,"get")},processDownload:function(r,s,t){if(r&&s){s=typeof s=="string"?s:$.param(s);var q="";$.each(s.split("&"),function(){var u=this.split("=");q+='<input type="hidden" name="'+u[0]+'" value="'+u[1]+'" />'});$('<form action="'+r+'" method="'+(t||"post")+'">'+q+"</form>").appendTo("body").submit().remove();m.info("Your download will begin soon")}},createFolderModal:function(){m.info("This will create folder...in the future")}});var a=Backbone.View.extend({el:"#center",events:{"click #create_new_library_btn":"show_library_modal"},initialize:function(){},template_library_list:function(){tmpl_array=[];tmpl_array.push('<div id="library_container" style="width: 90%; margin: auto; margin-top: 2em; overflow: auto !important; ">');tmpl_array.push("");tmpl_array.push('<h3>New Data Libraries. This is work in progress. 
Report problems & ideas to <a href="mailto:marten@bx.psu.edu?Subject=DataLibraries_Feedback" target="_blank">Marten</a>.</h3>');tmpl_array.push('<a href="" id="create_new_library_btn" class="btn btn-primary file ">New Library</a>');tmpl_array.push('<table class="table table-condensed">');tmpl_array.push(" <thead>");tmpl_array.push(' <th class="button_heading"></th>');tmpl_array.push(" <th>name</th>");tmpl_array.push(" <th>description</th>");tmpl_array.push(" <th>synopsis</th> ");tmpl_array.push(" <th>model type</th> ");tmpl_array.push(" </thead>");tmpl_array.push(" <tbody>");tmpl_array.push(" <% _.each(libraries, function(library) { %>");tmpl_array.push(" <tr>");tmpl_array.push(' <td><button title="Open this library" type="button" data-id="<%- library.get("root_folder_id") %>" class="btn_open_folder btn btn-default btn-xs">');tmpl_array.push(' <span class="fa fa-folder-open"></span> browse</td>');tmpl_array.push(' <td><%- library.get("name") %></td>');tmpl_array.push(' <td><%= _.escape(library.get("description")) %></td>');tmpl_array.push(' <td><%= _.escape(library.get("synopsis")) %></td>');tmpl_array.push(' <td><%= _.escape(library.get("model_class")) %></td>');tmpl_array.push(" </tr>");tmpl_array.push(" <% }); %>");tmpl_array.push(" </tbody>");tmpl_array.push("</table>");tmpl_array.push("</div>");return tmpl_array.join("")},render:function(){$("#center").css("overflow","auto");var q=this;libraries=new n();libraries.fetch({success:function(r){var s=_.template(q.template_library_list(),{libraries:r.models});q.$el.html(s)},error:function(s,r){if(r.statusCode().status===403){m.error("Please log in first. Redirecting to login page in 3s.");setTimeout(q.redirectToLogin,3000)}else{m.error("An error occured. Please try again.")}}})},redirectToHome:function(){window.location="../"},redirectToLogin:function(){window.location="/user/login"},modal:null,show_library_modal:function(r){r.preventDefault();r.stopPropagation();var q=this;this.modal=new j.GalaxyModal({title:"Create New Library",body:this.template_new_library(),buttons:{Create:function(){q.create_new_library_event()},Close:function(){q.modal.hide()}}});this.modal.show()},create_new_library_event:function(){var s=this.serialize_new_library();if(this.validate_new_library(s)){var r=new e();var q=this;r.save(s,{success:function(t){q.modal.hide();q.clear_library_modal();q.render();m.success("Library created")},error:function(){m.error("An error occured :(")}})}else{m.error("Library's name is missing")}return false},clear_library_modal:function(){$("input[name='Name']").val("");$("input[name='Description']").val("");$("input[name='Synopsis']").val("")},serialize_new_library:function(){return{name:$("input[name='Name']").val(),description:$("input[name='Description']").val(),synopsis:$("input[name='Synopsis']").val()}},validate_new_library:function(q){return q.name!==""},template_new_library:function(){tmpl_array=[];tmpl_array.push('<div id="new_library_modal">');tmpl_array.push("<form>");tmpl_array.push('<input type="text" name="Name" value="" placeholder="Name">');tmpl_array.push('<input type="text" name="Description" value="" placeholder="Description">');tmpl_array.push('<input type="text" name="Synopsis" value="" placeholder="Synopsis">');tmpl_array.push("</form>");tmpl_array.push("</div>");return tmpl_array.join("")}});var f=Backbone.View.extend({folderContentView:null,galaxyLibraryview:null,initialize:function(){folderContentView=new l();galaxyLibraryview=new a();library_router=new 
p();library_router.on("route:libraries",function(){galaxyLibraryview.render()});library_router.on("route:folder_content",function(q){folderContentView.render({id:q})});library_router.on("route:download",function(q,r){if($("#center").find(":checked").length===0){library_router.navigate("folders/"+q,{trigger:true,replace:true})}else{folderContentView.download(q,r);library_router.navigate("folders/"+q,{trigger:false,replace:true})}});Backbone.history.start();return this}});return{GalaxyApp:f}});
\ No newline at end of file
+var view=null;var library_router=null;var responses=[];define(["galaxy.modal","galaxy.masthead","utils/galaxy.utils","libs/toastr"],function(j,k,g,m){var e=Backbone.Model.extend({urlRoot:"/api/libraries"});var n=Backbone.Collection.extend({url:"/api/libraries",model:e});var h=Backbone.Model.extend({urlRoot:"/api/libraries/datasets"});var c=Backbone.Collection.extend({model:h});var d=Backbone.Model.extend({defaults:{folder:new c(),full_path:"unknown",urlRoot:"/api/folders/",id:"unknown"},parse:function(q){this.full_path=q[0].full_path;this.get("folder").reset(q[1].folder_contents);return q}});var b=Backbone.Model.extend({urlRoot:"/api/histories/"});var i=Backbone.Model.extend({url:"/api/histories/"});var o=Backbone.Collection.extend({url:"/api/histories",model:i});var p=Backbone.Router.extend({routes:{"":"libraries","folders/:id":"folder_content","folders/:folder_id/download/:format":"download"}});var l=Backbone.View.extend({el:"#center",progress:0,progressStep:1,lastSelectedHistory:"",modal:null,folders:null,initialize:function(){this.folders=[];this.queue=jQuery.Deferred();this.queue.resolve()},templateFolder:function(){var q=[];q.push('<div id="library_container" style="width: 90%; margin: auto; margin-top: 2em; ">');q.push('<h3>New Data Libraries. This is work in progress. Report problems & ideas to <a href="mailto:marten@bx.psu.edu?Subject=DataLibraries_Feedback" target="_blank">Marten</a>.</h3>');q.push('<div id="library_folder_toolbar" >');q.push(' <button title="Create New Folder" id="toolbtn_create_folder" class="btn btn-primary" type="button"><span class="fa fa-plus"></span><span class="fa fa-folder-close"></span> folder</button>');q.push(' <button id="toolbtn_bulk_import" class="btn btn-primary" style="display: none; margin-left: 0.5em;" type="button"><span class="fa fa-external-link"></span> to history</button>');q.push(' <div id="toolbtn_dl" class="btn-group" style="margin-left: 0.5em; display: none; ">');q.push(' <button id="drop_toggle" type="button" class="btn btn-primary dropdown-toggle" data-toggle="dropdown">');q.push(' <span class="fa fa-download"></span> download <span class="caret"></span>');q.push(" </button>");q.push(' <ul class="dropdown-menu" role="menu">');q.push(' <li><a href="#/folders/<%= id %>/download/tgz">.tar.gz</a></li>');q.push(' <li><a href="#/folders/<%= id %>/download/tbz">.tar.bz</a></li>');q.push(' <li><a href="#/folders/<%= id %>/download/zip">.zip</a></li>');q.push(" </ul>");q.push(" </div>");q.push("</div>");q.push('<div class="library_breadcrumb">');q.push('<a title="Return to the list of libraries" href="#">Libraries</a><b>|</b> ');q.push("<% _.each(path, function(path_item) { %>");q.push("<% if (path_item[0] != id) { %>");q.push('<a title="Return to this folder" href="#/folders/<%- path_item[0] %>"><%- path_item[1] %></a><b>|</b> ');q.push("<% } else { %>");q.push('<span title="You are in this folder"><%- path_item[1] %></span>');q.push("<% } %>");q.push("<% }); %>");q.push("</div>");q.push('<table id="folder_table" class="table table-condensed">');q.push(" <thead>");q.push(' <th style="text-align: center; width: 20px; "><input id="select-all-checkboxes" style="margin: 0;" type="checkbox"></th>');q.push(' <th class="button_heading">view</th>');q.push(" <th>name</th>");q.push(" <th>data type</th>");q.push(" <th>size</th>");q.push(" <th>date</th>");q.push(" </thead>");q.push(" <tbody>");q.push(" <td></td>");q.push(' <td><button title="Go to parent folder" type="button" data-id="<%- upper_folder_id %>" class="btn_open_folder btn btn-default 
btn-xs">');q.push(' <span class="fa fa-arrow-up"></span> .. go up</td>');q.push(" <td></td>");q.push(" <td></td>");q.push(" <td></td>");q.push(" <td></td>");q.push(" </tr>");q.push(" <% _.each(items, function(content_item) { %>");q.push(' <tr class="folder_row light" id="<%- content_item.id %>">');q.push(' <% if (content_item.get("type") === "folder") { %>');q.push(" <td></td>");q.push(' <td><button title="Open this folder" type="button" data-id="<%- content_item.id %>" class="btn_open_folder btn btn-default btn-xs">');q.push(' <span class="fa fa-folder-open"></span> browse</td>');q.push(' <td><%- content_item.get("name") %>');q.push(' <% if (content_item.get("item_count") === 0) { %>');q.push(' <span class="muted">(empty folder)</span>');q.push(" <% } %>");q.push(" </td>");q.push(" <td>folder</td>");q.push(' <td><%= _.escape(content_item.get("item_count")) %> item(s)</td>');q.push(" <% } else { %>");q.push(' <td style="text-align: center; "><input style="margin: 0;" type="checkbox"></td>');q.push(" <td>");q.push(' <button title="See details of this dataset" type="button" class="library-dataset btn btn-default btn-xs">');q.push(' <span class="fa fa-eye"></span> details');q.push(" </button>");q.push(" </td>");q.push(' <td><%- content_item.get("name") %></td>');q.push(' <td><%= _.escape(content_item.get("data_type")) %></td>');q.push(' <td><%= _.escape(content_item.get("readable_size")) %></td>');q.push(" <% } %> ");q.push(' <td><%= _.escape(content_item.get("time_updated")) %></td>');q.push(" </tr>");q.push(" <% }); %>");q.push(" ");q.push(" </tbody>");q.push("</table>");q.push("</div>");return q.join("")},templateDatasetModal:function(){var q=[];q.push('<div id="dataset_info_modal">');q.push(' <table class="table table-striped table-condensed">');q.push(" <tr>");q.push(' <th scope="row" id="id_row" data-id="<%= _.escape(item.get("ldda_id")) %>">Name</th>');q.push(' <td><%= _.escape(item.get("name")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Data type</th>');q.push(' <td><%= _.escape(item.get("data_type")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Genome build</th>');q.push(' <td><%= _.escape(item.get("genome_build")) %></td>');q.push(" </tr>");q.push(' <th scope="row">Size</th>');q.push(" <td><%= _.escape(size) %></td>");q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Date uploaded</th>');q.push(' <td><%= _.escape(item.get("date_uploaded")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Uploaded by</th>');q.push(' <td><%= _.escape(item.get("uploaded_by")) %></td>');q.push(" </tr>");q.push(' <tr scope="row">');q.push(' <th scope="row">Data Lines</th>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_data_lines")) %></td>');q.push(" </tr>");q.push(' <th scope="row">Comment Lines</th>');q.push(' <% if (item.get("metadata_comment_lines") === "") { %>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_comment_lines")) %></td>');q.push(" <% } else { %>");q.push(' <td scope="row">unknown</td>');q.push(" <% } %>");q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Number of Columns</th>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_columns")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Column Types</th>');q.push(' <td scope="row"><%= _.escape(item.get("metadata_column_types")) %></td>');q.push(" </tr>");q.push(" <tr>");q.push(' <th scope="row">Miscellaneous information</th>');q.push(' <td scope="row"><%= _.escape(item.get("misc_blurb")) %></td>');q.push(" 
</tr>");q.push(" </table>");q.push(' <pre class="peek">');q.push(" </pre>");q.push("</div>");return q.join("")},templateHistorySelectInModal:function(){var q=[];q.push('<span id="history_modal_combo" style="width:90%; margin-left: 1em; margin-right: 1em; ">');q.push("Select history: ");q.push('<select id="dataset_import_single" name="dataset_import_single" style="width:50%; margin-bottom: 1em; "> ');q.push(" <% _.each(histories, function(history) { %>");q.push(' <option value="<%= _.escape(history.get("id")) %>"><%= _.escape(history.get("name")) %></option>');q.push(" <% }); %>");q.push("</select>");q.push("</span>");return q.join("")},templateBulkImportInModal:function(){var q=[];q.push('<span id="history_modal_combo_bulk" style="width:90%; margin-left: 1em; margin-right: 1em; ">');q.push("Select history: ");q.push('<select id="dataset_import_bulk" name="dataset_import_bulk" style="width:50%; margin-bottom: 1em; "> ');q.push(" <% _.each(histories, function(history) { %>");q.push(' <option value="<%= _.escape(history.get("id")) %>"><%= _.escape(history.get("name")) %></option>');q.push(" <% }); %>");q.push("</select>");q.push("</span>");return q.join("")},size_to_string:function(q){var r="";if(q>=100000000000){q=q/100000000000;r="TB"}else{if(q>=100000000){q=q/100000000;r="GB"}else{if(q>=100000){q=q/100000;r="MB"}else{if(q>=100){q=q/100;r="KB"}else{q=q*10;r="b"}}}}return(Math.round(q)/10)+r},events:{"click #select-all-checkboxes":"selectAll","click .folder_row":"selectClickedRow","click #toolbtn_bulk_import":"modalBulkImport","click #toolbtn_dl":"bulkDownload","click .library-dataset":"showDatasetDetails","click #toolbtn_create_folder":"createFolderModal","click .btn_open_folder":"navigateToFolder"},render:function(q){$("#center").css("overflow","auto");view=this;var s=this;var r=new d({id:q.id});r.url=r.attributes.urlRoot+q.id+"/contents";r.fetch({success:function(t){for(var v=0;v<r.attributes.folder.models.length;v++){var u=r.attributes.folder.models[v];if(u.get("type")==="file"){u.set("readable_size",s.size_to_string(u.get("file_size")))}}var x=r.full_path;var y;if(x.length===1){y=0}else{y=x[x.length-2][0]}var w=_.template(s.templateFolder(),{path:r.full_path,items:r.attributes.folder.models,id:q.id,upper_folder_id:y});s.$el.html(w)}})},navigateToFolder:function(r){var q=$(r.target).attr("data-id");if(typeof q==="undefined"){return false}else{if(q==="0"){library_router.navigate("#",{trigger:true,replace:true})}else{library_router.navigate("folders/"+q,{trigger:true,replace:true})}}},showDatasetDetails:function(t){t.preventDefault();var u=$(t.target).parent().parent().attr("id");var s=new h();var r=new o();s.id=u;var q=this;s.fetch({success:function(v){r.fetch({success:function(w){q.renderModalAfterFetch(v,w)}})}})},renderModalAfterFetch:function(v,s){var t=this.size_to_string(v.get("file_size"));var u=_.template(this.templateDatasetModal(),{item:v,size:t});this.modal=null;var r=this;this.modal=new j.GalaxyModal({title:"Dataset Details",body:u,buttons:{Import:function(){r.importCurrentIntoHistory()},Download:function(){r.downloadCurrent()},Close:function(){r.modal.hide();$(".modal").remove();r.modal=null}}});this.modal.bindEvents(event);$(".peek").html(v.get("peek"));var 
q=_.template(this.templateHistorySelectInModal(),{histories:s.models});$(this.modal.elMain).find(".buttons").prepend(q);if(r.lastSelectedHistory.length>0){$(this.modal.elMain).find("#dataset_import_single").val(r.lastSelectedHistory)}this.modal.show()},downloadCurrent:function(){this.modal.disableButton("Import");this.modal.disableButton("Download");var q=[];q.push($("#id_row").attr("data-id"));var r="/api/libraries/datasets/download/uncompressed";var s={ldda_ids:q};folderContentView.processDownload(r,s);this.modal.enableButton("Import");this.modal.enableButton("Download")},importCurrentIntoHistory:function(){this.modal.disableButton("Import");this.modal.disableButton("Download");var s=$(this.modal.elMain).find("select[name=dataset_import_single] option:selected").val();this.lastSelectedHistory=s;var q=$("#id_row").attr("data-id");var t=new b();var r=this;t.url=t.urlRoot+s+"/contents";t.save({content:q,source:"library"},{success:function(){m.success("Dataset imported");r.modal.enableButton("Import");r.modal.enableButton("Download")},error:function(){m.error("An error occured! Dataset not imported. Please try again.");r.modal.enableButton("Import");r.modal.enableButton("Download")}})},selectAll:function(r){var q=r.target.checked;that=this;$(":checkbox").each(function(){this.checked=q;$row=$(this.parentElement.parentElement);(q)?that.makeDarkRow($row):that.makeWhiteRow($row)});this.checkTools()},selectClickedRow:function(r){var t="";var q;var s;if(r.target.localName==="input"){t=r.target;q=$(r.target.parentElement.parentElement);s="input"}else{if(r.target.localName==="td"){t=$("#"+r.target.parentElement.id).find(":checkbox")[0];q=$(r.target.parentElement);s="td"}}if(t===""){r.stopPropagation();return}if(t.checked){if(s==="td"){t.checked="";this.makeWhiteRow(q)}else{if(s==="input"){this.makeDarkRow(q)}}}else{if(s==="td"){t.checked="selected";this.makeDarkRow(q)}else{if(s==="input"){this.makeWhiteRow(q)}}}this.checkTools()},makeDarkRow:function(q){q.removeClass("light");q.find("a").removeClass("light");q.addClass("dark");q.find("a").addClass("dark")},makeWhiteRow:function(q){q.removeClass("dark");q.find("a").removeClass("dark");q.addClass("light");q.find("a").addClass("light")},checkTools:function(){var q=$("#folder_table").find(":checked");if(q.length>0){$("#toolbtn_bulk_import").show();$("#toolbtn_dl").show()}else{$("#toolbtn_bulk_import").hide();$("#toolbtn_dl").hide()}},modalBulkImport:function(){var r=this;var q=new o();q.fetch({success:function(s){var t=_.template(r.templateBulkImportInModal(),{histories:s.models});r.modal=new j.GalaxyModal({title:"Import into History",body:t,buttons:{Import:function(){r.importAllIntoHistory()},Close:function(){r.modal.hide();$(".modal").remove();r.modal=null}}});r.modal.bindEvents(event);r.modal.show()}})},importAllIntoHistory:function(){this.modal.disableButton("Import");var s=$("select[name=dataset_import_bulk] option:selected").val();var w=$("select[name=dataset_import_bulk] option:selected").text();var y=[];$("#folder_table").find(":checked").each(function(){if(this.parentElement.parentElement.id!=""){y.push(this.parentElement.parentElement.id)}});var x=_.template(this.templateProgressBar(),{history_name:w});$(this.modal.elMain).find(".modal-body").html(x);var t=100/y.length;this.initProgress(t);var q=[];for(var r=y.length-1;r>=0;r--){library_dataset_id=y[r];var u=new b();var v=this;u.url=u.urlRoot+s+"/contents";u.content=library_dataset_id;u.source="library";q.push(u)}this.chainCall(q)},chainCall:function(r){var q=this;var s=r.pop();if(typeof 
s==="undefined"){m.success("All datasets imported");this.modal.destroy();return}var t=$.when(s.save({content:s.content,source:s.source})).done(function(u){q.updateProgress();responses.push(u);q.chainCall(r)})},initProgress:function(q){this.progress=0;this.progressStep=q},updateProgress:function(){this.progress+=this.progressStep;$(".progress-bar-import").width(Math.round(this.progress)+"%");txt_representation=Math.round(this.progress)+"% Complete";$(".completion_span").text(txt_representation)},templateProgressBar:function(){var q=[];q.push('<div class="import_text">');q.push("Importing selected datasets to history <b><%= _.escape(history_name) %></b>");q.push("</div>");q.push('<div class="progress">');q.push(' <div class="progress-bar progress-bar-import" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100" style="width: 00%;">');q.push(' <span class="completion_span">0% Complete</span>');q.push(" </div>");q.push("</div>");q.push("");return q.join("")},download:function(q,u){var s=[];$("#folder_table").find(":checked").each(function(){if(this.parentElement.parentElement.id!=""){s.push(this.parentElement.parentElement.id)}});var r="/api/libraries/datasets/download/"+u;var t={ldda_ids:s};this.processDownload(r,t,"get")},processDownload:function(r,s,t){if(r&&s){s=typeof s=="string"?s:$.param(s);var q="";$.each(s.split("&"),function(){var u=this.split("=");q+='<input type="hidden" name="'+u[0]+'" value="'+u[1]+'" />'});$('<form action="'+r+'" method="'+(t||"post")+'">'+q+"</form>").appendTo("body").submit().remove();m.info("Your download will begin soon")}},createFolderModal:function(){m.info("This will create folder...in the future")}});var a=Backbone.View.extend({el:"#center",events:{"click #create_new_library_btn":"show_library_modal"},initialize:function(){},template_library_list:function(){tmpl_array=[];tmpl_array.push('<div id="library_container" style="width: 90%; margin: auto; margin-top: 2em; overflow: auto !important; ">');tmpl_array.push("");tmpl_array.push('<h3>New Data Libraries. This is work in progress. 
Report problems & ideas to <a href="mailto:marten@bx.psu.edu?Subject=DataLibraries_Feedback" target="_blank">Marten</a>.</h3>');tmpl_array.push('<a href="" id="create_new_library_btn" class="btn btn-primary file ">New Library</a>');tmpl_array.push('<table class="table table-condensed">');tmpl_array.push(" <thead>");tmpl_array.push(' <th class="button_heading"></th>');tmpl_array.push(" <th>name</th>");tmpl_array.push(" <th>description</th>");tmpl_array.push(" <th>synopsis</th> ");tmpl_array.push(" <th>model type</th> ");tmpl_array.push(" </thead>");tmpl_array.push(" <tbody>");tmpl_array.push(" <% _.each(libraries, function(library) { %>");tmpl_array.push(" <tr>");tmpl_array.push(' <td><button title="Open this library" type="button" data-id="<%- library.get("root_folder_id") %>" class="btn_open_folder btn btn-default btn-xs">');tmpl_array.push(' <span class="fa fa-folder-open"></span> browse</td>');tmpl_array.push(' <td><%- library.get("name") %></td>');tmpl_array.push(' <td><%= _.escape(library.get("description")) %></td>');tmpl_array.push(' <td><%= _.escape(library.get("synopsis")) %></td>');tmpl_array.push(' <td><%= _.escape(library.get("model_class")) %></td>');tmpl_array.push(" </tr>");tmpl_array.push(" <% }); %>");tmpl_array.push(" </tbody>");tmpl_array.push("</table>");tmpl_array.push("</div>");return tmpl_array.join("")},render:function(){$("#center").css("overflow","auto");var q=this;libraries=new n();libraries.fetch({success:function(r){var s=_.template(q.template_library_list(),{libraries:r.models});q.$el.html(s)},error:function(s,r){if(r.statusCode().status===403){m.error("Please log in first. Redirecting to login page in 3s.");setTimeout(q.redirectToLogin,3000)}else{m.error("An error occured. Please try again.")}}})},redirectToHome:function(){window.location="../"},redirectToLogin:function(){window.location="/user/login"},modal:null,show_library_modal:function(r){r.preventDefault();r.stopPropagation();var q=this;this.modal=new j.GalaxyModal({title:"Create New Library",body:this.template_new_library(),buttons:{Create:function(){q.create_new_library_event()},Close:function(){q.modal.hide()}}});this.modal.show()},create_new_library_event:function(){var s=this.serialize_new_library();if(this.validate_new_library(s)){var r=new e();var q=this;r.save(s,{success:function(t){q.modal.hide();q.clear_library_modal();q.render();m.success("Library created")},error:function(){m.error("An error occured :(")}})}else{m.error("Library's name is missing")}return false},clear_library_modal:function(){$("input[name='Name']").val("");$("input[name='Description']").val("");$("input[name='Synopsis']").val("")},serialize_new_library:function(){return{name:$("input[name='Name']").val(),description:$("input[name='Description']").val(),synopsis:$("input[name='Synopsis']").val()}},validate_new_library:function(q){return q.name!==""},template_new_library:function(){tmpl_array=[];tmpl_array.push('<div id="new_library_modal">');tmpl_array.push("<form>");tmpl_array.push('<input type="text" name="Name" value="" placeholder="Name">');tmpl_array.push('<input type="text" name="Description" value="" placeholder="Description">');tmpl_array.push('<input type="text" name="Synopsis" value="" placeholder="Synopsis">');tmpl_array.push("</form>");tmpl_array.push("</div>");return tmpl_array.join("")}});var f=Backbone.View.extend({folderContentView:null,galaxyLibraryview:null,initialize:function(){folderContentView=new l();galaxyLibraryview=new a();library_router=new 
p();library_router.on("route:libraries",function(){galaxyLibraryview.render()});library_router.on("route:folder_content",function(q){folderContentView.render({id:q})});library_router.on("route:download",function(q,r){if($("#center").find(":checked").length===0){library_router.navigate("folders/"+q,{trigger:true,replace:true})}else{folderContentView.download(q,r);library_router.navigate("folders/"+q,{trigger:false,replace:true})}});Backbone.history.start();return this}});return{GalaxyApp:f}});
\ No newline at end of file
diff -r e92e13e9c103cc1f36dff65e1523479bf5cb17ed -r 10b7c04f919ff6d7fc1d03ba88ebb282645fcbba static/scripts/packed/galaxy.modal.js
--- a/static/scripts/packed/galaxy.modal.js
+++ b/static/scripts/packed/galaxy.modal.js
@@ -1,1 +1,1 @@
-define([],function(){var a=Backbone.View.extend({elMain:"#everything",optionsDefault:{title:"galaxy-modal",body:"",backdrop:true,height:null,width:null},options:{},initialize:function(b){self=this;if(b){this.create(b)}},bindEvents:function(c,b){$(document).on("keyup",function(d){if(d.keyCode==27){self.hide()}});$("html").on("click",function(d){self.hide()});$(".modal-content").on("click",function(d){d.stopPropagation()})},unbindEvents:function(b){$(document).off("keyup",function(c){if(c.keyCode==27){self.hide()}});$("html").off("click",function(c){self.hide()});$(".modal-content").off("click",function(c){c.stopPropagation()})},destroy:function(){this.hide();this.unbindEvents();$(".modal").remove()},show:function(b){this.initialize(b);if(this.options.height){this.$body.css("height",this.options.height);this.$body.css("overflow","hidden")}else{this.$body.css("max-height",$(window).height()/2)}if(this.options.width){this.$dialog.css("width",this.options.width)}if(this.visible){this.$el.show()}else{this.$el.fadeIn("fast")}this.visible=true},hide:function(){this.$el.fadeOut("fast");this.visible=false;this.unbindEvents()},create:function(c){this.options=_.defaults(c,this.optionsDefault);if(this.options.body=="progress"){this.options.body=$('<div class="progress progress-striped active"><div class="progress-bar progress-bar-info" style="width:100%"></div></div>')}if(this.$el){this.$el.remove()}this.setElement(this.template(this.options.title));this.$dialog=(this.$el).find(".modal-dialog");this.$body=(this.$el).find(".modal-body");this.$footer=(this.$el).find(".modal-footer");this.$buttons=(this.$el).find(".buttons");this.$backdrop=(this.$el).find(".modal-backdrop");this.$body.html(this.options.body);if(!this.options.backdrop){this.$backdrop.removeClass("in")}if(this.options.buttons){var b=this;$.each(this.options.buttons,function(d,e){b.$buttons.append($('<button id="'+String(d).toLowerCase()+'"></button>').text(d).click(e)).append(" ")})}else{this.$footer.hide()}$(this.elMain).append($(this.el))},enableButton:function(b){this.$buttons.find("#"+String(b).toLowerCase()).prop("disabled",false)},disableButton:function(b){this.$buttons.find("#"+String(b).toLowerCase()).prop("disabled",true)},hideButton:function(b){this.$buttons.find("#"+String(b).toLowerCase()).hide()},showButton:function(b){this.$buttons.find("#"+String(b).toLowerCase()).show()},scrollTop:function(){return this.$body.scrollTop()},template:function(b){return'<div class="modal"><div class="modal-backdrop fade in" style="z-index: -1;"></div><div class="modal-dialog"><div class="modal-content"><div class="modal-header"><button type="button" class="close" style="display: none;">×</button><h4 class="title">'+b+'</h4></div><div class="modal-body"></div><div class="modal-footer"><div class="buttons" style="float: right;"></div></div></div</div></div>'}});return{GalaxyModal:a}});
\ No newline at end of file
+define([],function(){var a=Backbone.View.extend({elMain:"#everything",optionsDefault:{title:"galaxy-modal",body:"",backdrop:true,height:null,width:null},options:{},initialize:function(b){self=this;if(b){this.create(b)}},bindEvents:function(b){$(document).on("keyup",function(c){if(c.keyCode==27){self.hide();$(".modal").remove()}});$("html").on("click",function(c){self.hide();$(".modal").remove()});$(".modal-content").on("click",function(c){c.stopPropagation()})},unbindEvents:function(b){$(document).off("keyup",function(c){if(c.keyCode==27){self.hide();$(".modal").remove()}});$("html").off("click",function(c){self.hide();$(".modal").remove()});$(".modal-content").off("click",function(c){c.stopPropagation()})},destroy:function(){this.hide();this.unbindEvents();$(".modal").remove()},show:function(b){this.initialize(b);if(this.options.height){this.$body.css("height",this.options.height);this.$body.css("overflow","hidden")}else{this.$body.css("max-height",$(window).height()/2)}if(this.options.width){this.$dialog.css("width",this.options.width)}if(this.visible){this.$el.show()}else{this.$el.fadeIn("fast")}this.visible=true},hide:function(){this.$el.fadeOut("fast");this.visible=false;this.unbindEvents()},create:function(c){this.options=_.defaults(c,this.optionsDefault);if(this.options.body=="progress"){this.options.body=$('<div class="progress progress-striped active"><div class="progress-bar progress-bar-info" style="width:100%"></div></div>')}if(this.$el){this.$el.remove()}this.setElement(this.template(this.options.title));this.$dialog=(this.$el).find(".modal-dialog");this.$body=(this.$el).find(".modal-body");this.$footer=(this.$el).find(".modal-footer");this.$buttons=(this.$el).find(".buttons");this.$backdrop=(this.$el).find(".modal-backdrop");this.$body.html(this.options.body);if(!this.options.backdrop){this.$backdrop.removeClass("in")}if(this.options.buttons){var b=this;$.each(this.options.buttons,function(d,e){b.$buttons.append($('<button id="'+String(d).toLowerCase()+'"></button>').text(d).click(e)).append(" ")})}else{this.$footer.hide()}$(this.elMain).append($(this.el))},enableButton:function(b){this.$buttons.find("#"+String(b).toLowerCase()).prop("disabled",false)},disableButton:function(b){this.$buttons.find("#"+String(b).toLowerCase()).prop("disabled",true)},hideButton:function(b){this.$buttons.find("#"+String(b).toLowerCase()).hide()},showButton:function(b){this.$buttons.find("#"+String(b).toLowerCase()).show()},scrollTop:function(){return this.$body.scrollTop()},template:function(b){return'<div class="modal"><div class="modal-backdrop fade in" style="z-index: -1;"></div><div class="modal-dialog"><div class="modal-content"><div class="modal-header"><button type="button" class="close" style="display: none;">×</button><h4 class="title">'+b+'</h4></div><div class="modal-body"></div><div class="modal-footer"><div class="buttons" style="float: right;"></div></div></div</div></div>'}});return{GalaxyModal:a}});
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: natefoo: Allow changing the header for remote user.
by commits-noreply@bitbucket.org 03 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e92e13e9c103/
Changeset: e92e13e9c103
User: natefoo
Date: 2014-01-03 17:41:19
Summary: Allow changing the header for remote user.
Affected #: 6 files
diff -r 5f221e5774804c77987831bebc6941d5a91fa872 -r e92e13e9c103cc1f36dff65e1523479bf5cb17ed lib/galaxy/config.py
--- a/lib/galaxy/config.py
+++ b/lib/galaxy/config.py
@@ -123,6 +123,7 @@
self.retry_metadata_internally = string_as_bool( kwargs.get( "retry_metadata_internally", "True" ) )
self.use_remote_user = string_as_bool( kwargs.get( "use_remote_user", "False" ) )
self.remote_user_maildomain = kwargs.get( "remote_user_maildomain", None )
+ self.remote_user_header = kwargs.get( "remote_user_header", 'HTTP_REMOTE_USER' )
self.remote_user_logout_href = kwargs.get( "remote_user_logout_href", None )
self.require_login = string_as_bool( kwargs.get( "require_login", "False" ) )
self.allow_user_creation = string_as_bool( kwargs.get( "allow_user_creation", "True" ) )
diff -r 5f221e5774804c77987831bebc6941d5a91fa872 -r e92e13e9c103cc1f36dff65e1523479bf5cb17ed lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py
+++ b/lib/galaxy/web/framework/__init__.py
@@ -586,9 +586,9 @@
# things now.
if self.app.config.use_remote_user:
#If this is an api request, and they've passed a key, we let this go.
- assert "HTTP_REMOTE_USER" in self.environ, \
- "use_remote_user is set but no HTTP_REMOTE_USER variable"
- remote_user_email = self.environ[ 'HTTP_REMOTE_USER' ]
+ assert self.app.config.remote_user_header in self.environ, \
+ "use_remote_user is set but %s header was not provided" % self.app.config.remote_user_header
+ remote_user_email = self.environ[ self.app.config.remote_user_header ]
if galaxy_session:
# An existing session, make sure correct association exists
if galaxy_session.user is None:
diff -r 5f221e5774804c77987831bebc6941d5a91fa872 -r e92e13e9c103cc1f36dff65e1523479bf5cb17ed lib/galaxy/web/framework/middleware/remoteuser.py
--- a/lib/galaxy/web/framework/middleware/remoteuser.py
+++ b/lib/galaxy/web/framework/middleware/remoteuser.py
@@ -36,11 +36,12 @@
"""
class RemoteUser( object ):
- def __init__( self, app, maildomain=None, display_servers=None, admin_users=None ):
+ def __init__( self, app, maildomain=None, display_servers=None, admin_users=None, remote_user_header=None ):
self.app = app
self.maildomain = maildomain
self.display_servers = display_servers or []
self.admin_users = admin_users or []
+ self.remote_user_header = remote_user_header or 'HTTP_REMOTE_USER'
def __call__( self, environ, start_response ):
# Allow display servers
if self.display_servers and environ.has_key( 'REMOTE_ADDR' ):
@@ -50,16 +51,16 @@
# in the event of a lookup failure, deny access
host = None
if host in self.display_servers:
- environ[ 'HTTP_REMOTE_USER' ] = 'remote_display_server@%s' % ( self.maildomain or 'example.org' )
+ environ[ self.remote_user_header ] = 'remote_display_server@%s' % ( self.maildomain or 'example.org' )
return self.app( environ, start_response )
# Apache sets REMOTE_USER to the string '(null)' when using the
# Rewrite* method for passing REMOTE_USER and a user is
# un-authenticated. Any other possible values need to go here as well.
path_info = environ.get('PATH_INFO', '')
- if environ.has_key( 'HTTP_REMOTE_USER' ) and environ[ 'HTTP_REMOTE_USER' ] != '(null)':
- if not environ[ 'HTTP_REMOTE_USER' ].count( '@' ):
+ if environ.has_key( self.remote_user_header ) and environ[ self.remote_user_header ] != '(null)':
+ if not environ[ self.remote_user_header ].count( '@' ):
if self.maildomain is not None:
- environ[ 'HTTP_REMOTE_USER' ] += '@' + self.maildomain
+ environ[ self.remote_user_header ] += '@' + self.maildomain
else:
title = "Access to Galaxy is denied"
message = """
@@ -73,7 +74,7 @@
before you may access Galaxy.
"""
return self.error( start_response, title, message )
- if path_info.startswith( '/user/create' ) and environ[ 'HTTP_REMOTE_USER' ] in self.admin_users:
+ if path_info.startswith( '/user/create' ) and environ[ self.remote_user_header ] in self.admin_users:
pass # admins can create users
elif path_info.startswith( '/user/api_keys' ):
pass # api keys can be managed when remote_user is in use
diff -r 5f221e5774804c77987831bebc6941d5a91fa872 -r e92e13e9c103cc1f36dff65e1523479bf5cb17ed lib/galaxy/webapps/galaxy/buildapp.py
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -350,7 +350,8 @@
from galaxy.web.framework.middleware.remoteuser import RemoteUser
app = RemoteUser( app, maildomain = conf.get( 'remote_user_maildomain', None ),
display_servers = util.listify( conf.get( 'display_servers', '' ) ),
- admin_users = conf.get( 'admin_users', '' ).split( ',' ) )
+ admin_users = conf.get( 'admin_users', '' ).split( ',' ),
+ remote_user_header = conf.get( 'remote_user_header', 'HTTP_REMOTE_USER' ) )
log.debug( "Enabling 'remote user' middleware" )
# The recursive middleware allows for including requests in other
# requests or forwarding of requests, all on the server side.
diff -r 5f221e5774804c77987831bebc6941d5a91fa872 -r e92e13e9c103cc1f36dff65e1523479bf5cb17ed lib/galaxy/webapps/tool_shed/config.py
--- a/lib/galaxy/webapps/tool_shed/config.py
+++ b/lib/galaxy/webapps/tool_shed/config.py
@@ -72,6 +72,7 @@
self.blacklist_location = kwargs.get( 'blacklist_file', None )
self.blacklist_content = None
self.remote_user_maildomain = kwargs.get( "remote_user_maildomain", None )
+ self.remote_user_header = kwargs.get( "remote_user_header", 'HTTP_REMOTE_USER' )
self.remote_user_logout_href = kwargs.get( "remote_user_logout_href", None )
self.require_login = string_as_bool( kwargs.get( "require_login", "False" ) )
self.allow_user_creation = string_as_bool( kwargs.get( "allow_user_creation", "True" ) )
diff -r 5f221e5774804c77987831bebc6941d5a91fa872 -r e92e13e9c103cc1f36dff65e1523479bf5cb17ed universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample
+++ b/universe_wsgi.ini.sample
@@ -586,6 +586,13 @@
# to usernames, to become your Galaxy usernames (email addresses).
#remote_user_maildomain = None
+# If use_remote_user is enabled, the header that the upstream proxy provides
+# the remote username in defaults to HTTP_REMOTE_USER (the 'HTTP_' is prepended
+# by WSGI). This option allows you to change the header. Note, you still need
+# to prepend 'HTTP_' to the header in this option, but your proxy server should
+# *not* include 'HTTP_' at the beginning of the header name.
+#remote_user_header = 'HTTP_REMOTE_USER'
+
# If use_remote_user is enabled, you can set this to a URL that will log your
# users out.
#remote_user_logout_href = None
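For context (not part of the changeset): a minimal Python sketch of how a proxy-supplied header name ends up as the WSGI environ key that remote_user_header must name. The header X-Remote-User and the environ dict below are invented examples.

# WSGI servers expose incoming HTTP request headers as environ keys with an
# 'HTTP_' prefix, uppercased, and '-' replaced by '_'.
def wsgi_key_for_header(header_name):
    return 'HTTP_' + header_name.upper().replace('-', '_')

# A proxy that sends 'X-Remote-User: alice@example.org' therefore needs
# remote_user_header set to HTTP_X_REMOTE_USER in universe_wsgi.ini.
remote_user_header = wsgi_key_for_header('X-Remote-User')  # 'HTTP_X_REMOTE_USER'

# Invented environ, roughly what the middleware and framework code above would see.
environ = {'HTTP_X_REMOTE_USER': 'alice@example.org'}
assert remote_user_header in environ, \
    "use_remote_user is set but %s header was not provided" % remote_user_header
remote_user_email = environ[remote_user_header]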
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: greg: Order imports per database_contexts requirements to fix the tool shed's install and test framework.
by commits-noreply@bitbucket.org 03 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/5f221e577480/
Changeset: 5f221e577480
User: greg
Date: 2014-01-03 16:02:19
Summary: Order imports per database_contexts requirements to fix the tool shed's install and test framework.
Affected #: 2 files
diff -r f0f7c3cd2e8af64243f878c04f418c1ee054bc55 -r 5f221e5774804c77987831bebc6941d5a91fa872 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -485,9 +485,14 @@
master_api_key=master_api_key,
user_api_key=os.environ.get( "GALAXY_TEST_USER_API_KEY", default_galaxy_user_key ),
)
- else: #when testing data managers, do not test toolbox
+ else:
+ # We must make sure that functional.test_toolbox is always imported after
+ # database_contexts.galaxy_content is set (which occurs in this method above).
+ # If functional.test_toolbox is imported before database_contexts.galaxy_content
+ # is set, sa_session will be None in all methods that use it.
import functional.test_toolbox
functional.test_toolbox.toolbox = app.toolbox
+ # When testing data managers, do not test toolbox.
functional.test_toolbox.build_tests(
testing_shed_tools=testing_shed_tools,
master_api_key=master_api_key,
diff -r f0f7c3cd2e8af64243f878c04f418c1ee054bc55 -r 5f221e5774804c77987831bebc6941d5a91fa872 test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py
@@ -52,7 +52,6 @@
from paste import httpserver
from functional import database_contexts
-import functional.test_toolbox as imported_test_toolbox
log = logging.getLogger( 'install_and_test_repositories_with_tools' )
@@ -136,6 +135,10 @@
return tool_id, tool_version
def install_and_test_repositories( app, galaxy_shed_tools_dict, galaxy_shed_tool_conf_file ):
+ # We must make sure that functional.test_toolbox is always imported after database_contexts.galaxy_content
+ # is set (which occurs in the main method before this method is called). If functional.test_toolbox is
+ # imported before database_contexts.galaxy_content is set, sa_session will be None in all methods that use it.
+ import functional.test_toolbox as imported_test_toolbox
global test_toolbox
test_toolbox = imported_test_toolbox
# Initialize a dictionary for the summary that will be printed to stdout.
@@ -442,7 +445,6 @@
app = UniverseApplication( **kwargs )
database_contexts.galaxy_context = app.model.context
database_contexts.install_context = app.install_model.context
-
log.debug( "Embedded Galaxy application started..." )
# ---- Run galaxy webserver ------------------------------------------------------
server = None
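A minimal, self-contained sketch of the import-ordering problem described in the comments above (the module stand-ins below are hypothetical, not the real functional.database_contexts or functional.test_toolbox): a module that copies the context into its own name at import time captures whatever value is present at that moment.

import types

# Stand-in for functional.database_contexts.
database_contexts = types.ModuleType('database_contexts')
database_contexts.galaxy_context = None

def import_test_toolbox():
    # Mimics what happens on 'import functional.test_toolbox': the module
    # binds sa_session from the context at import time.
    test_toolbox = types.ModuleType('test_toolbox')
    test_toolbox.sa_session = database_contexts.galaxy_context
    return test_toolbox

# Wrong order: imported before the context is assigned, so sa_session is None.
too_early = import_test_toolbox()
database_contexts.galaxy_context = object()  # stand-in for app.model.context
assert too_early.sa_session is None

# Right order: assign the context first, then import.
late_enough = import_test_toolbox()
assert late_enough.sa_session is not None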
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
11 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/3f8f90da2773/
Changeset: 3f8f90da2773
User: jmchilton
Date: 2014-01-03 00:50:10
Summary: Fix LWR exit code handling.
Affected #: 1 file
diff -r 5378f0517318861997ae7be112bbbf4098bdf7ce -r 3f8f90da27736f7ae1e7dfee4bd2e43bc1cc4bfd lib/galaxy/jobs/runners/lwr.py
--- a/lib/galaxy/jobs/runners/lwr.py
+++ b/lib/galaxy/jobs/runners/lwr.py
@@ -173,6 +173,7 @@
run_results = client.raw_check_complete()
stdout = run_results.get('stdout', '')
stderr = run_results.get('stderr', '')
+ exit_code = run_results.get('returncode', None)
working_directory_contents = run_results.get('working_directory_contents', [])
# Use LWR client code to transfer/copy files back
# and cleanup job if needed.
@@ -207,7 +208,7 @@
self._handle_metadata_externally( job_wrapper, resolve_requirements=True )
# Finish the job
try:
- job_wrapper.finish( stdout, stderr )
+ job_wrapper.finish( stdout, stderr, exit_code )
except Exception:
log.exception("Job wrapper finish method failed")
job_wrapper.fail("Unable to finish job", exception=True)
https://bitbucket.org/galaxy/galaxy-central/commits/18b2967240e5/
Changeset: 18b2967240e5
User: jmchilton
Date: 2014-01-03 00:50:10
Summary: Refactor JobRunner's in_directory out into galaxy.util for reuse.
Affected #: 2 files
diff -r 3f8f90da27736f7ae1e7dfee4bd2e43bc1cc4bfd -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 lib/galaxy/jobs/runners/__init__.py
--- a/lib/galaxy/jobs/runners/__init__.py
+++ b/lib/galaxy/jobs/runners/__init__.py
@@ -15,6 +15,7 @@
from galaxy.jobs.command_factory import build_command
from galaxy import model
from galaxy.util import DATABASE_MAX_STRING_SIZE, shrink_stream_by_size
+from galaxy.util import in_directory
from galaxy.jobs.runners.util.job_script import job_script
log = logging.getLogger( __name__ )
@@ -154,18 +155,6 @@
if not job_working_directory:
job_working_directory = os.path.abspath( job_wrapper.working_directory )
- def in_directory( file, directory ):
- """
- Return true, if the common prefix of both is equal to directory
- e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
- """
-
- # Make both absolute.
- directory = os.path.abspath( directory )
- file = os.path.abspath( file )
-
- return os.path.commonprefix( [ file, directory ] ) == directory
-
# Set up dict of dataset id --> output path; output path can be real or
# false depending on outputs_to_working_directory
output_paths = {}
diff -r 3f8f90da27736f7ae1e7dfee4bd2e43bc1cc4bfd -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 lib/galaxy/util/__init__.py
--- a/lib/galaxy/util/__init__.py
+++ b/lib/galaxy/util/__init__.py
@@ -374,6 +374,20 @@
return default
return out
+
+def in_directory( file, directory ):
+ """
+ Return true, if the common prefix of both is equal to directory
+ e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
+ """
+
+ # Make both absolute.
+ directory = os.path.abspath( directory )
+ file = os.path.abspath( file )
+
+ return os.path.commonprefix( [ file, directory ] ) == directory
+
+
class Params( object ):
"""
Stores and 'sanitizes' parameters. Alphanumeric characters and the
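For reference, a small usage sketch of the in_directory helper that this changeset moves into galaxy.util (the paths are invented). Note that os.path.commonprefix compares strings character by character rather than by path component, so the check is a string-prefix test.

import os.path

def in_directory(file, directory):
    # Same logic as the helper relocated to galaxy.util in this changeset.
    directory = os.path.abspath(directory)
    file = os.path.abspath(file)
    return os.path.commonprefix([file, directory]) == directory

# Invented example paths.
assert in_directory('/a/b/c/d.rst', '/a/b')
assert not in_directory('/a/x/d.rst', '/a/b')
# Caveat of the character-wise comparison: a sibling directory whose name
# shares a prefix also passes the check.
assert in_directory('/a/bc/d.rst', '/a/b')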
https://bitbucket.org/galaxy/galaxy-central/commits/9920d006a966/
Changeset: 9920d006a966
User: jmchilton
Date: 2014-01-03 00:50:11
Summary: Update LWR client through LWR changeset 8ef5299.
This encompasses numerous individual LWR changesets. The largest of these changes include extensions to:
- Enable LWR client to work in a fashion where remote paths are precalculated and used during job input evaluation instead of having these inputs rewritten after the fact. (Galaxy + LWR runner require significant modifications to enable this - found in subsequent changesets). https://bitbucket.org/jmchilton/lwr/commits/a06587ff0544f3a09cf221057be1b91….
- Allow staging of arbitrary paths (not restricted to job files such as inputs, working directory files, configs, etc.). https://bitbucket.org/jmchilton/lwr/commits/63981e79696337399edb42be5614bc7….
- Vast improvements to extra_files_path handling - before it only worked for one level of input extra_files_path - now works with arbitrary nesting of inputs and outputs. https://bitbucket.org/jmchilton/lwr/commits/b5e8c4dffc3a04639550e01f3f44ac3…, https://bitbucket.org/jmchilton/lwr/commits/ab5bc61f6e0c9e11748436bd61aa1f6….
- Handle tools with version_command defined (LWR servers running on *nix systems only). https://bitbucket.org/jmchilton/lwr/commits/a3b43baa1aa6a8167a4e8cbb195d2a9….
- Allow restricting path mapping to specific path types (input, tool, config, workdir, output, output_workdir, *defaults* (all of the previous), unstructured (new, for arbitrary files), and *any* (*defaults* + unstructured)); a sketch of such a config appears below. https://bitbucket.org/jmchilton/lwr/commits/27e678d47846c2fdf5792d0c64167d1…
As well as several major refactorings to break up and improve the LWR client code structure:
- Break up stager.py into smaller modules - https://bitbucket.org/jmchilton/lwr/commits/d0efda40b2c92100161ca7f825cb754…
- Refactor complex method __download_results into class with smaller helper methods - https://bitbucket.org/jmchilton/lwr/commits/45fd16e52579c273eea5f52948ca57a…
- Introduce a higher-level abstraction for staging actions - https://bitbucket.org/jmchilton/lwr/commits/35ea7e2fa88714aff5a38f0e0a55e3f….
Affected #: 10 files
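Before the diffs, an illustrative action config in the new format (the field names follow the doctests in the action_mapper.py diff below; the concrete paths are invented):

import json

config = {
    "paths": [
        # Prefix match (the default match_type): stage everything under
        # /galaxy/data to the remote LWR server via HTTP transfer.
        {"path": "/galaxy/data", "action": "transfer"},
        # Glob match applied only to input-type paths.
        {"path": "/cool/bamfiles/**/*.bam", "action": "copy",
         "match_type": "glob", "path_types": "input"},
        # Also map 'unstructured' paths (e.g. tool data indices).
        {"path": "/indexes", "action": "transfer", "path_types": "*any*"},
    ],
}
print(json.dumps(config, indent=2))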
diff -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 -r 9920d006a96618d7729ef09e0fb861b22ff530db lib/galaxy/jobs/runners/lwr.py
--- a/lib/galaxy/jobs/runners/lwr.py
+++ b/lib/galaxy/jobs/runners/lwr.py
@@ -15,6 +15,8 @@
from .lwr_client import finish_job as lwr_finish_job
from .lwr_client import submit_job as lwr_submit_job
from .lwr_client import ClientJobDescription
+from .lwr_client import LwrOutputs
+from .lwr_client import GalaxyOutputs
log = logging.getLogger( __name__ )
@@ -79,6 +81,7 @@
tool=job_wrapper.tool,
config_files=job_wrapper.extra_filenames,
requirements=requirements,
+ version_file=job_wrapper.get_version_string_path(),
)
job_id = lwr_submit_job(client, client_job_description, remote_job_config)
log.info("lwr job submitted with job_id %s" % job_id)
@@ -174,7 +177,7 @@
stdout = run_results.get('stdout', '')
stderr = run_results.get('stderr', '')
exit_code = run_results.get('returncode', None)
- working_directory_contents = run_results.get('working_directory_contents', [])
+ lwr_outputs = LwrOutputs(run_results)
# Use LWR client code to transfer/copy files back
# and cleanup job if needed.
completed_normally = \
@@ -188,13 +191,17 @@
# no need to handle them differently here.
work_dir_outputs = []
output_files = self.get_output_files( job_wrapper )
+ galaxy_outputs = GalaxyOutputs(
+ working_directory=job_wrapper.working_directory,
+ work_dir_outputs=work_dir_outputs,
+ output_files=output_files,
+ version_file=job_wrapper.get_version_string_path(),
+ )
finish_args = dict( client=client,
- working_directory=job_wrapper.working_directory,
job_completed_normally=completed_normally,
cleanup_job=cleanup_job,
- work_dir_outputs=work_dir_outputs,
- output_files=output_files,
- working_directory_contents=working_directory_contents )
+ galaxy_outputs=galaxy_outputs,
+ lwr_outputs=lwr_outputs )
failed = lwr_finish_job( **finish_args )
if failed:
diff -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 -r 9920d006a96618d7729ef09e0fb861b22ff530db lib/galaxy/jobs/runners/lwr_client/__init__.py
--- a/lib/galaxy/jobs/runners/lwr_client/__init__.py
+++ b/lib/galaxy/jobs/runners/lwr_client/__init__.py
@@ -6,9 +6,24 @@
"""
-from .stager import submit_job, finish_job, ClientJobDescription
+from .staging.down import finish_job
+from .staging.up import submit_job
+from .staging import ClientJobDescription
+from .staging import LwrOutputs
+from .staging import GalaxyOutputs
from .client import OutputNotFoundException
from .manager import ClientManager
from .destination import url_to_destination_params
+from .path_mapper import PathMapper
-__all__ = [ClientManager, OutputNotFoundException, url_to_destination_params, finish_job, submit_job, ClientJobDescription]
+__all__ = [
+ ClientManager,
+ OutputNotFoundException,
+ url_to_destination_params,
+ finish_job,
+ submit_job,
+ ClientJobDescription,
+ LwrOutputs,
+ GalaxyOutputs,
+ PathMapper,
+]
diff -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 -r 9920d006a96618d7729ef09e0fb861b22ff530db lib/galaxy/jobs/runners/lwr_client/action_mapper.py
--- a/lib/galaxy/jobs/runners/lwr_client/action_mapper.py
+++ b/lib/galaxy/jobs/runners/lwr_client/action_mapper.py
@@ -1,12 +1,49 @@
from simplejson import load
from os.path import abspath
-from fnmatch import fnmatch
+from os.path import dirname
+from os.path import join
+from os.path import basename
+from os.path import sep
+import fnmatch
from re import compile
-
+from re import escape
+from galaxy.util.bunch import Bunch
+from .util import directory_files
+from .util import unique_path_prefix
DEFAULT_MAPPED_ACTION = 'transfer' # Not really clear to me what this should be, exception?
DEFAULT_PATH_MAPPER_TYPE = 'prefix'
+# Poor man's enum.
+path_type = Bunch(
+ # Galaxy input datasets and extra files.
+ INPUT="input",
+ # Galaxy config and param files.
+ CONFIG="config",
+ # Files from tool's tool_dir (for now just wrapper if available).
+ TOOL="tool",
+ # Input work dir files - e.g. metadata files, task-split input files, etc..
+ WORKDIR="workdir",
+ # Galaxy output datasets in their final home.
+ OUTPUT="output",
+ # Galaxy from_work_dir output paths and other files (e.g. galaxy.json)
+ OUTPUT_WORKDIR="output_workdir",
+ # Other fixed tool parameter paths (likely coming from tool data, but not
+ # nessecarily). Not sure this is the best name...
+ UNSTRUCTURED="unstructured",
+)
+
+
+ACTION_DEFAULT_PATH_TYPES = [
+ path_type.INPUT,
+ path_type.CONFIG,
+ path_type.TOOL,
+ path_type.WORKDIR,
+ path_type.OUTPUT,
+ path_type.OUTPUT_WORKDIR,
+]
+ALL_PATH_TYPES = ACTION_DEFAULT_PATH_TYPES + [path_type.UNSTRUCTURED]
+
class FileActionMapper(object):
"""
@@ -15,37 +52,61 @@
>>> json_string = r'''{"paths": [ \
{"path": "/opt/galaxy", "action": "none"}, \
{"path": "/galaxy/data", "action": "transfer"}, \
- {"path": "/cool/bamfiles/**/*.bam", "action": "copy", "type": "glob"}, \
- {"path": ".*/dataset_\\\\d+.dat", "action": "copy", "type": "regex"} \
+ {"path": "/cool/bamfiles/**/*.bam", "action": "copy", "match_type": "glob"}, \
+ {"path": ".*/dataset_\\\\d+.dat", "action": "copy", "match_type": "regex"} \
]}'''
>>> from tempfile import NamedTemporaryFile
>>> from os import unlink
- >>> f = NamedTemporaryFile(delete=False)
- >>> write_result = f.write(json_string.encode('UTF-8'))
- >>> f.close()
- >>> class MockClient():
- ... default_file_action = 'none'
- ... action_config_path = f.name
- ...
- >>> mapper = FileActionMapper(MockClient())
- >>> unlink(f.name)
+ >>> def mapper_for(default_action, config_contents):
+ ... f = NamedTemporaryFile(delete=False)
+ ... f.write(config_contents.encode('UTF-8'))
+ ... f.close()
+ ... mock_client = Bunch(default_file_action=default_action, action_config_path=f.name)
+ ... mapper = FileActionMapper(mock_client)
+ ... unlink(f.name)
+ ... return mapper
+ >>> mapper = mapper_for(default_action='none', config_contents=json_string)
>>> # Test first config line above, implicit path prefix mapper
- >>> mapper.action('/opt/galaxy/tools/filters/catWrapper.py', 'input')[0] == u'none'
+ >>> action = mapper.action('/opt/galaxy/tools/filters/catWrapper.py', 'input')
+ >>> action.action_type == u'none'
True
+ >>> action.staging_needed
+ False
>>> # Test another (2nd) mapper, this one with a different action
- >>> mapper.action('/galaxy/data/files/000/dataset_1.dat', 'input')[0] == u'transfer'
+ >>> action = mapper.action('/galaxy/data/files/000/dataset_1.dat', 'input')
+ >>> action.action_type == u'transfer'
+ True
+ >>> action.staging_needed
True
>>> # Always at least copy work_dir outputs.
- >>> mapper.action('/opt/galaxy/database/working_directory/45.sh', 'work_dir')[0] == u'copy'
+ >>> action = mapper.action('/opt/galaxy/database/working_directory/45.sh', 'workdir')
+ >>> action.action_type == u'copy'
+ True
+ >>> action.staging_needed
True
>>> # Test glob mapper (matching test)
- >>> mapper.action('/cool/bamfiles/projectABC/study1/patient3.bam', 'input')[0] == u'copy'
+ >>> mapper.action('/cool/bamfiles/projectABC/study1/patient3.bam', 'input').action_type == u'copy'
True
>>> # Test glob mapper (non-matching test)
- >>> mapper.action('/cool/bamfiles/projectABC/study1/patient3.bam.bai', 'input')[0] == u'none'
+ >>> mapper.action('/cool/bamfiles/projectABC/study1/patient3.bam.bai', 'input').action_type == u'none'
True
>>> # Regex mapper test.
- >>> mapper.action('/old/galaxy/data/dataset_10245.dat', 'input')[0] == u'copy'
+ >>> mapper.action('/old/galaxy/data/dataset_10245.dat', 'input').action_type == u'copy'
+ True
+ >>> # Doesn't map unstructured paths by default
+ >>> mapper.action('/old/galaxy/data/dataset_10245.dat', 'unstructured').action_type == u'none'
+ True
+ >>> input_only_mapper = mapper_for(default_action="none", config_contents=r'''{"paths": [ \
+ {"path": "/", "action": "transfer", "path_types": "input"} \
+ ] }''')
+ >>> input_only_mapper.action('/dataset_1.dat', 'input').action_type == u'transfer'
+ True
+ >>> input_only_mapper.action('/dataset_1.dat', 'output').action_type == u'none'
+ True
+ >>> unstructured_mapper = mapper_for(default_action="none", config_contents=r'''{"paths": [ \
+ {"path": "/", "action": "transfer", "path_types": "*any*"} \
+ ] }''')
+ >>> unstructured_mapper.action('/old/galaxy/data/dataset_10245.dat', 'unstructured').action_type == u'transfer'
True
"""
@@ -59,27 +120,92 @@
def __load_action_config(self, path):
config = load(open(path, 'rb'))
for path_config in config.get('paths', []):
- map_type = path_config.get('type', DEFAULT_PATH_MAPPER_TYPE)
+ map_type = path_config.get('match_type', DEFAULT_PATH_MAPPER_TYPE)
self.mappers.append(mappers[map_type](path_config))
- def action(self, path, type):
- action = self.default_action
+ def action(self, path, type, mapper=None):
+ action_type = self.default_action if type in ACTION_DEFAULT_PATH_TYPES else "none"
+ file_lister = DEFAULT_FILE_LISTER
normalized_path = abspath(path)
- for mapper in self.mappers:
- if mapper.matches(normalized_path):
- action = mapper.action
- break
- if type in ["work_dir", "output_task"] and action == "none":
+ if not mapper:
+ for query_mapper in self.mappers:
+ if query_mapper.matches(normalized_path, type):
+ mapper = query_mapper
+ break
+ if mapper:
+ action_type = mapper.action_type
+ file_lister = mapper.file_lister
+ if type in ["workdir", "output_workdir"] and action_type == "none":
## We are changing the working_directory relative to what
## Galaxy would use, these need to be copied over.
- action = "copy"
- return (action,)
+ action_type = "copy"
+ action_class = actions.get(action_type, None)
+ if action_class is None:
+ message_template = "Unknown action_type encountered %s while trying to map path %s"
+ message_args = (action_type, path)
+ raise Exception(message_template % message_args)
+ return action_class(path, file_lister=file_lister)
+
+ def unstructured_mappers(self):
+ """ Return mappers that will map 'unstructured' files (i.e. go beyond
+ mapping inputs, outputs, and config files).
+ """
+ return filter(lambda m: path_type.UNSTRUCTURED in m.path_types, self.mappers)
+
+
+class BaseAction(object):
+
+ def __init__(self, path, file_lister=None):
+ self.path = path
+ self.file_lister = file_lister or DEFAULT_FILE_LISTER
+
+ def unstructured_map(self):
+ unstructured_map = self.file_lister.unstructured_map(self.path)
+ # To ensure uniqueness, prepend unique prefix to each name
+ prefix = unique_path_prefix(self.path)
+ for path, name in unstructured_map.iteritems():
+ unstructured_map[path] = join(prefix, name)
+ return unstructured_map
+
+
+class NoneAction(BaseAction):
+ """ This action indicates the corresponding path does not require any
+ additional action. This should indicate paths that are available both on
+ the LWR client (i.e. Galaxy server) and remote LWR server with the same
+ paths. """
+ action_type = "none"
+ staging_needed = False
+
+
+class TransferAction(BaseAction):
+ """ This actions indicates that the LWR client should initiate an HTTP
+ transfer of the corresponding path to the remote LWR server before
+ launching the job. """
+ action_type = "transfer"
+ staging_needed = True
+
+
+class CopyAction(BaseAction):
+ """ This action indicates that the LWR client should execute a file system
+ copy of the corresponding path to the LWR staging directory prior to
+ launching the corresponding job. """
+ action_type = "copy"
+ staging_needed = True
class BasePathMapper(object):
def __init__(self, config):
- self.action = config.get('action', DEFAULT_MAPPED_ACTION)
+ self.action_type = config.get('action', DEFAULT_MAPPED_ACTION)
+ path_types_str = config.get('path_types', "*defaults*")
+ path_types_str = path_types_str.replace("*defaults*", ",".join(ACTION_DEFAULT_PATH_TYPES))
+ path_types_str = path_types_str.replace("*any*", ",".join(ALL_PATH_TYPES))
+ self.path_types = path_types_str.split(",")
+ self.file_lister = FileLister(config)
+
+ def matches(self, path, path_type):
+ path_type_matches = path_type in self.path_types
+ return path_type_matches and self._path_matches(path)
class PrefixPathMapper(BasePathMapper):
@@ -88,9 +214,13 @@
super(PrefixPathMapper, self).__init__(config)
self.prefix_path = abspath(config['path'])
- def matches(self, path):
+ def _path_matches(self, path):
return path.startswith(self.prefix_path)
+ def to_pattern(self):
+ pattern_str = "(%s%s[^\s,\"\']+)" % (escape(self.prefix_path), escape(sep))
+ return compile(pattern_str)
+
class GlobPathMapper(BasePathMapper):
@@ -98,8 +228,11 @@
super(GlobPathMapper, self).__init__(config)
self.glob_path = config['path']
- def matches(self, path):
- return fnmatch(path, self.glob_path)
+ def _path_matches(self, path):
+ return fnmatch.fnmatch(path, self.glob_path)
+
+ def to_pattern(self):
+ return compile(fnmatch.translate(self.glob_path))
class RegexPathMapper(BasePathMapper):
@@ -108,9 +241,32 @@
super(RegexPathMapper, self).__init__(config)
self.pattern = compile(config['path'])
- def matches(self, path):
+ def _path_matches(self, path):
return self.pattern.match(path) is not None
+ def to_pattern(self):
+ return self.pattern
+
+
+class FileLister(object):
+
+ def __init__(self, config):
+ self.depth = int(config.get("depth", "0"))
+
+ def unstructured_map(self, path):
+ depth = self.depth
+ if self.depth == 0:
+ return {path: basename(path)}
+ else:
+ while depth > 0:
+ path = dirname(path)
+ depth -= 1
+ return dict([(join(path, f), f) for f in directory_files(path)])
+
+DEFAULT_FILE_LISTER = FileLister(dict(depth=0))
+
+ACTION_CLASSES = [NoneAction, TransferAction, CopyAction]
+actions = dict([(clazz.action_type, clazz) for clazz in ACTION_CLASSES])
mappers = {
'prefix': PrefixPathMapper,
@@ -119,4 +275,4 @@
}
-__all__ = [FileActionMapper]
+__all__ = [FileActionMapper, path_type]
diff -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 -r 9920d006a96618d7729ef09e0fb861b22ff530db lib/galaxy/jobs/runners/lwr_client/client.py
--- a/lib/galaxy/jobs/runners/lwr_client/client.py
+++ b/lib/galaxy/jobs/runners/lwr_client/client.py
@@ -82,16 +82,16 @@
args = {"job_id": self.job_id, "name": name, "input_type": input_type}
return self._raw_execute('input_path', args)
- def put_file(self, path, input_type, name=None, contents=None, action='transfer'):
+ def put_file(self, path, input_type, name=None, contents=None, action_type='transfer'):
if not name:
name = os.path.basename(path)
args = {"job_id": self.job_id, "name": name, "input_type": input_type}
input_path = path
if contents:
input_path = None
- if action == 'transfer':
+ if action_type == 'transfer':
return self._upload_file(args, contents, input_path)
- elif action == 'copy':
+ elif action_type == 'copy':
lwr_path = self._raw_execute('input_path', args)
self._copy(path, lwr_path)
return {'path': lwr_path}
@@ -105,11 +105,14 @@
## path. Use old paths.
input_type = args['input_type']
action = {
- 'input': 'upload_input',
- 'input_extra': 'upload_extra_input',
+ # For backward compatibility just target upload_input_extra for all
+ # inputs, it allows nested inputs. Want to do away with distinction
+ # inputs and extra inputs.
+ 'input': 'upload_extra_input',
'config': 'upload_config_file',
- 'work_dir': 'upload_working_directory_file',
- 'tool': 'upload_tool_file'
+ 'workdir': 'upload_working_directory_file',
+ 'tool': 'upload_tool_file',
+ 'unstructured': 'upload_unstructured_file',
}[input_type]
del args['input_type']
return action
@@ -119,7 +122,23 @@
return self._raw_execute("get_output_type", {"name": name,
"job_id": self.job_id})
- def fetch_output(self, path, working_directory, action='transfer'):
+ # Deprecated
+ def fetch_output_legacy(self, path, working_directory, action_type='transfer'):
+ # Needs to determine if output is task/working directory or standard.
+ name = os.path.basename(path)
+
+ output_type = self._get_output_type(name)
+ if output_type == "none":
+ # Just make sure the file was created.
+ if not os.path.exists(path):
+ raise OutputNotFoundException(path)
+ return
+ elif output_type in ["task"]:
+ path = os.path.join(working_directory, name)
+
+ self.__populate_output_path(name, path, output_type, action_type)
+
+ def fetch_output(self, path, name=None, check_exists_remotely=False, action_type='transfer'):
"""
Download an output dataset from the remote server.
@@ -130,38 +149,23 @@
working_directory : str
Local working_directory for the job.
"""
- name = os.path.basename(path)
- output_type = self._get_output_type(name)
- if output_type == "none":
- # Just make sure the file was created.
- if not os.path.exists(path):
- raise OutputNotFoundException(path)
- return
+ if not name:
+ # Extra files will send in the path.
+ name = os.path.basename(path)
- output_path = self.__output_path(path, name, working_directory, output_type)
- self.__populate_output_path(name, output_path, output_type, action)
+ output_type = "direct" # Task/from_work_dir outputs now handled with fetch_work_dir_output
+ self.__populate_output_path(name, path, output_type, action_type)
- def __populate_output_path(self, name, output_path, output_type, action):
- if action == 'transfer':
+ def __populate_output_path(self, name, output_path, output_type, action_type):
+ self.__ensure_directory(output_path)
+ if action_type == 'transfer':
self.__raw_download_output(name, self.job_id, output_type, output_path)
- elif action == 'copy':
+ elif action_type == 'copy':
lwr_path = self._output_path(name, self.job_id, output_type)['path']
self._copy(lwr_path, output_path)
- def __output_path(self, path, name, working_directory, output_type):
- """
- Preconditions: output_type is not 'none'.
- """
- if output_type == "direct":
- output_path = path
- elif output_type == "task":
- output_path = os.path.join(working_directory, name)
- else:
- raise Exception("Unknown output_type returned from LWR server %s" % output_type)
- return output_path
-
- def fetch_work_dir_output(self, name, working_directory, output_path, action='transfer'):
+ def fetch_work_dir_output(self, name, working_directory, output_path, action_type='transfer'):
"""
Download an output dataset specified with from_work_dir from the
remote server.
@@ -175,12 +179,18 @@
output_path : str
Full path to output dataset.
"""
- if action == 'transfer':
+ self.__ensure_directory(output_path)
+ if action_type == 'transfer':
self.__raw_download_output(name, self.job_id, "work_dir", output_path)
else: # Even if action is none - LWR has a different work_dir so this needs to be copied.
lwr_path = self._output_path(name, self.job_id, 'work_dir')['path']
self._copy(lwr_path, output_path)
+ def __ensure_directory(self, output_path):
+ output_path_directory = os.path.dirname(output_path)
+ if not os.path.exists(output_path_directory):
+ os.makedirs(output_path_directory)
+
@parseJson()
def _output_path(self, name, job_id, output_type):
return self._raw_execute("output_path",
diff -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 -r 9920d006a96618d7729ef09e0fb861b22ff530db lib/galaxy/jobs/runners/lwr_client/path_mapper.py
--- /dev/null
+++ b/lib/galaxy/jobs/runners/lwr_client/path_mapper.py
@@ -0,0 +1,97 @@
+import os.path
+from .action_mapper import FileActionMapper
+from .action_mapper import path_type
+from .util import PathHelper
+
+from galaxy.util import in_directory
+
+
+class PathMapper(object):
+ """ Ties together a FileActionMapper and remote job configuration returned
+ by the LWR setup method to pre-determine the location of files for staging
+ on the remote LWR server.
+
+ This is not useful when rewrite_paths (as has traditionally been done with
+ the LWR) because when doing that the LWR determines the paths as files are
+ uploaded. When rewrite_paths is disabled however, the destination of files
+ needs to be determined prior to transfer so an object of this class can be
+ used.
+ """
+
+ def __init__(
+ self,
+ client,
+ remote_job_config,
+ local_working_directory,
+ action_mapper=None,
+ ):
+ self.local_working_directory = local_working_directory
+ if not action_mapper:
+ action_mapper = FileActionMapper(client)
+ self.action_mapper = action_mapper
+ self.input_directory = remote_job_config["inputs_directory"]
+ self.output_directory = remote_job_config["outputs_directory"]
+ self.working_directory = remote_job_config["working_directory"]
+ self.unstructured_files_directory = remote_job_config["unstructured_files_directory"]
+ self.config_directory = remote_job_config["configs_directory"]
+ separator = remote_job_config["system_properties"]["separator"]
+ self.path_helper = PathHelper(separator)
+
+ def remote_output_path_rewrite(self, local_path):
+ output_type = path_type.OUTPUT
+ if in_directory(local_path, self.local_working_directory):
+ output_type = path_type.OUTPUT_WORKDIR
+ remote_path = self.__remote_path_rewrite(local_path, output_type)
+ return remote_path
+
+ def remote_input_path_rewrite(self, local_path):
+ remote_path = self.__remote_path_rewrite(local_path, path_type.INPUT)
+ return remote_path
+
+ def remote_version_path_rewrite(self, local_path):
+ remote_path = self.__remote_path_rewrite(local_path, path_type.OUTPUT, name="COMMAND_VERSION")
+ return remote_path
+
+ def check_for_arbitrary_rewrite(self, local_path):
+ if not os.path.exists(local_path):
+ return None, []
+
+ path = str(local_path) # Use false_path if needed.
+ action = self.action_mapper.action(path, path_type.UNSTRUCTURED)
+ if not action.staging_needed:
+ return None, []
+ unique_names = action.unstructured_map()
+ name = unique_names[path]
+ remote_path = self.path_helper.remote_join(self.unstructured_files_directory, name)
+ return remote_path, unique_names
+
+ def __remote_path_rewrite(self, dataset_path, dataset_path_type, name=None):
+ """ Return remote path of this file (if staging is required) else None.
+ """
+ path = str(dataset_path) # Use false_path if needed.
+ action = self.action_mapper.action(path, dataset_path_type)
+ remote_path_rewrite = None
+ if action.staging_needed:
+ if name is None:
+ name = os.path.basename(path)
+ remote_directory = self.__remote_directory(dataset_path_type)
+ remote_path_rewrite = self.path_helper.remote_join(remote_directory, name)
+ return remote_path_rewrite
+
+ def __action(self, dataset_path, dataset_path_type):
+ path = str(dataset_path) # Use false_path if needed.
+ action = self.action_mapper.action(path, dataset_path_type)
+ return action
+
+ def __remote_directory(self, dataset_path_type):
+ if dataset_path_type in [path_type.OUTPUT]:
+ return self.output_directory
+ elif dataset_path_type in [path_type.WORKDIR, path_type.OUTPUT_WORKDIR]:
+ return self.working_directory
+ elif dataset_path_type in [path_type.INPUT]:
+ return self.input_directory
+ else:
+ message = "PathMapper cannot handle path type %s" % dataset_path_type
+ raise Exception(message)
+
+__all__ = [PathMapper]
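A rough sketch of the path pre-computation the new PathMapper performs when rewrite_paths is disabled (the remote_job_config values below are invented examples of what the LWR setup call returns):

# Invented remote_job_config of the shape PathMapper expects.
remote_job_config = {
    "inputs_directory": "/lwr/staging/123/inputs",
    "outputs_directory": "/lwr/staging/123/outputs",
    "working_directory": "/lwr/staging/123/working",
    "configs_directory": "/lwr/staging/123/configs",
    "unstructured_files_directory": "/lwr/staging/123/unstructured",
    "system_properties": {"separator": "/"},
}

def remote_input_path(local_path, config=remote_job_config):
    # Mirrors PathMapper.__remote_path_rewrite for an input that needs staging:
    # the remote directory for the path type, joined to the file's basename
    # with the remote server's separator.
    sep = config["system_properties"]["separator"]
    name = local_path.rsplit("/", 1)[-1]
    return config["inputs_directory"] + sep + name

print(remote_input_path("/galaxy/files/000/dataset_1.dat"))
# -> /lwr/staging/123/inputs/dataset_1.dat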
diff -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 -r 9920d006a96618d7729ef09e0fb861b22ff530db lib/galaxy/jobs/runners/lwr_client/stager.py
--- a/lib/galaxy/jobs/runners/lwr_client/stager.py
+++ /dev/null
@@ -1,417 +0,0 @@
-from os.path import abspath, basename, join, exists
-from os import listdir, sep
-from re import findall
-from re import compile
-from io import open
-from contextlib import contextmanager
-
-from .action_mapper import FileActionMapper
-
-from logging import getLogger
-log = getLogger(__name__)
-
-# All output files marked with from_work_dir attributes will copied or downloaded
-# this pattern picks up attiditional files to copy back - such as those
-# associated with multiple outputs and metadata configuration. Set to .* to just
-# copy everything
-COPY_FROM_WORKING_DIRECTORY_PATTERN = compile(r"primary_.*|galaxy.json|metadata_.*")
-
-
-class JobInputs(object):
- """
- Abstractions over dynamic inputs created for a given job (namely the command to
- execute and created configfiles).
-
- **Parameters**
-
- command_line : str
- Local command to execute for this job. (To be rewritten.)
- config_files : str
- Config files created for this job. (To be rewritten.)
-
-
- >>> import tempfile
- >>> tf = tempfile.NamedTemporaryFile()
- >>> def setup_inputs(tf):
- ... open(tf.name, "w").write(u"world /path/to/input the rest")
- ... inputs = JobInputs(u"hello /path/to/input", [tf.name])
- ... return inputs
- >>> inputs = setup_inputs(tf)
- >>> inputs.rewrite_paths(u"/path/to/input", u'C:\\input')
- >>> inputs.rewritten_command_line == u'hello C:\\\\input'
- True
- >>> inputs.rewritten_config_files[tf.name] == u'world C:\\\\input the rest'
- True
- >>> tf.close()
- >>> tf = tempfile.NamedTemporaryFile()
- >>> inputs = setup_inputs(tf)
- >>> inputs.find_referenced_subfiles('/path/to') == [u'/path/to/input']
- True
- >>> inputs.path_referenced('/path/to')
- True
- >>> inputs.path_referenced(u'/path/to')
- True
- >>> inputs.path_referenced('/path/to/input')
- True
- >>> inputs.path_referenced('/path/to/notinput')
- False
- >>> tf.close()
- """
-
- def __init__(self, command_line, config_files):
- self.rewritten_command_line = command_line
- self.rewritten_config_files = {}
- for config_file in config_files or []:
- config_contents = _read(config_file)
- self.rewritten_config_files[config_file] = config_contents
-
- def find_referenced_subfiles(self, directory):
- """
- Return list of files below specified `directory` in job inputs. Could
- use more sophisticated logic (match quotes to handle spaces, handle
- subdirectories, etc...).
-
- **Parameters**
-
- directory : str
- Full path to directory to search.
-
- """
- pattern = r"(%s%s\S+)" % (directory, sep)
- referenced_files = set()
- for input_contents in self.__items():
- referenced_files.update(findall(pattern, input_contents))
- return list(referenced_files)
-
- def path_referenced(self, path):
- pattern = r"%s" % path
- found = False
- for input_contents in self.__items():
- if findall(pattern, input_contents):
- found = True
- break
- return found
-
- def rewrite_paths(self, local_path, remote_path):
- """
- Rewrite references to `local_path` with `remote_path` in job inputs.
- """
- self.__rewrite_command_line(local_path, remote_path)
- self.__rewrite_config_files(local_path, remote_path)
-
- def __rewrite_command_line(self, local_path, remote_path):
- self.rewritten_command_line = self.rewritten_command_line.replace(local_path, remote_path)
-
- def __rewrite_config_files(self, local_path, remote_path):
- for config_file, rewritten_contents in self.rewritten_config_files.items():
- self.rewritten_config_files[config_file] = rewritten_contents.replace(local_path, remote_path)
-
- def __items(self):
- items = [self.rewritten_command_line]
- items.extend(self.rewritten_config_files.values())
- return items
-
-
-class TransferTracker(object):
-
- def __init__(self, client, job_inputs):
- self.client = client
- self.action_mapper = FileActionMapper(client)
- self.job_inputs = job_inputs
- self.file_renames = {}
-
- def handle_transfer(self, path, type, name=None, contents=None):
- if contents:
- # If contents loaded in memory, no need to write out file and copy,
- # just transfer.
- action = ('transfer', )
- else:
- if not exists(path):
- message = "handle_tranfer called on non-existent file - [%s]" % path
- log.warn(message)
- raise Exception(message)
- action = self.__action(path, type)
-
- if action[0] in ['transfer', 'copy']:
- response = self.client.put_file(path, type, name=name, contents=contents)
- self.register_rewrite(path, response['path'], type, force=True)
- elif action[0] == 'none':
- # No action for this file.
- pass
- else:
- raise Exception("Unknown action type (%s) encountered for path (%s)" % (action[0], path))
-
- def register_rewrite(self, local_path, remote_path, type, force=False):
- action = self.__action(local_path, type)
- if action[0] in ['transfer', 'copy'] or force:
- self.file_renames[local_path] = remote_path
-
- def rewrite_input_paths(self):
- """
- For each file that has been transferred and renamed, updated
- command_line and configfiles to reflect that rewrite.
- """
- for local_path, remote_path in self.file_renames.items():
- self.job_inputs.rewrite_paths(local_path, remote_path)
-
- def __action(self, path, type):
- return self.action_mapper.action(path, type)
-
-
-class FileStager(object):
- """
- Objects of the FileStager class interact with an LWR client object to
- stage the files required to run jobs on a remote LWR server.
-
- **Parameters**
-
- client : JobClient
- LWR client object.
- client_job_description : client_job_description
- Description of client view of job to stage and execute remotely.
- """
-
- def __init__(self, client, client_job_description, job_config):
- """
- """
- self.client = client
- self.command_line = client_job_description.command_line
- self.config_files = client_job_description.config_files
- self.input_files = client_job_description.input_files
- self.output_files = client_job_description.output_files
- self.tool_id = client_job_description.tool.id
- self.tool_version = client_job_description.tool.version
- self.tool_dir = abspath(client_job_description.tool.tool_dir)
- self.working_directory = client_job_description.working_directory
-
- # Setup job inputs, these will need to be rewritten before
- # shipping off to remote LWR server.
- self.job_inputs = JobInputs(self.command_line, self.config_files)
-
- self.transfer_tracker = TransferTracker(client, self.job_inputs)
-
- self.__handle_setup(job_config)
- self.__initialize_referenced_tool_files()
- self.__upload_tool_files()
- self.__upload_input_files()
- self.__upload_working_directory_files()
- self.__initialize_output_file_renames()
- self.__initialize_task_output_file_renames()
- self.__initialize_config_file_renames()
- self.__handle_rewrites()
- self.__upload_rewritten_config_files()
-
- def __handle_setup(self, job_config):
- if not job_config:
- job_config = self.client.setup(self.tool_id, self.tool_version)
-
- self.new_working_directory = job_config['working_directory']
- self.new_outputs_directory = job_config['outputs_directory']
- # Default configs_directory to match remote working_directory to mimic
- # behavior of older LWR servers.
- self.new_configs_drectory = job_config.get('configs_directory', self.new_working_directory)
- self.remote_path_separator = job_config['path_separator']
- # If remote LWR server assigned job id, use that otherwise
- # just use local job_id assigned.
- galaxy_job_id = self.client.job_id
- self.job_id = job_config.get('job_id', galaxy_job_id)
- if self.job_id != galaxy_job_id:
- # Remote LWR server assigned an id different than the
- # Galaxy job id, update client to reflect this.
- self.client.job_id = self.job_id
-
- def __initialize_referenced_tool_files(self):
- self.referenced_tool_files = self.job_inputs.find_referenced_subfiles(self.tool_dir)
-
- def __upload_tool_files(self):
- for referenced_tool_file in self.referenced_tool_files:
- self.transfer_tracker.handle_transfer(referenced_tool_file, 'tool')
-
- def __upload_input_files(self):
- for input_file in self.input_files:
- self.__upload_input_file(input_file)
- self.__upload_input_extra_files(input_file)
-
- def __upload_input_file(self, input_file):
- if self.job_inputs.path_referenced(input_file):
- if exists(input_file):
- self.transfer_tracker.handle_transfer(input_file, 'input')
- else:
- message = "LWR: __upload_input_file called on empty or missing dataset." + \
- " So such file: [%s]" % input_file
- log.debug(message)
-
- def __upload_input_extra_files(self, input_file):
- # TODO: Determine if this is object store safe and what needs to be
- # done if it is not.
- files_path = "%s_files" % input_file[0:-len(".dat")]
- if exists(files_path) and self.job_inputs.path_referenced(files_path):
- for extra_file in listdir(files_path):
- extra_file_path = join(files_path, extra_file)
- relative_path = basename(files_path)
- extra_file_relative_path = join(relative_path, extra_file)
- self.transfer_tracker.handle_transfer(extra_file_path, 'input_extra', name=extra_file_relative_path)
-
- def __upload_working_directory_files(self):
- # Task manager stages files into working directory, these need to be
- # uploaded if present.
- for working_directory_file in listdir(self.working_directory):
- path = join(self.working_directory, working_directory_file)
- self.transfer_tracker.handle_transfer(path, 'work_dir')
-
- def __initialize_output_file_renames(self):
- for output_file in self.output_files:
- remote_path = r'%s%s%s' % (self.new_outputs_directory, self.remote_path_separator, basename(output_file))
- self.transfer_tracker.register_rewrite(output_file, remote_path, 'output')
-
- def __initialize_task_output_file_renames(self):
- for output_file in self.output_files:
- name = basename(output_file)
- task_file = join(self.working_directory, name)
- remote_path = r'%s%s%s' % (self.new_working_directory, self.remote_path_separator, name)
- self.transfer_tracker.register_rewrite(task_file, remote_path, 'output_task')
-
- def __initialize_config_file_renames(self):
- for config_file in self.config_files:
- remote_path = r'%s%s%s' % (self.new_configs_drectory, self.remote_path_separator, basename(config_file))
- self.transfer_tracker.register_rewrite(config_file, remote_path, 'config')
-
- def __handle_rewrites(self):
- """
- For each file that has been transferred and renamed, updated
- command_line and configfiles to reflect that rewrite.
- """
- self.transfer_tracker.rewrite_input_paths()
-
- def __upload_rewritten_config_files(self):
- for config_file, new_config_contents in self.job_inputs.rewritten_config_files.items():
- self.client.put_file(config_file, input_type='config', contents=new_config_contents)
-
- def get_rewritten_command_line(self):
- """
- Returns the rewritten version of the command line to execute suitable
- for remote host.
- """
- return self.job_inputs.rewritten_command_line
-
-
-def finish_job(client, cleanup_job, job_completed_normally, working_directory, work_dir_outputs, output_files, working_directory_contents=[]):
- """
- """
- download_failure_exceptions = []
- if job_completed_normally:
- download_failure_exceptions = __download_results(client, working_directory, work_dir_outputs, output_files, working_directory_contents)
- return __clean(download_failure_exceptions, cleanup_job, client)
-
-
-def __download_results(client, working_directory, work_dir_outputs, output_files, working_directory_contents):
- action_mapper = FileActionMapper(client)
- downloaded_working_directory_files = []
- exception_tracker = DownloadExceptionTracker()
-
- # Fetch explicit working directory outputs.
- for source_file, output_file in work_dir_outputs:
- name = basename(source_file)
- with exception_tracker():
- action = action_mapper.action(output_file, 'output')
- client.fetch_work_dir_output(name, working_directory, output_file, action[0])
- downloaded_working_directory_files.append(name)
- # Remove from full output_files list so don't try to download directly.
- output_files.remove(output_file)
-
- # Fetch output files.
- for output_file in output_files:
- with exception_tracker():
- action = action_mapper.action(output_file, 'output')
- client.fetch_output(output_file, working_directory=working_directory, action=action[0])
-
- # Fetch remaining working directory outputs of interest.
- for name in working_directory_contents:
- if name in downloaded_working_directory_files:
- continue
- if COPY_FROM_WORKING_DIRECTORY_PATTERN.match(name):
- with exception_tracker():
- output_file = join(working_directory, name)
- action = action_mapper.action(output_file, 'output')
- client.fetch_work_dir_output(name, working_directory, output_file, action=action[0])
- downloaded_working_directory_files.append(name)
-
- return exception_tracker.download_failure_exceptions
-
-
-class DownloadExceptionTracker(object):
-
- def __init__(self):
- self.download_failure_exceptions = []
-
- @contextmanager
- def __call__(self):
- try:
- yield
- except Exception as e:
- self.download_failure_exceptions.append(e)
-
-
-def __clean(download_failure_exceptions, cleanup_job, client):
- failed = (len(download_failure_exceptions) > 0)
- if (not failed and cleanup_job != "never") or cleanup_job == "always":
- try:
- client.clean()
- except:
- log.warn("Failed to cleanup remote LWR job")
- return failed
-
-
-def submit_job(client, client_job_description, job_config=None):
- """
- """
- file_stager = FileStager(client, client_job_description, job_config)
- rebuilt_command_line = file_stager.get_rewritten_command_line()
- job_id = file_stager.job_id
- client.launch(rebuilt_command_line, requirements=client_job_description.requirements)
- return job_id
-
-
-def _read(path):
- """
- Utility method to quickly read small files (config files and tool
- wrappers) into memory as bytes.
- """
- input = open(path, "r", encoding="utf-8")
- try:
- return input.read()
- finally:
- input.close()
-
-
-class ClientJobDescription(object):
- """ A description of how client views job - command_line, inputs, etc..
-
- **Parameters**
-
- command_line : str
- The local command line to execute, this will be rewritten for the remote server.
- config_files : list
- List of Galaxy 'configfile's produced for this job. These will be rewritten and sent to remote server.
- input_files : list
- List of input files used by job. These will be transferred and references rewritten.
- output_files : list
- List of output_files produced by job.
- tool_dir : str
- Directory containing tool to execute (if a wrapper is used, it will be transferred to remote server).
- working_directory : str
- Local path created by Galaxy for running this job.
- requirements : list
- List of requirements for tool execution.
- """
-
- def __init__(self, tool, command_line, config_files, input_files, output_files, working_directory, requirements):
- self.tool = tool
- self.command_line = command_line
- self.config_files = config_files
- self.input_files = input_files
- self.output_files = output_files
- self.working_directory = working_directory
- self.requirements = requirements
-
-__all__ = [submit_job, ClientJobDescription, finish_job]
diff -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 -r 9920d006a96618d7729ef09e0fb861b22ff530db lib/galaxy/jobs/runners/lwr_client/staging/__init__.py
--- /dev/null
+++ b/lib/galaxy/jobs/runners/lwr_client/staging/__init__.py
@@ -0,0 +1,120 @@
+from os.path import basename
+from os.path import join
+from os.path import dirname
+from os import sep
+
+from ..util import PathHelper
+
+COMMAND_VERSION_FILENAME = "COMMAND_VERSION"
+
+
+class ClientJobDescription(object):
+ """ A description of how client views job - command_line, inputs, etc..
+
+ **Parameters**
+
+ command_line : str
+ The local command line to execute, this will be rewritten for the remote server.
+ config_files : list
+ List of Galaxy 'configfile's produced for this job. These will be rewritten and sent to remote server.
+ input_files : list
+ List of input files used by job. These will be transferred and references rewritten.
+ output_files : list
+ List of output_files produced by job.
+ tool_dir : str
+ Directory containing tool to execute (if a wrapper is used, it will be transferred to remote server).
+ working_directory : str
+ Local path created by Galaxy for running this job.
+ requirements : list
+ List of requirements for tool execution.
+ version_file : str
+ Path to version file expected on the client server
+ arbitrary_files : dict()
+ Additional non-input, non-tool, non-config, non-working directory files
+ to transfer before staging job. This is most likely data indices but
+ can be anything. For now these are copied into staging working
+ directory but this will be reworked to find a better, more robust
+ location.
+ rewrite_paths : boolean
+ Indicates whether paths should be rewritten in job inputs (command_line
+ and config files) while staging files.
+ """
+
+ def __init__(
+ self,
+ tool,
+ command_line,
+ config_files,
+ input_files,
+ output_files,
+ working_directory,
+ requirements,
+ version_file=None,
+ arbitrary_files=None,
+ rewrite_paths=True,
+ ):
+ self.tool = tool
+ self.command_line = command_line
+ self.config_files = config_files
+ self.input_files = input_files
+ self.output_files = output_files
+ self.working_directory = working_directory
+ self.requirements = requirements
+ self.version_file = version_file
+ self.rewrite_paths = rewrite_paths
+ self.arbitrary_files = arbitrary_files or {}
+
+
+class GalaxyOutputs(object):
+ """ Abstraction describing the output datasets EXPECTED by the Galaxy job
+ runner client. """
+
+ def __init__(self, working_directory, work_dir_outputs, output_files, version_file):
+ self.working_directory = working_directory
+ self.work_dir_outputs = work_dir_outputs
+ self.output_files = output_files
+ self.version_file = version_file
+
+
+class LwrOutputs(object):
+ """ Abstraction describing the output files PRODUCED by the remote LWR
+ server. """
+
+ def __init__(self, complete_response):
+ # Default to None instead of [] to distinguish between empty contents and the
+ # value not being set by the LWR - older LWR instances will not set these in
+ # the complete response.
+ self.working_directory_contents = complete_response.get("working_directory_contents", None)
+ self.output_directory_contents = complete_response.get("outputs_directory_contents", None)
+ # Older (pre-2014) LWR servers will not include the separator in the response,
+ # so this should only be used when reasoning about outputs in
+ # subdirectories, which were not previously supported.
+ self.path_helper = PathHelper(complete_response.get("system_properties", {}).get("separator", sep))
+
+ def has_output_file(self, output_file):
+ if self.output_directory_contents is None:
+ # Legacy LWR doesn't report this; return None to indicate it is unknown
+ # whether the output was generated.
+ return None
+ else:
+ return basename(output_file) in self.output_directory_contents
+
+ def has_output_directory_listing(self):
+ return self.output_directory_contents is not None
+
+ def output_extras(self, output_file):
+ """
+ Returns dict mapping local path to remote name.
+ """
+ if not self.has_output_directory_listing():
+ # Fetching $output.extra_files_path is not supported with legacy
+ # LWR (pre-2014) servers.
+ return {}
+
+ output_directory = dirname(output_file)
+
+ def local_path(name):
+ return join(output_directory, self.path_helper.local_name(name))
+
+ files_directory = "%s_files%s" % (basename(output_file)[0:-len(".dat")], self.path_helper.separator)
+ names = filter(lambda o: o.startswith(files_directory), self.output_directory_contents)
+ return dict(map(lambda name: (local_path(name), name), names))
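The output_extras() method above keys a dataset's extra files off a '<basename minus .dat>_files<separator>' prefix in the remote outputs listing and maps each entry back to a local path next to the output. A minimal sketch of how a completion response would be interpreted on a posix Galaxy host - the response values and dataset paths below are illustrative only, not part of this changeset:
    from galaxy.jobs.runners.lwr_client.staging import LwrOutputs
    # Hypothetical completion response from a 2014-era LWR server.
    complete_response = {
        "working_directory_contents": ["galaxy.json"],
        "outputs_directory_contents": [
            "dataset_1.dat",
            "dataset_1_files/summary.png",
            "COMMAND_VERSION",
        ],
        "system_properties": {"separator": "/"},
    }
    lwr_outputs = LwrOutputs(complete_response)
    assert lwr_outputs.has_output_file("/galaxy/files/dataset_1.dat")
    # Extras map local path -> remote name relative to the remote outputs directory.
    extras = lwr_outputs.output_extras("/galaxy/files/dataset_1.dat")
    assert extras == {"/galaxy/files/dataset_1_files/summary.png": "dataset_1_files/summary.png"}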
diff -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 -r 9920d006a96618d7729ef09e0fb861b22ff530db lib/galaxy/jobs/runners/lwr_client/staging/down.py
--- /dev/null
+++ b/lib/galaxy/jobs/runners/lwr_client/staging/down.py
@@ -0,0 +1,127 @@
+from os.path import join
+from os.path import relpath
+from re import compile
+from contextlib import contextmanager
+
+from ..staging import COMMAND_VERSION_FILENAME
+from ..action_mapper import FileActionMapper
+
+
+from logging import getLogger
+log = getLogger(__name__)
+
+# All output files marked with from_work_dir attributes will be copied or
+# downloaded; this pattern picks up additional files to copy back - such as
+# those associated with multiple outputs and metadata configuration. Set to .*
+# to just copy everything.
+COPY_FROM_WORKING_DIRECTORY_PATTERN = compile(r"primary_.*|galaxy.json|metadata_.*|dataset_\d+\.dat|dataset_\d+_files.+")
+
+
+def finish_job(client, cleanup_job, job_completed_normally, galaxy_outputs, lwr_outputs):
+ """ Responsible for downloading results from remote server and cleaning up
+ LWR staging directory (if needed.)
+ """
+ download_failure_exceptions = []
+ if job_completed_normally:
+ downloader = ResultsDownloader(client, galaxy_outputs, lwr_outputs)
+ download_failure_exceptions = downloader.download()
+ return __clean(download_failure_exceptions, cleanup_job, client)
+
+
+class ResultsDownloader(object):
+
+ def __init__(self, client, galaxy_outputs, lwr_outputs):
+ self.client = client
+ self.galaxy_outputs = galaxy_outputs
+ self.lwr_outputs = lwr_outputs
+ self.action_mapper = FileActionMapper(client)
+ self.downloaded_working_directory_files = []
+ self.exception_tracker = DownloadExceptionTracker()
+ self.output_files = galaxy_outputs.output_files
+ self.working_directory_contents = lwr_outputs.working_directory_contents or []
+
+ def download(self):
+ self.__download_working_directory_outputs()
+ self.__download_outputs()
+ self.__download_version_file()
+ self.__download_other_working_directory_files()
+ return self.exception_tracker.download_failure_exceptions
+
+ def __download_working_directory_outputs(self):
+ working_directory = self.galaxy_outputs.working_directory
+ # Fetch explicit working directory outputs.
+ for source_file, output_file in self.galaxy_outputs.work_dir_outputs:
+ name = relpath(source_file, working_directory)
+ remote_name = self.lwr_outputs.path_helper.remote_name(name)
+ with self.exception_tracker():
+ action = self.action_mapper.action(output_file, 'output_workdir')
+ self.client.fetch_work_dir_output(remote_name, working_directory, output_file, action_type=action.action_type)
+ self.downloaded_working_directory_files.append(remote_name)
+ # Remove from full output_files list so we don't try to download it directly.
+ self.output_files.remove(output_file)
+
+ def __download_outputs(self):
+ # Legacy LWR servers do not return a list of files, so iterate over the
+ # list of expected outputs for the tool.
+ for output_file in self.output_files:
+ # Fetch output directly...
+ with self.exception_tracker():
+ action = self.action_mapper.action(output_file, 'output')
+ output_generated = self.lwr_outputs.has_output_file(output_file)
+ working_directory = self.galaxy_outputs.working_directory
+ if output_generated is None:
+ self.client.fetch_output_legacy(output_file, working_directory, action_type=action.action_type)
+ elif output_generated:
+ self.client.fetch_output(output_file, action_type=action.action_type)
+
+ for local_path, remote_name in self.lwr_outputs.output_extras(output_file).iteritems():
+ with self.exception_tracker():
+ action = self.action_mapper.action(local_path, 'output')
+ self.client.fetch_output(path=local_path, name=remote_name, action_type=action.action_type)
+ # else: output was not generated, so do not attempt the download.
+
+ def __download_version_file(self):
+ version_file = self.galaxy_outputs.version_file
+ # output_directory_contents may be None for legacy LWR servers.
+ lwr_output_directory_contents = (self.lwr_outputs.output_directory_contents or [])
+ if version_file and COMMAND_VERSION_FILENAME in lwr_output_directory_contents:
+ action = self.action_mapper.action(version_file, 'output')
+ self.client.fetch_output(path=version_file, name=COMMAND_VERSION_FILENAME, action_type=action.action_type)
+
+ def __download_other_working_directory_files(self):
+ working_directory = self.galaxy_outputs.working_directory
+ # Fetch remaining working directory outputs of interest.
+ for name in self.working_directory_contents:
+ if name in self.downloaded_working_directory_files:
+ continue
+ if COPY_FROM_WORKING_DIRECTORY_PATTERN.match(name):
+ with self.exception_tracker():
+ output_file = join(working_directory, self.lwr_outputs.path_helper.local_name(name))
+ action = self.action_mapper.action(output_file, 'output_workdir')
+ self.client.fetch_work_dir_output(name, working_directory, output_file, action_type=action.action_type)
+ self.downloaded_working_directory_files.append(name)
+
+
+class DownloadExceptionTracker(object):
+
+ def __init__(self):
+ self.download_failure_exceptions = []
+
+ @contextmanager
+ def __call__(self):
+ try:
+ yield
+ except Exception as e:
+ self.download_failure_exceptions.append(e)
+
+
+def __clean(download_failure_exceptions, cleanup_job, client):
+ failed = (len(download_failure_exceptions) > 0)
+ if (not failed and cleanup_job != "never") or cleanup_job == "always":
+ try:
+ client.clean()
+ except:
+ log.warn("Failed to cleanup remote LWR job")
+ return failed
+
+__all__ = [finish_job]
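DownloadExceptionTracker lets ResultsDownloader.download() attempt every output even when individual fetches fail: each failure is caught, collected, and handed back for finish_job() to report instead of aborting the loop. A small standalone sketch of the pattern, with a simulated failure standing in for a real fetch:
    from galaxy.jobs.runners.lwr_client.staging.down import DownloadExceptionTracker
    tracker = DownloadExceptionTracker()
    for name in ["dataset_1.dat", "dataset_2.dat"]:
        with tracker():
            if name == "dataset_2.dat":
                raise IOError("simulated fetch failure for %s" % name)
    # The first fetch was unaffected by the second failing; the error is recorded.
    assert len(tracker.download_failure_exceptions) == 1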
diff -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 -r 9920d006a96618d7729ef09e0fb861b22ff530db lib/galaxy/jobs/runners/lwr_client/staging/up.py
--- /dev/null
+++ b/lib/galaxy/jobs/runners/lwr_client/staging/up.py
@@ -0,0 +1,373 @@
+from os.path import abspath, basename, join, exists
+from os.path import dirname
+from os.path import relpath
+from os import listdir, sep
+from re import findall
+from io import open
+
+from ..staging import COMMAND_VERSION_FILENAME
+from ..action_mapper import FileActionMapper
+from ..action_mapper import path_type
+from ..util import PathHelper
+from ..util import directory_files
+
+from logging import getLogger
+log = getLogger(__name__)
+
+
+def submit_job(client, client_job_description, job_config=None):
+ """
+ """
+ file_stager = FileStager(client, client_job_description, job_config)
+ rebuilt_command_line = file_stager.get_command_line()
+ job_id = file_stager.job_id
+ client.launch(rebuilt_command_line, requirements=client_job_description.requirements)
+ return job_id
+
+
+class FileStager(object):
+ """
+ Objects of the FileStager class interact with an LWR client object to
+ stage the files required to run jobs on a remote LWR server.
+
+ **Parameters**
+
+ client : JobClient
+ LWR client object.
+ client_job_description : client_job_description
+ Description of client view of job to stage and execute remotely.
+ """
+
+ def __init__(self, client, client_job_description, job_config):
+ """
+ """
+ self.client = client
+ self.command_line = client_job_description.command_line
+ self.config_files = client_job_description.config_files
+ self.input_files = client_job_description.input_files
+ self.output_files = client_job_description.output_files
+ self.tool_id = client_job_description.tool.id
+ self.tool_version = client_job_description.tool.version
+ self.tool_dir = abspath(client_job_description.tool.tool_dir)
+ self.working_directory = client_job_description.working_directory
+ self.version_file = client_job_description.version_file
+ self.arbitrary_files = client_job_description.arbitrary_files
+ self.rewrite_paths = client_job_description.rewrite_paths
+
+ # Set up job inputs; these will need to be rewritten before
+ # shipping off to the remote LWR server.
+ self.job_inputs = JobInputs(self.command_line, self.config_files)
+
+ self.action_mapper = FileActionMapper(client)
+ self.transfer_tracker = TransferTracker(client, self.action_mapper, self.job_inputs, rewrite_paths=self.rewrite_paths)
+
+ self.__handle_setup(job_config)
+ self.__initialize_referenced_tool_files()
+ if self.rewrite_paths:
+ self.__initialize_referenced_arbitrary_files()
+
+ self.__upload_tool_files()
+ self.__upload_input_files()
+ self.__upload_working_directory_files()
+ self.__upload_arbitrary_files()
+
+ if self.rewrite_paths:
+ self.__initialize_output_file_renames()
+ self.__initialize_task_output_file_renames()
+ self.__initialize_config_file_renames()
+ self.__initialize_version_file_rename()
+
+ self.__handle_rewrites()
+
+ self.__upload_rewritten_config_files()
+
+ def __handle_setup(self, job_config):
+ if not job_config:
+ job_config = self.client.setup(self.tool_id, self.tool_version)
+
+ self.new_working_directory = job_config['working_directory']
+ self.new_outputs_directory = job_config['outputs_directory']
+ # Default configs_directory to match remote working_directory to mimic
+ # behavior of older LWR servers.
+ self.new_configs_directory = job_config.get('configs_directory', self.new_working_directory)
+ self.remote_separator = self.__parse_remote_separator(job_config)
+ self.path_helper = PathHelper(self.remote_separator)
+ # If the remote LWR server assigned a job id, use that; otherwise
+ # just use the locally assigned job_id.
+ galaxy_job_id = self.client.job_id
+ self.job_id = job_config.get('job_id', galaxy_job_id)
+ if self.job_id != galaxy_job_id:
+ # Remote LWR server assigned an id different than the
+ # Galaxy job id, update client to reflect this.
+ self.client.job_id = self.job_id
+
+ def __parse_remote_separator(self, job_config):
+ separator = job_config.get("system_properties", {}).get("separator", None)
+ if not separator: # Legacy LWR
+ separator = job_config["path_separator"] # Poorly named
+ return separator
+
+ def __initialize_referenced_tool_files(self):
+ self.referenced_tool_files = self.job_inputs.find_referenced_subfiles(self.tool_dir)
+
+ def __initialize_referenced_arbitrary_files(self):
+ referenced_arbitrary_path_mappers = dict()
+ for mapper in self.action_mapper.unstructured_mappers():
+ mapper_pattern = mapper.to_pattern()
+ # TODO: Make more sophisticated, allow parent directories,
+ # grabbing sibling files based on patterns, etc...
+ paths = self.job_inputs.find_pattern_references(mapper_pattern)
+ for path in paths:
+ if path not in referenced_arbitrary_path_mappers:
+ referenced_arbitrary_path_mappers[path] = mapper
+ for path, mapper in referenced_arbitrary_path_mappers.iteritems():
+ action = self.action_mapper.action(path, path_type.UNSTRUCTURED, mapper)
+ unstructured_map = action.unstructured_map()
+ self.arbitrary_files.update(unstructured_map)
+
+ def __upload_tool_files(self):
+ for referenced_tool_file in self.referenced_tool_files:
+ self.transfer_tracker.handle_transfer(referenced_tool_file, path_type.TOOL)
+
+ def __upload_arbitrary_files(self):
+ for path, name in self.arbitrary_files.iteritems():
+ self.transfer_tracker.handle_transfer(path, path_type.UNSTRUCTURED, name=name)
+
+ def __upload_input_files(self):
+ for input_file in self.input_files:
+ self.__upload_input_file(input_file)
+ self.__upload_input_extra_files(input_file)
+
+ def __upload_input_file(self, input_file):
+ if self.__stage_input(input_file):
+ if exists(input_file):
+ self.transfer_tracker.handle_transfer(input_file, path_type.INPUT)
+ else:
+ message = "LWR: __upload_input_file called on empty or missing dataset." + \
+ " So such file: [%s]" % input_file
+ log.debug(message)
+
+ def __upload_input_extra_files(self, input_file):
+ files_path = "%s_files" % input_file[0:-len(".dat")]
+ if exists(files_path) and self.__stage_input(files_path):
+ for extra_file_name in directory_files(files_path):
+ extra_file_path = join(files_path, extra_file_name)
+ remote_name = self.path_helper.remote_name(relpath(extra_file_path, dirname(files_path)))
+ self.transfer_tracker.handle_transfer(extra_file_path, path_type.INPUT, name=remote_name)
+
+ def __upload_working_directory_files(self):
+ # The task manager stages files into the working directory; these need to
+ # be uploaded if present.
+ working_directory_files = listdir(self.working_directory) if exists(self.working_directory) else []
+ for working_directory_file in working_directory_files:
+ path = join(self.working_directory, working_directory_file)
+ self.transfer_tracker.handle_transfer(path, 'workdir')
+
+ def __initialize_version_file_rename(self):
+ version_file = self.version_file
+ if version_file:
+ remote_path = self.path_helper.remote_join(self.new_outputs_directory, COMMAND_VERSION_FILENAME)
+ self.transfer_tracker.register_rewrite(version_file, remote_path, path_type.OUTPUT)
+
+ def __initialize_output_file_renames(self):
+ for output_file in self.output_files:
+ remote_path = self.path_helper.remote_join(self.new_outputs_directory, basename(output_file))
+ self.transfer_tracker.register_rewrite(output_file, remote_path, path_type.OUTPUT)
+
+ def __initialize_task_output_file_renames(self):
+ for output_file in self.output_files:
+ name = basename(output_file)
+ task_file = join(self.working_directory, name)
+ remote_path = self.path_helper.remote_join(self.new_working_directory, name)
+ self.transfer_tracker.register_rewrite(task_file, remote_path, path_type.OUTPUT_WORKDIR)
+
+ def __initialize_config_file_renames(self):
+ for config_file in self.config_files:
+ remote_path = self.path_helper.remote_join(self.new_configs_directory, basename(config_file))
+ self.transfer_tracker.register_rewrite(config_file, remote_path, path_type.CONFIG)
+
+ def __handle_rewrites(self):
+ """
+ For each file that has been transferred and renamed, update
+ command_line and config files to reflect that rewrite.
+ """
+ self.transfer_tracker.rewrite_input_paths()
+
+ def __upload_rewritten_config_files(self):
+ for config_file, new_config_contents in self.job_inputs.config_files.items():
+ self.client.put_file(config_file, input_type='config', contents=new_config_contents)
+
+ def get_command_line(self):
+ """
+ Returns the rewritten version of the command line to execute, suitable
+ for the remote host.
+ """
+ return self.job_inputs.command_line
+
+ def __stage_input(self, file_path):
+ # If we have disabled path rewriting, just assume everything needs to be transferred,
+ # else check to ensure the file is referenced before transferring it.
+ return (not self.rewrite_paths) or self.job_inputs.path_referenced(file_path)
+
+
+class JobInputs(object):
+ """
+ Abstractions over dynamic inputs created for a given job (namely the command to
+ execute and created configfiles).
+
+ **Parameters**
+
+ command_line : str
+ Local command to execute for this job. (To be rewritten.)
+ config_files : list
+ Config files created for this job. (To be rewritten.)
+
+
+ >>> import tempfile
+ >>> tf = tempfile.NamedTemporaryFile()
+ >>> def setup_inputs(tf):
+ ... open(tf.name, "w").write(u"world /path/to/input the rest")
+ ... inputs = JobInputs(u"hello /path/to/input", [tf.name])
+ ... return inputs
+ >>> inputs = setup_inputs(tf)
+ >>> inputs.rewrite_paths(u"/path/to/input", u'C:\\input')
+ >>> inputs.command_line == u'hello C:\\\\input'
+ True
+ >>> inputs.config_files[tf.name] == u'world C:\\\\input the rest'
+ True
+ >>> tf.close()
+ >>> tf = tempfile.NamedTemporaryFile()
+ >>> inputs = setup_inputs(tf)
+ >>> inputs.find_referenced_subfiles('/path/to') == [u'/path/to/input']
+ True
+ >>> inputs.path_referenced('/path/to')
+ True
+ >>> inputs.path_referenced(u'/path/to')
+ True
+ >>> inputs.path_referenced('/path/to/input')
+ True
+ >>> inputs.path_referenced('/path/to/notinput')
+ False
+ >>> tf.close()
+ """
+
+ def __init__(self, command_line, config_files):
+ self.command_line = command_line
+ self.config_files = {}
+ for config_file in config_files or []:
+ config_contents = _read(config_file)
+ self.config_files[config_file] = config_contents
+
+ def find_pattern_references(self, pattern):
+ referenced_files = set()
+ for input_contents in self.__items():
+ referenced_files.update(findall(pattern, input_contents))
+ return list(referenced_files)
+
+ def find_referenced_subfiles(self, directory):
+ """
+ Return list of files below specified `directory` in job inputs. Could
+ use more sophisticated logic (match quotes to handle spaces, handle
+ subdirectories, etc...).
+
+ **Parameters**
+
+ directory : str
+ Full path to directory to search.
+
+ """
+ pattern = r"(%s%s\S+)" % (directory, sep)
+ return self.find_pattern_references(pattern)
+
+ def path_referenced(self, path):
+ pattern = r"%s" % path
+ found = False
+ for input_contents in self.__items():
+ if findall(pattern, input_contents):
+ found = True
+ break
+ return found
+
+ def rewrite_paths(self, local_path, remote_path):
+ """
+ Rewrite references to `local_path` with `remote_path` in job inputs.
+ """
+ self.__rewrite_command_line(local_path, remote_path)
+ self.__rewrite_config_files(local_path, remote_path)
+
+ def __rewrite_command_line(self, local_path, remote_path):
+ self.command_line = self.command_line.replace(local_path, remote_path)
+
+ def __rewrite_config_files(self, local_path, remote_path):
+ for config_file, contents in self.config_files.items():
+ self.config_files[config_file] = contents.replace(local_path, remote_path)
+
+ def __items(self):
+ items = [self.command_line]
+ items.extend(self.config_files.values())
+ return items
+
+
+class TransferTracker(object):
+
+ def __init__(self, client, action_mapper, job_inputs, rewrite_paths):
+ self.client = client
+ self.action_mapper = action_mapper
+
+ self.job_inputs = job_inputs
+ self.rewrite_paths = rewrite_paths
+ self.file_renames = {}
+
+ def handle_transfer(self, path, type, name=None, contents=None):
+ if contents:
+ # If contents loaded in memory, no need to write out file and copy,
+ # just transfer.
+ action_type = 'transfer'
+ else:
+ if not exists(path):
+ message = "handle_tranfer called on non-existent file - [%s]" % path
+ log.warn(message)
+ raise Exception(message)
+ action_type = self.__action(path, type).action_type
+
+ if action_type in ['transfer', 'copy']:
+ response = self.client.put_file(path, type, name=name, contents=contents)
+ register = self.rewrite_paths or type == 'tool' # Even if inputs not rewritten, tool must be.
+ if register:
+ self.register_rewrite(path, response['path'], type, force=True)
+ elif action_type == 'none':
+ # No action for this file.
+ pass
+ else:
+ raise Exception("Unknown action type (%s) encountered for path (%s)" % (action_type, path))
+
+ def register_rewrite(self, local_path, remote_path, type, force=False):
+ action = self.__action(local_path, type)
+ if action.action_type in ['transfer', 'copy'] or force:
+ self.file_renames[local_path] = remote_path
+
+ def rewrite_input_paths(self):
+ """
+ For each file that has been transferred and renamed, update
+ command_line and config files to reflect that rewrite.
+ """
+ for local_path, remote_path in self.file_renames.items():
+ self.job_inputs.rewrite_paths(local_path, remote_path)
+
+ def __action(self, path, type):
+ return self.action_mapper.action(path, type)
+
+
+def _read(path):
+ """
+ Utility method to quickly read small files (config files and tool
+ wrappers) into memory as text.
+ """
+ input = open(path, "r", encoding="utf-8")
+ try:
+ return input.read()
+ finally:
+ input.close()
+
+
+__all__ = [submit_job]
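All of the path rewriting above ultimately funnels through JobInputs.rewrite_paths(), driven by the renames TransferTracker registers after each transfer. A minimal sketch exercising just that piece without a client or staging directory - the paths are made up for illustration:
    from galaxy.jobs.runners.lwr_client.staging.up import JobInputs
    inputs = JobInputs(u"tool.py --in /galaxy/files/dataset_1.dat", [])
    inputs.rewrite_paths(u"/galaxy/files/dataset_1.dat",
                         u"/lwr/staging/123/inputs/dataset_1.dat")
    assert inputs.command_line == u"tool.py --in /lwr/staging/123/inputs/dataset_1.dat"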
diff -r 18b2967240e53d77ed87c3ef5aeaec532597bd58 -r 9920d006a96618d7729ef09e0fb861b22ff530db lib/galaxy/jobs/runners/lwr_client/util.py
--- a/lib/galaxy/jobs/runners/lwr_client/util.py
+++ b/lib/galaxy/jobs/runners/lwr_client/util.py
@@ -1,5 +1,85 @@
from threading import Lock, Event
from weakref import WeakValueDictionary
+from os import walk
+from os import curdir
+from os.path import relpath
+from os.path import join
+import os.path
+import hashlib
+
+
+def unique_path_prefix(path):
+ m = hashlib.md5()
+ m.update(path)
+ return m.hexdigest()
+
+
+def directory_files(directory):
+ """
+
+ >>> from tempfile import mkdtemp
+ >>> from shutil import rmtree
+ >>> from os.path import join
+ >>> from os import makedirs
+ >>> tempdir = mkdtemp()
+ >>> with open(join(tempdir, "moo"), "w") as f: pass
+ >>> directory_files(tempdir)
+ ['moo']
+ >>> subdir = join(tempdir, "cow", "sub1")
+ >>> makedirs(subdir)
+ >>> with open(join(subdir, "subfile1"), "w") as f: pass
+ >>> with open(join(subdir, "subfile2"), "w") as f: pass
+ >>> sorted(directory_files(tempdir))
+ ['cow/sub1/subfile1', 'cow/sub1/subfile2', 'moo']
+ >>> rmtree(tempdir)
+ """
+ contents = []
+ for path, _, files in walk(directory):
+ relative_path = relpath(path, directory)
+ for name in files:
+ # Return file1.txt, dataset_1_files/image.png, etc... don't
+ # include . in path.
+ if relative_path != curdir:
+ contents.append(join(relative_path, name))
+ else:
+ contents.append(name)
+ return contents
+
+
+class PathHelper(object):
+ '''
+
+ >>> import posixpath
+ >>> # Forcing local path to posixpath because LWR is designed to be used
+ >>> # with a posix client.
+ >>> posix_path_helper = PathHelper("/", local_path_module=posixpath)
+ >>> windows_slash = "\\\\"
+ >>> len(windows_slash)
+ 1
+ >>> nt_path_helper = PathHelper(windows_slash, local_path_module=posixpath)
+ >>> posix_path_helper.remote_name("moo/cow")
+ 'moo/cow'
+ >>> nt_path_helper.remote_name("moo/cow")
+ 'moo\\\\cow'
+ >>> posix_path_helper.local_name("moo/cow")
+ 'moo/cow'
+ >>> nt_path_helper.local_name("moo\\\\cow")
+ 'moo/cow'
+ '''
+
+ def __init__(self, separator, local_path_module=os.path):
+ self.separator = separator
+ self.local_join = local_path_module.join
+ self.local_sep = local_path_module.sep
+
+ def remote_name(self, local_name):
+ return self.remote_join(*local_name.split(self.local_sep))
+
+ def local_name(self, remote_name):
+ return self.local_join(*remote_name.split(self.separator))
+
+ def remote_join(self, *args):
+ return self.separator.join(args)
class TransferEventManager(object):
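FileStager composes remote output and config paths with PathHelper.remote_join() using the separator reported by the server, which is what lets a posix Galaxy client stage jobs to a Windows LWR server. A short illustrative sketch (assuming a posix Galaxy host; the staging directory name is made up):
    from galaxy.jobs.runners.lwr_client.util import PathHelper
    nt_helper = PathHelper("\\")  # separator as reported by a Windows LWR server
    remote_path = nt_helper.remote_join("C:\\lwr\\staging\\123\\outputs", "dataset_1.dat")
    assert remote_path == "C:\\lwr\\staging\\123\\outputs\\dataset_1.dat"
    # Local (posix) relative names are converted for the remote side as well.
    assert nt_helper.remote_name("dataset_1_files/summary.png") == "dataset_1_files\\summary.png"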
https://bitbucket.org/galaxy/galaxy-central/commits/6da672723d5c/
Changeset: 6da672723d5c
User: jmchilton
Date: 2014-01-03 00:50:11
Summary: Fix bug in from_work_dir location checking...
... would only be exhibited if using LWR + 'remote_metadata'.
Affected #: 1 file
diff -r 9920d006a96618d7729ef09e0fb861b22ff530db -r 6da672723d5c6e55347abb57de399460c39b5d38 lib/galaxy/jobs/runners/__init__.py
--- a/lib/galaxy/jobs/runners/__init__.py
+++ b/lib/galaxy/jobs/runners/__init__.py
@@ -179,7 +179,7 @@
# TODO: move instead of copy to save time?
source_file = os.path.join( job_working_directory, hda_tool_output.from_work_dir )
destination = job_wrapper.get_output_destination( output_paths[ dataset.dataset_id ] )
- if in_directory( source_file, job_wrapper.working_directory ):
+ if in_directory( source_file, job_working_directory ):
output_pairs.append( ( source_file, destination ) )
log.debug( "Copying %s to %s as directed by from_work_dir" % ( source_file, destination ) )
else:
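source_file is built from job_working_directory a few lines earlier, so the containment check has to use that same directory; with LWR plus 'remote_metadata' that directory presumably differs from job_wrapper.working_directory, which is how the old check could reject legitimate from_work_dir outputs. A simplified stand-in for the containment test - not Galaxy's actual in_directory() implementation - just to show what is being asked:
    import os
    def in_directory(file_path, directory):
        # Simplified: is file_path located under directory?
        directory = os.path.realpath(directory) + os.sep
        return os.path.realpath(file_path).startswith(directory)
    source_file = "/staging/123/working/from_work_dir_output.txt"
    assert in_directory(source_file, "/staging/123/working")
    assert not in_directory(source_file, "/galaxy/job_working_dir/000/123")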
https://bitbucket.org/galaxy/galaxy-central/commits/4529b941cf27/
Changeset: 4529b941cf27
User: jmchilton
Date: 2014-01-03 00:50:12
Summary: PEP-8 fixes for local job runner.
Affected #: 1 file
diff -r 6da672723d5c6e55347abb57de399460c39b5d38 -r 4529b941cf2751323d2a5cb8875dfe8bf3ccb02c lib/galaxy/jobs/runners/local.py
--- a/lib/galaxy/jobs/runners/local.py
+++ b/lib/galaxy/jobs/runners/local.py
@@ -18,11 +18,13 @@
__all__ = [ 'LocalJobRunner' ]
+
class LocalJobRunner( BaseJobRunner ):
"""
Job runner backed by a finite pool of worker threads. FIFO scheduling
"""
runner_name = "LocalRunner"
+
def __init__( self, app, nworkers ):
"""Start the job runner """
@@ -74,13 +76,13 @@
log.debug( '(%s) executing: %s' % ( job_id, command_line ) )
stdout_file = tempfile.NamedTemporaryFile( suffix='_stdout', dir=job_wrapper.working_directory )
stderr_file = tempfile.NamedTemporaryFile( suffix='_stderr', dir=job_wrapper.working_directory )
- proc = subprocess.Popen( args = command_line,
- shell = True,
- cwd = job_wrapper.working_directory,
- stdout = stdout_file,
- stderr = stderr_file,
- env = self._environ,
- preexec_fn = os.setpgrp )
+ proc = subprocess.Popen( args=command_line,
+ shell=True,
+ cwd=job_wrapper.working_directory,
+ stdout=stdout_file,
+ stderr=stderr_file,
+ env=self._environ,
+ preexec_fn=os.setpgrp )
job_wrapper.set_job_destination(job_wrapper.job_destination, proc.pid)
job_wrapper.change_state( model.Job.states.RUNNING )
job_start = datetime.datetime.now()
@@ -89,7 +91,7 @@
while proc.poll() is None:
i += 1
if (i % 20) == 0:
- msg = job_wrapper.check_limits(runtime = datetime.datetime.now() - job_start)
+ msg = job_wrapper.check_limits(runtime=datetime.datetime.now() - job_start)
if msg is not None:
job_wrapper.fail(msg)
log.debug('(%s) Terminating process group' % job_id)
@@ -122,7 +124,7 @@
#if our local job has JobExternalOutputMetadata associated, then our primary job has to have already finished
job_ext_output_metadata = job.get_external_output_metadata()
if job_ext_output_metadata:
- pid = job_ext_output_metadata[0].job_runner_external_pid #every JobExternalOutputMetadata has a pid set, we just need to take from one of them
+ pid = job_ext_output_metadata[0].job_runner_external_pid # every JobExternalOutputMetadata has a pid set, we just need to take from one of them
else:
pid = job.get_job_runner_external_id()
if pid in [ None, '' ]:
@@ -140,14 +142,14 @@
return # give up
sleep( 2 )
if not self._check_pid( pid ):
- log.debug( "stop_job(): %s: PID %d successfully killed with signal %d" %( job.get_id(), pid, sig ) )
+ log.debug( "stop_job(): %s: PID %d successfully killed with signal %d" % ( job.get_id(), pid, sig ) )
return
else:
- log.warning( "stop_job(): %s: PID %d refuses to die after signaling TERM/KILL" %( job.get_id(), pid ) )
+ log.warning( "stop_job(): %s: PID %d refuses to die after signaling TERM/KILL" % ( job.get_id(), pid ) )
def recover( self, job, job_wrapper ):
# local jobs can't be recovered
- job_wrapper.change_state( model.Job.states.ERROR, info = "This job was killed when Galaxy was restarted. Please retry the job." )
+ job_wrapper.change_state( model.Job.states.ERROR, info="This job was killed when Galaxy was restarted. Please retry the job." )
def _check_pid( self, pid ):
try:
@@ -157,7 +159,7 @@
if e.errno == errno.ESRCH:
log.debug( "_check_pid(): PID %d is dead" % pid )
else:
- log.warning( "_check_pid(): Got errno %s when attempting to check PID %d: %s" %( errno.errorcode[e.errno], pid, e.strerror ) )
+ log.warning( "_check_pid(): Got errno %s when attempting to check PID %d: %s" % ( errno.errorcode[e.errno], pid, e.strerror ) )
return False
def _terminate( self, proc ):
@@ -165,4 +167,4 @@
sleep( 1 )
if proc.poll() is None:
os.killpg( proc.pid, 9 )
- return proc.wait() # reap
+ return proc.wait() # reap
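Beyond the whitespace cleanup, these hunks show the pattern the local runner relies on: the child is launched in its own process group (preexec_fn=os.setpgrp) so that stop_job()/_terminate() can later signal the whole group and reap it. A minimal standalone, Unix-only sketch of that pattern; the command and timings are illustrative:
    import os
    import signal
    import subprocess
    from time import sleep
    proc = subprocess.Popen(args="sleep 60", shell=True, preexec_fn=os.setpgrp)
    os.killpg(proc.pid, signal.SIGTERM)  # signal the whole group, not just one pid
    sleep(1)
    if proc.poll() is None:
        os.killpg(proc.pid, signal.SIGKILL)
    proc.wait()  # reap the child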
https://bitbucket.org/galaxy/galaxy-central/commits/18e6f346d6b1/
Changeset: 18e6f346d6b1
User: jmchilton
Date: 2014-01-03 00:50:12
Summary: PEP-8 fixes for jobs/__init__.py
Affected #: 1 file
diff -r 4529b941cf2751323d2a5cb8875dfe8bf3ccb02c -r 18e6f346d6b13521c9757989bfb7fc06791e561b lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -58,6 +58,7 @@
if self.tags is not None:
self['tags'] = [ x.strip() for x in self.tags.split(',') ]
+
class JobToolConfiguration( Bunch ):
"""
Provides details on what handler and destination a tool should use
@@ -71,12 +72,14 @@
self['params'] = dict()
super(JobToolConfiguration, self).__init__(**kwds)
+
class JobConfiguration( object ):
"""A parser and interface to advanced job management features.
These features are configured in the job configuration, by default, ``job_conf.xml``
"""
DEFAULT_NWORKERS = 4
+
def __init__(self, app):
"""Parse the job configuration XML.
"""
@@ -172,17 +175,17 @@
self.tools[id].append(JobToolConfiguration(**dict(tool.items())))
self.tools[id][-1]['params'] = self.__get_params(tool)
- types = dict(registered_user_concurrent_jobs = int,
- anonymous_user_concurrent_jobs = int,
- walltime = str,
- output_size = int)
+ types = dict(registered_user_concurrent_jobs=int,
+ anonymous_user_concurrent_jobs=int,
+ walltime=str,
+ output_size=int)
- self.limits = Bunch(registered_user_concurrent_jobs = None,
- anonymous_user_concurrent_jobs = None,
- walltime = None,
- walltime_delta = None,
- output_size = None,
- concurrent_jobs = {})
+ self.limits = Bunch(registered_user_concurrent_jobs=None,
+ anonymous_user_concurrent_jobs=None,
+ walltime=None,
+ walltime_delta=None,
+ output_size=None,
+ concurrent_jobs={})
# Parse job limits
limits = root.find('limits')
@@ -253,12 +256,12 @@
self.default_destination_id = self.app.config.default_cluster_job_runner
# Set the job limits
- self.limits = Bunch(registered_user_concurrent_jobs = self.app.config.registered_user_job_limit,
- anonymous_user_concurrent_jobs = self.app.config.anonymous_user_job_limit,
- walltime = self.app.config.job_walltime,
- walltime_delta = self.app.config.job_walltime_delta,
- output_size = self.app.config.output_size_limit,
- concurrent_jobs = {})
+ self.limits = Bunch(registered_user_concurrent_jobs=self.app.config.registered_user_job_limit,
+ anonymous_user_concurrent_jobs=self.app.config.anonymous_user_job_limit,
+ walltime=self.app.config.job_walltime,
+ walltime_delta=self.app.config.job_walltime_delta,
+ output_size=self.app.config.output_size_limit,
+ concurrent_jobs={})
log.debug('Done loading job configuration')
@@ -533,6 +536,7 @@
else:
log.warning("Legacy destination with id '%s' could not be converted: Unknown runner plugin: %s" % (id, destination.runner))
+
class JobWrapper( object ):
"""
Wraps a 'model.Job' with convenience methods for running processes and
@@ -627,7 +631,7 @@
Prepare the job to run by creating the working directory and the
config files.
"""
- self.sa_session.expunge_all() #this prevents the metadata reverting that has been seen in conjunction with the PBS job runner
+ self.sa_session.expunge_all() # this prevents the metadata reverting that has been seen in conjunction with the PBS job runner
if not os.path.exists( self.working_directory ):
os.mkdir( self.working_directory )
@@ -686,7 +690,7 @@
self.command_line = self.tool.build_command_line( param_dict )
# FIXME: for now, tools get Galaxy's lib dir in their path
if self.command_line and self.command_line.startswith( 'python' ):
- self.galaxy_lib_dir = os.path.abspath( "lib" ) # cwd = galaxy root
+ self.galaxy_lib_dir = os.path.abspath( "lib" ) # cwd = galaxy root
# Shell fragment to inject dependencies
self.dependency_shell_commands = self.tool.build_dependency_shell_commands()
# We need command_line persisted to the db in order for Galaxy to re-queue the job
@@ -719,7 +723,7 @@
job.traceback = traceback.format_exc()
# Get the exception and let the tool attempt to generate
# a better message
- etype, evalue, tb = sys.exc_info()
+ etype, evalue, tb = sys.exc_info()
m = self.tool.handle_job_failure_exception( evalue )
if m:
message = m
@@ -790,7 +794,7 @@
job.state = job.states.PAUSED
self.sa_session.add( job )
- def change_state( self, state, info = False ):
+ def change_state( self, state, info=False ):
job = self.get_job()
self.sa_session.refresh( job )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
@@ -816,7 +820,7 @@
log.warning('set_runner() is deprecated, use set_job_destination()')
self.set_job_destination(self.job_destination, external_id)
- def set_job_destination(self, job_destination, external_id=None ):
+ def set_job_destination( self, job_destination, external_id=None ):
"""
Persist job destination params in the database for recovery.
@@ -896,11 +900,11 @@
job.state = final_job_state
return self.fail( "Job %s's output dataset(s) could not be read" % job.id )
- job_context = ExpressionContext( dict( stdout = job.stdout, stderr = job.stderr ) )
+ job_context = ExpressionContext( dict( stdout=job.stdout, stderr=job.stderr ) )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
#should this also be checking library associations? - can a library item be added from a history before the job has ended? - lets not allow this to occur
- for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations: #need to update all associated output hdas, i.e. history was shared with job running
+ for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations: # need to update all associated output hdas, i.e. history was shared with job running
trynum = 0
while trynum < self.app.config.retry_job_output_collection:
try:
@@ -911,9 +915,9 @@
except ( OSError, ObjectNotFound ), e:
trynum += 1
log.warning( 'Error accessing %s, will retry: %s', dataset.dataset.file_name, e )
- time.sleep( 2 )
+ time.sleep( 2 )
dataset.blurb = 'done'
- dataset.peek = 'no peek'
+ dataset.peek = 'no peek'
dataset.info = (dataset.info or '')
if context['stdout'].strip():
#Ensure white space between entries
@@ -941,7 +945,7 @@
#it would be quicker to just copy the metadata from the originating output dataset,
#but somewhat trickier (need to recurse up the copied_from tree), for now we'll call set_meta()
if ( not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and self.app.config.retry_metadata_internally ):
- dataset.datatype.set_meta( dataset, overwrite = False ) #call datatype.set_meta directly for the initial set_meta call during dataset creation
+ dataset.datatype.set_meta( dataset, overwrite=False ) # call datatype.set_meta directly for the initial set_meta call during dataset creation
elif not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) and job.states.ERROR != final_job_state:
dataset._state = model.Dataset.states.FAILED_METADATA
else:
@@ -1011,19 +1015,22 @@
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] )
out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
- param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows
+ param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows
param_dict = self.tool.params_from_strings( param_dict, self.app )
# Check for and move associated_files
self.tool.collect_associated_files(out_data, self.working_directory)
gitd = self.sa_session.query( model.GenomeIndexToolData ).filter_by( job=job ).first()
if gitd:
- self.tool.collect_associated_files({'' : gitd}, self.working_directory)
+ self.tool.collect_associated_files({'': gitd}, self.working_directory)
# Create generated output children and primary datasets and add to param_dict
- collected_datasets = {'children':self.tool.collect_child_datasets(out_data, self.working_directory),'primary':self.tool.collect_primary_datasets(out_data, self.working_directory)}
- param_dict.update({'__collected_datasets__':collected_datasets})
+ collected_datasets = {
+ 'children': self.tool.collect_child_datasets(out_data, self.working_directory),
+ 'primary': self.tool.collect_primary_datasets(out_data, self.working_directory)
+ }
+ param_dict.update({'__collected_datasets__': collected_datasets})
# Certain tools require tasks to be completed after job execution
# ( this used to be performed in the "exec_after_process" hook, but hooks are deprecated ).
- self.tool.exec_after_process( self.queue.app, inp_data, out_data, param_dict, job = job )
+ self.tool.exec_after_process( self.queue.app, inp_data, out_data, param_dict, job=job )
# Call 'exec_after_process' hook
self.tool.call_hook( 'exec_after_process', self.queue.app, inp_data=inp_data,
out_data=out_data, param_dict=param_dict,
@@ -1114,7 +1121,7 @@
def get_input_fnames( self ):
job = self.get_job()
filenames = []
- for da in job.input_datasets + job.input_library_datasets: #da is JobToInputDatasetAssociation object
+ for da in job.input_datasets + job.input_library_datasets: # da is JobToInputDatasetAssociation object
if da.dataset:
filenames.extend(self.get_input_dataset_fnames(da.dataset))
return filenames
@@ -1136,11 +1143,12 @@
def compute_outputs( self ) :
class DatasetPath( object ):
- def __init__( self, dataset_id, real_path, false_path = None, mutable = True ):
+ def __init__( self, dataset_id, real_path, false_path=None, mutable=True ):
self.dataset_id = dataset_id
self.real_path = real_path
self.false_path = false_path
self.mutable = mutable
+
def __str__( self ):
if self.false_path is None:
return self.real_path
@@ -1157,13 +1165,13 @@
self.output_hdas_and_paths = {}
for name, hda in [ ( da.name, da.dataset ) for da in job.output_datasets + job.output_library_datasets ]:
false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % hda.dataset.id ) )
- dsp = DatasetPath( hda.dataset.id, hda.dataset.file_name, false_path, mutable = hda.dataset.external_filename is None )
+ dsp = DatasetPath( hda.dataset.id, hda.dataset.file_name, false_path, mutable=hda.dataset.external_filename is None )
self.output_paths.append( dsp )
self.output_hdas_and_paths[name] = hda, dsp
if special:
false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % special.dataset.id ) )
else:
- results = [ ( da.name, da.dataset, DatasetPath( da.dataset.dataset.id, da.dataset.file_name, mutable = da.dataset.dataset.external_filename is None ) ) for da in job.output_datasets + job.output_library_datasets ]
+ results = [ ( da.name, da.dataset, DatasetPath( da.dataset.dataset.id, da.dataset.file_name, mutable=da.dataset.dataset.external_filename is None ) ) for da in job.output_datasets + job.output_library_datasets ]
self.output_paths = [t[2] for t in results]
self.output_hdas_and_paths = dict([(t[0], t[1:]) for t in results])
if special:
@@ -1238,13 +1246,13 @@
datatypes_config = self.app.datatypes_registry.integrated_datatypes_configs
return self.external_output_metadata.setup_external_metadata( [ output_dataset_assoc.dataset for output_dataset_assoc in job.output_datasets ],
self.sa_session,
- exec_dir = exec_dir,
- tmp_dir = tmp_dir,
- dataset_files_path = dataset_files_path,
- config_root = config_root,
- config_file = config_file,
- datatypes_config = datatypes_config,
- job_metadata = os.path.join( self.working_directory, TOOL_PROVIDED_JOB_METADATA_FILE ),
+ exec_dir=exec_dir,
+ tmp_dir=tmp_dir,
+ dataset_files_path=dataset_files_path,
+ config_root=config_root,
+ config_file=config_file,
+ datatypes_config=datatypes_config,
+ job_metadata=os.path.join( self.working_directory, TOOL_PROVIDED_JOB_METADATA_FILE ),
**kwds )
@property
@@ -1318,7 +1326,7 @@
just copy these files directly to the ultimate destination.
"""
return output_path
-
+
@property
def requires_setting_metadata( self ):
if self.tool:
@@ -1391,7 +1399,7 @@
out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
# DBTODO New method for generating command line for a task?
# These can be passed on the command line if wanted as $userId $userEmail
- if job.history and job.history.user: # check for anonymous user!
+ if job.history and job.history.user: # check for anonymous user!
userId = '%d' % job.history.user.id
userEmail = str(job.history.user.email)
else:
@@ -1430,7 +1438,7 @@
self.command_line = self.command_line.replace(k, v)
# FIXME: for now, tools get Galaxy's lib dir in their path
if self.command_line and self.command_line.startswith( 'python' ):
- self.galaxy_lib_dir = os.path.abspath( "lib" ) # cwd = galaxy root
+ self.galaxy_lib_dir = os.path.abspath( "lib" ) # cwd = galaxy root
# Shell fragment to inject dependencies
self.dependency_shell_commands = self.tool.build_dependency_shell_commands()
# We need command_line persisted to the db in order for Galaxy to re-queue the job
@@ -1452,7 +1460,7 @@
self.status = 'error'
# How do we want to handle task failure? Fail the job and let it clean up?
- def change_state( self, state, info = False ):
+ def change_state( self, state, info=False ):
task = self.get_task()
self.sa_session.refresh( task )
if info:
@@ -1568,11 +1576,14 @@
"""
def put( self, *args, **kwargs ):
return
+
def put_stop( self, *args ):
return
+
def shutdown( self ):
return
+
class ParallelismInfo(object):
"""
Stores the information (if any) for running multiple instances of the tool in parallel
@@ -1584,7 +1595,7 @@
items = tag.iteritems()
else:
items = tag.attrib.items()
- self.attributes = dict([item for item in items if item[0] != 'method' ])
+ self.attributes = dict( [ item for item in items if item[ 0 ] != 'method' ])
if len(self.attributes) == 0:
# legacy basic mode - provide compatible defaults
self.attributes['split_size'] = 20
https://bitbucket.org/galaxy/galaxy-central/commits/ebbd49858ce9/
Changeset: ebbd49858ce9
User: jmchilton
Date: 2014-01-03 00:50:13
Summary: PEP-8 fixes task runner.
Affected #: 1 file
diff -r 18e6f346d6b13521c9757989bfb7fc06791e561b -r ebbd49858ce94bd99805622456268e84bcbc5d7f lib/galaxy/jobs/runners/tasks.py
--- a/lib/galaxy/jobs/runners/tasks.py
+++ b/lib/galaxy/jobs/runners/tasks.py
@@ -1,11 +1,10 @@
import logging
import subprocess
-from Queue import Queue
-import threading
from galaxy import model
-import os, errno
+import os
+import errno
from time import sleep
from galaxy.jobs import TaskWrapper
@@ -15,11 +14,13 @@
__all__ = [ 'TaskedJobRunner' ]
+
class TaskedJobRunner( BaseJobRunner ):
"""
Job runner backed by a finite pool of worker threads. FIFO scheduling
"""
runner_name = "TaskRunner"
+
def __init__( self, app, nworkers ):
"""Start the job runner with 'nworkers' worker threads"""
super( TaskedJobRunner, self ).__init__( app, nworkers )
@@ -124,14 +125,14 @@
#this is terminate-able when output dataset/job is deleted
#so that long running set_meta()s can be canceled without having to reboot the server
if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
- external_metadata_script = job_wrapper.setup_external_metadata( output_fnames = job_wrapper.get_output_fnames(),
- set_extension = True,
- kwds = { 'overwrite' : False } ) #we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
+ external_metadata_script = job_wrapper.setup_external_metadata( output_fnames=job_wrapper.get_output_fnames(),
+ set_extension=True,
+ kwds={ 'overwrite' : False } ) # we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
- external_metadata_proc = subprocess.Popen( args = external_metadata_script,
- shell = True,
- env = os.environ,
- preexec_fn = os.setpgrp )
+ external_metadata_proc = subprocess.Popen( args=external_metadata_script,
+ shell=True,
+ env=os.environ,
+ preexec_fn=os.setpgrp )
job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
external_metadata_proc.wait()
log.debug( 'execution of external set_meta finished for job %d' % job_wrapper.job_id )
@@ -160,7 +161,7 @@
else:
#if our local job has JobExternalOutputMetadata associated, then our primary job has to have already finished
if job.external_output_metadata:
- pid = job.external_output_metadata[0].job_runner_external_pid #every JobExternalOutputMetadata has a pid set, we just need to take from one of them
+ pid = job.external_output_metadata[0].job_runner_external_pid # every JobExternalOutputMetadata has a pid set, we just need to take from one of them
else:
pid = job.job_runner_external_id
if pid in [ None, '' ]:
@@ -170,7 +171,7 @@
def recover( self, job, job_wrapper ):
# DBTODO Task Recovery, this should be possible.
- job_wrapper.change_state( model.Job.states.ERROR, info = "This job was killed when Galaxy was restarted. Please retry the job." )
+ job_wrapper.change_state( model.Job.states.ERROR, info="This job was killed when Galaxy was restarted. Please retry the job." )
def _cancel_job( self, job_wrapper, task_wrappers ):
"""
@@ -227,7 +228,7 @@
if e.errno == errno.ESRCH:
log.debug( "_check_pid(): PID %d is dead" % pid )
else:
- log.warning( "_check_pid(): Got errno %s when attempting to check PID %d: %s" %( errno.errorcode[e.errno], pid, e.strerror ) )
+ log.warning( "_check_pid(): Got errno %s when attempting to check PID %d: %s" % ( errno.errorcode[e.errno], pid, e.strerror ) )
return False
def _stop_pid( self, pid, job_id ):
@@ -254,7 +255,7 @@
# avoid a two-second overhead using some other asynchronous method.
sleep( 2 )
if not self._check_pid( pid ):
- log.debug( "_stop_pid(): %s: PID %d successfully killed with signal %d" %( job_id, pid, sig ) )
+ log.debug( "_stop_pid(): %s: PID %d successfully killed with signal %d" % ( job_id, pid, sig ) )
return
else:
- log.warning( "_stop_pid(): %s: PID %d refuses to die after signaling TERM/KILL" %( job_id, pid ) )
+ log.warning( "_stop_pid(): %s: PID %d refuses to die after signaling TERM/KILL" % ( job_id, pid ) )
https://bitbucket.org/galaxy/galaxy-central/commits/6facd407e247/
Changeset: 6facd407e247
User: jmchilton
Date: 2014-01-03 00:50:14
Summary: PEP-8 fixes for set_metadata.py.
Affected #: 1 file
diff -r ebbd49858ce94bd99805622456268e84bcbc5d7f -r 6facd407e2479f56fac8530a2f413226d497dfb2 scripts/set_metadata.py
--- a/scripts/set_metadata.py
+++ b/scripts/set_metadata.py
@@ -10,7 +10,9 @@
logging.basicConfig()
log = logging.getLogger( __name__ )
-import os, sys, cPickle
+import os
+import sys
+import cPickle
# ensure supported version
from check_python import check_python
try:
@@ -19,15 +21,15 @@
sys.exit(1)
new_path = [ os.path.join( os.getcwd(), "lib" ) ]
-new_path.extend( sys.path[1:] ) # remove scripts/ from the path
+new_path.extend( sys.path[ 1: ] ) # remove scripts/ from the path
sys.path = new_path
from galaxy import eggs
import pkg_resources
pkg_resources.require("simplejson")
import simplejson
-import galaxy.model.mapping #need to load this before we unpickle, in order to setup properties assigned by the mappers
-galaxy.model.Job() #this looks REAL stupid, but it is REQUIRED in order for SA to insert parameters into the classes defined by the mappers --> it appears that instantiating ANY mapper'ed class would suffice here
+import galaxy.model.mapping # need to load this before we unpickle, in order to setup properties assigned by the mappers
+galaxy.model.Job() # this looks REAL stupid, but it is REQUIRED in order for SA to insert parameters into the classes defined by the mappers --> it appears that instantiating ANY mapper'ed class would suffice here
from galaxy.util import stringify_dictionary_keys
from galaxy.util.json import from_json_string
from sqlalchemy.orm import clear_mappers
@@ -35,6 +37,7 @@
from galaxy import config
import ConfigParser
+
def __main__():
file_path = sys.argv.pop( 1 )
tmp_dir = sys.argv.pop( 1 )
@@ -45,7 +48,7 @@
config_file_name = sys.argv.pop( 1 )
if not os.path.isabs( config_file_name ):
config_file_name = os.path.join( config_root, config_file_name )
-
+
# Set up reference to object store
# First, read in the main config file for Galaxy; this is required because
# the object store configuration is stored there
@@ -65,7 +68,7 @@
universe_config = config.Configuration(**conf_dict)
object_store = build_object_store_from_config(universe_config)
galaxy.model.Dataset.object_store = object_store
-
+
# Set up datatypes registry
datatypes_config = sys.argv.pop( 1 )
datatypes_registry = galaxy.datatypes.registry.Registry()
@@ -89,32 +92,32 @@
filename_out = fields.pop( 0 )
filename_results_code = fields.pop( 0 )
dataset_filename_override = fields.pop( 0 )
- #Need to be careful with the way that these parameters are populated from the filename splitting,
- #because if a job is running when the server is updated, any existing external metadata command-lines
+ # Need to be careful with the way that these parameters are populated from the filename splitting,
+ # because if a job is running when the server is updated, any existing external metadata command-lines
#will not have info about the newly added override_metadata file
if fields:
override_metadata = fields.pop( 0 )
else:
override_metadata = None
try:
- dataset = cPickle.load( open( filename_in ) ) #load DatasetInstance
+ dataset = cPickle.load( open( filename_in ) ) # load DatasetInstance
if dataset_filename_override:
dataset.dataset.external_filename = dataset_filename_override
if ext_override.get( dataset.dataset.id, None ):
dataset.extension = ext_override[ dataset.dataset.id ]
- #Metadata FileParameter types may not be writable on a cluster node, and are therefore temporarily substituted with MetadataTempFiles
+ # Metadata FileParameter types may not be writable on a cluster node, and are therefore temporarily substituted with MetadataTempFiles
if override_metadata:
override_metadata = simplejson.load( open( override_metadata ) )
for metadata_name, metadata_file_override in override_metadata:
if galaxy.datatypes.metadata.MetadataTempFile.is_JSONified_value( metadata_file_override ):
metadata_file_override = galaxy.datatypes.metadata.MetadataTempFile.from_JSON( metadata_file_override )
setattr( dataset.metadata, metadata_name, metadata_file_override )
- kwds = stringify_dictionary_keys( simplejson.load( open( filename_kwds ) ) )#load kwds; need to ensure our keywords are not unicode
+ kwds = stringify_dictionary_keys( simplejson.load( open( filename_kwds ) ) ) # load kwds; need to ensure our keywords are not unicode
dataset.datatype.set_meta( dataset, **kwds )
- dataset.metadata.to_JSON_dict( filename_out ) # write out results of set_meta
- simplejson.dump( ( True, 'Metadata has been set successfully' ), open( filename_results_code, 'wb+' ) ) #setting metadata has succeeded
+ dataset.metadata.to_JSON_dict( filename_out ) # write out results of set_meta
+ simplejson.dump( ( True, 'Metadata has been set successfully' ), open( filename_results_code, 'wb+' ) ) # setting metadata has succeeded
except Exception, e:
- simplejson.dump( ( False, str( e ) ), open( filename_results_code, 'wb+' ) ) #setting metadata has failed somehow
+ simplejson.dump( ( False, str( e ) ), open( filename_results_code, 'wb+' ) ) # setting metadata has failed somehow
clear_mappers()
# Shut down any additional threads that might have been created via the ObjectStore
object_store.shutdown()
https://bitbucket.org/galaxy/galaxy-central/commits/de7ed9e75dda/
Changeset: de7ed9e75dda
User: jmchilton
Date: 2014-01-03 00:50:14
Summary: Work on unit test path problems.
Move test/unit/tool_shed to test/unit/tool_shed_unit_tests.
Affected #: 5 files
diff -r 6facd407e2479f56fac8530a2f413226d497dfb2 -r de7ed9e75dda6b8d9a364a56f9ac29a085a9e3cc test/unit/tool_shed/test_fabric_util.py
--- a/test/unit/tool_shed/test_fabric_util.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from contextlib import contextmanager
-from tool_shed.galaxy_install.tool_dependencies import fabric_util
-
-
-def test_env_file_builder():
- install_dir = "/opt/galaxy/dependencies/foo/"
- env_file_builder = fabric_util.EnvFileBuilder( install_dir )
- added_lines = []
- mock_return = dict(value=0)
-
- def mock_file_append( text, file_path, **kwds ):
- added_lines.append(text)
- return mock_return["value"]
-
- with __mock_fabric_util_method("file_append", mock_file_append):
- env_file_builder.append_line( name="PATH", action="prepend_to", value="/usr/bin/local/R" )
- assert added_lines == [ "PATH=/usr/bin/local/R:$PATH; export PATH" ]
- assert env_file_builder.return_code == 0
-
- # Reset mock lines
- del added_lines[:]
- # Next time file_append will fail
- mock_return["value"] = 1
-
- env_file_builder.append_line( action="source", value="/usr/bin/local/R/env.sh" )
- assert added_lines == [ "if [ -f /usr/bin/local/R/env.sh ] ; then . /usr/bin/local/R/env.sh ; fi" ]
- # Check failure
- assert env_file_builder.return_code == 1
-
- mock_return["value"] = 0
- env_file_builder.append_line( name="LD_LIBRARY_PATH", action="append_to", value="/usr/bin/local/R/lib" )
- # Verify even though last append succeeded, previous failure still recorded.
- assert env_file_builder.return_code == 1
-
-
-## Poor man's mocking. Need to get a real mocking library as real Galaxy development
-## dependency.
-@contextmanager
-def __mock_fabric_util_method(name, mock_method):
- real_method = getattr(fabric_util, name)
- try:
- setattr(fabric_util, name, mock_method)
- yield
- finally:
- setattr(fabric_util, name, real_method)
diff -r 6facd407e2479f56fac8530a2f413226d497dfb2 -r de7ed9e75dda6b8d9a364a56f9ac29a085a9e3cc test/unit/tool_shed/test_td_common_util.py
--- a/test/unit/tool_shed/test_td_common_util.py
+++ /dev/null
@@ -1,80 +0,0 @@
-from os.path import join
-from contextlib import contextmanager
-from galaxy.util import parse_xml_string
-
-from tool_shed.galaxy_install.tool_dependencies import fabric_util
-from tool_shed.galaxy_install.tool_dependencies import td_common_util
-
-
-TEST_DEPENDENCIES_DIR = "/opt/galaxy/dependencies"
-TEST_INSTALL_DIR = "%s/test_install_dir" % TEST_DEPENDENCIES_DIR
-
-
-class MockApp( object ):
-
- def __init__( self ):
- pass
-
-def test_create_or_update_env_shell_file():
- test_path = "/usr/share/R/libs"
- env_file_builder = fabric_util.EnvFileBuilder( test_path )
- line, path = env_file_builder.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict( action="append_to", name="R_LIBS", value=test_path ) )
- assert path == join( TEST_INSTALL_DIR, "env.sh" )
- assert line == "R_LIBS=$R_LIBS:/usr/share/R/libs; export R_LIBS"
-
- line, path = env_file_builder.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict( action="prepend_to", name="R_LIBS", value=test_path ) )
- assert path == join( TEST_INSTALL_DIR, "env.sh" )
- assert line == "R_LIBS=/usr/share/R/libs:$R_LIBS; export R_LIBS"
-
- line, path = env_file_builder.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict( action="set_to", name="R_LIBS", value=test_path ) )
- assert path == join( TEST_INSTALL_DIR, "env.sh" )
- assert line == "R_LIBS=/usr/share/R/libs; export R_LIBS"
-
- line, path = env_file_builder.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict( action="source", value=test_path ) )
- assert path == join( TEST_INSTALL_DIR, "env.sh" )
- assert line == "if [ -f /usr/share/R/libs ] ; then . /usr/share/R/libs ; fi"
-
-def test_get_env_shell_file_paths_from_setup_environment_elem():
- xml = """<action name="setup_r_environment">
- <repository name="package_r_3_0_1" owner="bgruening" toolshed="toolshed.g2.bx.psu.edu" changeset_revision="1234567">
- <package name="R" version="3.0.1" />
- </repository>
- </action>
- """
- mock_app = MockApp()
- action_elem = parse_xml_string( xml )
- required_for_install_env_sh = '/path/to/existing.sh'
- all_env_paths = [ required_for_install_env_sh ]
- action_dict = {}
-
- r_env_sh = '/path/to/go/env.sh'
-
- def mock_get_env_shell_file_paths( app, elem ):
- assert app == mock_app
- assert elem.get( 'name' ) == "package_r_3_0_1"
- return [ r_env_sh ]
-
- with __mock_common_util_method( "get_env_shell_file_paths", mock_get_env_shell_file_paths ):
- td_common_util.get_env_shell_file_paths_from_setup_environment_elem( mock_app, all_env_paths, action_elem, action_dict )
- ## Verify old env files weren't deleted.
- assert required_for_install_env_sh in all_env_paths
- ## Verify new ones added.
- assert r_env_sh in all_env_paths
- ## env_shell_file_paths includes everything
- assert all( [ env in action_dict[ 'env_shell_file_paths' ] for env in all_env_paths ] )
-
- ## action_shell_file_paths includes only env files defined
- ## inside the setup_ action element.
- assert required_for_install_env_sh not in action_dict[ 'action_shell_file_paths' ]
- assert r_env_sh in action_dict[ 'action_shell_file_paths' ]
-
-## Poor man's mocking. Need to get a real mocking library as real Galaxy development
-## dependency.
-@contextmanager
-def __mock_common_util_method( name, mock_method ):
- real_method = getattr( td_common_util, name )
- try:
- setattr( td_common_util, name, mock_method )
- yield
- finally:
- setattr( td_common_util, name, real_method )
diff -r 6facd407e2479f56fac8530a2f413226d497dfb2 -r de7ed9e75dda6b8d9a364a56f9ac29a085a9e3cc test/unit/tool_shed_unit_tests/__init__.py
--- /dev/null
+++ b/test/unit/tool_shed_unit_tests/__init__.py
@@ -0,0 +1,4 @@
+"""
+Module cannot be called tool_shed, because this conflicts with lib/tool_shed
+also at top level of path.
+"""
diff -r 6facd407e2479f56fac8530a2f413226d497dfb2 -r de7ed9e75dda6b8d9a364a56f9ac29a085a9e3cc test/unit/tool_shed_unit_tests/test_fabric_util.py
--- /dev/null
+++ b/test/unit/tool_shed_unit_tests/test_fabric_util.py
@@ -0,0 +1,45 @@
+from contextlib import contextmanager
+from tool_shed.galaxy_install.tool_dependencies import fabric_util
+
+
+def test_env_file_builder():
+ install_dir = "/opt/galaxy/dependencies/foo/"
+ env_file_builder = fabric_util.EnvFileBuilder( install_dir )
+ added_lines = []
+ mock_return = dict(value=0)
+
+ def mock_file_append( text, file_path, **kwds ):
+ added_lines.append(text)
+ return mock_return["value"]
+
+ with __mock_fabric_util_method("file_append", mock_file_append):
+ env_file_builder.append_line( name="PATH", action="prepend_to", value="/usr/bin/local/R" )
+ assert added_lines == [ "PATH=/usr/bin/local/R:$PATH; export PATH" ]
+ assert env_file_builder.return_code == 0
+
+ # Reset mock lines
+ del added_lines[:]
+ # Next time file_append will fail
+ mock_return["value"] = 1
+
+ env_file_builder.append_line( action="source", value="/usr/bin/local/R/env.sh" )
+ assert added_lines == [ "if [ -f /usr/bin/local/R/env.sh ] ; then . /usr/bin/local/R/env.sh ; fi" ]
+ # Check failure
+ assert env_file_builder.return_code == 1
+
+ mock_return["value"] = 0
+ env_file_builder.append_line( name="LD_LIBRARY_PATH", action="append_to", value="/usr/bin/local/R/lib" )
+ # Verify even though last append succeeded, previous failure still recorded.
+ assert env_file_builder.return_code == 1
+
+
+## Poor man's mocking. Need to get a real mocking library as real Galaxy development
+## dependency.
+@contextmanager
+def __mock_fabric_util_method(name, mock_method):
+ real_method = getattr(fabric_util, name)
+ try:
+ setattr(fabric_util, name, mock_method)
+ yield
+ finally:
+ setattr(fabric_util, name, real_method)
diff -r 6facd407e2479f56fac8530a2f413226d497dfb2 -r de7ed9e75dda6b8d9a364a56f9ac29a085a9e3cc test/unit/tool_shed_unit_tests/test_td_common_util.py
--- /dev/null
+++ b/test/unit/tool_shed_unit_tests/test_td_common_util.py
@@ -0,0 +1,80 @@
+from os.path import join
+from contextlib import contextmanager
+from galaxy.util import parse_xml_string
+
+from tool_shed.galaxy_install.tool_dependencies import fabric_util
+from tool_shed.galaxy_install.tool_dependencies import td_common_util
+
+
+TEST_DEPENDENCIES_DIR = "/opt/galaxy/dependencies"
+TEST_INSTALL_DIR = "%s/test_install_dir" % TEST_DEPENDENCIES_DIR
+
+
+class MockApp( object ):
+
+ def __init__( self ):
+ pass
+
+def test_create_or_update_env_shell_file():
+ test_path = "/usr/share/R/libs"
+ env_file_builder = fabric_util.EnvFileBuilder( test_path )
+ line, path = env_file_builder.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict( action="append_to", name="R_LIBS", value=test_path ) )
+ assert path == join( TEST_INSTALL_DIR, "env.sh" )
+ assert line == "R_LIBS=$R_LIBS:/usr/share/R/libs; export R_LIBS"
+
+ line, path = env_file_builder.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict( action="prepend_to", name="R_LIBS", value=test_path ) )
+ assert path == join( TEST_INSTALL_DIR, "env.sh" )
+ assert line == "R_LIBS=/usr/share/R/libs:$R_LIBS; export R_LIBS"
+
+ line, path = env_file_builder.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict( action="set_to", name="R_LIBS", value=test_path ) )
+ assert path == join( TEST_INSTALL_DIR, "env.sh" )
+ assert line == "R_LIBS=/usr/share/R/libs; export R_LIBS"
+
+ line, path = env_file_builder.create_or_update_env_shell_file( TEST_INSTALL_DIR, dict( action="source", value=test_path ) )
+ assert path == join( TEST_INSTALL_DIR, "env.sh" )
+ assert line == "if [ -f /usr/share/R/libs ] ; then . /usr/share/R/libs ; fi"
+
+def test_get_env_shell_file_paths_from_setup_environment_elem():
+ xml = """<action name="setup_r_environment">
+ <repository name="package_r_3_0_1" owner="bgruening" toolshed="toolshed.g2.bx.psu.edu" changeset_revision="1234567">
+ <package name="R" version="3.0.1" />
+ </repository>
+ </action>
+ """
+ mock_app = MockApp()
+ action_elem = parse_xml_string( xml )
+ required_for_install_env_sh = '/path/to/existing.sh'
+ all_env_paths = [ required_for_install_env_sh ]
+ action_dict = {}
+
+ r_env_sh = '/path/to/go/env.sh'
+
+ def mock_get_env_shell_file_paths( app, elem ):
+ assert app == mock_app
+ assert elem.get( 'name' ) == "package_r_3_0_1"
+ return [ r_env_sh ]
+
+ with __mock_common_util_method( "get_env_shell_file_paths", mock_get_env_shell_file_paths ):
+ td_common_util.get_env_shell_file_paths_from_setup_environment_elem( mock_app, all_env_paths, action_elem, action_dict )
+ ## Verify old env files weren't deleted.
+ assert required_for_install_env_sh in all_env_paths
+ ## Verify new ones added.
+ assert r_env_sh in all_env_paths
+ ## env_shell_file_paths includes everything
+ assert all( [ env in action_dict[ 'env_shell_file_paths' ] for env in all_env_paths ] )
+
+ ## action_shell_file_paths includes only env files defined
+ ## inside the setup_ action element.
+ assert required_for_install_env_sh not in action_dict[ 'action_shell_file_paths' ]
+ assert r_env_sh in action_dict[ 'action_shell_file_paths' ]
+
+## Poor man's mocking. Need to get a real mocking library as real Galaxy development
+## dependency.
+@contextmanager
+def __mock_common_util_method( name, mock_method ):
+ real_method = getattr( td_common_util, name )
+ try:
+ setattr( td_common_util, name, mock_method )
+ yield
+ finally:
+ setattr( td_common_util, name, real_method )
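The tests above patch fabric_util and td_common_util by hand with a context manager, and their comments note that a real mocking library is still missing from Galaxy's development dependencies. A minimal sketch of the same isolation written with the mock library instead, assuming fabric_util.file_append keeps the ( text, file_path, **kwds ) signature used above; this is illustrative only and not part of the changeset:

try:
    from unittest import mock  # Python 3.3+
except ImportError:
    import mock  # third-party 'mock' package on Python 2

from tool_shed.galaxy_install.tool_dependencies import fabric_util


def test_env_file_builder_with_mock():
    added_lines = []

    def fake_file_append( text, file_path, **kwds ):
        # Record what would have been written instead of touching the filesystem.
        added_lines.append( text )
        return 0

    with mock.patch.object( fabric_util, 'file_append', fake_file_append ):
        builder = fabric_util.EnvFileBuilder( '/opt/galaxy/dependencies/foo/' )
        builder.append_line( name='PATH', action='prepend_to', value='/usr/bin/local/R' )

    assert added_lines == [ 'PATH=/usr/bin/local/R:$PATH; export PATH' ]
    assert builder.return_code == 0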
https://bitbucket.org/galaxy/galaxy-central/commits/586dd135b4af/
Changeset: 586dd135b4af
User: jmchilton
Date: 2014-01-03 00:50:15
Summary: Fix version output redirection for pre-bash shells.
Affected #: 1 file
diff -r de7ed9e75dda6b8d9a364a56f9ac29a085a9e3cc -r 586dd135b4afc9d947c15aedc45c5104cc9ad94c lib/galaxy/jobs/command_factory.py
--- a/lib/galaxy/jobs/command_factory.py
+++ b/lib/galaxy/jobs/command_factory.py
@@ -38,7 +38,7 @@
def __handle_version_command(commands_builder, job_wrapper):
# Prepend version string
if job_wrapper.version_string_cmd:
- version_command = "%s &> %s" % ( job_wrapper.version_string_cmd, job_wrapper.get_version_string_path() )
+ version_command = "%s > %s 2>&1" % ( job_wrapper.version_string_cmd, job_wrapper.get_version_string_path() )
commands_builder.prepend_command(version_command)
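The one-line change above matters because "&>" is a bash extension: under a shell without it (dash, classic /bin/sh), "cmd &> file" is parsed as "cmd &" run in the background followed by "> file" truncating the file, so the version file comes out empty and stderr leaks into the job output. The portable spelling redirects stdout to the file and then duplicates stderr onto it. A minimal sketch using a hypothetical build_version_command helper, not the actual job wrapper code:

def build_version_command( version_string_cmd, version_path ):
    # Portable form: stdout to the file first, then stderr duplicated onto stdout.
    return '%s > %s 2>&1' % ( version_string_cmd, version_path )

assert build_version_command( 'samtools --version', '/tmp/version.txt' ) == \
    'samtools --version > /tmp/version.txt 2>&1'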
https://bitbucket.org/galaxy/galaxy-central/commits/f0f7c3cd2e8a/
Changeset: f0f7c3cd2e8a
User: jmchilton
Date: 2014-01-03 00:50:15
Summary: Adjust GALAXY_SLOTS in local.py for pre-bash shells.
Affected #: 1 file
diff -r 586dd135b4afc9d947c15aedc45c5104cc9ad94c -r f0f7c3cd2e8af64243f878c04f418c1ee054bc55 lib/galaxy/jobs/runners/local.py
--- a/lib/galaxy/jobs/runners/local.py
+++ b/lib/galaxy/jobs/runners/local.py
@@ -54,9 +54,9 @@
slots = job_wrapper.job_destination.params.get( "local_slots", None )
command_line = command_line.lstrip( " ;" )
if slots:
- command_line = 'export GALAXY_SLOTS="%d"; export GALAXY_SLOTS_CONFIGURED="1"; %s' % ( int( slots ), command_line )
+ command_line = 'GALAXY_SLOTS="%d"; export GALAXY_SLOTS; GALAXY_SLOTS_CONFIGURED="1"; export GALAXY_SLOTS_CONFIGURED; %s' % ( int( slots ), command_line )
else:
- command_line = 'export GALAXY_SLOTS="1"; %s' % command_line
+ command_line = 'GALAXY_SLOTS="1"; export GALAXY_SLOTS; %s' % command_line
return command_line
def queue_job( self, job_wrapper ):
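The same portability concern drives this change: classic Bourne-style shells do not accept assignment and export in a single statement, so 'export GALAXY_SLOTS="4"' can fail where the two-step 'GALAXY_SLOTS="4"; export GALAXY_SLOTS' always works. A minimal sketch of the pattern with a hypothetical posix_export helper, not part of local.py:

def posix_export( **env_vars ):
    # Emit 'NAME="value"; export NAME;' for each variable: assignment first,
    # export as a separate command, so pre-bash shells accept it.
    parts = []
    for name, value in sorted( env_vars.items() ):
        parts.append( '%s="%s"; export %s;' % ( name, value, name ) )
    return ' '.join( parts )

assert posix_export( GALAXY_SLOTS="4", GALAXY_SLOTS_CONFIGURED="1" ) == \
    'GALAXY_SLOTS="4"; export GALAXY_SLOTS; GALAXY_SLOTS_CONFIGURED="1"; export GALAXY_SLOTS_CONFIGURED;'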
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

commit/galaxy-central: guerler: Fix XSS issue in former grids code
by commits-noreply@bitbucket.org 02 Jan '14
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/27350e3fc09b/
Changeset: 27350e3fc09b
Branch: stable
User: guerler
Date: 2014-01-03 05:25:22
Summary: Fix XSS issue in former grids code
Affected #: 1 file
diff -r 8411a9f30feb3a6fd15cecc09809afe2e24b5e79 -r 27350e3fc09bd36fea964c703b5ea787e2215de4 templates/grid_base.mako
--- a/templates/grid_base.mako
+++ b/templates/grid_base.mako
@@ -56,6 +56,14 @@
${h.js("libs/jquery/jquery.autocomplete", "galaxy.autocom_tagging", "libs/jquery/jquery.rating", "galaxy.grids" )}
${handle_refresh_frames()}
+ <%
+ self.grid_options = {
+ 'sort_key' : sort_key,
+ 'use_async' : grid.use_async,
+ 'cur_page_num' : cur_page_num,
+ 'num_pages' : num_pages
+ }
+ %><script type="text/javascript">
// Needed URLs for grid history searching.
@@ -65,6 +73,7 @@
//
// Create grid object.
//
+ var grid_options = ${ h.to_json_string(self.grid_options) };
// Operations that are async (AJAX) compatible.
var async_ops = [];
@@ -83,20 +92,20 @@
/** Returns true if string denotes true. */
var is_true = function(s) { return _.indexOf(['True', 'true', 't'], s) !== -1; };
-
+
// Create grid.
var grid = new Grid({
url_base: '${trans.request.path_url}',
- async: is_true('${grid.use_async}'),
+ async: is_true(grid_options.use_async),
async_ops: async_ops,
categorical_filters: categorical_filters,
filters: ${h.to_json_string( cur_filter_dict )},
- sort_key: '${sort_key}',
+ sort_key: grid_options.sort_key,
show_item_checkboxes: is_true('${context.get('show_item_checkboxes', False)}'),
- cur_page: ${cur_page_num},
+ cur_page: grid_options.cur_page_num,
// persistent page="all"
//cur_page: ('${cur_page_num}' === 'all')?('all'):(Number('${cur_page_num}')),
- num_pages: ${num_pages}
+ num_pages: grid_options.num_pages
});
// Initialize grid objects on load.
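The fix works by collecting the request-derived values ( sort_key, cur_page_num, num_pages, use_async ) into one dictionary and emitting it through h.to_json_string, instead of splicing each value straight into the inline JavaScript. With raw interpolation, a quote inside a value terminates the surrounding JS string literal and whatever follows runs as script; JSON serialization keeps every value inside a properly escaped string. A minimal illustration using json.dumps as a stand-in for h.to_json_string:

import json

payload = "'; alert(document.cookie); //"      # attacker-influenced sort_key

unsafe = "sort_key: '%s'," % payload           # old style: raw interpolation
safe = 'var grid_options = %s;' % json.dumps( { 'sort_key': payload } )

# Raw interpolation: the payload's leading quote closes the JS string literal,
# leaving the rest of the payload outside it as live code.
assert unsafe.startswith( "sort_key: '';" )
# JSON form: the whole payload stays inside one double-quoted JSON string.
assert safe == """var grid_options = {"sort_key": "'; alert(document.cookie); //"};"""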
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.