1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7677b8c3ef43/
changeset: 7677b8c3ef43
user: inithello
date: 2013-03-06 22:07:32
summary: Added more information to the tool test environment dict. Restrict when do_not_test flag is set. Made test logging format more structured. Moved some utility code to the util module.
affected #: 3 files
diff -r 48dad8860ee1b1196a1f242c9dd18d218e6db573 -r 7677b8c3ef43d06eda783f1c08ec521d2bd625e5 lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -12,9 +12,9 @@
from galaxy import eggs
import pkg_resources
-pkg_resources.require( "SQLAlchemy >= 0.4" )
-pkg_resources.require( 'mercurial' )
-from mercurial import hg, ui, commands
+eggs.require( "SQLAlchemy >= 0.4" )
+eggs.require( 'mercurial' )
+from mercurial import hg, ui, commands, __version__
import time, ConfigParser, shutil
from datetime import datetime, timedelta
@@ -24,12 +24,13 @@
import galaxy.webapps.tool_shed.config as tool_shed_config
import galaxy.webapps.tool_shed.model.mapping
import sqlalchemy as sa
-from galaxy.model.orm import and_, not_, distinct
+from galaxy.model.orm import and_, not_
from galaxy.util.json import from_json_string, to_json_string
from galaxy.web import url_for
+from galaxy.tools import parameters
from tool_shed.util.shed_util_common import clone_repository, get_configured_ui
-from base.util import get_test_environment
+from base.util import get_test_environment, get_database_version, get_repository_current_revision
assert sys.version_info[:2] >= ( 2, 4 )
@@ -50,6 +51,7 @@
now = strftime( "%Y-%m-%d %H:%M:%S" )
print "#############################################################################"
print "# %s - Checking repositories for tools with functional tests." % now
+ print "# This tool shed is configured to listen on %s:%s." % ( config_parser.get( 'server:main', 'host' ), config_parser.get( 'server:main', 'port' ) )
app = FlagRepositoriesApplication( config )
if options.info_only:
@@ -113,7 +115,7 @@
}
'''
start = time.time()
- repository_ids_to_check = []
+ checked_repository_ids = []
tool_count = 0
has_tests = 0
no_tests = 0
@@ -121,25 +123,31 @@
# Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test
# or tools_functionally_correct. Also filter out changeset revisions that are not downloadable, because it's redundant to test a revision that
# a user can't install.
+ # Initialize the repository_status dict with the test environment, but leave the test_errors empty.
+ repository_status = {}
+ repository_status[ 'test_environment' ] = get_test_environment()
+ repository_status[ 'test_environment' ][ 'tool_shed_database_version' ] = get_database_version( app )
+ repository_status[ 'test_environment' ][ 'tool_shed_mercurial_version' ] = __version__.version
+ repository_status[ 'test_environment' ][ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
+ repository_status[ 'test_errors' ] = []
metadata_records_to_check = app.sa_session.query( app.model.RepositoryMetadata ) \
.filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
app.model.RepositoryMetadata.table.c.do_not_test == False,
app.model.RepositoryMetadata.table.c.tools_functionally_correct == False ) ) \
.all()
for metadata_record in metadata_records_to_check:
+ repository_status[ 'test_errors' ] = []
name = metadata_record.repository.name
owner = metadata_record.repository.user.username
changeset_revision = str( metadata_record.changeset_revision )
- repository_status = {}
+ if metadata_record.repository.id not in checked_repository_ids:
+ checked_repository_ids.append( metadata_record.repository.id )
# If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
# only repositories that contain tools.
if 'tools' not in metadata_record.metadata:
no_tools += 1
continue
else:
- # Initialize the repository_status dict with the test environment, but leave the test_errors empty.
- repository_status[ 'test_environment' ] = get_test_environment()
- repository_status[ 'test_errors' ] = []
# Loop through all the tools in this metadata record, checking each one for defined functional tests.
for tool_metadata in metadata_record.metadata[ 'tools' ]:
tool_count += 1
@@ -153,12 +161,9 @@
if 'tests' not in tool_metadata or not tool_metadata[ 'tests' ]:
if verbose:
print '# No functional tests defined for %s.' % tool_id
- if 'test_errors' not in repository_status:
- repository_status[ 'test_errors' ] = []
- test_id = 'Functional tests for %s' % tool_id
# The repository_metadata.tool_test_errors attribute should always have the following structure:
# {
- # "environment":
+ # "test_environment":
# {
# "python_version": "2.7.2",
# "architecture": "x86_64",
@@ -168,15 +173,16 @@
# [
# {
# "test_id": "Something that will easily identify what the problem is",
- # "stdout": "The output of the test, or a more detailed description of what was tested and why it failed."
+ # "stderr": "The output of the test, or a more detailed description of what was tested and why it failed."
# },
# ]
# }
- # Optionally, "stderr" and "traceback" may be included in a test_errors dict, if they are relevant.
- test_errors = dict( test_id=test_id,
- stdout='No functional tests defined in changeset revision %s of repository %s owned by %s.' % \
- ( changeset_revision, name, owner ) )
+ # Optionally, "stdout" and "traceback" may be included in a test_errors dict, if they are relevant.
+ test_id = 'Functional tests for %s' % tool_id
+ test_errors = dict( stderr='No functional tests defined for tool %s in changeset revision %s of repository %s owned by %s.' % \
+ ( tool_id, changeset_revision, name, owner ) )
repository_status[ 'test_errors' ].append( test_errors )
+ repository_status[ 'status' ] = 'failed'
no_tests += 1
else:
has_tests += 1
@@ -201,14 +207,12 @@
if 'test-data' in dirs:
has_test_data = True
break
- # Remove the cloned path.
+ # Remove the cloned repository path.
if os.path.exists( work_dir ):
shutil.rmtree( work_dir )
if not has_test_data:
if verbose:
print '# Test data missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
- repository_status[ 'test_environment' ] = get_test_environment()
- test_id = 'Find functional test data for %s' % metadata_record.repository.name
# The repository_metadata.tool_test_errors attribute should always have the following structure:
# {
# "test_environment":
@@ -221,28 +225,40 @@
# [
# {
# "test_id": "Something that will easily identify what the problem is",
- # "stdout": "The output of the test, or a more detailed description of what was tested and why it failed."
+ # "stderr": "The output of the test, or a more detailed description of what was tested and why it failed."
# },
# ]
# }
- # Optionally, "stderr" and "traceback" may be included in a test_errors dict, if they are relevant.
- test_errors = dict( test_id=test_id,
- stdout='No test data found for changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
+ # Optionally, "stdout" and "traceback" may be included in a test_errors dict, if they are relevant.
+ test_id = 'Find functional test data for %s' % metadata_record.repository.name
+ test_errors = dict( stderr='No test data found for changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
repository_status[ 'test_errors' ].append( test_errors )
+ repository_status[ 'status' ] = 'failed'
else:
if verbose:
print '# Test data found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
if not info_only:
+ # If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
+ # on which this script was run.
if repository_status[ 'test_errors' ]:
- metadata_record.do_not_test = True
+ # If functional test definitions or test data are missing, set do_not_test = True if and only if:
+ # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision. In this case,
+ # the revision will never be updated with correct data, and re-testing it would be redundant.
+ # b) There are one or more downloadable revisions, and the revision being tested is the most recent downloadable revision. In this case, if
+ # the repository is updated with test data or functional tests, the downloadable changeset revision that was tested will be replaced
+ # with the new changeset revision, which will be automatically tested.
+ if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ):
+ metadata_record.do_not_test = True
metadata_record.tools_functionally_correct = False
- metadata_record.tool_test_errors = to_json_string( repository_status )
+ else:
+ repository_status[ 'status' ] = 'passed'
+ metadata_record.tool_test_errors = repository_status
metadata_record.time_last_tested = datetime.utcnow()
app.sa_session.add( metadata_record )
app.sa_session.flush()
stop = time.time()
- print '# Checked %d tools in %d changeset revisions.' % ( tool_count, len( metadata_records_to_check ) )
- print '# Found %d changeset revisions without tools.' % no_tools
+ print '# Checked %d repositories with %d tools in %d changeset revisions.' % ( len( checked_repository_ids ), tool_count, len( metadata_records_to_check ) )
+ print '# Skipped %d changeset revisions without tools.' % no_tools
print '# Found %d tools without functional tests.' % no_tests
print '# Found %d tools with functional tests.' % has_tests
if info_only:
@@ -250,6 +266,52 @@
print "# Elapsed time: ", stop - start
print "#############################################################################"
+def get_repo_changelog_tuples( repo_path ):
+ repo = hg.repository( ui.ui(), repo_path )
+ changelog_tuples = []
+ for changeset in repo.changelog:
+ ctx = repo.changectx( changeset )
+ changelog_tuples.append( ( ctx.rev(), str( ctx ) ) )
+ return changelog_tuples
+
+def is_most_recent_downloadable_revision( app, repository, changeset_revision, downloadable_revisions ):
+ # Get a list of ( numeric revision, changeset hash ) tuples from the changelog.
+ changelog = get_repo_changelog_tuples( repository.repo_path( app ) )
+ latest_downloadable_revision = None
+ for ctx_rev, changeset_hash in changelog:
+ if changeset_hash in downloadable_revisions:
+ # The last changeset hash in the changelog that is present in the list of downloadable revisions will always be the most
+ # recent downloadable revision, since the changelog tuples are ordered from earliest to most recent.
+ latest_downloadable_revision = changeset_hash
+ if latest_downloadable_revision == changeset_revision:
+ return True
+ return False
+
+def should_set_do_not_test_flag( app, repository, changeset_revision ):
+ '''
+ Returns True if:
+ a) There are multiple downloadable revisions, and the provided changeset revision is not the most recent downloadable revision. In this case,
+ the revision will never be updated with correct data, and re-testing it would be redundant.
+ b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable revision. In this case, if
+ the repository is updated with test data or functional tests, the downloadable changeset revision that was tested will be replaced
+ with the new changeset revision, which will be automatically tested.
+ '''
+ metadata_records = app.sa_session.query( app.model.RepositoryMetadata ) \
+ .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
+ app.model.RepositoryMetadata.table.c.repository_id == repository.id ) ) \
+ .all()
+ downloadable_revisions = [ metadata_record.changeset_revision for metadata_record in metadata_records ]
+ is_latest_revision = is_most_recent_downloadable_revision( app, repository, changeset_revision, downloadable_revisions )
+ if len( downloadable_revisions ) == 1:
+ return True
+ elif len( downloadable_revisions ) > 1 and is_latest_revision:
+ return True
+ elif len( downloadable_revisions ) > 1 and not is_latest_revision:
+ return True
+ else:
+ return False
+
+
class FlagRepositoriesApplication( object ):
"""Encapsulates the state of a Universe application"""
def __init__( self, config ):
diff -r 48dad8860ee1b1196a1f242c9dd18d218e6db573 -r 7677b8c3ef43d06eda783f1c08ec521d2bd625e5 test/base/util.py
--- a/test/base/util.py
+++ b/test/base/util.py
@@ -12,6 +12,44 @@
sys.path = new_path
from galaxy.util import parse_xml
+from galaxy import eggs
+
+eggs.require( 'mercurial' )
+
+from mercurial import hg, ui, commands
+
+def get_repository_current_revision( repo_path ):
+ '''
+ This method uses the python mercurial API to get the current working directory's mercurial changeset hash. Note that if the author of mercurial
+ changes the API, this method will have to be updated or replaced.
+ '''
+ # Initialize a mercurial repo object from the provided path.
+ repo = hg.repository( ui.ui(), repo_path )
+ # Get the working directory's change context.
+ ctx = repo[ None ]
+ # Extract the changeset hash of the first parent of that change context (the most recent changeset to which the working directory was updated).
+ changectx = ctx.parents()[ 0 ]
+ # Also get the numeric revision, so we can return the customary id:hash changeset identifiers.
+ ctx_rev = changectx.rev()
+ hg_id = '%d:%s' % ( ctx_rev, str( changectx ) )
+ return hg_id
+
+def get_database_version( app ):
+ '''
+ This method returns the value of the version column from the migrate_version table, using the provided app's SQLAlchemy session to determine
+ which table to get that from. This way, it's provided with an instance of a Galaxy UniverseApplication, it will return the Galaxy instance's
+ database migration version. If a tool shed UniverseApplication is provided, it returns the tool shed's database migration version.
+ '''
+ sa_session = app.model.context.current
+ result = sa_session.execute( 'SELECT version FROM migrate_version LIMIT 1' )
+ # This query will return the following structure:
+ # row = [ column 0, column 1, ..., column n ]
+ # rows = [ row 0, row 1, ..., row n ]
+ # The first column in the first row is the version number we want.
+ for row in result:
+ version = row[ 0 ]
+ break
+ return version
def get_installed_repository_info( elem, last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision, tool_path ):
"""
diff -r 48dad8860ee1b1196a1f242c9dd18d218e6db573 -r 7677b8c3ef43d06eda783f1c08ec521d2bd625e5 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -61,7 +61,7 @@
import nose.loader
import nose.plugins.manager
-from base.util import parse_tool_panel_config
+from base.util import parse_tool_panel_config, get_database_version, get_test_environment, get_repository_current_revision
from common import update
@@ -105,7 +105,7 @@
# Define a default location to find the list of repositories to check.
galaxy_repository_list = os.environ.get( 'GALAXY_INSTALL_TEST_REPOSITORY_LIST_LOCATIOM', 'repository_list.json' )
-galaxy_tool_shed_url = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_URL', 'http://toolshed.local:10001' )
+galaxy_tool_shed_url = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_URL', 'http://localhost:9009' )
tool_shed_api_key = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY', None )
assert tool_shed_api_key is not None, 'Unable to proceed without API key.'
@@ -191,25 +191,22 @@
else:
raise AssertonError( 'Unknown format %s.' % format )
-def get_test_environment():
- rval = {}
- rval[ 'python_version' ] = platform.python_version()
- rval[ 'architecture' ] = platform.machine()
- os, hostname, os_version, uname, arch, processor = platform.uname()
- rval[ 'system' ] = '%s %s' % ( os, os_version )
- return rval
-
def json_from_url( url ):
url_handle = urllib.urlopen( url )
url_contents = url_handle.read()
return from_json_string( url_contents )
-def register_test_failure( url, metadata_id, test_errors ):
- params = dict( tools_functionally_correct='false', do_not_test='true', tool_test_errors=test_errors )
- return update( tool_shed_api_key, '%s' % ( url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', metadata_id ) ), params, return_formatted=False )
-
-def register_test_success( url, metadata_id ):
- params = dict( tools_functionally_correct='true', do_not_test='true' )
+def register_test_result( url, metadata_id, test_results_dict, tests_passed=False ):
+ params = {}
+ if tests_passed:
+ params[ 'tools_functionally_correct' ] = 'true'
+ params[ 'do_not_test' ] = 'true'
+ test_results_dict[ 'status' ] = 'passed'
+ else:
+ params[ 'tools_functionally_correct' ] = 'false'
+ params[ 'do_not_test' ] = 'true'
+ test_results_dict[ 'status' ] = 'failed'
+ params[ 'tool_test_errors' ] = test_results_dict
return update( tool_shed_api_key, '%s' % ( url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', metadata_id ) ), params, return_formatted=False )
def run_tests( test_config ):
@@ -233,8 +230,8 @@
# Initialize some variables for the summary that will be printed to stdout.
repositories_tested = 0
- repositories_passed = 0
- repositories_failed = 0
+ repositories_passed = []
+ repositories_failed = []
tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_PATH', 'tools' )
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
@@ -395,6 +392,9 @@
log.info( "Repositories will be installed from the tool shed at %s" % galaxy_tool_shed_url )
success = False
repository_status = dict()
+ test_environment = get_test_environment()
+ test_environment[ 'galaxy_database_version' ] = get_database_version( app )
+ test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
try:
# Iterate through a list of repository info dicts.
log.info( "Retrieving repositories to install from the URL:\n%s\n" % str( galaxy_tool_shed_url ) )
@@ -421,6 +421,7 @@
"url": "/api/repository_revisions/529fd61ab1c6cc36"
}
"""
+ repository_status = dict()
repository_id = repository_to_install_dict.get( 'repository_id', None )
changeset_revision = repository_to_install_dict.get( 'changeset_revision', None )
metadata_revision_id = repository_to_install_dict.get( 'id', None )
@@ -460,6 +461,8 @@
"user_id": "529fd61ab1c6cc36"
}
"""
+ name = repository_dict[ 'name' ]
+ owner = repository_dict[ 'owner' ]
# Use the repository information dict to generate an install method that will install the repository into the embedded
# Galaxy application, with tool dependencies and repository dependencies, if any.
test_install_repositories.generate_install_method( repository_dict )
@@ -489,13 +492,13 @@
( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ], repository_dict[ 'owner' ] ) )
repository_status[ 'test_errors' ] = [ test_errors ]
# Record the status of this repository in the tool shed.
- register_test_failure( galaxy_tool_shed_url, metadata_revision_id, repository_status )
+ register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
# Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
# repository using Twill.
execute_uninstall_method( repository_dict )
# Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
test_toolbox.toolbox = app.toolbox
- repositories_failed += 1
+ repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
else:
# If the repository does have a test-data directory, we write the generated shed_tools_dict to a file, so the functional
# test framework can find it.
@@ -519,16 +522,17 @@
success = result.wasSuccessful()
# Record some information about the environment in which this test was run, in case a failure is specific to a certain processor
# architecture or operating system.
- repository_dict[ 'test_environment' ] = get_test_environment()
- repository_dict[ 'functional_tests_passed' ] = success
+ repository_dict[ 'test_environment' ] = test_environment
test_errors = []
if success:
# This repository's tools passed all functional tests. Update the repository_metadata table in the tool shed's database
- # to reflect that. Call the register_test_success method, which executes a PUT request to the repository_revisions API
+ # to reflect that. Call the register_test_result method, which executes a PUT request to the repository_revisions API
# controller with the status of the test. This also sets the do_not_test and tools_functionally correct flags, and
# updates the time_last_tested field to today's date.
- repositories_passed += 1
- register_test_success( galaxy_tool_shed_url, metadata_revision_id )
+ repositories_passed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
+ repository_status[ 'test_environment' ] = test_environment
+ repository_status[ 'test_errors' ] = []
+ register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=True )
log.debug( 'Revision %s of repository %s installed and passed functional tests.' % \
( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ] ) )
else:
@@ -569,13 +573,13 @@
test_errors.append( test_status )
if test_errors:
# Only update test_errors for this repository if it's not empty.
- repository_status[ 'test_environment' ] = get_test_environment()
+ repository_status[ 'test_environment' ] = test_environment
repository_status[ 'test_errors' ] = test_errors
- # Call the register_test_failure method, which executes a PUT request to the repository_revisions API controller with the failure
- # status of the test, and updates tool_test_errors with the relevant log data.
+ # Call the register_test_result method, which executes a PUT request to the repository_revisions API controller with the outcome
+ # status of the tests, and updates tool_test_errors with the relevant log data.
# This also sets the do_not_test and tools_functionally correct flags, and updates the time_last_tested field to today's date.
- repositories_failed += 1
- register_test_failure( galaxy_tool_shed_url, metadata_revision_id, repository_status )
+ repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
+ register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
log.debug( 'Revision %s of repository %s installed successfully, but did not pass functional tests.' % \
( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ] ) )
# Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
@@ -618,8 +622,14 @@
print "# %s - repository installation and testing script completed." % now
print "# Repository revisions tested: %d" % repositories_tested
if repositories_tested > 0:
- print "# Repository revisions passed: %d" % repositories_passed
- print "# Repository revisions failed: %d" % repositories_failed
+ if repositories_passed:
+ print "# Repositories passed:"
+ for repository in repositories_passed:
+ print "# %s owned by %s, changeset revision %s" % ( repository[ 'name' ], repository[ 'owner' ], repository[ 'changeset_revision' ] )
+ if repositories_failed:
+ print "# Repositories failed:"
+ for repository in repositories_failed:
+ print "# %s owned by %s, changeset revision %s" % ( repository[ 'name' ], repository[ 'owner' ], repository[ 'changeset_revision' ] )
print "####################################################################################"
if success:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/48dad8860ee1/
changeset: 48dad8860ee1
user: carlfeberhard
date: 2013-03-06 21:34:25
summary: fix imports
affected #: 1 file
diff -r dc10f9430b6aaaf85c67a998d4ce666a5b66b600 -r 48dad8860ee1b1196a1f242c9dd18d218e6db573 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -19,6 +19,9 @@
eggs.require( "simplejson" )
eggs.require( "MarkupSafe" ) #MarkupSafe must load before mako
eggs.require( "Mako" )
+eggs.require( "elementtree" )
+eggs.require( "Paste" )
+eggs.require( "SQLAlchemy >= 0.4" )
import simplejson
from cgi import FieldStorage
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d1b435e7200d/
changeset: d1b435e7200d
user: dannon
date: 2013-03-06 19:09:35
summary: Re-add secondary workflow api download route to support applications already using it.
affected #: 1 file
diff -r 47480f11d5d5c99dfe67710cc4a7ecbaeebebb4a -r d1b435e7200d4543fe3060ba9c362220d73d615b lib/galaxy/webapps/galaxy/buildapp.py
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -153,6 +153,8 @@
# Defines a named route "import_workflow".
webapp.api_mapper.connect("import_workflow", "/api/workflows/upload", controller="workflows", action="import_new_workflow", conditions=dict(method=["POST"]))
webapp.api_mapper.connect("workflow_dict", '/api/workflows/{workflow_id}/download', controller='workflows', action='workflow_dict', conditions=dict(method=['GET']))
+ # Preserve the following download route for now for dependent applications -- deprecate at some point
+ webapp.api_mapper.connect("workflow_dict", '/api/workflows/download/{workflow_id}', controller='workflows', action='workflow_dict', conditions=dict(method=['GET']))
# Connect logger from app
if app.trace_logger:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/3ec030c42b9e/
changeset: 3ec030c42b9e
user: dannon
date: 2013-03-06 18:38:48
summary: Remove unused import in objectstore.
affected #: 1 file
diff -r 3bdfcb0a7e9722ce4074c52528a4c0324f2fb83a -r 3ec030c42b9e59234d4c005e6a7f1de6e0e1e736 lib/galaxy/objectstore/__init__.py
--- a/lib/galaxy/objectstore/__init__.py
+++ b/lib/galaxy/objectstore/__init__.py
@@ -9,7 +9,6 @@
import time
import random
import shutil
-import statvfs
import logging
import threading
import subprocess
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/3bdfcb0a7e97/
changeset: 3bdfcb0a7e97
user: dannon
date: 2013-03-06 17:50:53
summary: Fix incorrect workflow download path via the API. Now it is correctly set to /api/workflows/<id>/download
affected #: 2 files
diff -r f97ebc1cf57d20019b785106221964931b122bb7 -r 3bdfcb0a7e9722ce4074c52528a4c0324f2fb83a lib/galaxy/webapps/galaxy/api/workflows.py
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -258,24 +258,16 @@
trans.sa_session.flush()
return rval
- # ---------------------------------------------------------------------------------------------- #
- # ---------------------------------------------------------------------------------------------- #
- # ---- RPARK EDITS ---- #
- # ---------------------------------------------------------------------------------------------- #
- # ---------------------------------------------------------------------------------------------- #
@web.expose_api
- #(a)web.json
def workflow_dict( self, trans, workflow_id, **kwd ):
"""
GET /api/workflows/{encoded_workflow_id}/download
Returns a selected workflow as a json dictionary.
"""
-
try:
stored_workflow = trans.sa_session.query(self.app.model.StoredWorkflow).get(trans.security.decode_id(workflow_id))
except Exception,e:
return ("Workflow with ID='%s' can not be found\n Exception: %s") % (workflow_id, str( e ))
-
# check to see if user has permissions to selected workflow
if stored_workflow.user != trans.user and not trans.user_is_admin():
if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
diff -r f97ebc1cf57d20019b785106221964931b122bb7 -r 3bdfcb0a7e9722ce4074c52528a4c0324f2fb83a lib/galaxy/webapps/galaxy/buildapp.py
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -152,7 +152,7 @@
# "POST /api/workflows/import" => ``workflows.import_workflow()``.
# Defines a named route "import_workflow".
webapp.api_mapper.connect("import_workflow", "/api/workflows/upload", controller="workflows", action="import_new_workflow", conditions=dict(method=["POST"]))
- webapp.api_mapper.connect("workflow_dict", '/api/workflows/download/{workflow_id}', controller='workflows', action='workflow_dict', conditions=dict(method=['GET']))
+ webapp.api_mapper.connect("workflow_dict", '/api/workflows/{workflow_id}/download', controller='workflows', action='workflow_dict', conditions=dict(method=['GET']))
# Connect logger from app
if app.trace_logger:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7eda7f4b555e/
changeset: 7eda7f4b555e
user: carlfeberhard
date: 2013-03-06 16:02:24
summary: history panel and api: if hda.peek is 'no peek' do not send peek through api and do not render peek in expanded hda
affected #: 3 files
diff -r 3a3eab0b0adbda30e72fdda0c5de899728bcd5e6 -r 7eda7f4b555eb13e8382e34b9739a38af6072849 lib/galaxy/webapps/galaxy/api/history_contents.py
--- a/lib/galaxy/webapps/galaxy/api/history_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/history_contents.py
@@ -221,7 +221,8 @@
hda_dict[ 'display_types' ] = get_old_display_applications( trans, hda )
hda_dict[ 'visualizations' ] = hda.get_visualizations()
- hda_dict[ 'peek' ] = to_unicode( hda.display_peek() )
+ if hda.peek and hda.peek != 'no peek':
+ hda_dict[ 'peek' ] = to_unicode( hda.display_peek() )
if hda.creating_job and hda.creating_job.tool_id:
tool_used = trans.app.toolbox.get_tool( hda.creating_job.tool_id )
diff -r 3a3eab0b0adbda30e72fdda0c5de899728bcd5e6 -r 7eda7f4b555eb13e8382e34b9739a38af6072849 static/scripts/mvc/dataset/hda-base.js
--- a/static/scripts/mvc/dataset/hda-base.js
+++ b/static/scripts/mvc/dataset/hda-base.js
@@ -319,12 +319,13 @@
*/
//TODO: curr. pre-formatted into table on the server side - may not be ideal/flexible
_render_peek : function(){
- if( !this.model.get( 'peek' ) ){ return null; }
+ var peek = this.model.get( 'peek' );
+ if( !peek ){ return null; }
return $( '<div/>' ).append(
$( '<pre/>' )
.attr( 'id', 'peek' + this.model.get( 'id' ) )
.addClass( 'peek' )
- .append( this.model.get( 'peek' ) )
+ .append( peek )
);
},
diff -r 3a3eab0b0adbda30e72fdda0c5de899728bcd5e6 -r 7eda7f4b555eb13e8382e34b9739a38af6072849 static/scripts/packed/mvc/dataset/hda-base.js
--- a/static/scripts/packed/mvc/dataset/hda-base.js
+++ b/static/scripts/packed/mvc/dataset/hda-base.js
@@ -1,1 +1,1 @@
-var HDABaseView=BaseView.extend(LoggableMixin).extend({tagName:"div",className:"historyItemContainer",initialize:function(a){if(a.logger){this.logger=this.model.logger=a.logger}this.log(this+".initialize:",a);this.defaultPrimaryActionButtonRenderers=[this._render_showParamsButton];if(!a.urlTemplates){throw ("HDAView needs urlTemplates on initialize")}this.urlTemplates=a.urlTemplates;this.expanded=a.expanded||false;this.model.bind("change",this.render,this)},render:function(){var b=this,e=this.model.get("id"),c=this.model.get("state"),a=$("<div/>").attr("id","historyItem-"+e),d=(this.$el.children().size()===0);this.$el.attr("id","historyItemContainer-"+e);this.urls=this._renderUrls(this.urlTemplates,this.model.toJSON());a.addClass("historyItemWrapper").addClass("historyItem").addClass("historyItem-"+c);a.append(this._render_warnings());a.append(this._render_titleBar());this._setUpBehaviors(a);this.body=$(this._render_body());a.append(this.body);this.$el.fadeOut("fast",function(){b.$el.children().remove();b.$el.append(a).fadeIn("fast",function(){b.log(b+" rendered:",b.$el);var f="rendered";if(d){f+=":initial"}else{if(b.model.inReadyState()){f+=":ready"}}b.trigger(f)})});return this},_renderUrls:function(d,a){var b=this,c={};_.each(d,function(e,f){if(_.isObject(e)){c[f]=b._renderUrls(e,a)}else{if(f==="meta_download"){c[f]=b._renderMetaDownloadUrls(e,a)}else{try{c[f]=_.template(e,a)}catch(g){throw (b+"._renderUrls error: "+g+"\n rendering:"+e+"\n with "+JSON.stringify(a))}}}});return c},_renderMetaDownloadUrls:function(b,a){return _.map(a.meta_files,function(c){return{url:_.template(b,{id:a.id,file_type:c.file_type}),file_type:c.file_type}})},_setUpBehaviors:function(a){a=a||this.$el;make_popup_menus(a);a.find(".tooltip").tooltip({placement:"bottom"})},_render_warnings:function(){return $(jQuery.trim(HDABaseView.templates.messages(this.model.toJSON())))},_render_titleBar:function(){var a=$('<div class="historyItemTitleBar" style="overflow: 
hidden"></div>');a.append(this._render_titleButtons());a.append('<span class="state-icon"></span>');a.append(this._render_titleLink());return a},_render_titleButtons:function(){var a=$('<div class="historyItemButtons"></div>');a.append(this._render_displayButton());return a},_render_displayButton:function(){if((this.model.get("state")===HistoryDatasetAssociation.STATES.NOT_VIEWABLE)||(!this.model.get("accessible"))){this.displayButton=null;return null}var a={icon_class:"display",target:"galaxy_main"};if(this.model.get("purged")){a.enabled=false;a.title=_l("Cannot display datasets removed from disk")}else{a.title=_l("Display data in browser");a.href=this.urls.display}this.displayButton=new IconButtonView({model:new IconButton(a)});return this.displayButton.render().$el},_render_titleLink:function(){return $(jQuery.trim(HDABaseView.templates.titleLink(_.extend(this.model.toJSON(),{urls:this.urls}))))},_render_hdaSummary:function(){var a=_.extend(this.model.toJSON(),{urls:this.urls});return HDABaseView.templates.hdaSummary(a)},_render_primaryActionButtons:function(c){var a=this,b=$("<div/>").attr("id","primary-actions-"+this.model.get("id"));_.each(c,function(d){b.append(d.call(a))});return b},_render_downloadButton:function(){if(this.model.get("purged")||!this.model.hasData()){return null}var a=HDABaseView.templates.downloadLinks(_.extend(this.model.toJSON(),{urls:this.urls}));return $(a)},_render_showParamsButton:function(){this.showParamsButton=new IconButtonView({model:new IconButton({title:_l("View details"),href:this.urls.show_params,target:"galaxy_main",icon_class:"information"})});return this.showParamsButton.render().$el},_render_displayApps:function(){if(!this.model.hasData()){return null}var 
a=$("<div/>").addClass("display-apps");if(!_.isEmpty(this.model.get("display_types"))){a.append(HDABaseView.templates.displayApps({displayApps:this.model.get("display_types")}))}if(!_.isEmpty(this.model.get("display_apps"))){a.append(HDABaseView.templates.displayApps({displayApps:this.model.get("display_apps")}))}return a},_render_peek:function(){if(!this.model.get("peek")){return null}return $("<div/>").append($("<pre/>").attr("id","peek"+this.model.get("id")).addClass("peek").append(this.model.get("peek")))},_render_body:function(){var a=$("<div/>").attr("id","info-"+this.model.get("id")).addClass("historyItemBody").attr("style","display: none");if(this.expanded){this._render_body_html(a);a.show()}return a},_render_body_html:function(a){a.html("");switch(this.model.get("state")){case HistoryDatasetAssociation.STATES.NEW:break;case HistoryDatasetAssociation.STATES.NOT_VIEWABLE:this._render_body_not_viewable(a);break;case HistoryDatasetAssociation.STATES.UPLOAD:this._render_body_uploading(a);break;case HistoryDatasetAssociation.STATES.PAUSED:this._render_body_paused(a);break;case HistoryDatasetAssociation.STATES.QUEUED:this._render_body_queued(a);break;case HistoryDatasetAssociation.STATES.RUNNING:this._render_body_running(a);break;case HistoryDatasetAssociation.STATES.ERROR:this._render_body_error(a);break;case HistoryDatasetAssociation.STATES.DISCARDED:this._render_body_discarded(a);break;case HistoryDatasetAssociation.STATES.SETTING_METADATA:this._render_body_setting_metadata(a);break;case HistoryDatasetAssociation.STATES.EMPTY:this._render_body_empty(a);break;case HistoryDatasetAssociation.STATES.FAILED_METADATA:this._render_body_failed_metadata(a);break;case HistoryDatasetAssociation.STATES.OK:this._render_body_ok(a);break;default:a.append($('<div>Error: unknown dataset state "'+this.model.get("state")+'".</div>'))}a.append('<div style="clear: both"></div>');this._setUpBehaviors(a)},_render_body_not_viewable:function(a){a.append($("<div>"+_l("You do not have 
permission to view dataset")+".</div>"))},_render_body_uploading:function(a){a.append($("<div>"+_l("Dataset is uploading")+"</div>"))},_render_body_queued:function(a){a.append($("<div>"+_l("Job is waiting to run")+".</div>"));a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers))},_render_body_paused:function(a){a.append($("<div>"+_l("Job is paused. Use the history menu to resume")+".</div>"));a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers))},_render_body_running:function(a){a.append("<div>"+_l("Job is currently running")+".</div>");a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers))},_render_body_error:function(a){if(!this.model.get("purged")){a.append($("<div>"+this.model.get("misc_blurb")+"</div>"))}a.append((_l("An error occurred with this dataset")+": <i>"+$.trim(this.model.get("misc_info"))+"</i>"));a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers.concat([this._render_downloadButton])))},_render_body_discarded:function(a){a.append("<div>"+_l("The job creating this dataset was cancelled before completion")+".</div>");a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers))},_render_body_setting_metadata:function(a){a.append($("<div>"+_l("Metadata is being auto-detected")+".</div>"))},_render_body_empty:function(a){a.append($("<div>"+_l("No data")+": 
<i>"+this.model.get("misc_blurb")+"</i></div>"));a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers))},_render_body_failed_metadata:function(a){a.append($(HDABaseView.templates.failedMetadata(_.extend(this.model.toJSON(),{urls:this.urls}))));this._render_body_ok(a)},_render_body_ok:function(a){a.append(this._render_hdaSummary());if(this.model.isDeletedOrPurged()){a.append(this._render_primaryActionButtons([this._render_downloadButton,this._render_showParamsButton]));return}a.append(this._render_primaryActionButtons([this._render_downloadButton,this._render_showParamsButton]));a.append('<div class="clear"/>');a.append(this._render_displayApps());a.append(this._render_peek())},events:{"click .historyItemTitle":"toggleBodyVisibility"},toggleBodyVisibility:function(c,a){var b=this;this.expanded=(a===undefined)?(!this.body.is(":visible")):(a);if(this.expanded){b._render_body_html(b.body);this.body.slideDown("fast",function(){b.trigger("body-expanded",b.model.get("id"))})}else{this.body.slideUp("fast",function(){b.trigger("body-collapsed",b.model.get("id"))})}},remove:function(b){var a=this;this.$el.fadeOut("fast",function(){a.$el.remove();if(b){b()}})},toString:function(){var a=(this.model)?(this.model+""):("(no model)");return"HDABaseView("+a+")"}});HDABaseView.templates={warningMsg:Handlebars.templates["template-warningmessagesmall"],messages:Handlebars.templates["template-hda-warning-messages"],titleLink:Handlebars.templates["template-hda-titleLink"],hdaSummary:Handlebars.templates["template-hda-hdaSummary"],downloadLinks:Handlebars.templates["template-hda-downloadLinks"],failedMetadata:Handlebars.templates["template-hda-failedMetadata"],displayApps:Handlebars.templates["template-hda-displayApps"]};
\ No newline at end of file
+var HDABaseView=BaseView.extend(LoggableMixin).extend({tagName:"div",className:"historyItemContainer",initialize:function(a){if(a.logger){this.logger=this.model.logger=a.logger}this.log(this+".initialize:",a);this.defaultPrimaryActionButtonRenderers=[this._render_showParamsButton];if(!a.urlTemplates){throw ("HDAView needs urlTemplates on initialize")}this.urlTemplates=a.urlTemplates;this.expanded=a.expanded||false;this.model.bind("change",this.render,this)},render:function(){var b=this,e=this.model.get("id"),c=this.model.get("state"),a=$("<div/>").attr("id","historyItem-"+e),d=(this.$el.children().size()===0);this.$el.attr("id","historyItemContainer-"+e);this.urls=this._renderUrls(this.urlTemplates,this.model.toJSON());a.addClass("historyItemWrapper").addClass("historyItem").addClass("historyItem-"+c);a.append(this._render_warnings());a.append(this._render_titleBar());this._setUpBehaviors(a);this.body=$(this._render_body());a.append(this.body);this.$el.fadeOut("fast",function(){b.$el.children().remove();b.$el.append(a).fadeIn("fast",function(){b.log(b+" rendered:",b.$el);var f="rendered";if(d){f+=":initial"}else{if(b.model.inReadyState()){f+=":ready"}}b.trigger(f)})});return this},_renderUrls:function(d,a){var b=this,c={};_.each(d,function(e,f){if(_.isObject(e)){c[f]=b._renderUrls(e,a)}else{if(f==="meta_download"){c[f]=b._renderMetaDownloadUrls(e,a)}else{try{c[f]=_.template(e,a)}catch(g){throw (b+"._renderUrls error: "+g+"\n rendering:"+e+"\n with "+JSON.stringify(a))}}}});return c},_renderMetaDownloadUrls:function(b,a){return _.map(a.meta_files,function(c){return{url:_.template(b,{id:a.id,file_type:c.file_type}),file_type:c.file_type}})},_setUpBehaviors:function(a){a=a||this.$el;make_popup_menus(a);a.find(".tooltip").tooltip({placement:"bottom"})},_render_warnings:function(){return $(jQuery.trim(HDABaseView.templates.messages(this.model.toJSON())))},_render_titleBar:function(){var a=$('<div class="historyItemTitleBar" style="overflow: 
hidden"></div>');a.append(this._render_titleButtons());a.append('<span class="state-icon"></span>');a.append(this._render_titleLink());return a},_render_titleButtons:function(){var a=$('<div class="historyItemButtons"></div>');a.append(this._render_displayButton());return a},_render_displayButton:function(){if((this.model.get("state")===HistoryDatasetAssociation.STATES.NOT_VIEWABLE)||(!this.model.get("accessible"))){this.displayButton=null;return null}var a={icon_class:"display",target:"galaxy_main"};if(this.model.get("purged")){a.enabled=false;a.title=_l("Cannot display datasets removed from disk")}else{a.title=_l("Display data in browser");a.href=this.urls.display}this.displayButton=new IconButtonView({model:new IconButton(a)});return this.displayButton.render().$el},_render_titleLink:function(){return $(jQuery.trim(HDABaseView.templates.titleLink(_.extend(this.model.toJSON(),{urls:this.urls}))))},_render_hdaSummary:function(){var a=_.extend(this.model.toJSON(),{urls:this.urls});return HDABaseView.templates.hdaSummary(a)},_render_primaryActionButtons:function(c){var a=this,b=$("<div/>").attr("id","primary-actions-"+this.model.get("id"));_.each(c,function(d){b.append(d.call(a))});return b},_render_downloadButton:function(){if(this.model.get("purged")||!this.model.hasData()){return null}var a=HDABaseView.templates.downloadLinks(_.extend(this.model.toJSON(),{urls:this.urls}));return $(a)},_render_showParamsButton:function(){this.showParamsButton=new IconButtonView({model:new IconButton({title:_l("View details"),href:this.urls.show_params,target:"galaxy_main",icon_class:"information"})});return this.showParamsButton.render().$el},_render_displayApps:function(){if(!this.model.hasData()){return null}var 
a=$("<div/>").addClass("display-apps");if(!_.isEmpty(this.model.get("display_types"))){a.append(HDABaseView.templates.displayApps({displayApps:this.model.get("display_types")}))}if(!_.isEmpty(this.model.get("display_apps"))){a.append(HDABaseView.templates.displayApps({displayApps:this.model.get("display_apps")}))}return a},_render_peek:function(){var a=this.model.get("peek");if(!a){return null}return $("<div/>").append($("<pre/>").attr("id","peek"+this.model.get("id")).addClass("peek").append(a))},_render_body:function(){var a=$("<div/>").attr("id","info-"+this.model.get("id")).addClass("historyItemBody").attr("style","display: none");if(this.expanded){this._render_body_html(a);a.show()}return a},_render_body_html:function(a){a.html("");switch(this.model.get("state")){case HistoryDatasetAssociation.STATES.NEW:break;case HistoryDatasetAssociation.STATES.NOT_VIEWABLE:this._render_body_not_viewable(a);break;case HistoryDatasetAssociation.STATES.UPLOAD:this._render_body_uploading(a);break;case HistoryDatasetAssociation.STATES.PAUSED:this._render_body_paused(a);break;case HistoryDatasetAssociation.STATES.QUEUED:this._render_body_queued(a);break;case HistoryDatasetAssociation.STATES.RUNNING:this._render_body_running(a);break;case HistoryDatasetAssociation.STATES.ERROR:this._render_body_error(a);break;case HistoryDatasetAssociation.STATES.DISCARDED:this._render_body_discarded(a);break;case HistoryDatasetAssociation.STATES.SETTING_METADATA:this._render_body_setting_metadata(a);break;case HistoryDatasetAssociation.STATES.EMPTY:this._render_body_empty(a);break;case HistoryDatasetAssociation.STATES.FAILED_METADATA:this._render_body_failed_metadata(a);break;case HistoryDatasetAssociation.STATES.OK:this._render_body_ok(a);break;default:a.append($('<div>Error: unknown dataset state "'+this.model.get("state")+'".</div>'))}a.append('<div style="clear: both"></div>');this._setUpBehaviors(a)},_render_body_not_viewable:function(a){a.append($("<div>"+_l("You do not have permission to 
view dataset")+".</div>"))},_render_body_uploading:function(a){a.append($("<div>"+_l("Dataset is uploading")+"</div>"))},_render_body_queued:function(a){a.append($("<div>"+_l("Job is waiting to run")+".</div>"));a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers))},_render_body_paused:function(a){a.append($("<div>"+_l("Job is paused. Use the history menu to resume")+".</div>"));a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers))},_render_body_running:function(a){a.append("<div>"+_l("Job is currently running")+".</div>");a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers))},_render_body_error:function(a){if(!this.model.get("purged")){a.append($("<div>"+this.model.get("misc_blurb")+"</div>"))}a.append((_l("An error occurred with this dataset")+": <i>"+$.trim(this.model.get("misc_info"))+"</i>"));a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers.concat([this._render_downloadButton])))},_render_body_discarded:function(a){a.append("<div>"+_l("The job creating this dataset was cancelled before completion")+".</div>");a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers))},_render_body_setting_metadata:function(a){a.append($("<div>"+_l("Metadata is being auto-detected")+".</div>"))},_render_body_empty:function(a){a.append($("<div>"+_l("No data")+": 
<i>"+this.model.get("misc_blurb")+"</i></div>"));a.append(this._render_primaryActionButtons(this.defaultPrimaryActionButtonRenderers))},_render_body_failed_metadata:function(a){a.append($(HDABaseView.templates.failedMetadata(_.extend(this.model.toJSON(),{urls:this.urls}))));this._render_body_ok(a)},_render_body_ok:function(a){a.append(this._render_hdaSummary());if(this.model.isDeletedOrPurged()){a.append(this._render_primaryActionButtons([this._render_downloadButton,this._render_showParamsButton]));return}a.append(this._render_primaryActionButtons([this._render_downloadButton,this._render_showParamsButton]));a.append('<div class="clear"/>');a.append(this._render_displayApps());a.append(this._render_peek())},events:{"click .historyItemTitle":"toggleBodyVisibility"},toggleBodyVisibility:function(c,a){var b=this;this.expanded=(a===undefined)?(!this.body.is(":visible")):(a);if(this.expanded){b._render_body_html(b.body);this.body.slideDown("fast",function(){b.trigger("body-expanded",b.model.get("id"))})}else{this.body.slideUp("fast",function(){b.trigger("body-collapsed",b.model.get("id"))})}},remove:function(b){var a=this;this.$el.fadeOut("fast",function(){a.$el.remove();if(b){b()}})},toString:function(){var a=(this.model)?(this.model+""):("(no model)");return"HDABaseView("+a+")"}});HDABaseView.templates={warningMsg:Handlebars.templates["template-warningmessagesmall"],messages:Handlebars.templates["template-hda-warning-messages"],titleLink:Handlebars.templates["template-hda-titleLink"],hdaSummary:Handlebars.templates["template-hda-hdaSummary"],downloadLinks:Handlebars.templates["template-hda-downloadLinks"],failedMetadata:Handlebars.templates["template-hda-failedMetadata"],displayApps:Handlebars.templates["template-hda-displayApps"]};
\ No newline at end of file
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.