galaxy-commits
February 2013
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/9697f9fc92b1/
changeset: 9697f9fc92b1
user: inithello
date: 2013-02-20 21:51:56
summary: Fix an import in the repository_revisions API controller.
affected #: 1 file
diff -r 1bd67f03d4f12d0f6cddacb57a6402cf306f8f55 -r 9697f9fc92b1bc8fbfaca72917eeff928c2cc4ac lib/galaxy/webapps/community/api/repository_revisions.py
--- a/lib/galaxy/webapps/community/api/repository_revisions.py
+++ b/lib/galaxy/webapps/community/api/repository_revisions.py
@@ -1,10 +1,11 @@
-from galaxy import web, util, logging
+from galaxy import web, util
from galaxy.web.base.controller import BaseController, BaseAPIController
from galaxy.web.framework.helpers import is_true
import pkg_resources
pkg_resources.require( "Routes" )
import routes
+import logging
log = logging.getLogger( __name__ )
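The fix above stops importing logging through the galaxy package and imports the standard-library module directly. A minimal standalone sketch of the module-level logger pattern the controller now uses (illustration only, not Galaxy code):

    import logging

    log = logging.getLogger( __name__ )
    log.debug( "repository_revisions API controller imported" )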
https://bitbucket.org/galaxy/galaxy-central/commits/1adf6fdd9c49/
changeset: 1adf6fdd9c49
user: inithello
date: 2013-02-20 21:52:10
summary: Add do_not_test, tools_functionally_correct, time_last_tested, and tool_test_errors columns to the repository_metadata table.
affected #: 3 files
diff -r 9697f9fc92b1bc8fbfaca72917eeff928c2cc4ac -r 1adf6fdd9c49bda204e5d90f70f10e7ef4ec06bd lib/galaxy/webapps/community/model/__init__.py
--- a/lib/galaxy/webapps/community/model/__init__.py
+++ b/lib/galaxy/webapps/community/model/__init__.py
@@ -170,13 +170,18 @@
class RepositoryMetadata( object, APIItem ):
api_collection_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable' )
api_element_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable' )
- def __init__( self, repository_id=None, changeset_revision=None, metadata=None, tool_versions=None, malicious=False, downloadable=False ):
+ def __init__( self, repository_id=None, changeset_revision=None, metadata=None, tool_versions=None, malicious=False, downloadable=False,
+ tools_functionally_correct=False, do_not_test=False, time_last_tested=None, tool_test_errors=None ):
self.repository_id = repository_id
self.changeset_revision = changeset_revision
self.metadata = metadata or dict()
self.tool_versions = tool_versions or dict()
self.malicious = malicious
self.downloadable = downloadable
+ self.tools_functionally_correct = tools_functionally_correct
+ self.do_not_test = do_not_test
+ self.time_last_tested = time_last_tested
+ self.tool_test_errors = tool_test_errors
def get_api_value( self, view='collection', value_mapper=None ):
if value_mapper is None:
value_mapper = {}
diff -r 9697f9fc92b1bc8fbfaca72917eeff928c2cc4ac -r 1adf6fdd9c49bda204e5d90f70f10e7ef4ec06bd lib/galaxy/webapps/community/model/mapping.py
--- a/lib/galaxy/webapps/community/model/mapping.py
+++ b/lib/galaxy/webapps/community/model/mapping.py
@@ -130,7 +130,11 @@
Column( "metadata", JSONType, nullable=True ),
Column( "tool_versions", JSONType, nullable=True ),
Column( "malicious", Boolean, default=False ),
- Column( "downloadable", Boolean, default=True ) )
+ Column( "downloadable", Boolean, default=True ),
+ Column( "tools_functionally_correct", Boolean, default=False, index=True ),
+ Column( "do_not_test", Boolean, default=False, index=True ),
+ Column( "time_last_tested", DateTime, default=None, nullable=True ),
+ Column( "tool_test_errors", JSONType, nullable=True ) )
RepositoryReview.table = Table( "repository_review", metadata,
Column( "id", Integer, primary_key=True ),
diff -r 9697f9fc92b1bc8fbfaca72917eeff928c2cc4ac -r 1adf6fdd9c49bda204e5d90f70f10e7ef4ec06bd lib/galaxy/webapps/community/model/migrate/versions/0016_add_do_not_test_tools_functionally_correct_errors_columns.py
--- /dev/null
+++ b/lib/galaxy/webapps/community/model/migrate/versions/0016_add_do_not_test_tools_functionally_correct_errors_columns.py
@@ -0,0 +1,99 @@
+"""
+Migration script to add the tool_test_errors, do_not_test, tools_functionally_correct, and time_last_tested columns to the repository_metadata table.
+"""
+
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from migrate import *
+from migrate.changeset import *
+
+# Need our custom types, but don't import anything else from model
+from galaxy.model.custom_types import *
+
+import sys, logging
+log = logging.getLogger( __name__ )
+log.setLevel(logging.DEBUG)
+handler = logging.StreamHandler( sys.stdout )
+format = "%(name)s %(levelname)s %(asctime)s %(message)s"
+formatter = logging.Formatter( format )
+handler.setFormatter( formatter )
+log.addHandler( handler )
+
+metadata = MetaData( migrate_engine )
+db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
+
+def upgrade():
+ print __doc__
+ metadata.reflect()
+ # Create and initialize tools_functionally_correct, do_not_test, time_last_tested, and tool_test_errors columns in repository_metadata table.
+ RepositoryMetadata_table = Table( "repository_metadata", metadata, autoload=True )
+ c = Column( "tools_functionally_correct", Boolean, default=False, index=True )
+ try:
+ # Create tools_functionally_correct column
+ c.create( RepositoryMetadata_table )
+ assert c is RepositoryMetadata_table.c.tools_functionally_correct
+ # Initialize.
+ if migrate_engine.name == 'mysql' or migrate_engine.name == 'sqlite':
+ default_false = "0"
+ elif migrate_engine.name == 'postgres':
+ default_false = "false"
+ db_session.execute( "UPDATE repository_metadata SET tools_functionally_correct=%s" % default_false )
+ except Exception, e:
+ print "Adding tools_functionally_correct column to the repository_metadata table failed: %s" % str( e )
+ log.debug( "Adding tools_functionally_correct column to the repository_metadata table failed: %s" % str( e ) )
+ c = Column( "do_not_test", Boolean, default=False, index=True )
+ try:
+ # Create do_not_test column
+ c.create( RepositoryMetadata_table )
+ assert c is RepositoryMetadata_table.c.do_not_test
+ # Initialize.
+ if migrate_engine.name == 'mysql' or migrate_engine.name == 'sqlite':
+ default_false = "0"
+ elif migrate_engine.name == 'postgres':
+ default_false = "false"
+ db_session.execute( "UPDATE repository_metadata SET do_not_test=%s" % default_false )
+ except Exception, e:
+ print "Adding do_not_test column to the repository_metadata table failed: %s" % str( e )
+ log.debug( "Adding do_not_test column to the repository_metadata table failed: %s" % str( e ) )
+ c = Column( "time_last_tested", DateTime, default=None, nullable=True )
+ try:
+ # Create time_last_tested column
+ c.create( RepositoryMetadata_table )
+ assert c is RepositoryMetadata_table.c.time_last_tested
+ except Exception, e:
+ print "Adding time_last_tested column to the repository_metadata table failed: %s" % str( e )
+ log.debug( "Adding time_last_tested column to the repository_metadata table failed: %s" % str( e ) )
+ c = Column( "tool_test_errors", JSONType, nullable=True )
+ try:
+ pass
+ # Create tool_test_errors column
+ c.create( RepositoryMetadata_table )
+ assert c is RepositoryMetadata_table.c.tool_test_errors
+ except Exception, e:
+ print "Adding tool_test_errors column to the repository_metadata table failed: %s" % str( e )
+ log.debug( "Adding tool_test_errors column to the repository_metadata table failed: %s" % str( e ) )
+
+def downgrade():
+ metadata.reflect()
+ # Drop tool_test_errors, time_last_tested, do_not_test, and tools_functionally_correct columns from repository_metadata table.
+ RepositoryMetadata_table = Table( "repository_metadata", metadata, autoload=True )
+ try:
+ RepositoryMetadata_table.c.tool_test_errors.drop()
+ except Exception, e:
+ print "Dropping column tool_test_errors from the repository_metadata table failed: %s" % str( e )
+ log.debug( "Dropping column tool_test_errors from the repository_metadata table failed: %s" % str( e ) )
+ try:
+ RepositoryMetadata_table.c.time_last_tested.drop()
+ except Exception, e:
+ print "Dropping column time_last_tested from the repository_metadata table failed: %s" % str( e )
+ log.debug( "Dropping column time_last_tested from the repository_metadata table failed: %s" % str( e ) )
+ try:
+ RepositoryMetadata_table.c.do_not_test.drop()
+ except Exception, e:
+ print "Dropping column do_not_test from the repository_metadata table failed: %s" % str( e )
+ log.debug( "Dropping column do_not_test from the repository_metadata table failed: %s" % str( e ) )
+ try:
+ RepositoryMetadata_table.c.tools_functionally_correct.drop()
+ except Exception, e:
+ print "Dropping column tools_functionally_correct from the repository_metadata table failed: %s" % str( e )
+ log.debug( "Dropping column tools_functionally_correct from the repository_metadata table failed: %s" % str( e ) )
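For reference, a minimal standalone sketch of the add-column-then-backfill pattern each new Boolean column follows in the migration above (assumes sqlalchemy-migrate is installed and that engine is an Engine already bound to a database containing the repository_metadata table; names below are placeholders):

    from sqlalchemy import Boolean, Column, MetaData, Table
    from migrate.changeset import *   # adds create()/drop() to Column

    metadata = MetaData( bind=engine )
    table = Table( "repository_metadata", metadata, autoload=True )

    col = Column( "do_not_test", Boolean, default=False, index=True )
    col.create( table )                                               # ALTER TABLE ... ADD COLUMN
    engine.execute( "UPDATE repository_metadata SET do_not_test=0" )  # backfill existing rows (SQLite/MySQL literal)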
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/1bd67f03d4f1/
changeset: 1bd67f03d4f1
user: greg
date: 2013-02-20 20:55:26
summary: Add the API framework to the Galaxy Tool Shed.
affected #: 10 files
diff -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 -r 1bd67f03d4f12d0f6cddacb57a6402cf306f8f55 lib/galaxy/webapps/community/api/repository_revisions.py
--- /dev/null
+++ b/lib/galaxy/webapps/community/api/repository_revisions.py
@@ -0,0 +1,34 @@
+from galaxy import web, util, logging
+from galaxy.web.base.controller import BaseController, BaseAPIController
+from galaxy.web.framework.helpers import is_true
+
+import pkg_resources
+pkg_resources.require( "Routes" )
+import routes
+
+log = logging.getLogger( __name__ )
+
+class RepositoryRevisionsController( BaseAPIController ):
+ """RESTful controller for interactions with tool shed repositories."""
+ @web.expose_api
+ def index( self, trans, downloadable=True, **kwd ):
+ """
+ GET /api/repository_revisions
+ Displays a collection (list) of repository revisions.
+ """
+ rval = []
+ downloadable = util.string_as_bool( downloadable )
+ try:
+ query = trans.sa_session.query( trans.app.model.RepositoryMetadata ) \
+ .filter( trans.app.model.RepositoryMetadata.table.c.downloadable == downloadable ) \
+ .order_by( trans.app.model.RepositoryMetadata.table.c.repository_id ) \
+ .all()
+ for repository_metadata in query:
+ item = repository_metadata.get_api_value( value_mapper={ 'id' : trans.security.encode_id } )
+ item[ 'url' ] = web.url_for( 'repository_revision', id=trans.security.encode_id( repository_metadata.id ) )
+ rval.append( item )
+ except Exception, e:
+ rval = "Error in repository_revisions API"
+ log.error( rval + ": %s" % str( e ) )
+ trans.response.status = 500
+ return rval
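With the route added in buildapp.py below, the new collection can be fetched over HTTP. A hypothetical client sketch (host, port, and API key are placeholders, and key-based authentication is assumed to behave as in the existing Galaxy API):

    import json, urllib2

    url = 'http://localhost:9009/api/repository_revisions?key=YOUR_API_KEY'
    for revision in json.loads( urllib2.urlopen( url ).read() ):
        print revision[ 'changeset_revision' ], revision[ 'url' ]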
diff -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 -r 1bd67f03d4f12d0f6cddacb57a6402cf306f8f55 lib/galaxy/webapps/community/buildapp.py
--- a/lib/galaxy/webapps/community/buildapp.py
+++ b/lib/galaxy/webapps/community/buildapp.py
@@ -69,6 +69,9 @@
webapp.add_route( '/:controller/:action', action='index' )
webapp.add_route( '/:action', controller='repository', action='index' )
webapp.add_route( '/repos/*path_info', controller='hg', action='handle_request', path_info='/' )
+ # Add the web API
+ webapp.add_api_controllers( 'galaxy.webapps.community.api', app )
+ webapp.api_mapper.resource( 'repository_revision', 'repository_revisions', path_prefix='/api' )
webapp.finalize_config()
# Wrap the webapp in some useful middleware
if kwargs.get( 'middleware', True ):
diff -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 -r 1bd67f03d4f12d0f6cddacb57a6402cf306f8f55 lib/galaxy/webapps/community/framework/middleware/remoteuser.py
--- a/lib/galaxy/webapps/community/framework/middleware/remoteuser.py
+++ b/lib/galaxy/webapps/community/framework/middleware/remoteuser.py
@@ -75,6 +75,9 @@
elif path_info.startswith( '/api/' ):
# The API handles its own authentication via keys
return self.app( environ, start_response )
+ elif path_info.startswith( '/user/api_keys' ):
+ # api_keys can be managed when remote_user is in use.
+ pass
else:
title = "Access to this Galaxy tool shed is denied"
message = """
diff -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 -r 1bd67f03d4f12d0f6cddacb57a6402cf306f8f55 lib/galaxy/webapps/community/model/__init__.py
--- a/lib/galaxy/webapps/community/model/__init__.py
+++ b/lib/galaxy/webapps/community/model/__init__.py
@@ -9,14 +9,20 @@
from galaxy.util.bunch import Bunch
from galaxy.util.hash_util import new_secure_hash
from galaxy.web.form_builder import *
+from galaxy.model.item_attrs import APIItem
from galaxy import eggs
eggs.require('mercurial')
from mercurial import hg, ui
log = logging.getLogger( __name__ )
-
-class User( object ):
+
+class APIKeys( object ):
+ pass
+
+class User( object, APIItem ):
+ api_collection_visible_keys = ( 'id', 'email' )
+ api_element_visible_keys = ( 'id', 'email', 'username' )
def __init__( self, email=None, password=None ):
self.email = email
self.password = password
@@ -47,12 +53,16 @@
def nice_total_disk_usage( self ):
return 0
-class Group( object ):
+class Group( object, APIItem ):
+ api_collection_visible_keys = ( 'id', 'name' )
+ api_element_visible_keys = ( 'id', 'name' )
def __init__( self, name = None ):
self.name = name
self.deleted = False
-class Role( object ):
+class Role( object, APIItem ):
+ api_collection_visible_keys = ( 'id', 'name' )
+ api_element_visible_keys = ( 'id', 'name', 'description', 'type' )
private_id = None
types = Bunch(
PRIVATE = 'private',
@@ -103,7 +113,9 @@
self.is_valid = is_valid
self.prev_session_id = prev_session_id
-class Repository( object ):
+class Repository( object, APIItem ):
+ api_collection_visible_keys = ( 'id', 'name' )
+ api_element_visible_keys = ( 'id', 'name', 'description' )
file_states = Bunch( NORMAL = 'n',
NEEDS_MERGING = 'm',
MARKED_FOR_REMOVAL = 'r',
@@ -155,7 +167,9 @@
fp.write( line )
fp.close()
-class RepositoryMetadata( object ):
+class RepositoryMetadata( object, APIItem ):
+ api_collection_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable' )
+ api_element_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable' )
def __init__( self, repository_id=None, changeset_revision=None, metadata=None, tool_versions=None, malicious=False, downloadable=False ):
self.repository_id = repository_id
self.changeset_revision = changeset_revision
@@ -163,8 +177,26 @@
self.tool_versions = tool_versions or dict()
self.malicious = malicious
self.downloadable = downloadable
+ def get_api_value( self, view='collection', value_mapper=None ):
+ if value_mapper is None:
+ value_mapper = {}
+ rval = {}
+ try:
+ visible_keys = self.__getattribute__( 'api_' + view + '_visible_keys' )
+ except AttributeError:
+ raise Exception( 'Unknown API view: %s' % view )
+ for key in visible_keys:
+ try:
+ rval[ key ] = self.__getattribute__( key )
+ if key in value_mapper:
+ rval[ key ] = value_mapper.get( key )( rval[ key ] )
+ except AttributeError:
+ rval[ key ] = None
+ return rval
-class RepositoryReview( object ):
+class RepositoryReview( object, APIItem ):
+ api_collection_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'user_id', 'rating' )
+ api_element_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'user_id', 'rating' )
approved_states = Bunch( NO='no', YES='yes' )
def __init__( self, repository_id=None, changeset_revision=None, user_id=None, rating=None, deleted=False ):
self.repository_id = repository_id
@@ -173,7 +205,9 @@
self.rating = rating
self.deleted = deleted
-class ComponentReview( object ):
+class ComponentReview( object, APIItem ):
+ api_collection_visible_keys = ( 'id', 'repository_review_id', 'component_id', 'private', 'approved', 'rating', 'deleted' )
+ api_element_visible_keys = ( 'id', 'repository_review_id', 'component_id', 'private', 'approved', 'rating', 'deleted' )
approved_states = Bunch( NO='no', YES='yes', NA='not_applicable' )
def __init__( self, repository_review_id=None, component_id=None, comment=None, private=False, approved=False, rating=None, deleted=False ):
self.repository_review_id = repository_review_id
@@ -204,7 +238,9 @@
def set_item( self, repository ):
self.repository = repository
-class Category( object ):
+class Category( object, APIItem ):
+ api_collection_visible_keys = ( 'id', 'name', 'description', 'deleted' )
+ api_element_visible_keys = ( 'id', 'name', 'description', 'deleted' )
def __init__( self, name=None, description=None, deleted=False ):
self.name = name
self.description = description
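The get_api_value() method added to RepositoryMetadata above is what the API controller serializes; its value_mapper argument lets callers transform selected fields (for example, encoding database ids). An illustrative standalone sketch of that behavior (a stand-in class, not the Galaxy model):

    class Item( object ):
        api_collection_visible_keys = ( 'id', 'name' )
        def __init__( self, id, name ):
            self.id = id
            self.name = name
        def get_api_value( self, view='collection', value_mapper=None ):
            value_mapper = value_mapper or {}
            rval = {}
            for key in getattr( self, 'api_%s_visible_keys' % view ):
                rval[ key ] = getattr( self, key, None )
                if key in value_mapper:
                    rval[ key ] = value_mapper[ key ]( rval[ key ] )
            return rval

    print Item( 42, 'filtering' ).get_api_value( value_mapper={ 'id': hex } )
    # e.g. {'id': '0x2a', 'name': 'filtering'}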
diff -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 -r 1bd67f03d4f12d0f6cddacb57a6402cf306f8f55 lib/galaxy/webapps/community/model/mapping.py
--- a/lib/galaxy/webapps/community/model/mapping.py
+++ b/lib/galaxy/webapps/community/model/mapping.py
@@ -39,6 +39,12 @@
# Return the current time in UTC without any timezone information
now = datetime.datetime.utcnow
+APIKeys.table = Table( "api_keys", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
+ Column( "key", TrimmedString( 32 ), index=True, unique=True ) )
+
User.table = Table( "galaxy_user", metadata,
Column( "id", Integer, primary_key=True),
Column( "create_time", DateTime, default=now ),
@@ -186,7 +192,11 @@
# With the tables defined we can define the mappers and setup the relationships between the model objects.
assign_mapper( context, User, User.table,
properties=dict( active_repositories=relation( Repository, primaryjoin=( ( Repository.table.c.user_id == User.table.c.id ) & ( not_( Repository.table.c.deleted ) ) ), order_by=( Repository.table.c.name ) ),
- galaxy_sessions=relation( GalaxySession, order_by=desc( GalaxySession.table.c.update_time ) ) ) )
+ galaxy_sessions=relation( GalaxySession, order_by=desc( GalaxySession.table.c.update_time ) ),
+ api_keys=relation( APIKeys, backref="user", order_by=desc( APIKeys.table.c.create_time ) ) ) )
+
+assign_mapper( context, APIKeys, APIKeys.table,
+ properties = {} )
assign_mapper( context, Group, Group.table,
properties=dict( users=relation( UserGroupAssociation ) ) )
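A sketch of what the new api_keys relation provides (assumes sa_session is a configured SQLAlchemy session and model is the mapped community model module shown above; the email address is a placeholder):

    user = sa_session.query( model.User ).filter_by( email='someone@example.org' ).first()
    if user and user.api_keys:
        # The relation is ordered by create_time descending, so the first entry is the newest key.
        current_key = user.api_keys[ 0 ].key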
diff -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 -r 1bd67f03d4f12d0f6cddacb57a6402cf306f8f55 lib/galaxy/webapps/community/model/migrate/versions/0015_add_api_keys_table.py
--- /dev/null
+++ b/lib/galaxy/webapps/community/model/migrate/versions/0015_add_api_keys_table.py
@@ -0,0 +1,46 @@
+"""
+Migration script to add the api_keys table.
+"""
+import datetime, sys, logging
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from migrate import *
+from migrate.changeset import *
+from galaxy.model.custom_types import *
+
+log = logging.getLogger( __name__ )
+log.setLevel(logging.DEBUG)
+handler = logging.StreamHandler( sys.stdout )
+format = "%(name)s %(levelname)s %(asctime)s %(message)s"
+formatter = logging.Formatter( format )
+handler.setFormatter( formatter )
+log.addHandler( handler )
+
+now = datetime.datetime.utcnow
+
+metadata = MetaData( migrate_engine )
+db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
+
+metadata = MetaData( migrate_engine )
+
+APIKeys_table = Table( "api_keys", metadata,
+ Column( "id", Integer, primary_key=True ),
+ Column( "create_time", DateTime, default=now ),
+ Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
+ Column( "key", TrimmedString( 32 ), index=True, unique=True ) )
+
+def upgrade():
+ print __doc__
+ metadata.reflect()
+ try:
+ APIKeys_table.create()
+ except Exception, e:
+ log.debug( "Creating api_keys table failed: %s" % str( e ) )
+
+def downgrade():
+ # Load existing tables
+ metadata.reflect()
+ try:
+ APIKeys_table.drop()
+ except Exception, e:
+ log.debug( "Dropping api_keys table failed: %s" % str( e ) )
diff -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 -r 1bd67f03d4f12d0f6cddacb57a6402cf306f8f55 templates/user/index.mako
--- a/templates/user/index.mako
+++ b/templates/user/index.mako
@@ -25,6 +25,7 @@
%endif
%else:
<li><a href="${h.url_for( controller='user', action='manage_user_info', cntrller=cntrller )}">${_('Manage your information')}</a></li>
+ <li><a href="${h.url_for( controller='user', action='api_keys', cntrller=cntrller )}">${_('Manage your API keys')}</a></li>
<li><a href="${h.url_for( controller='repository', action='manage_email_alerts', cntrller=cntrller )}">${_('Manage your email alerts')}</a></li>
<li><a href="${h.url_for( controller='user', action='logout', logout_all=True )}" target="_top">${_('Logout')}</a> ${_('of all user sessions')}</li>
%endif
diff -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 -r 1bd67f03d4f12d0f6cddacb57a6402cf306f8f55 templates/webapps/community/base_panels.mako
--- a/templates/webapps/community/base_panels.mako
+++ b/templates/webapps/community/base_panels.mako
@@ -105,6 +105,7 @@
menu_options.append( [ _('Logout'), app.config.remote_user_logout_href, "_top" ] )
else:
menu_options.append( [ _('Preferences'), h.url_for( controller='/user', action='index', cntrller='user' ), "galaxy_main" ] )
+ menu_options.append( [ _('API Keys'), h.url_for( controller='/user', action='api_keys', cntrller='user' ), "galaxy_main" ] )
logout_url = h.url_for( controller='/user', action='logout' )
menu_options.append( [ 'Logout', logout_url, "_top" ] )
menu_options.append( None )
diff -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 -r 1bd67f03d4f12d0f6cddacb57a6402cf306f8f55 templates/webapps/galaxy/user/api_keys.mako
--- a/templates/webapps/galaxy/user/api_keys.mako
+++ b/templates/webapps/galaxy/user/api_keys.mako
@@ -30,11 +30,14 @@
(invalidates old key)
%endif
<div class="toolParamHelp" style="clear: both;">
- An API key will allow you to access Galaxy via its web
- API (documentation forthcoming). Please note that
- <strong>this key acts as an alternate means to access
- your account, and should be treated with the same care
- as your login password</strong>.
+ <%
+ if trans.webapp.name == 'galaxy':
+ webapp_str = 'Galaxy'
+ else:
+ webapp_str = 'the Tool Shed'
+ %>
+ An API key will allow you to access ${webapp_str} via its web API. Please note that <strong>this key acts as an alternate means
+ to access your account and should be treated with the same care as your login password</strong>.
</div>
</div>
</form>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: inithello: Tool shed functional test enhancement, made the grant_write_access method more robust, added functional test for contacting a repository's owner.
by Bitbucket 20 Feb '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/0fb1af8ce741/
changeset: 0fb1af8ce741
user: inithello
date: 2013-02-20 20:33:04
summary: Tool shed functional test enhancement, made the grant_write_access method more robust, added functional test for contacting a repository's owner.
affected #: 3 files
diff -r b9c93f559efa7a50cfa445b5f79c579690155589 -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 test/tool_shed/base/twilltestcase.py
--- a/test/tool_shed/base/twilltestcase.py
+++ b/test/tool_shed/base/twilltestcase.py
@@ -587,13 +587,20 @@
self.visit_url( url )
strings_displayed = [ "Role '%s' has been updated" % role.name ]
self.check_for_strings( strings_displayed, strings_not_displayed )
- def grant_write_access( self, repository, usernames=[], strings_displayed=[], strings_not_displayed=[] ):
+ def grant_write_access( self,
+ repository,
+ usernames=[],
+ strings_displayed=[],
+ strings_not_displayed=[],
+ post_submit_strings_displayed=[],
+ post_submit_strings_not_displayed=[] ):
self.display_manage_repository_page( repository )
- tc.fv( "3", "allow_push", '-Select one' )
+ self.check_for_strings( strings_displayed, strings_not_displayed )
+ tc.fv( "user_access", "allow_push", '-Select one' )
for username in usernames:
- tc.fv( "3", "allow_push", '+%s' % username )
+ tc.fv( "user_access", "allow_push", '+%s' % username )
tc.submit( 'user_access_button' )
- self.check_for_strings( strings_displayed, strings_not_displayed )
+ self.check_for_strings( post_submit_strings_displayed, post_submit_strings_not_displayed )
def import_workflow( self, repository, workflow_name, strings_displayed=[], strings_not_displayed=[] ):
url = '/admin_toolshed/import_workflow?repository_id=%s&workflow_name=%s' % \
( self.security.encode_id( repository.id ), tool_shed_encode( workflow_name ) )
@@ -847,6 +854,19 @@
tc.fv( "1", field_name, search_string )
tc.submit()
self.check_for_strings( strings_displayed, strings_not_displayed )
+ def send_message_to_repository_owner( self,
+ repository,
+ message,
+ strings_displayed=[],
+ strings_not_displayed=[],
+ post_submit_strings_displayed=[],
+ post_submit_strings_not_displayed=[] ):
+ url = '/repository/contact_owner?id=%s' % self.security.encode_id( repository.id )
+ self.visit_url( url )
+ self.check_for_strings( strings_displayed, strings_not_displayed )
+ tc.fv( 1, 'message', message )
+ tc.submit()
+ self.check_for_strings( post_submit_strings_displayed, post_submit_strings_not_displayed )
def set_repository_deprecated( self, repository, set_deprecated=True, strings_displayed=[], strings_not_displayed=[] ):
url = '/repository/deprecate?id=%s&mark_deprecated=%s' % ( self.security.encode_id( repository.id ), set_deprecated )
self.visit_url( url )
diff -r b9c93f559efa7a50cfa445b5f79c579690155589 -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 test/tool_shed/functional/test_0000_basic_repository_features.py
--- a/test/tool_shed/functional/test_0000_basic_repository_features.py
+++ b/test/tool_shed/functional/test_0000_basic_repository_features.py
@@ -9,11 +9,17 @@
'''Test core repository features.'''
def test_0000_initiate_users( self ):
"""Create necessary user accounts and login as an admin user."""
+ self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = test_db_util.get_private_role( test_user_1 )
self.logout()
+ self.login( email=common.test_user_2_email, username=common.test_user_2_name )
+ test_user_2 = test_db_util.get_user( common.test_user_2_email )
+ assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
+ test_user_2_private_role = test_db_util.get_private_role( test_user_2 )
+ self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
@@ -51,8 +57,8 @@
def test_0025_grant_write_access( self ):
'''Grant write access to another user'''
repository = test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
- self.grant_write_access( repository, usernames=[ common.admin_username ] )
- self.revoke_write_access( repository, common.admin_username )
+ self.grant_write_access( repository, usernames=[ common.test_user_2_name ] )
+ self.revoke_write_access( repository, common.test_user_2_name )
def test_0030_upload_filtering_1_1_0( self ):
"""Upload filtering_1.1.0.tar to the repository"""
repository = test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
@@ -214,3 +220,22 @@
assert test_user_1 is None, 'Creating user with public name "repos" succeeded.'
error_message = 'The term <b>repos</b> is a reserved word in the tool shed, so it cannot be used as a public user name.'
self.check_for_strings( strings_displayed=[ error_message ] )
+ def test_0105_contact_repository_owner( self ):
+ '''Fill out and submit the form to contact the owner of a repository.'''
+ '''
+ This test should not actually send the email, since functional tests are designed to function without
+ any external network connection. The embedded tool shed server these tests are running against has been configured
+ with an SMTP server address that will not and should not resolve correctly. However, since the successful sending of
+ the email is the last step in the process, this will verify functional correctness of all preceding steps.
+ '''
+ self.logout()
+ self.login( email=common.test_user_2_email, username=common.test_user_2_name )
+ repository = test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
+ message = 'This is a test message.'
+ strings_displayed = [ 'Contact the owner of the repository named', repository.name, 'streamline appropriate communication' ]
+ post_submit_strings_displayed = [ 'An error occurred sending your message by email' ]
+ self.send_message_to_repository_owner( repository=repository,
+ message=message,
+ strings_displayed=strings_displayed,
+ post_submit_strings_displayed=post_submit_strings_displayed )
+
diff -r b9c93f559efa7a50cfa445b5f79c579690155589 -r 0fb1af8ce7410cd48c9259962a4e0da528ff4ef6 test/tool_shed/functional_tests.py
--- a/test/tool_shed/functional_tests.py
+++ b/test/tool_shed/functional_tests.py
@@ -199,6 +199,8 @@
new_file_path = new_repos_path,
running_functional_tests = True,
shed_tool_data_table_config = shed_tool_data_table_conf_file,
+ smtp_server = 'smtp.dummy.string.tld',
+ email_from = 'functional@localhost',
template_path = 'templates',
tool_path=tool_path,
tool_parse_help = False,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: inithello: Extension of framework to automate the installation of a list of tool shed repositories to also run any functional tests in the tools contained within each repository.
by Bitbucket 20 Feb '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/b9c93f559efa/
changeset: b9c93f559efa
user: inithello
date: 2013-02-20 20:27:36
summary: Extension of framework to automate the installation of a list of tool shed repositories to also run any functional tests in the tools contained within each repository.
affected #: 6 files
diff -r efebd50fd708aa3af73f954e4da68e88f2bced9a -r b9c93f559efa7a50cfa445b5f79c579690155589 install_and_test_tool_shed_repositories.sh
--- /dev/null
+++ b/install_and_test_tool_shed_repositories.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+# A good place to look for nose info: http://somethingaboutorange.com/mrl/projects/nose/
+#rm -f ./test/tool_shed/run_functional_tests.log
+
+python test/install_and_test_tool_shed_repositories/functional_tests.py -v --with-nosehtml --html-report-file ./test/install_and_test_tool_shed_repositories/run_functional_tests.html test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py test/functional/test_toolbox.py
+
diff -r efebd50fd708aa3af73f954e4da68e88f2bced9a -r b9c93f559efa7a50cfa445b5f79c579690155589 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -4,10 +4,12 @@
# Assume we are run from the galaxy root directory, add lib to the python path
cwd = os.getcwd()
-new_path = [ os.path.join( cwd, "lib" ) ]
+new_path = [ os.path.join( cwd, "lib" ), os.path.join( cwd, "test" ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path
+from base.util import parse_tool_panel_config
+
from galaxy import eggs
eggs.require( "nose" )
@@ -85,73 +87,6 @@
global_conf.update( get_static_settings() )
return global_conf
-def parse_tool_panel_config( config, shed_tools_dict ):
- """
- Parse a shed-related tool panel config to generate the shed_tools_dict. This only happens when testing tools installed from the tool shed.
- """
- last_galaxy_test_file_dir = None
- last_tested_repository_name = None
- last_tested_changeset_revision = None
- tool_path = None
- tree = util.parse_xml( config )
- root = tree.getroot()
- tool_path = root.get('tool_path')
- for elem in root:
- if elem.tag == 'tool':
- galaxy_test_file_dir, \
- last_tested_repository_name, \
- last_tested_changeset_revision = get_installed_repository_info( elem,
- last_galaxy_test_file_dir,
- last_tested_repository_name,
- last_tested_changeset_revision,
- tool_path )
- if galaxy_test_file_dir:
- if galaxy_test_file_dir != last_galaxy_test_file_dir:
- if not os.path.isabs( galaxy_test_file_dir ):
- galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
- guid = elem.get( 'guid' )
- shed_tools_dict[ guid ] = galaxy_test_file_dir
- last_galaxy_test_file_dir = galaxy_test_file_dir
- elif elem.tag == 'section':
- for section_elem in elem:
- if section_elem.tag == 'tool':
- galaxy_test_file_dir, \
- last_tested_repository_name, \
- last_tested_changeset_revision = get_installed_repository_info( section_elem,
- last_galaxy_test_file_dir,
- last_tested_repository_name,
- last_tested_changeset_revision,
- tool_path )
- if galaxy_test_file_dir:
- if galaxy_test_file_dir != last_galaxy_test_file_dir:
- if not os.path.isabs( galaxy_test_file_dir ):
- galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
- guid = section_elem.get( 'guid' )
- shed_tools_dict[ guid ] = galaxy_test_file_dir
- last_galaxy_test_file_dir = galaxy_test_file_dir
- return shed_tools_dict
-
-def get_installed_repository_info( elem, last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision, tool_path ):
- """
- Return the GALAXY_TEST_FILE_DIR, the containing repository name and the change set revision for the tool elem.
- This only happens when testing tools installed from the tool shed.
- """
- tool_config_path = elem.get( 'file' )
- installed_tool_path_items = tool_config_path.split( '/repos/' )
- sans_shed = installed_tool_path_items[ 1 ]
- path_items = sans_shed.split( '/' )
- repository_owner = path_items[ 0 ]
- repository_name = path_items[ 1 ]
- changeset_revision = path_items[ 2 ]
- if repository_name != last_tested_repository_name or changeset_revision != last_tested_changeset_revision:
- # Locate the test-data directory.
- installed_tool_path = os.path.join( installed_tool_path_items[ 0 ], 'repos', repository_owner, repository_name, changeset_revision )
- for root, dirs, files in os.walk( os.path.join(tool_path, installed_tool_path )):
- if 'test-data' in dirs:
- return os.path.join( root, 'test-data' ), repository_name, changeset_revision
- return None, repository_name, changeset_revision
- return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision
-
def run_tests( test_config ):
loader = nose.loader.TestLoader( config=test_config )
plug_loader = test_config.plugins.prepareTestLoader( loader )
diff -r efebd50fd708aa3af73f954e4da68e88f2bced9a -r b9c93f559efa7a50cfa445b5f79c579690155589 test/base/util.py
--- /dev/null
+++ b/test/base/util.py
@@ -0,0 +1,80 @@
+import os, sys
+
+cwd = os.getcwd()
+if cwd not in sys.path:
+ sys.path.append( cwd )
+
+new_path = [ os.path.join( cwd, "lib" ) ]
+if new_path not in sys.path:
+ new_path.extend( sys.path )
+ sys.path = new_path
+
+from galaxy.util import parse_xml
+
+def get_installed_repository_info( elem, last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision, tool_path ):
+ """
+ Return the GALAXY_TEST_FILE_DIR, the containing repository name and the change set revision for the tool elem.
+ This only happens when testing tools installed from the tool shed.
+ """
+ tool_config_path = elem.get( 'file' )
+ installed_tool_path_items = tool_config_path.split( '/repos/' )
+ sans_shed = installed_tool_path_items[ 1 ]
+ path_items = sans_shed.split( '/' )
+ repository_owner = path_items[ 0 ]
+ repository_name = path_items[ 1 ]
+ changeset_revision = path_items[ 2 ]
+ if repository_name != last_tested_repository_name or changeset_revision != last_tested_changeset_revision:
+ # Locate the test-data directory.
+ installed_tool_path = os.path.join( installed_tool_path_items[ 0 ], 'repos', repository_owner, repository_name, changeset_revision )
+ for root, dirs, files in os.walk( os.path.join(tool_path, installed_tool_path )):
+ if 'test-data' in dirs:
+ return os.path.join( root, 'test-data' ), repository_name, changeset_revision
+ return None, repository_name, changeset_revision
+ return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision
+
+def parse_tool_panel_config( config, shed_tools_dict ):
+ """
+ Parse a shed-related tool panel config to generate the shed_tools_dict. This only happens when testing tools installed from the tool shed.
+ """
+ last_galaxy_test_file_dir = None
+ last_tested_repository_name = None
+ last_tested_changeset_revision = None
+ tool_path = None
+ tree = parse_xml( config )
+ root = tree.getroot()
+ tool_path = root.get('tool_path')
+ for elem in root:
+ if elem.tag == 'tool':
+ galaxy_test_file_dir, \
+ last_tested_repository_name, \
+ last_tested_changeset_revision = get_installed_repository_info( elem,
+ last_galaxy_test_file_dir,
+ last_tested_repository_name,
+ last_tested_changeset_revision,
+ tool_path )
+ if galaxy_test_file_dir:
+ if galaxy_test_file_dir != last_galaxy_test_file_dir:
+ if not os.path.isabs( galaxy_test_file_dir ):
+ galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
+ guid = elem.get( 'guid' )
+ shed_tools_dict[ guid ] = galaxy_test_file_dir
+ last_galaxy_test_file_dir = galaxy_test_file_dir
+ elif elem.tag == 'section':
+ for section_elem in elem:
+ if section_elem.tag == 'tool':
+ galaxy_test_file_dir, \
+ last_tested_repository_name, \
+ last_tested_changeset_revision = get_installed_repository_info( section_elem,
+ last_galaxy_test_file_dir,
+ last_tested_repository_name,
+ last_tested_changeset_revision,
+ tool_path )
+ if galaxy_test_file_dir:
+ if galaxy_test_file_dir != last_galaxy_test_file_dir:
+ if not os.path.isabs( galaxy_test_file_dir ):
+ galaxy_test_file_dir = os.path.join( os.getcwd(), galaxy_test_file_dir )
+ guid = section_elem.get( 'guid' )
+ shed_tools_dict[ guid ] = galaxy_test_file_dir
+ last_galaxy_test_file_dir = galaxy_test_file_dir
+ return shed_tools_dict
+
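parse_tool_panel_config() above maps each installed tool's guid to the test-data directory of the repository that provides it. A hypothetical example of the resulting dict (the guid and paths are invented placeholders):

    shed_tools_dict = {
        'toolshed.example.org/repos/devteam/filtering/Filter1/1.1.0':
            '/galaxy/shed_tools/toolshed.example.org/repos/devteam/filtering/abc123/filtering/test-data',
    }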
diff -r efebd50fd708aa3af73f954e4da68e88f2bced9a -r b9c93f559efa7a50cfa445b5f79c579690155589 test/install_and_test_tool_shed_repositories/base/twilltestcase.py
--- a/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
+++ b/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
@@ -121,4 +121,12 @@
( timeout_counter, repository.status ) )
break
time.sleep( 1 )
+ def uninstall_repository( self, installed_repository ):
+ url = '/admin_toolshed/deactivate_or_uninstall_repository?id=%s' % self.security.encode_id( installed_repository.id )
+ self.visit_url( url )
+ tc.fv ( 1, "remove_from_disk", 'true' )
+ tc.submit( 'deactivate_or_uninstall_repository_button' )
+ strings_displayed = [ 'The repository named' ]
+ strings_displayed.append( 'has been uninstalled' )
+ self.check_for_strings( strings_displayed, strings_not_displayed=[] )
diff -r efebd50fd708aa3af73f954e4da68e88f2bced9a -r b9c93f559efa7a50cfa445b5f79c579690155589 test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py
--- a/test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py
+++ b/test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py
@@ -1,18 +1,33 @@
-import new
+import new, logging
import install_and_test_tool_shed_repositories.base.test_db_util as test_db_util
from install_and_test_tool_shed_repositories.base.twilltestcase import InstallTestRepository
+log = logging.getLogger(__name__)
class TestInstallRepositories( InstallTestRepository ):
- """Abstract test case that installs a predefined list of repositories."""
+ """Abstract test case that installs and uninstalls a predefined list of repositories."""
def do_installation( self, repository_info_dict ):
self.logout()
self.login( email='test@bx.psu.edu', username='test' )
admin_user = test_db_util.get_user( 'test@bx.psu.edu' )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
admin_user_private_role = test_db_util.get_private_role( admin_user )
+ # Install the repository through the web interface using twill.
self.install_repository( repository_info_dict )
-def build_tests( repository_dict=None ):
+ def do_uninstallation( self, repository_info_dict ):
+ self.logout()
+ self.login( email='test@bx.psu.edu', username='test' )
+ admin_user = test_db_util.get_user( 'test@bx.psu.edu' )
+ assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
+ # Get the repository from the database.
+ repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( repository_info_dict[ 'name' ],
+ repository_info_dict[ 'owner' ],
+ repository_info_dict[ 'changeset_revision' ] )
+ admin_user_private_role = test_db_util.get_private_role( admin_user )
+ # Uninstall the repository through the web interface using twill.
+ self.uninstall_repository( repository )
+
+def generate_install_method( repository_dict=None ):
"""Generate abstract test cases for the defined list of repositories."""
if repository_dict is None:
return
@@ -20,7 +35,7 @@
G = globals()
# Eliminate all previous tests from G.
for key, val in G.items():
- if key.startswith( 'TestInstallRepository_' ):
+ if key.startswith( 'TestInstallRepository_' ) or key.startswith( 'TestUninstallRepository_' ):
del G[ key ]
# Create a new subclass with a method named install_repository_XXX that installs the repository specified by the provided dict.
name = "TestInstallRepository_" + repository_dict[ 'name' ]
@@ -37,3 +52,29 @@
# from baseclasses (which should be a tuple of classes) and with namespace dict.
new_class_obj = new.classobj( name, baseclasses, namespace )
G[ name ] = new_class_obj
+
+def generate_uninstall_method( repository_dict=None ):
+ """Generate abstract test cases for the defined list of repositories."""
+ if repository_dict is None:
+ return
+ # Push all the toolbox tests to module level
+ G = globals()
+ # Eliminate all previous tests from G.
+ for key, val in G.items():
+ if key.startswith( 'TestInstallRepository_' ) or key.startswith( 'TestUninstallRepository_' ):
+ del G[ key ]
+ # Create a new subclass with a method named install_repository_XXX that installs the repository specified by the provided dict.
+ name = "TestUninstallRepository_" + repository_dict[ 'name' ]
+ baseclasses = ( TestInstallRepositories, )
+ namespace = dict()
+ def make_uninstall_method( repository_dict ):
+ def test_install_repository( self ):
+ self.do_uninstallation( repository_dict )
+ return test_install_repository
+ test_method = make_uninstall_method( repository_dict )
+ test_method.__doc__ = "Uninstall the repository %s." % repository_dict[ 'name' ]
+ namespace[ 'install_repository_%s' % repository_dict[ 'name' ] ] = test_method
+ # The new.classobj function returns a new class object, with name name, derived
+ # from baseclasses (which should be a tuple of classes) and with namespace dict.
+ new_class_obj = new.classobj( name, baseclasses, namespace )
+ G[ name ] = new_class_obj
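The two generators above build nose test classes at runtime from a repository dict. A minimal standalone sketch of the same pattern (type() is the direct equivalent of the deprecated new.classobj(); the dict below is a placeholder):

    def make_install_method( repository_dict ):
        def test_install_repository( self ):
            print 'would install %s' % repository_dict[ 'name' ]   # stand-in for do_installation()
        return test_install_repository

    repository_dict = { 'name': 'filtering' }
    name = 'TestInstallRepository_' + repository_dict[ 'name' ]
    namespace = { 'test_install_%s' % repository_dict[ 'name' ]: make_install_method( repository_dict ) }
    globals()[ name ] = type( name, ( object, ), namespace )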
diff -r efebd50fd708aa3af73f954e4da68e88f2bced9a -r b9c93f559efa7a50cfa445b5f79c579690155589 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -1,5 +1,9 @@
#!/usr/bin/env python
+# NOTE: This script cannot be run directly, because it needs to have test/functional/test_toolbox.py in sys.argv in
+# order to run functional tests on repository tools after installation. The install_and_test_tool_shed_repositories.sh
+# will execute this script with the appropriate parameters.
+
import os, sys, shutil, tempfile, re, string
# Assume we are run from the galaxy root directory, add lib to the python path
@@ -8,9 +12,9 @@
test_home_directory = os.path.join( cwd, 'test', 'install_and_test_tool_shed_repositories' )
default_test_file_dir = os.path.join( test_home_directory, 'test_data' )
+
# Here's the directory where everything happens. Temporary directories are created within this directory to contain
-# the hgweb.config file, the database, new repositories, etc. Since the tool shed browses repository contents via HTTP,
-# the full path to the temporary directroy wher eht repositories are located cannot contain invalid url characters.
+# the database, new repositories, etc.
galaxy_test_tmp_dir = os.path.join( test_home_directory, 'tmp' )
default_galaxy_locales = 'en'
default_galaxy_test_file_dir = "test-data"
@@ -43,12 +47,15 @@
import galaxy.app
from galaxy.app import UniverseApplication
from galaxy.web import buildapp
+from galaxy.util import parse_xml
import nose.core
import nose.config
import nose.loader
import nose.plugins.manager
+from base.util import parse_tool_panel_config
+
log = logging.getLogger( 'install_and_test_repositories' )
default_galaxy_test_port_min = 10000
@@ -66,11 +73,13 @@
</tool_sheds>
'''
+# Create a blank shed_tool_conf.xml to hold the installed repositories.
shed_tool_conf_xml_template = '''<?xml version="1.0"?>
<toolbox tool_path="${shed_tool_path}">
</toolbox>
'''
+# Since we will be running functional tests, we'll need the upload tool, but the rest can be omitted.
tool_conf_xml = '''<?xml version="1.0"?>
<toolbox>
<section name="Get Data" id="getext">
@@ -79,11 +88,13 @@
</toolbox>
'''
+# And set up a blank tool_data_table_conf.xml and shed_tool_data_table_conf.xml.
tool_data_table_conf_xml_template = '''<?xml version="1.0"?>
<tables>
</tables>
'''
+# Define a default location to find the list of repositories to check.
galaxy_repository_list = os.environ.get( 'GALAXY_INSTALL_TEST_REPOSITORY_FILE', 'repository_list.json' )
if 'GALAXY_INSTALL_TEST_SECRET' not in os.environ:
@@ -92,7 +103,7 @@
else:
galaxy_encode_secret = os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ]
-def get_repositories_to_install():
+def get_repositories_to_install( format='json' ):
'''
Get a list of repository info dicts to install. This method expects a json list of dicts with the following structure:
[
@@ -107,7 +118,10 @@
]
NOTE: If the tool shed URL specified in any dict is not present in the tool_sheds_conf.xml, the installation will fail.
'''
- return simplejson.loads( file( galaxy_repository_list, 'r' ).read() )
+ if format == 'json':
+ return simplejson.loads( file( galaxy_repository_list, 'r' ).read() )
+ else:
+ raise AssertionError( 'Unknown format %s.' % format )
def run_tests( test_config ):
loader = nose.loader.TestLoader( config=test_config )
@@ -126,7 +140,7 @@
def main():
# ---- Configuration ------------------------------------------------------
galaxy_test_host = os.environ.get( 'GALAXY_INSTALL_TEST_HOST', default_galaxy_test_host )
- galaxy_test_port = os.environ.get( 'GALAXY_INSTALL_TEST_PORT', None )
+ galaxy_test_port = os.environ.get( 'GALAXY_INSTALL_TEST_PORT', str( default_galaxy_test_port_max ) )
tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_PATH', 'tools' )
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
@@ -134,28 +148,32 @@
galaxy_test_file_dir = os.environ.get( 'GALAXY_INSTALL_TEST_FILE_DIR', default_galaxy_test_file_dir )
if not os.path.isabs( galaxy_test_file_dir ):
galaxy_test_file_dir = os.path.abspath( galaxy_test_file_dir )
+ # Set up the tool dependency path for the Galaxy instance.
tool_dependency_dir = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR', None )
use_distributed_object_store = os.environ.get( 'GALAXY_INSTALL_TEST_USE_DISTRIBUTED_OBJECT_STORE', False )
if not os.path.isdir( galaxy_test_tmp_dir ):
os.mkdir( galaxy_test_tmp_dir )
galaxy_test_proxy_port = None
+ # Set up the configuration files for the Galaxy instance.
shed_tool_data_table_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DATA_TABLE_CONF', os.path.join( galaxy_test_tmp_dir, 'test_shed_tool_data_table_conf.xml' ) )
galaxy_tool_data_table_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_TABLE_CONF', os.path.join( galaxy_test_tmp_dir, 'test_tool_data_table_conf.xml' ) )
galaxy_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_CONF', os.path.join( galaxy_test_tmp_dir, 'test_tool_conf.xml' ) )
galaxy_shed_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF', os.path.join( galaxy_test_tmp_dir, 'test_shed_tool_conf.xml' ) )
galaxy_migrated_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_MIGRATED_TOOL_CONF', os.path.join( galaxy_test_tmp_dir, 'test_migrated_tool_conf.xml' ) )
galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF', os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) )
- shed_tool_dict = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE', os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
+ galaxy_shed_tools_dict = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE', os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
if 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' in os.environ:
tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' )
else:
tool_data_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
os.environ[ 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' ] = tool_data_path
+ # Configure the database connection and path.
if 'GALAXY_INSTALL_TEST_DBPATH' in os.environ:
galaxy_db_path = os.environ[ 'GALAXY_INSTALL_TEST_DBPATH' ]
else:
tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
galaxy_db_path = os.path.join( tempdir, 'database' )
+ # Configure the paths Galaxy needs to install and test tools.
galaxy_file_path = os.path.join( galaxy_db_path, 'files' )
new_repos_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
galaxy_tempfiles = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
@@ -239,6 +257,7 @@
static_enabled=False,
app=app )
+ # Serve the app on a specified or random port.
if galaxy_test_port is not None:
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
else:
@@ -260,9 +279,10 @@
os.environ[ 'GALAXY_INSTALL_TEST_PORT' ] = galaxy_test_proxy_port
else:
os.environ[ 'GALAXY_INSTALL_TEST_PORT' ] = galaxy_test_port
+ # Start the server.
t = threading.Thread( target=server.serve_forever )
t.start()
- # Test if the server is up
+ # Test if the server is up.
for i in range( 10 ):
# Directly test the app, not the proxy.
conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port )
@@ -273,27 +293,71 @@
else:
raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" )
log.info( "Embedded galaxy web server started" )
- # ---- Load the module to generate installation methods -------------------
+ # ---- Load the modules to generate installation, testing, and uninstallation methods -------------------
import install_and_test_tool_shed_repositories.functional.test_install_repositories as test_install_repositories
+ import functional.test_toolbox as test_toolbox
if galaxy_test_proxy_port:
log.info( "Tests will be run against %s:%s" % ( galaxy_test_host, galaxy_test_proxy_port ) )
else:
log.info( "Tests will be run against %s:%s" % ( galaxy_test_host, galaxy_test_port ) )
success = False
try:
+ # Iterate through a list of repository info dicts.
for repository_dict in get_repositories_to_install():
- test_install_repositories.build_tests( repository_dict )
+ # Generate the method that will install this repository into the running Galaxy instance.
+ test_install_repositories.generate_install_method( repository_dict )
os.environ[ 'GALAXY_INSTALL_TEST_HOST' ] = galaxy_test_host
+ # Configure nose to run the install method as a test.
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
- # Run the tests.
+ # Run the configured install method as a test. This method uses the Galaxy web interface to install the specified
+ # repository, with tool and repository dependencies also selected for installation.
result = run_tests( test_config )
success = result.wasSuccessful()
+ # If the installation succeeds, set up and run functional tests for this repository. This is equivalent to
+ # sh run_functional_tests.sh -installed
+ if success:
+ log.debug( 'Installation of %s succeeded, running any defined functional tests.' % repository_dict[ 'name' ] )
+ # Parse the tool panel config to get the test-data path for this repository.
+ shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file, {} )
+ # Write this to a file, so the functional test framework can find it.
+ file( galaxy_shed_tools_dict, 'w' ).write( simplejson.dumps( shed_tools_dict ) )
+ # Set up the environment so that test.functional.test_toolbox can find the Galaxy server we configured in this framework.
+ os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict
+ os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host
+ os.environ[ 'GALAXY_TEST_PORT' ] = galaxy_test_port
+ # Set the module-level variable 'toolbox', so that test.functional.test_toolbox will generate the appropriate test methods.
+ test_toolbox.toolbox = app.toolbox
+ # Generate the test methods for this installed repository. We need to pass in True here, or it will look
+ # in $GALAXY_HOME/test-data for test data, which may result in missing or invalid test files.
+ test_toolbox.build_tests( testing_shed_tools=True )
+ # Set up nose to run the generated functional tests.
+ test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
+ test_config.configure( sys.argv )
+ # Run the configured tests.
+ result = run_tests( test_config )
+ success = result.wasSuccessful()
+ if success:
+ log.debug( 'Repository %s installed and passed functional tests.' % repository_dict[ 'name' ] )
+ else:
+ log.debug( 'Repository %s installed, but did not pass functional tests.' % repository_dict[ 'name' ] )
+ # Generate an uninstall method for this repository, so that the next repository has a clean environment for testing.
+ test_install_repositories.generate_uninstall_method( repository_dict )
+ # Set up nose to run the generated uninstall method as a functional test.
+ test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
+ test_config.configure( sys.argv )
+ # Run the uninstall method. This method uses the Galaxy web interface to uninstall the previously installed
+ # repository and delete it from disk.
+ result = run_tests( test_config )
+ success = result.wasSuccessful()
+ else:
+ log.debug( 'Repository %s failed to install correctly.' % repository_dict[ 'name' ] )
except:
log.exception( "Failure running tests" )
log.info( "Shutting down" )
# ---- Tear down -----------------------------------------------------------
+ # Gracefully shut down the embedded web server and UniverseApplication.
if server:
log.info( "Shutting down embedded galaxy web server" )
server.server_close()
@@ -304,6 +368,7 @@
app.shutdown()
app = None
log.info( "Embedded galaxy application stopped" )
+ # Clean up test files unless otherwise specified.
if 'GALAXY_INSTALL_TEST_NO_CLEANUP' not in os.environ:
try:
for dir in [ galaxy_test_tmp_dir ]:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
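The loop in the diff above configures nose once for the generated install method and, if installation succeeds, again for the generated functional tests. Below is a minimal, self-contained sketch of that configure-and-run step, using the same nose APIs that appear in the diff; run_generated_tests() and its test_names argument are illustrative stand-ins for the committed script's own run_tests() helper.

import os
import sys

import nose.config
import nose.core
import nose.loader
import nose.plugins.manager

def run_generated_tests( test_names ):
    """Configure nose from the environment and run the named generated test modules."""
    # Same configuration pattern as the committed script: build a Config from the
    # environment and the default plugin manager, then apply command-line arguments.
    test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
    test_config.configure( sys.argv )
    # Load and run the generated tests; the committed script wraps this step in run_tests().
    loader = nose.loader.TestLoader( config=test_config )
    tests = loader.loadTestsFromNames( test_names )
    runner = nose.core.TextTestRunner( verbosity=test_config.verbosity, config=test_config )
    result = runner.run( tests )
    return result.wasSuccessful()

# Hypothetical usage mirroring the install step above:
# success = run_generated_tests( [ 'functional.test_install_repositories' ] )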
commit/galaxy-central: jgoecks: Move grid element initialization to main body to prevent it from happening too early. Pack scripts.
by Bitbucket 20 Feb '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7c3df0bcbc22/
changeset: 7c3df0bcbc22
branch: stable
user: jgoecks
date: 2013-02-17 20:59:02
summary: Move grid element initialization to main body to prevent it from happening too early. Pack scripts.
affected #: 4 files
diff -r 5ac8d92a066edefa4ab63936372ec9bcc689cafc -r 7c3df0bcbc222f478b1e21886fbb384d3dc96f31 static/scripts/galaxy.grids.js
--- a/static/scripts/galaxy.grids.js
+++ b/static/scripts/galaxy.grids.js
@@ -4,18 +4,6 @@
// not appended to the identifier of a nested array.
jQuery.ajaxSettings.traditional = true;
-// Initialize grid objects on load.
-$(document).ready(function() {
- init_grid_elements();
- init_grid_controls();
-
- // Initialize text filters to select text on click and use normal font when user is typing.
- $('input[type=text]').each(function() {
- $(this).click(function() { $(this).select(); } )
- .keyup(function () { $(this).css("font-style", "normal"); });
- });
-});
-
/**
* A Galaxy grid.
*/
diff -r 5ac8d92a066edefa4ab63936372ec9bcc689cafc -r 7c3df0bcbc222f478b1e21886fbb384d3dc96f31 static/scripts/packed/galaxy.grids.js
--- a/static/scripts/packed/galaxy.grids.js
+++ b/static/scripts/packed/galaxy.grids.js
@@ -1,1 +1,1 @@
-jQuery.ajaxSettings.traditional=true;$(document).ready(function(){init_grid_elements();init_grid_controls();$("input[type=text]").each(function(){$(this).click(function(){$(this).select()}).keyup(function(){$(this).css("font-style","normal")})})});var Grid=Backbone.Model.extend({defaults:{url_base:"",async:false,async_ops:[],categorical_filters:[],filters:{},sort_key:null,show_item_checkboxes:false,cur_page:1,num_pages:1,operation:undefined,item_ids:undefined},can_async_op:function(a){return _.indexOf(this.attributes.async_ops,a)!==-1},add_filter:function(e,f,b){if(b){var c=this.attributes.key,a;if(c===null||c===undefined){a=f}else{if(typeof(c)=="string"){if(c=="All"){a=f}else{var d=[];d[0]=c;d[1]=f;a=d}}else{a=c;a.push(f)}}this.attributes.filters[e]=a}else{this.attributes.filters[e]=f}},remove_filter:function(b,e){var a=this.attributes.filters[b];if(a===null||a===undefined){return false}var d=true;if(typeof(a)==="string"){if(a=="All"){d=false}else{delete this.attributes.filters[b]}}else{var c=_.indexOf(a,e);if(c!==-1){a.splice(c,1)}else{d=false}}return d},get_url_data:function(){var a={async:this.attributes.async,sort:this.attributes.sort_key,page:this.attributes.cur_page,show_item_checkboxes:this.attributes.show_item_checkboxes};if(this.attributes.operation){a.operation=this.attributes.operation}if(this.attributes.item_ids){a.id=this.attributes.item_ids}var b=this;_.each(_.keys(b.attributes.filters),function(c){a["f-"+c]=b.attributes.filters[c]});return a}});function init_operation_buttons(){$("input[name=operation]:submit").each(function(){$(this).click(function(){var b=$(this).val();var a=[];$("input[name=id]:checked").each(function(){a.push($(this).val())});do_operation(b,a)})})}function init_grid_controls(){init_operation_buttons();$(".submit-image").each(function(){$(this).mousedown(function(){$(this).addClass("gray-background")});$(this).mouseup(function(){$(this).removeClass("gray-background")})});$(".sort-link").each(function(){$(this).click(function(){set_sort_condition($(this).attr("sort_key"));return false})});$(".page-link > a").each(function(){$(this).click(function(){set_page($(this).attr("page_num"));return false})});$(".categorical-filter > a").each(function(){$(this).click(function(){set_categorical_filter($(this).attr("filter_key"),$(this).attr("filter_val"));return false})});$(".text-filter-form").each(function(){$(this).submit(function(){var d=$(this).attr("column_key");var c=$("#input-"+d+"-filter");var e=c.val();c.val("");add_filter_condition(d,e,true);return false})});var a=$("#input-tags-filter");if(a.length){a.autocomplete(history_tag_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}var b=$("#input-name-filter");if(b.length){b.autocomplete(history_name_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}$(".advanced-search-toggle").each(function(){$(this).click(function(){$("#standard-search").slideToggle("fast");$("#advanced-search").slideToggle("fast");return false})})}function init_grid_elements(){$(".grid").each(function(){var b=$(this).find("input.grid-row-select-checkbox");var a=$(this).find("span.grid-selected-count");var c=function(){a.text($(b).filter(":checked").length)};$(b).each(function(){$(this).change(c)});c()});$(".label").each(function(){var a=$(this).attr("href");if(a!==undefined&&a.indexOf("operation=")!=-1){$(this).click(function(){do_operation_from_href($(this).attr("href"));return false})}});$(".community_rating_star").rating({});make_popup_menus()}function 
go_page_one(){var a=grid.get("cur_page");if(a!==null&&a!==undefined&&a!=="all"){grid.set("cur_page",1)}}function add_filter_condition(c,e,a){if(e===""){return false}grid.add_filter(c,e,a);var d=$("<span>"+e+"<a href='javascript:void(0);'><span class='delete-search-icon' /></a></span>");d.addClass("text-filter-val");d.click(function(){grid.remove_filter(c,e);$(this).remove();go_page_one();update_grid()});var b=$("#"+c+"-filtering-criteria");b.append(d);go_page_one();update_grid()}function add_tag_to_grid_filter(c,b){var a=c+(b!==undefined&&b!==""?":"+b:"");$("#advanced-search").show("fast");add_filter_condition("tags",a,true)}function set_sort_condition(f){var e=grid.get("sort_key");var d=f;if(e.indexOf(f)!==-1){if(e.substring(0,1)!=="-"){d="-"+f}else{}}$(".sort-arrow").remove();var c=(d.substring(0,1)=="-")?"↑":"↓";var a=$("<span>"+c+"</span>").addClass("sort-arrow");var b=$("#"+f+"-header");b.append(a);grid.set("sort_key",d);go_page_one();update_grid()}function set_categorical_filter(b,d){var a=grid.get("categorical_filters")[b],c=grid.get("filters")[b];$("."+b+"-filter").each(function(){var h=$.trim($(this).text());var f=a[h];var g=f[b];if(g==d){$(this).empty();$(this).addClass("current-filter");$(this).append(h)}else{if(g==c){$(this).empty();var e=$("<a href='#'>"+h+"</a>");e.click(function(){set_categorical_filter(b,g)});$(this).removeClass("current-filter");$(this).append(e)}}});grid.add_filter(b,d);go_page_one();update_grid()}function set_page(a){$(".page-link").each(function(){var g=$(this).attr("id"),e=parseInt(g.split("-")[2],10),c=grid.get("cur_page"),f;if(e===a){f=$(this).children().text();$(this).empty();$(this).addClass("inactive-link");$(this).text(f)}else{if(e===c){f=$(this).text();$(this).empty();$(this).removeClass("inactive-link");var d=$("<a href='#'>"+f+"</a>");d.click(function(){set_page(e)});$(this).append(d)}}});var b=true;if(a==="all"){grid.set("cur_page",a);b=false}else{grid.set("cur_page",parseInt(a,10))}update_grid(b)}function do_operation(b,a){b=b.toLowerCase();grid.set({operation:b,item_ids:a});if(grid.can_async_op(b)){update_grid(true)}else{go_to_URL()}}function do_operation_from_href(c){var f=c.split("?");if(f.length>1){var a=f[1];var e=a.split("&");var b=null;var g=-1;for(var d=0;d<e.length;d++){if(e[d].indexOf("operation")!=-1){b=e[d].split("=")[1]}else{if(e[d].indexOf("id")!=-1){g=e[d].split("=")[1]}}}do_operation(b,g);return false}}function go_to_URL(){grid.set("async",false);window.location=grid.get("url_base")+"?"+$.param(grid.get_url_data())}function update_grid(a){if(!grid.get("async")){go_to_URL();return}var b=(grid.get("operation")?"POST":"GET");$(".loading-elt-overlay").show();$.ajax({type:b,url:grid.get("url_base"),data:grid.get_url_data(),error:function(){alert("Grid refresh failed")},success:function(d){var c=d.split("*****");$("#grid-table-body").html(c[0]);$("#grid-table-footer").html(c[1]);$("#grid-table-body").trigger("update");init_grid_elements();init_operation_buttons();make_popup_menus();$(".loading-elt-overlay").hide();var e=$.trim(c[2]);if(e!==""){$("#grid-message").html(e).show();setTimeout(function(){$("#grid-message").hide()},5000)}},complete:function(){grid.set({operation:undefined,item_ids:undefined})}})}function check_all_items(){var a=document.getElementById("check_all"),b=document.getElementsByTagName("input"),d=0,c;if(a.checked===true){for(c=0;c<b.length;c++){if(b[c].name.indexOf("id")!==-1){b[c].checked=true;d++}}}else{for(c=0;c<b.length;c++){if(b[c].name.indexOf("id")!==-1){b[c].checked=false}}}init_grid_elements()};
\ No newline at end of file
+jQuery.ajaxSettings.traditional=true;var Grid=Backbone.Model.extend({defaults:{url_base:"",async:false,async_ops:[],categorical_filters:[],filters:{},sort_key:null,show_item_checkboxes:false,cur_page:1,num_pages:1,operation:undefined,item_ids:undefined},can_async_op:function(a){return _.indexOf(this.attributes.async_ops,a)!==-1},add_filter:function(e,f,b){if(b){var c=this.attributes.key,a;if(c===null||c===undefined){a=f}else{if(typeof(c)=="string"){if(c=="All"){a=f}else{var d=[];d[0]=c;d[1]=f;a=d}}else{a=c;a.push(f)}}this.attributes.filters[e]=a}else{this.attributes.filters[e]=f}},remove_filter:function(b,e){var a=this.attributes.filters[b];if(a===null||a===undefined){return false}var d=true;if(typeof(a)==="string"){if(a=="All"){d=false}else{delete this.attributes.filters[b]}}else{var c=_.indexOf(a,e);if(c!==-1){a.splice(c,1)}else{d=false}}return d},get_url_data:function(){var a={async:this.attributes.async,sort:this.attributes.sort_key,page:this.attributes.cur_page,show_item_checkboxes:this.attributes.show_item_checkboxes};if(this.attributes.operation){a.operation=this.attributes.operation}if(this.attributes.item_ids){a.id=this.attributes.item_ids}var b=this;_.each(_.keys(b.attributes.filters),function(c){a["f-"+c]=b.attributes.filters[c]});return a}});function init_operation_buttons(){$("input[name=operation]:submit").each(function(){$(this).click(function(){var b=$(this).val();var a=[];$("input[name=id]:checked").each(function(){a.push($(this).val())});do_operation(b,a)})})}function init_grid_controls(){init_operation_buttons();$(".submit-image").each(function(){$(this).mousedown(function(){$(this).addClass("gray-background")});$(this).mouseup(function(){$(this).removeClass("gray-background")})});$(".sort-link").each(function(){$(this).click(function(){set_sort_condition($(this).attr("sort_key"));return false})});$(".page-link > a").each(function(){$(this).click(function(){set_page($(this).attr("page_num"));return false})});$(".categorical-filter > a").each(function(){$(this).click(function(){set_categorical_filter($(this).attr("filter_key"),$(this).attr("filter_val"));return false})});$(".text-filter-form").each(function(){$(this).submit(function(){var d=$(this).attr("column_key");var c=$("#input-"+d+"-filter");var e=c.val();c.val("");add_filter_condition(d,e,true);return false})});var a=$("#input-tags-filter");if(a.length){a.autocomplete(history_tag_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}var b=$("#input-name-filter");if(b.length){b.autocomplete(history_name_autocomplete_url,{selectFirst:false,autoFill:false,highlight:false,mustMatch:false})}$(".advanced-search-toggle").each(function(){$(this).click(function(){$("#standard-search").slideToggle("fast");$("#advanced-search").slideToggle("fast");return false})})}function init_grid_elements(){$(".grid").each(function(){var b=$(this).find("input.grid-row-select-checkbox");var a=$(this).find("span.grid-selected-count");var c=function(){a.text($(b).filter(":checked").length)};$(b).each(function(){$(this).change(c)});c()});$(".label").each(function(){var a=$(this).attr("href");if(a!==undefined&&a.indexOf("operation=")!=-1){$(this).click(function(){do_operation_from_href($(this).attr("href"));return false})}});$(".community_rating_star").rating({});make_popup_menus()}function go_page_one(){var a=grid.get("cur_page");if(a!==null&&a!==undefined&&a!=="all"){grid.set("cur_page",1)}}function add_filter_condition(c,e,a){if(e===""){return false}grid.add_filter(c,e,a);var d=$("<span>"+e+"<a 
href='javascript:void(0);'><span class='delete-search-icon' /></a></span>");d.addClass("text-filter-val");d.click(function(){grid.remove_filter(c,e);$(this).remove();go_page_one();update_grid()});var b=$("#"+c+"-filtering-criteria");b.append(d);go_page_one();update_grid()}function add_tag_to_grid_filter(c,b){var a=c+(b!==undefined&&b!==""?":"+b:"");$("#advanced-search").show("fast");add_filter_condition("tags",a,true)}function set_sort_condition(f){var e=grid.get("sort_key");var d=f;if(e.indexOf(f)!==-1){if(e.substring(0,1)!=="-"){d="-"+f}else{}}$(".sort-arrow").remove();var c=(d.substring(0,1)=="-")?"↑":"↓";var a=$("<span>"+c+"</span>").addClass("sort-arrow");var b=$("#"+f+"-header");b.append(a);grid.set("sort_key",d);go_page_one();update_grid()}function set_categorical_filter(b,d){var a=grid.get("categorical_filters")[b],c=grid.get("filters")[b];$("."+b+"-filter").each(function(){var h=$.trim($(this).text());var f=a[h];var g=f[b];if(g==d){$(this).empty();$(this).addClass("current-filter");$(this).append(h)}else{if(g==c){$(this).empty();var e=$("<a href='#'>"+h+"</a>");e.click(function(){set_categorical_filter(b,g)});$(this).removeClass("current-filter");$(this).append(e)}}});grid.add_filter(b,d);go_page_one();update_grid()}function set_page(a){$(".page-link").each(function(){var g=$(this).attr("id"),e=parseInt(g.split("-")[2],10),c=grid.get("cur_page"),f;if(e===a){f=$(this).children().text();$(this).empty();$(this).addClass("inactive-link");$(this).text(f)}else{if(e===c){f=$(this).text();$(this).empty();$(this).removeClass("inactive-link");var d=$("<a href='#'>"+f+"</a>");d.click(function(){set_page(e)});$(this).append(d)}}});var b=true;if(a==="all"){grid.set("cur_page",a);b=false}else{grid.set("cur_page",parseInt(a,10))}update_grid(b)}function do_operation(b,a){b=b.toLowerCase();grid.set({operation:b,item_ids:a});if(grid.can_async_op(b)){update_grid(true)}else{go_to_URL()}}function do_operation_from_href(c){var f=c.split("?");if(f.length>1){var a=f[1];var e=a.split("&");var b=null;var g=-1;for(var d=0;d<e.length;d++){if(e[d].indexOf("operation")!=-1){b=e[d].split("=")[1]}else{if(e[d].indexOf("id")!=-1){g=e[d].split("=")[1]}}}do_operation(b,g);return false}}function go_to_URL(){grid.set("async",false);window.location=grid.get("url_base")+"?"+$.param(grid.get_url_data())}function update_grid(a){if(!grid.get("async")){go_to_URL();return}var b=(grid.get("operation")?"POST":"GET");$(".loading-elt-overlay").show();$.ajax({type:b,url:grid.get("url_base"),data:grid.get_url_data(),error:function(){alert("Grid refresh failed")},success:function(d){var c=d.split("*****");$("#grid-table-body").html(c[0]);$("#grid-table-footer").html(c[1]);$("#grid-table-body").trigger("update");init_grid_elements();init_operation_buttons();make_popup_menus();$(".loading-elt-overlay").hide();var e=$.trim(c[2]);if(e!==""){$("#grid-message").html(e).show();setTimeout(function(){$("#grid-message").hide()},5000)}},complete:function(){grid.set({operation:undefined,item_ids:undefined})}})}function check_all_items(){var a=document.getElementById("check_all"),b=document.getElementsByTagName("input"),d=0,c;if(a.checked===true){for(c=0;c<b.length;c++){if(b[c].name.indexOf("id")!==-1){b[c].checked=true;d++}}}else{for(c=0;c<b.length;c++){if(b[c].name.indexOf("id")!==-1){b[c].checked=false}}}init_grid_elements()};
\ No newline at end of file
diff -r 5ac8d92a066edefa4ab63936372ec9bcc689cafc -r 7c3df0bcbc222f478b1e21886fbb384d3dc96f31 static/scripts/packed/viz/visualization.js
--- a/static/scripts/packed/viz/visualization.js
+++ b/static/scripts/packed/viz/visualization.js
@@ -1,1 +1,1 @@
-define(["libs/underscore","mvc/data","viz/trackster/util","utils/config"],function(s,i,l,p){var a=function(u,x,w,v){$.ajax({url:u,data:w,error:function(){alert("Grid failed")},success:function(y){show_modal("Select datasets for new tracks",y,{Cancel:function(){hide_modal()},Add:function(){var z=[];$("input[name=id]:checked,input[name=ldda_ids]:checked").each(function(){var A={data_type:"track_config",hda_ldda:"hda"},B=$(this).val();if($(this).attr("name")!=="id"){A.hda_ldda="ldda"}z[z.length]=$.ajax({url:x+"/"+B,data:A,dataType:"json"})});$.when.apply($,z).then(function(){var A=(arguments[0] instanceof Array?$.map(arguments,function(B){return B[0]}):[arguments[0]]);v(A)});hide_modal()}})}})};var j=function(u){return("isResolved" in u)};var f=function(u){this.default_font=u!==undefined?u:"9px Monaco, Lucida Console, monospace";this.dummy_canvas=this.new_canvas();this.dummy_context=this.dummy_canvas.getContext("2d");this.dummy_context.font=this.default_font;this.char_width_px=this.dummy_context.measureText("A").width;this.patterns={};this.load_pattern("right_strand","/visualization/strand_right.png");this.load_pattern("left_strand","/visualization/strand_left.png");this.load_pattern("right_strand_inv","/visualization/strand_right_inv.png");this.load_pattern("left_strand_inv","/visualization/strand_left_inv.png")};s.extend(f.prototype,{load_pattern:function(u,y){var v=this.patterns,w=this.dummy_context,x=new Image();x.src=galaxy_paths.attributes.image_path+y;x.onload=function(){v[u]=w.createPattern(x,"repeat")}},get_pattern:function(u){return this.patterns[u]},new_canvas:function(){var u=$("<canvas/>")[0];if(window.G_vmlCanvasManager){G_vmlCanvasManager.initElement(u)}u.manager=this;return u}});var q=Backbone.Model.extend({defaults:{num_elements:20,obj_cache:null,key_ary:null},initialize:function(u){this.clear()},get_elt:function(v){var w=this.attributes.obj_cache,x=this.attributes.key_ary,u=x.indexOf(v);if(u!==-1){if(w[v].stale){x.splice(u,1);delete w[v]}else{this.move_key_to_end(v,u)}}return w[v]},set_elt:function(v,x){var y=this.attributes.obj_cache,z=this.attributes.key_ary,w=this.attributes.num_elements;if(!y[v]){if(z.length>=w){var u=z.shift();delete y[u]}z.push(v)}y[v]=x;return x},move_key_to_end:function(v,u){this.attributes.key_ary.splice(u,1);this.attributes.key_ary.push(v)},clear:function(){this.attributes.obj_cache={};this.attributes.key_ary=[]},size:function(){return this.attributes.key_ary.length}});var d=q.extend({defaults:s.extend({},q.prototype.defaults,{dataset:null,init_data:null,filters_manager:null,data_type:"data",data_mode_compatible:function(u,v){return true},can_subset:function(u){return false}}),initialize:function(u){q.prototype.initialize.call(this);var v=this.get("init_data");if(v){this.add_data(v)}},add_data:function(u){if(this.get("num_elements")<u.length){this.set("num_elements",u.length)}var v=this;s.each(u,function(w){v.set_data(w.region,w)})},data_is_ready:function(){var x=this.get("dataset"),w=$.Deferred(),u=(this.get("data_type")=="raw_data"?"state":this.get("data_type")=="data"?"converted_datasets_state":"error"),v=new l.ServerStateDeferred({ajax_settings:{url:this.get("dataset").url(),data:{hda_ldda:x.get("hda_ldda"),data_type:u},dataType:"json"},interval:5000,success_fn:function(y){return y!=="pending"}});$.when(v.go()).then(function(y){w.resolve(y==="ok"||y==="data")});return w},search_features:function(u){var v=this.get("dataset"),w={query:u,hda_ldda:v.get("hda_ldda"),data_type:"features"};return $.getJSON(v.url(),w)},load_data:function(C,B,v,A){var 
y=this.get("dataset"),x={data_type:this.get("data_type"),chrom:C.get("chrom"),low:C.get("start"),high:C.get("end"),mode:B,resolution:v,hda_ldda:y.get("hda_ldda")};$.extend(x,A);var E=this.get("filters_manager");if(E){var F=[];var u=E.filters;for(var z=0;z<u.length;z++){F.push(u[z].name)}x.filter_cols=JSON.stringify(F)}var w=this,D=$.getJSON(y.url(),x,function(G){w.set_data(C,G)});this.set_data(C,D);return D},get_data:function(A,z,w,y){var B=this.get_elt(A);if(B&&(j(B)||this.get("data_mode_compatible")(B,z))){return B}var C=this.get("key_ary"),v=this.get("obj_cache"),D,u;for(var x=0;x<C.length;x++){D=C[x];u=new g({from_str:D});if(u.contains(A)){B=v[D];if(j(B)||(this.get("data_mode_compatible")(B,z)&&this.get("can_subset")(B))){this.move_key_to_end(D,x);return B}}}return this.load_data(A,z,w,y)},set_data:function(v,u){this.set_elt(v,u)},DEEP_DATA_REQ:"deep",BROAD_DATA_REQ:"breadth",get_more_data:function(C,B,x,A,y){var E=this._mark_stale(C);if(!(E&&this.get("data_mode_compatible")(E,B))){console.log("ERROR: problem with getting more data: current data is not compatible");return}var w=C.get("start");if(y===this.DEEP_DATA_REQ){$.extend(A,{start_val:E.data.length+1})}else{if(y===this.BROAD_DATA_REQ){w=(E.max_high?E.max_high:E.data[E.data.length-1][2])+1}}var D=C.copy().set("start",w);var v=this,z=this.load_data(D,B,x,A),u=$.Deferred();this.set_data(C,u);$.when(z).then(function(F){if(F.data){F.data=E.data.concat(F.data);if(F.max_low){F.max_low=E.max_low}if(F.message){F.message=F.message.replace(/[0-9]+/,F.data.length)}}v.set_data(C,F);u.resolve(F)});return u},can_get_more_detailed_data:function(v){var u=this.get_elt(v);return(u.dataset_type==="bigwig"&&u.data.length<8000)},get_more_detailed_data:function(x,z,v,y,w){var u=this._mark_stale(x);if(!u){console.log("ERROR getting more detailed data: no current data");return}if(!w){w={}}if(u.dataset_type==="bigwig"){w.num_samples=1000*y}else{if(u.dataset_type==="summary_tree"){w.level=Math.min(u.level-1,2)}}return this.load_data(x,z,v,w)},_mark_stale:function(v){var u=this.get_elt(v);if(!u){console.log("ERROR: no data to mark as stale: ",this.get("dataset"),v.toString())}u.stale=true;return u},get_genome_wide_data:function(u){var w=this,y=true,x=s.map(u.get("chroms_info").chrom_info,function(A){var z=w.get_elt(new g({chrom:A.chrom,start:0,end:A.len}));if(!z){y=false}return z});if(y){return x}var v=$.Deferred();$.getJSON(this.get("dataset").url(),{data_type:"genome_data"},function(z){w.add_data(z.data);v.resolve(z.data)});return v},get_elt:function(u){return q.prototype.get_elt.call(this,u.toString())},set_elt:function(v,u){return q.prototype.set_elt.call(this,v.toString(),u)}});var n=d.extend({initialize:function(u){var v=new Backbone.Model();v.urlRoot=u.data_url;this.set("dataset",v)},load_data:function(w,x,u,v){if(u>1){return{data:null}}return d.prototype.load_data.call(this,w,x,u,v)}});var c=Backbone.Model.extend({defaults:{name:null,key:null,chroms_info:null},initialize:function(u){this.id=u.dbkey},get_chroms_info:function(){return this.attributes.chroms_info.chrom_info},get_chrom_region:function(u){var v=s.find(this.get_chroms_info(),function(w){return w.chrom==u});return new g({chrom:v.chrom,end:v.len})}});var g=Backbone.RelationalModel.extend({defaults:{chrom:null,start:0,end:0},initialize:function(v){if(v.from_str){var x=v.from_str.split(":"),w=x[0],u=x[1].split("-");this.set({chrom:w,start:parseInt(u[0],10),end:parseInt(u[1],10)})}},copy:function(){return new 
g({chrom:this.get("chrom"),start:this.get("start"),end:this.get("end")})},length:function(){return this.get("end")-this.get("start")},toString:function(){return this.get("chrom")+":"+this.get("start")+"-"+this.get("end")},toJSON:function(){return{chrom:this.get("chrom"),start:this.get("start"),end:this.get("end")}},compute_overlap:function(B){var v=this.get("chrom"),A=B.get("chrom"),z=this.get("start"),x=B.get("start"),y=this.get("end"),w=B.get("end"),u;if(v&&A&&v!==A){return g.overlap_results.DIF_CHROMS}if(z<x){if(y<x){u=g.overlap_results.BEFORE}else{if(y<=w){u=g.overlap_results.OVERLAP_START}else{u=g.overlap_results.CONTAINS}}}else{if(z>w){u=g.overlap_results.AFTER}else{if(y<=w){u=g.overlap_results.CONTAINED_BY}else{u=g.overlap_results.OVERLAP_END}}}return u},contains:function(u){return this.compute_overlap(u)===g.overlap_results.CONTAINS},overlaps:function(u){return s.intersection([this.compute_overlap(u)],[g.overlap_results.DIF_CHROMS,g.overlap_results.BEFORE,g.overlap_results.AFTER]).length===0}},{overlap_results:{DIF_CHROMS:1000,BEFORE:1001,CONTAINS:1002,OVERLAP_START:1003,OVERLAP_END:1004,CONTAINED_BY:1005,AFTER:1006}});var m=Backbone.Collection.extend({model:g});var e=Backbone.RelationalModel.extend({defaults:{region:null,note:""},relations:[{type:Backbone.HasOne,key:"region",relatedModel:g}]});var r=Backbone.Collection.extend({model:e});var t=i.Dataset.extend({initialize:function(u){this.set("id",u.dataset_id);this.set("config",p.ConfigSettingCollection.from_config_dict(u.prefs));this.get("config").add([{key:"name",value:this.get("name")},{key:"color"}]);var v=this.get("preloaded_data");if(v){v=v.data}else{v=[]}this.set("data_manager",new d({dataset:this,init_data:v}))}});var o=Backbone.RelationalModel.extend({defaults:{title:"",type:""},url:galaxy_paths.get("visualization_url"),save:function(){return $.ajax({url:this.url(),type:"POST",dataType:"json",data:{vis_json:JSON.stringify(this)}})}});var k=o.extend({defaults:s.extend({},o.prototype.defaults,{dbkey:"",tracks:null,bookmarks:null,viewport:null}),relations:[{type:Backbone.HasMany,key:"tracks",relatedModel:t}],add_tracks:function(u){this.get("tracks").add(u)}});var b=Backbone.Model.extend({});var h=Backbone.Router.extend({initialize:function(v){this.view=v.view;this.route(/([\w]+)$/,"change_location");this.route(/([\w]+\:[\d,]+-[\d,]+)$/,"change_location");var u=this;u.view.on("navigate",function(w){u.navigate(w)})},change_location:function(u){this.view.go_to(u)}});return{BackboneTrack:t,BrowserBookmark:e,BrowserBookmarkCollection:r,Cache:q,CanvasManager:f,Genome:c,GenomeDataManager:d,GenomeRegion:g,GenomeRegionCollection:m,GenomeVisualization:k,ReferenceTrackDataManager:n,TrackBrowserRouter:h,TrackConfig:b,Visualization:o,select_datasets:a}});
\ No newline at end of file
+define(["libs/underscore","mvc/data","viz/trackster/util","utils/config"],function(s,i,l,p){var a=function(u,x,w,v){$.ajax({url:u,data:w,error:function(){alert("Grid failed")},success:function(y){show_modal("Select datasets for new tracks",y,{Cancel:function(){hide_modal()},Add:function(){var z=[];$("input[name=id]:checked,input[name=ldda_ids]:checked").each(function(){var A={data_type:"track_config",hda_ldda:"hda"},B=$(this).val();if($(this).attr("name")!=="id"){A.hda_ldda="ldda"}z[z.length]=$.ajax({url:x+"/"+B,data:A,dataType:"json"})});$.when.apply($,z).then(function(){var A=(arguments[0] instanceof Array?$.map(arguments,function(B){return B[0]}):[arguments[0]]);v(A)});hide_modal()}})}})};var j=function(u){return("isResolved" in u)};var f=function(u){this.default_font=u!==undefined?u:"9px Monaco, Lucida Console, monospace";this.dummy_canvas=this.new_canvas();this.dummy_context=this.dummy_canvas.getContext("2d");this.dummy_context.font=this.default_font;this.char_width_px=this.dummy_context.measureText("A").width;this.patterns={};this.load_pattern("right_strand","/visualization/strand_right.png");this.load_pattern("left_strand","/visualization/strand_left.png");this.load_pattern("right_strand_inv","/visualization/strand_right_inv.png");this.load_pattern("left_strand_inv","/visualization/strand_left_inv.png")};s.extend(f.prototype,{load_pattern:function(u,y){var v=this.patterns,w=this.dummy_context,x=new Image();x.src=galaxy_paths.attributes.image_path+y;x.onload=function(){v[u]=w.createPattern(x,"repeat")}},get_pattern:function(u){return this.patterns[u]},new_canvas:function(){var u=$("<canvas/>")[0];if(window.G_vmlCanvasManager){G_vmlCanvasManager.initElement(u)}u.manager=this;return u}});var q=Backbone.Model.extend({defaults:{num_elements:20,obj_cache:null,key_ary:null},initialize:function(u){this.clear()},get_elt:function(v){var w=this.attributes.obj_cache,x=this.attributes.key_ary,u=x.indexOf(v);if(u!==-1){if(w[v].stale){x.splice(u,1);delete w[v]}else{this.move_key_to_end(v,u)}}return w[v]},set_elt:function(v,x){var y=this.attributes.obj_cache,z=this.attributes.key_ary,w=this.attributes.num_elements;if(!y[v]){if(z.length>=w){var u=z.shift();delete y[u]}z.push(v)}y[v]=x;return x},move_key_to_end:function(v,u){this.attributes.key_ary.splice(u,1);this.attributes.key_ary.push(v)},clear:function(){this.attributes.obj_cache={};this.attributes.key_ary=[]},size:function(){return this.attributes.key_ary.length}});var d=q.extend({defaults:s.extend({},q.prototype.defaults,{dataset:null,init_data:null,filters_manager:null,data_type:"data",data_mode_compatible:function(u,v){return true},can_subset:function(u){return false}}),initialize:function(u){q.prototype.initialize.call(this);var v=this.get("init_data");if(v){this.add_data(v)}},add_data:function(u){if(this.get("num_elements")<u.length){this.set("num_elements",u.length)}var v=this;s.each(u,function(w){v.set_data(w.region,w)})},data_is_ready:function(){var x=this.get("dataset"),w=$.Deferred(),u=(this.get("data_type")==="raw_data"?"state":this.get("data_type")==="data"?"converted_datasets_state":"error"),v=new l.ServerStateDeferred({ajax_settings:{url:this.get("dataset").url(),data:{hda_ldda:x.get("hda_ldda"),data_type:u},dataType:"json"},interval:5000,success_fn:function(y){return y!=="pending"}});$.when(v.go()).then(function(y){w.resolve(y==="ok"||y==="data")});return w},search_features:function(u){var v=this.get("dataset"),w={query:u,hda_ldda:v.get("hda_ldda"),data_type:"features"};return 
$.getJSON(v.url(),w)},load_data:function(C,B,v,A){var y=this.get("dataset"),x={data_type:this.get("data_type"),chrom:C.get("chrom"),low:C.get("start"),high:C.get("end"),mode:B,resolution:v,hda_ldda:y.get("hda_ldda")};$.extend(x,A);var E=this.get("filters_manager");if(E){var F=[];var u=E.filters;for(var z=0;z<u.length;z++){F.push(u[z].name)}x.filter_cols=JSON.stringify(F)}var w=this,D=$.getJSON(y.url(),x,function(G){w.set_data(C,G)});this.set_data(C,D);return D},get_data:function(A,z,w,y){var B=this.get_elt(A);if(B&&(j(B)||this.get("data_mode_compatible")(B,z))){return B}var C=this.get("key_ary"),v=this.get("obj_cache"),D,u;for(var x=0;x<C.length;x++){D=C[x];u=new g({from_str:D});if(u.contains(A)){B=v[D];if(j(B)||(this.get("data_mode_compatible")(B,z)&&this.get("can_subset")(B))){this.move_key_to_end(D,x);return B}}}return this.load_data(A,z,w,y)},set_data:function(v,u){this.set_elt(v,u)},DEEP_DATA_REQ:"deep",BROAD_DATA_REQ:"breadth",get_more_data:function(C,B,x,A,y){var E=this._mark_stale(C);if(!(E&&this.get("data_mode_compatible")(E,B))){console.log("ERROR: problem with getting more data: current data is not compatible");return}var w=C.get("start");if(y===this.DEEP_DATA_REQ){$.extend(A,{start_val:E.data.length+1})}else{if(y===this.BROAD_DATA_REQ){w=(E.max_high?E.max_high:E.data[E.data.length-1][2])+1}}var D=C.copy().set("start",w);var v=this,z=this.load_data(D,B,x,A),u=$.Deferred();this.set_data(C,u);$.when(z).then(function(F){if(F.data){F.data=E.data.concat(F.data);if(F.max_low){F.max_low=E.max_low}if(F.message){F.message=F.message.replace(/[0-9]+/,F.data.length)}}v.set_data(C,F);u.resolve(F)});return u},can_get_more_detailed_data:function(v){var u=this.get_elt(v);return(u.dataset_type==="bigwig"&&u.data.length<8000)},get_more_detailed_data:function(x,z,v,y,w){var u=this._mark_stale(x);if(!u){console.log("ERROR getting more detailed data: no current data");return}if(!w){w={}}if(u.dataset_type==="bigwig"){w.num_samples=1000*y}else{if(u.dataset_type==="summary_tree"){w.level=Math.min(u.level-1,2)}}return this.load_data(x,z,v,w)},_mark_stale:function(v){var u=this.get_elt(v);if(!u){console.log("ERROR: no data to mark as stale: ",this.get("dataset"),v.toString())}u.stale=true;return u},get_genome_wide_data:function(u){var w=this,y=true,x=s.map(u.get("chroms_info").chrom_info,function(A){var z=w.get_elt(new g({chrom:A.chrom,start:0,end:A.len}));if(!z){y=false}return z});if(y){return x}var v=$.Deferred();$.getJSON(this.get("dataset").url(),{data_type:"genome_data"},function(z){w.add_data(z.data);v.resolve(z.data)});return v},get_elt:function(u){return q.prototype.get_elt.call(this,u.toString())},set_elt:function(v,u){return q.prototype.set_elt.call(this,v.toString(),u)}});var n=d.extend({initialize:function(u){var v=new Backbone.Model();v.urlRoot=u.data_url;this.set("dataset",v)},load_data:function(w,x,u,v){if(u>1){return{data:null}}return d.prototype.load_data.call(this,w,x,u,v)}});var c=Backbone.Model.extend({defaults:{name:null,key:null,chroms_info:null},initialize:function(u){this.id=u.dbkey},get_chroms_info:function(){return this.attributes.chroms_info.chrom_info},get_chrom_region:function(u){var v=s.find(this.get_chroms_info(),function(w){return w.chrom===u});return new g({chrom:v.chrom,end:v.len})}});var g=Backbone.RelationalModel.extend({defaults:{chrom:null,start:0,end:0},initialize:function(v){if(v.from_str){var x=v.from_str.split(":"),w=x[0],u=x[1].split("-");this.set({chrom:w,start:parseInt(u[0],10),end:parseInt(u[1],10)})}},copy:function(){return new 
g({chrom:this.get("chrom"),start:this.get("start"),end:this.get("end")})},length:function(){return this.get("end")-this.get("start")},toString:function(){return this.get("chrom")+":"+this.get("start")+"-"+this.get("end")},toJSON:function(){return{chrom:this.get("chrom"),start:this.get("start"),end:this.get("end")}},compute_overlap:function(B){var v=this.get("chrom"),A=B.get("chrom"),z=this.get("start"),x=B.get("start"),y=this.get("end"),w=B.get("end"),u;if(v&&A&&v!==A){return g.overlap_results.DIF_CHROMS}if(z<x){if(y<x){u=g.overlap_results.BEFORE}else{if(y<=w){u=g.overlap_results.OVERLAP_START}else{u=g.overlap_results.CONTAINS}}}else{if(z>w){u=g.overlap_results.AFTER}else{if(y<=w){u=g.overlap_results.CONTAINED_BY}else{u=g.overlap_results.OVERLAP_END}}}return u},contains:function(u){return this.compute_overlap(u)===g.overlap_results.CONTAINS},overlaps:function(u){return s.intersection([this.compute_overlap(u)],[g.overlap_results.DIF_CHROMS,g.overlap_results.BEFORE,g.overlap_results.AFTER]).length===0}},{overlap_results:{DIF_CHROMS:1000,BEFORE:1001,CONTAINS:1002,OVERLAP_START:1003,OVERLAP_END:1004,CONTAINED_BY:1005,AFTER:1006}});var m=Backbone.Collection.extend({model:g});var e=Backbone.RelationalModel.extend({defaults:{region:null,note:""},relations:[{type:Backbone.HasOne,key:"region",relatedModel:g}]});var r=Backbone.Collection.extend({model:e});var t=i.Dataset.extend({initialize:function(u){this.set("id",u.dataset_id);this.set("config",p.ConfigSettingCollection.from_config_dict(u.prefs));this.get("config").add([{key:"name",value:this.get("name")},{key:"color"}]);var v=this.get("preloaded_data");if(v){v=v.data}else{v=[]}this.set("data_manager",new d({dataset:this,init_data:v}))}});var o=Backbone.RelationalModel.extend({defaults:{title:"",type:""},url:galaxy_paths.get("visualization_url"),save:function(){return $.ajax({url:this.url(),type:"POST",dataType:"json",data:{vis_json:JSON.stringify(this)}})}});var k=o.extend({defaults:s.extend({},o.prototype.defaults,{dbkey:"",tracks:null,bookmarks:null,viewport:null}),relations:[{type:Backbone.HasMany,key:"tracks",relatedModel:t}],add_tracks:function(u){this.get("tracks").add(u)}});var b=Backbone.Model.extend({});var h=Backbone.Router.extend({initialize:function(v){this.view=v.view;this.route(/([\w]+)$/,"change_location");this.route(/([\w]+\:[\d,]+-[\d,]+)$/,"change_location");var u=this;u.view.on("navigate",function(w){u.navigate(w)})},change_location:function(u){this.view.go_to(u)}});return{BackboneTrack:t,BrowserBookmark:e,BrowserBookmarkCollection:r,Cache:q,CanvasManager:f,Genome:c,GenomeDataManager:d,GenomeRegion:g,GenomeRegionCollection:m,GenomeVisualization:k,ReferenceTrackDataManager:n,TrackBrowserRouter:h,TrackConfig:b,Visualization:o,select_datasets:a}});
\ No newline at end of file
diff -r 5ac8d92a066edefa4ab63936372ec9bcc689cafc -r 7c3df0bcbc222f478b1e21886fbb384d3dc96f31 templates/grid_base.mako
--- a/templates/grid_base.mako
+++ b/templates/grid_base.mako
@@ -129,6 +129,19 @@
cur_page: ${cur_page_num},
num_pages: ${num_pages}
});
+
+ // Initialize grid objects on load.
+ // FIXME: use a grid view object eventually.
+ $(document).ready(function() {
+ init_grid_elements();
+ init_grid_controls();
+
+ // Initialize text filters to select text on click and use normal font when user is typing.
+ $('input[type=text]').each(function() {
+ $(this).click(function() { $(this).select(); } )
+ .keyup(function () { $(this).css("font-style", "normal"); });
+ });
+ });
</script></%def>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/5ac8d92a066e/
changeset: 5ac8d92a066e
branch: stable
user: inithello
date: 2013-02-20 14:56:36
summary: Fix missing import in repository controller.
affected #: 1 file
diff -r d5896e8a60fa948f1713d4b5e30126a12a958679 -r 5ac8d92a066edefa4ab63936372ec9bcc689cafc lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -1,4 +1,4 @@
-import os, logging, re, tempfile, shutil, ConfigParser
+import os, logging, re, tempfile, shutil, ConfigParser, string
from time import gmtime, strftime
from datetime import date, datetime
from galaxy import util, web
https://bitbucket.org/galaxy/galaxy-central/commits/efebd50fd708/
changeset: efebd50fd708
user: inithello
date: 2013-02-20 14:56:36
summary: Fix missing import in repository controller.
affected #: 1 file
diff -r 1895646e8ce0ed905f7fef8a8fc184ae1606a006 -r efebd50fd708aa3af73f954e4da68e88f2bced9a lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -1,4 +1,4 @@
-import os, logging, re, tempfile, shutil, ConfigParser
+import os, logging, re, tempfile, shutil, ConfigParser, string
from time import gmtime, strftime
from datetime import date, datetime
from galaxy import util, web
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: carlfeberhard: More reporting for buildbot corrections
by Bitbucket 19 Feb '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/1895646e8ce0/
changeset: 1895646e8ce0
user: carlfeberhard
date: 2013-02-20 02:00:27
summary: More reporting for buildbot corrections
affected #: 2 files
diff -r bff6388644ba9b2b5ccb102ce01380778fdefdfe -r 1895646e8ce0ed905f7fef8a8fc184ae1606a006 lib/galaxy/app.py
--- a/lib/galaxy/app.py
+++ b/lib/galaxy/app.py
@@ -20,6 +20,9 @@
from galaxy.openid.providers import OpenIDProviders
from galaxy.tools.data_manager.manager import DataManagers
+import logging
+log = logging.getLogger( __name__ )
+
class UniverseApplication( object ):
"""Encapsulates the state of a Universe application"""
def __init__( self, **kwargs ):
@@ -41,6 +44,8 @@
self.tool_shed_registry = galaxy.tool_shed.tool_shed_registry.Registry( self.config.root, self.config.tool_sheds_config )
else:
self.tool_shed_registry = None
+ log.debug( 'self.config.tool_sheds_config: %s, self.tool_shed_registry: %s',
+ self.config.tool_sheds_config, self.tool_shed_registry )
# Initialize database / check for appropriate schema version. # If this
# is a new installation, we'll restrict the tool migration messaging.
from galaxy.model.migrate.check import create_or_verify_database
diff -r bff6388644ba9b2b5ccb102ce01380778fdefdfe -r 1895646e8ce0ed905f7fef8a8fc184ae1606a006 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -277,7 +277,6 @@
os.makedirs( dir )
except OSError:
pass
- log.info( "Database connection:", database_connection )
# ---- Build Application --------------------------------------------------
app = None
@@ -293,7 +292,7 @@
kwargs[ 'object_store' ] = 'distributed'
kwargs[ 'distributed_object_store_config_file' ] = 'distributed_object_store_conf.xml.sample'
# Build the Universe Application
- log.debug( 'app.kwargs:\n%s', pprint.pformat( kwargs ) )
+ print 'app.kwargs:\n%s' %( pprint.pformat( kwargs ) )
app = UniverseApplication( job_queue_workers = 5,
id_secret = 'changethisinproductiontoo',
template_path = "templates",
@@ -321,6 +320,7 @@
running_functional_tests=True,
**kwargs )
log.info( "Embedded Universe application started" )
+
# ---- Run webserver ------------------------------------------------------
server = None
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
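The removed log.info( "Database connection:", database_connection ) call passes an extra argument without a matching %s placeholder, so the standard library logging module reports a string-formatting error when the record is emitted instead of printing the value. A minimal sketch of the lazy %-style form used by the log.debug() call added to lib/galaxy/app.py follows; the connection string is a placeholder.

import logging

logging.basicConfig( level=logging.DEBUG )
log = logging.getLogger( __name__ )

database_connection = 'sqlite:///./database/universe.sqlite'  # placeholder value

# Correct: the argument is interpolated into the %s placeholder only when the record is emitted.
log.info( 'Database connection: %s', database_connection )

# Problematic (the call removed above): no %s placeholder for the extra argument, so the
# logging module reports "TypeError: not all arguments converted during string formatting".
# log.info( 'Database connection:', database_connection )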
commit/galaxy-central: inithello: Fix for tool shed functional test 1060 failing when all tests are run.
by Bitbucket 19 Feb '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/bff6388644ba/
changeset: bff6388644ba
user: inithello
date: 2013-02-19 23:04:04
summary: Fix for tool shed functional test 1060 failing when all tests are run.
affected #: 1 file
diff -r bc0114ce0f7b5224cd4ef58d1dd2ef8b30c77a69 -r bff6388644ba9b2b5ccb102ce01380778fdefdfe test/tool_shed/functional/test_0060_workflows.py
--- a/test/tool_shed/functional/test_0060_workflows.py
+++ b/test/tool_shed/functional/test_0060_workflows.py
@@ -88,6 +88,8 @@
strings_displayed=[] )
workflow = file( self.get_filename( 'filtering_workflow/Workflow_for_0060_filter_workflow_repository.ga' ), 'r' ).read()
workflow = workflow.replace( '__TEST_TOOL_SHED_URL__', self.url.replace( 'http://', '' ) )
+ workflow = workflow.replace( 'Workflow for 0060_filter_workflow_repository',
+ 'New workflow for 0060_filter_workflow_repository' )
workflow_filepath = self.generate_temp_path( 'test_0060', additional_paths=[ 'filtering_workflow_2' ] )
if not os.path.exists( workflow_filepath ):
os.makedirs( workflow_filepath )
@@ -106,7 +108,7 @@
def test_0030_check_workflow_repository( self ):
"""Check for strings on the manage page for the filtering_workflow_0060 repository."""
repository = test_db_util.get_repository_by_name_and_owner( workflow_repository_name, common.test_user_1_name )
- strings_displayed = [ 'Workflows', 'Workflow for 0060_filter', '0.1' ]
+ strings_displayed = [ 'Workflows', 'New workflow for 0060_filter', '0.1' ]
strings_not_displayed = [ 'Valid tools', 'Invalid tools' ]
self.display_manage_repository_page( repository, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed )
def test_0035_verify_repository_metadata( self ):
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
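The fix above avoids a workflow name collision when tests 0060 and 1060 run in the same session by renaming the workflow inside the exported .ga file before it is uploaded a second time. A minimal sketch of that rewrite step follows; the paths and tool shed address are hypothetical placeholders, where the test framework itself resolves them via self.get_filename() and self.generate_temp_path().

import os

source_path = 'test-data/filtering_workflow/Workflow_for_0060_filter_workflow_repository.ga'  # hypothetical path
target_dir = '/tmp/test_0060/filtering_workflow_2'                                            # hypothetical temp dir

workflow = open( source_path, 'r' ).read()
# Point the workflow at the running tool shed and give the second copy a distinct name.
workflow = workflow.replace( '__TEST_TOOL_SHED_URL__', 'localhost:9009' )  # placeholder host:port
workflow = workflow.replace( 'Workflow for 0060_filter_workflow_repository',
                             'New workflow for 0060_filter_workflow_repository' )
if not os.path.exists( target_dir ):
    os.makedirs( target_dir )
open( os.path.join( target_dir, 'Workflow_for_0060_filter_workflow_repository.ga' ), 'w' ).write( workflow )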
commit/galaxy-central: carlfeberhard: functional_tests: fix import
by Bitbucket 19 Feb '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/bc0114ce0f7b/
changeset: bc0114ce0f7b
user: carlfeberhard
date: 2013-02-19 22:21:16
summary: functional_tests: fix import
affected #: 1 file
diff -r 7bf93ff108fe4a62ee3084d84a77fa9b71c8b1f7 -r bc0114ce0f7b5224cd4ef58d1dd2ef8b30c77a69 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -40,6 +40,7 @@
import nose.loader
import nose.plugins.manager
+import pprint
log = logging.getLogger( "functional_tests.py" )
default_galaxy_test_host = "localhost"
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: inithello: Enhancement to tool shed functional tests for repositories that only contain a workflow.
by Bitbucket 19 Feb '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7bf93ff108fe/
changeset: 7bf93ff108fe
user: inithello
date: 2013-02-19 22:01:18
summary: Enhancement to tool shed functional tests for repositories that only contain a workflow.
affected #: 2 files
diff -r 305680a482c3096b082daffa5ff665f5f60d1528 -r 7bf93ff108fe4a62ee3084d84a77fa9b71c8b1f7 test/tool_shed/functional/test_0060_workflows.py
--- a/test/tool_shed/functional/test_0060_workflows.py
+++ b/test/tool_shed/functional/test_0060_workflows.py
@@ -4,11 +4,17 @@
repository_name = 'filtering_0060'
repository_description="Galaxy's filtering tool for test 0060"
repository_long_description="Long description of Galaxy's filtering tool for test 0060"
+
workflow_filename = 'Workflow_for_0060_filter_workflow_repository.ga'
workflow_name = 'Workflow for 0060_filter_workflow_repository'
+
category_name = 'Test 0060 Workflow Features'
category_description = 'Test 0060 for workflow features'
+workflow_repository_name = 'filtering_workflow_0060'
+workflow_repository_description="Workflow referencing the filtering tool for test 0060"
+workflow_repository_long_description="Long description of the workflow for test 0060"
+
class TestToolShedWorkflowFeatures( ShedTwillTestCase ):
'''Test valid and invalid workflows.'''
def test_0000_initiate_users( self ):
@@ -69,7 +75,41 @@
strings_displayed=[],
strings_not_displayed=[] )
self.load_workflow_image_in_tool_shed( repository, workflow_name, strings_not_displayed=[ '#EBBCB2' ] )
- def test_0025_verify_repository_metadata( self ):
+ def test_0025_create_repository_with_only_workflow( self ):
+ """Create and populate the filtering_workflow_0060 repository"""
+ category = self.create_category( name=category_name, description=category_description )
+ self.logout()
+ self.login( email=common.test_user_1_email, username=common.test_user_1_name )
+ self.get_or_create_repository( name=workflow_repository_name,
+ description=workflow_repository_description,
+ long_description=workflow_repository_long_description,
+ owner=common.test_user_1_name,
+ category_id=self.security.encode_id( category.id ),
+ strings_displayed=[] )
+ workflow = file( self.get_filename( 'filtering_workflow/Workflow_for_0060_filter_workflow_repository.ga' ), 'r' ).read()
+ workflow = workflow.replace( '__TEST_TOOL_SHED_URL__', self.url.replace( 'http://', '' ) )
+ workflow_filepath = self.generate_temp_path( 'test_0060', additional_paths=[ 'filtering_workflow_2' ] )
+ if not os.path.exists( workflow_filepath ):
+ os.makedirs( workflow_filepath )
+ file( os.path.join( workflow_filepath, workflow_filename ), 'w+' ).write( workflow )
+ repository = test_db_util.get_repository_by_name_and_owner( workflow_repository_name, common.test_user_1_name )
+ self.upload_file( repository,
+ filename=workflow_filename,
+ filepath=workflow_filepath,
+ valid_tools_only=True,
+ uncompress_file=False,
+ remove_repo_files_not_in_tar=False,
+ commit_message='Uploaded filtering workflow.',
+ strings_displayed=[],
+ strings_not_displayed=[] )
+ self.load_workflow_image_in_tool_shed( repository, workflow_name, strings_displayed=[ '#EBBCB2' ] )
+ def test_0030_check_workflow_repository( self ):
+ """Check for strings on the manage page for the filtering_workflow_0060 repository."""
+ repository = test_db_util.get_repository_by_name_and_owner( workflow_repository_name, common.test_user_1_name )
+ strings_displayed = [ 'Workflows', 'Workflow for 0060_filter', '0.1' ]
+ strings_not_displayed = [ 'Valid tools', 'Invalid tools' ]
+ self.display_manage_repository_page( repository, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed )
+ def test_0035_verify_repository_metadata( self ):
'''Verify that resetting the metadata does not change it.'''
repository = test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
self.verify_unchanged_repository_metadata( repository )
diff -r 305680a482c3096b082daffa5ff665f5f60d1528 -r 7bf93ff108fe4a62ee3084d84a77fa9b71c8b1f7 test/tool_shed/functional/test_1060_install_repository_with_workflow.py
--- a/test/tool_shed/functional/test_1060_install_repository_with_workflow.py
+++ b/test/tool_shed/functional/test_1060_install_repository_with_workflow.py
@@ -1,15 +1,21 @@
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
import tool_shed.base.test_db_util as test_db_util
-import logging
-log = logging.getLogger(__name__)
+
repository_name = 'filtering_0060'
repository_description="Galaxy's filtering tool for test 0060"
repository_long_description="Long description of Galaxy's filtering tool for test 0060"
+
workflow_filename = 'Workflow_for_0060_filter_workflow_repository.ga'
workflow_name = 'Workflow for 0060_filter_workflow_repository'
+second_workflow_name = 'New workflow for 0060_filter_workflow_repository'
+
category_name = 'Test 0060 Workflow Features'
category_description = 'Test 0060 for workflow features'
+workflow_repository_name = 'filtering_workflow_0060'
+workflow_repository_description="Workflow referencing the filtering tool for test 0060"
+workflow_repository_long_description="Long description of the workflow for test 0060"
+
class ToolWithRepositoryDependencies( ShedTwillTestCase ):
'''Test installing a repository with repository dependencies.'''
def test_0000_initiate_users( self ):
@@ -87,3 +93,61 @@
self.display_all_workflows( strings_not_displayed=[ 'Workflow for 0060_filter_workflow_repository' ] )
self.import_workflow( installed_repository, workflow_name )
self.display_all_workflows( strings_displayed=[ 'Workflow for 0060_filter_workflow_repository' ] )
+ def test_0020_create_filter_workflow_repository( self ):
+ '''Create, if necessary, a filtering repository with only a workflow.'''
+ category = self.create_category( name=category_name, description=category_description )
+ self.logout()
+ self.login( email=common.test_user_1_email, username=common.test_user_1_name )
+ repository = self.get_or_create_repository( name=workflow_repository_name,
+ description=workflow_repository_description,
+ long_description=workflow_repository_long_description,
+ owner=common.test_user_1_name,
+ category_id=self.security.encode_id( category.id ),
+ strings_displayed=[] )
+ if self.repository_is_new( repository ):
+ workflow = file( self.get_filename( 'filtering_workflow/Workflow_for_0060_filter_workflow_repository.ga' ), 'r' ).read()
+ workflow = workflow.replace( '__TEST_TOOL_SHED_URL__', self.url.replace( 'http://', '' ) )
+ workflow = workflow.replace( 'Workflow for 0060_filter_workflow_repository',
+ 'New workflow for 0060_filter_workflow_repository' )
+ workflow_filepath = self.generate_temp_path( 'test_0060', additional_paths=[ 'filtering_workflow_2' ] )
+ if not os.path.exists( workflow_filepath ):
+ os.makedirs( workflow_filepath )
+ file( os.path.join( workflow_filepath, workflow_filename ), 'w+' ).write( workflow )
+ self.upload_file( repository,
+ filename=workflow_filename,
+ filepath=workflow_filepath,
+ valid_tools_only=True,
+ uncompress_file=False,
+ remove_repo_files_not_in_tar=False,
+ commit_message='Uploaded filtering workflow.',
+ strings_displayed=[],
+ strings_not_displayed=[] )
+ self.load_workflow_image_in_tool_shed( repository,
+ 'New workflow for 0060_filter_workflow_repository',
+ strings_displayed=[ '#EBBCB2' ] )
+ def test_0025_install_repository_with_workflow( self ):
+ """Browse the available tool sheds in this Galaxy instance and preview the filtering workflow repository."""
+ self.preview_repository_in_tool_shed( workflow_repository_name,
+ common.test_user_1_name,
+ strings_displayed=[ 'filtering_workflow_0060', 'Workflows' ],
+ strings_not_displayed=[ 'Valid tools', 'Invalid tools' ] )
+ self.galaxy_logout()
+ self.galaxy_login( email=common.admin_email, username=common.admin_username )
+ self.install_repository( workflow_repository_name,
+ common.test_user_1_name,
+ 'Test 0060 Workflow Features',
+ install_tool_dependencies=False,
+ includes_tools=False )
+ def test_0030_import_workflow_from_installed_repository( self ):
+ '''Import the workflow from the installed repository and verify that it appears in the list of all workflows.'''
+ installed_repository = test_db_util.get_installed_repository_by_name_owner( workflow_repository_name, common.test_user_1_name )
+ self.display_installed_workflow_image( installed_repository,
+ 'New workflow for 0060_filter_workflow_repository',
+ strings_displayed=[ '#EBD9B2' ],
+ strings_not_displayed=[ '#EBBCB2' ] )
+ self.display_all_workflows( strings_not_displayed=[ 'New workflow for 0060_filter_workflow_repository' ] )
+ self.import_workflow( installed_repository,
+ 'New workflow for 0060_filter_workflow_repository',
+ strings_displayed=[ 'New workflow for 0060_filter_workflow_repository' ] )
+ self.display_all_workflows( strings_displayed=[ 'New workflow for 0060_filter_workflow_repository' ] )
+
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
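In the diff above, test_0020_create_filter_workflow_repository only uploads the workflow when the repository has no content yet, so the 1060 install test can run either standalone or after the 0060 tool shed tests. A minimal sketch of that guard follows; ensure_workflow_repository() and upload_workflow() are hypothetical helpers standing in for the ShedTwillTestCase methods used in the diff, with a simplified argument list.

def ensure_workflow_repository( test_case, name, owner ):
    """Create the workflow-only repository if needed and return it (hypothetical helper)."""
    repository = test_case.get_or_create_repository( name=name, owner=owner )
    if test_case.repository_is_new( repository ):
        # Upload the renamed workflow only once, so repeated runs and different
        # test orderings see the same repository contents.
        test_case.upload_workflow( repository )
    return repository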