1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/1a56c4b28c6b/
changeset: 1a56c4b28c6b
user: rmarenco
date: 2012-09-07 15:54:31
summary: Modified the good files instead of the auto-generated ones
affected #: 5 files
diff -r 95d98a26518349216a335a5860eb370ab74375fd -r 1a56c4b28c6bbaf1b2f1b0efec5fecce7bd454bb database/compiled_templates/webapps/galaxy/base_panels.mako.py
--- a/database/compiled_templates/webapps/galaxy/base_panels.mako.py
+++ /dev/null
@@ -1,415 +0,0 @@
-# -*- encoding:ascii -*-
-from mako import runtime, filters, cache
-UNDEFINED = runtime.UNDEFINED
-__M_dict_builtin = dict
-__M_locals_builtin = locals
-_magic_number = 6
-_modified_time = 1346972736.51736
-_template_filename=u'templates/webapps/galaxy/base_panels.mako'
-_template_uri=u'/webapps/galaxy/base_panels.mako'
-_template_cache=cache.Cache(__name__, _modified_time)
-_source_encoding='ascii'
-_exports = ['masthead', 'javascripts', 'title']
-
-
-def _mako_get_namespace(context, name):
- try:
- return context.namespaces[(__name__, name)]
- except KeyError:
- _mako_generate_namespaces(context)
- return context.namespaces[(__name__, name)]
-def _mako_generate_namespaces(context):
- pass
-def _mako_inherit(template, context):
- _mako_generate_namespaces(context)
- return runtime._inherit_from(context, u'/base_panels.mako', _template_uri)
-def render_body(context,**pageargs):
- context.caller_stack._push_frame()
- try:
- __M_locals = __M_dict_builtin(pageargs=pageargs)
- __M_writer = context.writer()
- # SOURCE LINE 1
- __M_writer(u'\n\n')
- # SOURCE LINE 4
- __M_writer(u'\n\n')
- # SOURCE LINE 8
- __M_writer(u'\n\n')
- # SOURCE LINE 232
- __M_writer(u'\n')
- return ''
- finally:
- context.caller_stack._pop_frame()
-
-
-def render_masthead(context):
- context.caller_stack._push_frame()
- try:
- AssertionError = context.get('AssertionError', UNDEFINED)
- h = context.get('h', UNDEFINED)
- app = context.get('app', UNDEFINED)
- util = context.get('util', UNDEFINED)
- def tab(id,display,href,target='_parent',visible=True,extra_class='',menu_options=None):
- context.caller_stack._push_frame()
- try:
- self = context.get('self', UNDEFINED)
- len = context.get('len', UNDEFINED)
- __M_writer = context.writer()
- # SOURCE LINE 19
- __M_writer(u'\n')
- # SOURCE LINE 22
- __M_writer(u' \n ')
- # SOURCE LINE 23
-
- cls = ""
- a_cls = ""
- extra = ""
- if extra_class:
- cls += " " + extra_class
- if self.active_view == id:
- cls += " active"
- if menu_options:
- cls += " dropdown"
- a_cls += " dropdown-toggle"
- extra = "<b class='caret'></b>"
- style = ""
- if not visible:
- style = "display: none;"
-
-
- # SOURCE LINE 38
- __M_writer(u'\n <li class="')
- # SOURCE LINE 39
- __M_writer(unicode(cls))
- __M_writer(u'" style="')
- __M_writer(unicode(style))
- __M_writer(u'">\n')
- # SOURCE LINE 40
- if href:
- # SOURCE LINE 41
- __M_writer(u' <a class="')
- __M_writer(unicode(a_cls))
- __M_writer(u'" data-toggle="dropdown" target="')
- __M_writer(unicode(target))
- __M_writer(u'" href="')
- __M_writer(unicode(href))
- __M_writer(u'">')
- __M_writer(unicode(display))
- __M_writer(unicode(extra))
- __M_writer(u'</a>\n')
- # SOURCE LINE 42
- else:
- # SOURCE LINE 43
- __M_writer(u' <a class="')
- __M_writer(unicode(a_cls))
- __M_writer(u'" data-toggle="dropdown">')
- __M_writer(unicode(display))
- __M_writer(unicode(extra))
- __M_writer(u'</a>\n')
- pass
- # SOURCE LINE 45
- if menu_options:
- # SOURCE LINE 46
- __M_writer(u' <ul class="dropdown-menu">\n')
- # SOURCE LINE 47
- for menu_item in menu_options:
- # SOURCE LINE 48
- if not menu_item:
- # SOURCE LINE 49
- __M_writer(u' <li class="divider"></li>\n')
- # SOURCE LINE 50
- else:
- # SOURCE LINE 51
- __M_writer(u' <li>\n')
- # SOURCE LINE 52
- if len ( menu_item ) == 1:
- # SOURCE LINE 53
- __M_writer(u' ')
- __M_writer(unicode(menu_item[0]))
- __M_writer(u'\n')
- # SOURCE LINE 54
- elif len ( menu_item ) == 2:
- # SOURCE LINE 55
- __M_writer(u' ')
- name, link = menu_item
-
- __M_writer(u'\n <a href="')
- # SOURCE LINE 56
- __M_writer(unicode(link))
- __M_writer(u'">')
- __M_writer(unicode(name))
- __M_writer(u'</a>\n')
- # SOURCE LINE 57
- else:
- # SOURCE LINE 58
- __M_writer(u' ')
- name, link, target = menu_item
-
- __M_writer(u'\n <a target="')
- # SOURCE LINE 59
- __M_writer(unicode(target))
- __M_writer(u'" href="')
- __M_writer(unicode(link))
- __M_writer(u'">')
- __M_writer(unicode(name))
- __M_writer(u'</a>\n')
- pass
- # SOURCE LINE 61
- __M_writer(u' </li>\n')
- pass
- pass
- # SOURCE LINE 64
- __M_writer(u' </ul>\n')
- pass
- # SOURCE LINE 66
- __M_writer(u' </li>\n ')
- return ''
- finally:
- context.caller_stack._pop_frame()
- trans = context.get('trans', UNDEFINED)
- _ = context.get('_', UNDEFINED)
- __M_writer = context.writer()
- # SOURCE LINE 11
- __M_writer(u'\n\n')
- # SOURCE LINE 14
- __M_writer(u' <div style="position: relative; right: -50%; float: left;">\n <div style="display: block; position: relative; right: 50%;">\n\n <ul class="nav" border="0" cellspacing="0">\n \n ')
- # SOURCE LINE 67
- __M_writer(u'\n\n')
- # SOURCE LINE 70
- __M_writer(u' ')
- __M_writer(unicode(tab( "analysis", _("Analyze Data"), h.url_for( controller='/root', action='index' ) )))
- __M_writer(u'\n \n')
- # SOURCE LINE 73
- __M_writer(u' ')
- __M_writer(unicode(tab( "workflow", _("Workflow"), h.url_for( controller='/workflow', action='index' ) )))
- __M_writer(u'\n \n')
- # SOURCE LINE 76
- __M_writer(u' ')
-
- menu_options = [
- [ _('Data Libraries'), h.url_for( controller='/library', action='index') ],
- None,
- [ _('Published Histories'), h.url_for( controller='/history', action='list_published' ) ],
- [ _('Published Workflows'), h.url_for( controller='/workflow', action='list_published' ) ],
- [ _('Published Visualizations'), h.url_for( controller='/visualization', action='list_published' ) ],
- [ _('Published Pages'), h.url_for( controller='/page', action='list_published' ) ]
- ]
- tab( "shared", _("Shared Data"), h.url_for( controller='/library', action='index'), menu_options=menu_options )
-
-
- # SOURCE LINE 86
- __M_writer(u'\n \n')
- # SOURCE LINE 89
- __M_writer(u' ')
-
- menu_options = [
- [ _('Sequencing Requests'), h.url_for( controller='/requests', action='index' ) ],
- [ _('Find Samples'), h.url_for( controller='/requests', action='find_samples_index' ) ],
- [ _('Help'), app.config.get( "lims_doc_url", "http://main.g2.bx.psu.edu/u/rkchak/p/sts" ), "galaxy_main" ]
- ]
- tab( "lab", "Lab", None, menu_options=menu_options, visible=( trans.user and ( trans.user.requests or trans.app.security_agent.get_accessible_request_types( trans, trans.user ) ) ) )
-
-
- # SOURCE LINE 96
- __M_writer(u'\n\n')
- # SOURCE LINE 99
- __M_writer(u' ')
-
- menu_options = [
- [_('New Visualization'), h.url_for( controller='/tracks', action='index' ) ],
- [_('Saved Visualizations'), h.url_for( controller='/visualization', action='list' ) ]
- ]
- tab( "visualization", _("Visualization"), h.url_for( controller='/visualization', action='list'), menu_options=menu_options )
-
-
- # SOURCE LINE 105
- __M_writer(u'\n\n')
- # SOURCE LINE 108
- if app.config.get_bool( 'enable_cloud_launch', False ):
- # SOURCE LINE 109
- __M_writer(u' ')
-
- menu_options = [
- [_('New Cloud Cluster'), h.url_for( controller='/cloudlaunch', action='index' ) ],
- ]
- tab( "cloud", _("Cloud"), h.url_for( controller='/cloudlaunch', action='index'), menu_options=menu_options )
-
-
- # SOURCE LINE 114
- __M_writer(u'\n')
- pass
- # SOURCE LINE 116
- __M_writer(u'\n')
- # SOURCE LINE 118
- __M_writer(u' ')
- __M_writer(unicode(tab( "admin", "Admin", h.url_for( controller='/admin', action='index' ), extra_class="admin-only", visible=( trans.user and app.config.is_admin_user( trans.user ) ) )))
- __M_writer(u'\n \n')
- # SOURCE LINE 121
- __M_writer(u' ')
-
- menu_options = [
- [_('Galaxy Q&A'), app.config.get( "qa_url", "http://slyfox.bx.psu.edu:8080/" ), "galaxy_main" ],
- [_('Support'), app.config.get( "support_url", "http://wiki.g2.bx.psu.edu/Support" ), "_blank" ],
- [_('Galaxy Wiki'), app.config.get( "wiki_url", "http://wiki.g2.bx.psu.edu/" ), "_blank" ],
- [_('Video tutorials (screencasts)'), app.config.get( "screencasts_url", "http://galaxycast.org" ), "_blank" ],
- [_('How to Cite Galaxy'), app.config.get( "citation_url", "http://wiki.g2.bx.psu.edu/Citing%20Galaxy" ), "_blank" ]
- ]
- if app.config.get( 'terms_url', None ) is not None:
- menu_options.append( [_('Terms and Conditions'), app.config.get( 'terms_url', None ), '_blank'] )
- tab( "help", _("Help"), None, menu_options=menu_options)
-
-
- # SOURCE LINE 132
- __M_writer(u'\n \n')
- # SOURCE LINE 135
- __M_writer(u' ')
-
- # Menu for user who is not logged in.
- menu_options = [ [ _("Login"), h.url_for( controller='/user', action='login' ), "galaxy_main" ] ]
- if app.config.allow_user_creation:
- menu_options.append( [ _("Register"), h.url_for( controller='/user', action='create', cntrller='user', webapp='galaxy' ), "galaxy_main" ] )
- extra_class = "loggedout-only"
- visible = ( trans.user == None )
- tab( "user", _("User"), None, visible=visible, menu_options=menu_options )
-
- # Menu for user who is logged in.
- if trans.user:
- email = trans.user.email
- else:
- email = ""
- menu_options = [ [ '<a>Logged in as <span id="user-email">%s</span></a>' % email ] ]
- if app.config.use_remote_user:
- if app.config.remote_user_logout_href:
- menu_options.append( [ _('Logout'), app.config.remote_user_logout_href, "_top" ] )
- else:
- menu_options.append( [ _('Preferences'), h.url_for( controller='/user', action='index', cntrller='user', webapp='galaxy' ), "galaxy_main" ] )
- menu_options.append( [ 'Custom Builds', h.url_for( controller='/user', action='dbkeys' ), "galaxy_main" ] )
- if app.config.require_login:
- logout_url = h.url_for( controller='/root', action='index', m_c='user', m_a='logout', webapp='galaxy' )
- else:
- logout_url = h.url_for( controller='/user', action='logout', webapp='galaxy' )
- menu_options.append( [ 'Logout', logout_url, "_top" ] )
- menu_options.append( None )
- menu_options.append( [ _('Saved Histories'), h.url_for( controller='/history', action='list' ), "galaxy_main" ] )
- menu_options.append( [ _('Saved Datasets'), h.url_for( controller='/dataset', action='list' ), "galaxy_main" ] )
- menu_options.append( [ _('Saved Pages'), h.url_for( controller='/page', action='list' ), "_top" ] )
- menu_options.append( [ _('API Keys'), h.url_for( controller='/user', action='api_keys', cntrller='user', webapp='galaxy' ), "galaxy_main" ] )
- if app.config.use_remote_user:
- menu_options.append( [ _('Public Name'), h.url_for( controller='/user', action='edit_username', cntrller='user', webapp='galaxy' ), "galaxy_main" ] )
-
- extra_class = "loggedin-only"
- visible = ( trans.user != None )
- tab( "user", "User", None, visible=visible, menu_options=menu_options )
-
-
- # SOURCE LINE 172
- __M_writer(u'\n \n')
- # SOURCE LINE 176
- __M_writer(u' </ul>\n\n </div>\n </div>\n \n')
- # SOURCE LINE 182
- __M_writer(u' <div class="title">\n <a href="')
- # SOURCE LINE 183
- __M_writer(unicode(h.url_for( app.config.get( 'logo_url', '/' ) )))
- __M_writer(u'">\n <img border="0" src="')
- # SOURCE LINE 184
- __M_writer(unicode(h.url_for('/static/images/galaxyIcon_noText.png')))
- __M_writer(u'">\n Galaxy\n')
- # SOURCE LINE 186
- if app.config.brand:
- # SOURCE LINE 187
- __M_writer(u' <span>/ ')
- __M_writer(unicode(app.config.brand))
- __M_writer(u'</span>\n')
- pass
- # SOURCE LINE 189
- __M_writer(u' </a>\n </div>\n\n')
- # SOURCE LINE 193
- __M_writer(u' ')
-
- bar_style = "quota-meter-bar"
- usage = 0
- percent = 0
- quota = None
- try:
- usage = trans.app.quota_agent.get_usage( trans=trans )
- quota = trans.app.quota_agent.get_quota( trans.user )
- percent = trans.app.quota_agent.get_percent( usage=usage, quota=quota )
- if percent is not None:
- if percent >= 100:
- bar_style += " quota-meter-bar-error"
- elif percent >= 85:
- bar_style += " quota-meter-bar-warn"
- else:
- percent = 0
- except AssertionError:
- pass # Probably no history yet
- tooltip = None
- if not trans.user and quota and trans.app.config.allow_user_creation:
- if trans.app.quota_agent.default_registered_quota is None or trans.app.quota_agent.default_unregistered_quota < trans.app.quota_agent.default_registered_quota:
- tooltip = "Your disk quota is %s. You can increase your quota by registering a Galaxy account." % util.nice_size( quota )
-
-
- # SOURCE LINE 215
- __M_writer(u'\n\n <div class="quota-meter-container">\n')
- # SOURCE LINE 218
- if tooltip:
- # SOURCE LINE 219
- __M_writer(u' <div id="quota-meter" class="quota-meter tooltip" title="')
- __M_writer(unicode(tooltip))
- __M_writer(u'">\n')
- # SOURCE LINE 220
- else:
- # SOURCE LINE 221
- __M_writer(u' <div id="quota-meter" class="quota-meter">\n')
- pass
- # SOURCE LINE 223
- __M_writer(u' <div id="quota-meter-bar" class="')
- __M_writer(unicode(bar_style))
- __M_writer(u'" style="width: ')
- __M_writer(unicode(percent))
- __M_writer(u'px;"></div>\n')
- # SOURCE LINE 224
- if quota is not None:
- # SOURCE LINE 225
- __M_writer(u' <div id="quota-meter-text" class="quota-meter-text">Using ')
- __M_writer(unicode(percent))
- __M_writer(u'%</div>\n')
- # SOURCE LINE 226
- else:
- # SOURCE LINE 227
- __M_writer(u' <div id="quota-meter-text" class="quota-meter-text">Using ')
- __M_writer(unicode(util.nice_size( usage )))
- __M_writer(u'</div>\n')
- pass
- # SOURCE LINE 229
- __M_writer(u' </div>\n </div>\n \n')
- return ''
- finally:
- context.caller_stack._pop_frame()
-
-
-def render_javascripts(context):
- context.caller_stack._push_frame()
- try:
- parent = context.get('parent', UNDEFINED)
- __M_writer = context.writer()
- # SOURCE LINE 6
- __M_writer(u'\n ')
- # SOURCE LINE 7
- __M_writer(unicode(parent.javascripts()))
- __M_writer(u'\n')
- return ''
- finally:
- context.caller_stack._pop_frame()
-
-
-def render_title(context):
- context.caller_stack._push_frame()
- try:
- __M_writer = context.writer()
- # SOURCE LINE 4
- __M_writer(u'Galaxy')
- return ''
- finally:
- context.caller_stack._pop_frame()
-
-
diff -r 95d98a26518349216a335a5860eb370ab74375fd -r 1a56c4b28c6bbaf1b2f1b0efec5fecce7bd454bb templates/webapps/community/base_panels.mako
--- a/templates/webapps/community/base_panels.mako
+++ b/templates/webapps/community/base_panels.mako
@@ -71,7 +71,8 @@
## Help tab.
<%
- menu_options = [
+ menu_options = [
+ [_('Galaxy Q&A'), app.config.get( "qa_url", "http://slyfox.bx.psu.edu:8081/" ), "_blank" ],
[_('Support'), app.config.get( "support_url", "http://wiki.g2.bx.psu.edu/Support" ), "_blank" ],
[_('Tool shed wiki'), app.config.get( "wiki_url", "http://wiki.g2.bx.psu.edu/Tool%20Shed" ), "_blank" ],
[_('Galaxy wiki'), app.config.get( "wiki_url", "http://wiki.g2.bx.psu.edu/" ), "_blank" ],
diff -r 95d98a26518349216a335a5860eb370ab74375fd -r 1a56c4b28c6bbaf1b2f1b0efec5fecce7bd454bb templates/webapps/galaxy/base_panels.mako
--- a/templates/webapps/galaxy/base_panels.mako
+++ b/templates/webapps/galaxy/base_panels.mako
@@ -120,7 +120,7 @@
## Help tab.
<%
menu_options = [
- [_('Galaxy Q&A'), app.config.get( "q&a", "http://slyfox.bx.psu.edu:8081/" ), "galaxy_main" ],
+ [_('Galaxy Q&A'), app.config.get( "qa_url", "http://slyfox.bx.psu.edu:8081/" ), "galaxy_main" ],
[_('Support'), app.config.get( "support_url", "http://wiki.g2.bx.psu.edu/Support" ), "_blank" ],
[_('Galaxy Wiki'), app.config.get( "wiki_url", "http://wiki.g2.bx.psu.edu/" ), "_blank" ],
[_('Video tutorials (screencasts)'), app.config.get( "screencasts_url", "http://galaxycast.org" ), "_blank" ],
diff -r 95d98a26518349216a335a5860eb370ab74375fd -r 1a56c4b28c6bbaf1b2f1b0efec5fecce7bd454bb universe_wsgi.ini
--- a/universe_wsgi.ini
+++ /dev/null
@@ -1,753 +0,0 @@
-#
-# Galaxy is configured by default to be useable in a single-user development
-# environment. To tune the application for a multi-user production
-# environment, see the documentation at:
-#
-# http://wiki.g2.bx.psu.edu/Admin/Config/Performance/Production%20Server
-#
-
-# Throughout this sample configuration file, except where stated otherwise,
-# uncommented values override the default if left unset, whereas commented
-# values are set to the default value.
-# Examples of many of these options are explained in more detail in the wiki:
-#
-# http://wiki.g2.bx.psu.edu/Admin/Config
-#
-# Config hackers are encouraged to check there before asking for help.
-
-# ---- HTTP Server ----------------------------------------------------------
-
-# Configuration of the internal HTTP server.
-
-[server:main]
-
-# The internal HTTP server to use. Currently only Paste is provided. This
-# option is required.
-use = egg:Paste#http
-
-# The port on which to listen.
-#port = 8080
-
-# The address on which to listen. By default, only listen to localhost (Galaxy
-# will not be accessible over the network). Use '0.0.0.0' to listen on all
-# available network interfaces.
-#host = 127.0.0.1
-
-# Use a threadpool for the web server instead of creating a thread for each
-# request.
-use_threadpool = True
-
-# Number of threads in the web server thread pool.
-#threadpool_workers = 10
-
-# ---- Filters --------------------------------------------------------------
-
-# Filters sit between Galaxy and the HTTP server.
-
-# These filters are disabled by default. They can be enabled with
-# 'filter-with' in the [app:main] section below.
-
-# Define the gzip filter.
-[filter:gzip]
-use = egg:Paste#gzip
-
-# Define the proxy-prefix filter.
-[filter:proxy-prefix]
-use = egg:PasteDeploy#prefix
-prefix = /galaxy
-
-# ---- Galaxy ---------------------------------------------------------------
-
-# Configuration of the Galaxy application.
-
-[app:main]
-
-# -- Application and filtering
-
-# The factory for the WSGI application. This should not be changed.
-paste.app_factory = galaxy.web.buildapp:app_factory
-
-# If not running behind a proxy server, you may want to enable gzip compression
-# to decrease the size of data transferred over the network. If using a proxy
-# server, please enable gzip compression there instead.
-#filter-with = gzip
-
-# If running behind a proxy server and Galaxy is served from a subdirectory,
-# enable the proxy-prefix filter and set the prefix in the
-# [filter:proxy-prefix] section above.
-#filter-with = proxy-prefix
-
-# If proxy-prefix is enabled and you're running more than one Galaxy instance
-# behind one hostname, you will want to set this to the same path as the prefix
-# in the filter above. This value becomes the "path" attribute set in the
-# cookie so the cookies from each instance will not clobber each other.
-#cookie_path = None
-
-# -- Database
-
-# By default, Galaxy uses a SQLite database at 'database/universe.sqlite'. You
-# may use a SQLAlchemy connection string to specify an external database
-# instead. This string takes many options which are explained in detail in the
-# config file documentation.
-#database_connection = sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE
-
-# If the server logs errors about not having enough database pool connections,
-# you will want to increase these values, or consider running more Galaxy
-# processes.
-#database_engine_option_pool_size = 5
-#database_engine_option_max_overflow = 10
-
-# If using MySQL and the server logs the error "MySQL server has gone away",
-# you will want to set this to some positive value (7200 should work).
-#database_engine_option_pool_recycle = -1
-
-# If large database query results are causing memory or response time issues in
-# the Galaxy process, leave the result on the server instead. This option is
-# only available for PostgreSQL and is highly recommended.
-#database_engine_option_server_side_cursors = False
-
-# Create only one connection to the database per thread, to reduce the
-# connection overhead. Recommended when not using SQLite:
-#database_engine_option_strategy = threadlocal
-
-# Log all database transactions, can be useful for debugging and performance
-# profiling. Logging is done via Python's 'logging' module under the qualname
-# 'galaxy.model.orm.logging_connection_proxy'
-#database_query_profiling_proxy = False
-
-# -- Files and directories
-
-# Path where genome builds are stored. This defaults to tool-data/genome
-#genome_data_path = tool-data/genome
-
-# URL for rsync server to download pre-built indexes.
-#rsync_url = rsync://scofield.bx.psu.edu/indexes
-
-# Dataset files are stored in this directory.
-#file_path = database/files
-
-# Temporary files are stored in this directory.
-#new_file_path = database/tmp
-
-# Tool config files, defines what tools are available in Galaxy.
-# Tools can be locally developed or installed from Galaxy tool sheds.
-#tool_config_file = tool_conf.xml,shed_tool_conf.xml
-
-# Default path to the directory containing the tools defined in tool_conf.xml.
-# Other tool config files must include the tool_path as an attribute in the <toolbox> tag.
-#tool_path = tools
-
-# Path to the directory in which managed tool dependencies are placed. To use
-# the dependency system, see the documentation at:
-# http://wiki.g2.bx.psu.edu/Admin/Config/Tool%20Dependencies
-#tool_dependency_dir = None
-
-# Enable automatic polling of relative tool sheds to see if any updates
-# are available for installed repositories. Ideally only one Galaxy
-# server process should be able to check for repository updates. The
-# setting for hours_between_check should be an integer between 1 and 24.
-#enable_tool_shed_check = False
-#hours_between_check = 12
-
-# Directory where data used by tools is located, see the samples in that
-# directory and the wiki for help:
-# http://wiki.g2.bx.psu.edu/Admin/Data%20Integration
-#tool_data_path = tool-data
-
-# Directory where chrom len files are kept, currently mainly used by trackster
-#len_file_path = tool-data/shared/ucsc/chrom
-
-# Datatypes config file, defines what data (file) types are available in
-# Galaxy.
-#datatypes_config_file = datatypes_conf.xml
-
-# Each job is given a unique empty directory as its current working directory.
-# This option defines in what parent directory those directories will be
-# created.
-#job_working_directory = database/job_working_directory
-
-# If using a cluster, Galaxy will write job scripts and stdout/stderr to this
-# directory.
-#cluster_files_directory = database/pbs
-
-# External service types config file, defines what types of external_services configurations
-# are available in Galaxy.
-#external_service_type_config_file = external_service_types_conf.xml
-
-# Path to the directory containing the external_service_types defined in the config.
-#external_service_type_path = external_service_types
-
-# Tools with a number of outputs not known until runtime can write these
-# outputs to a directory for collection by Galaxy when the job is done.
-# Previously, this directory was new_file_path, but using one global directory
-# can cause performance problems, so using job_working_directory ('.' or cwd
-# when a job is run) is encouraged. By default, both are checked to avoid
-# breaking existing tools.
-#collect_outputs_from = new_file_path,job_working_directory
-
-# -- Mail and notification
-
-# Galaxy sends mail for various things: Subscribing users to the mailing list
-# if they request it, emailing password resets, notification from the Galaxy
-# Sample Tracking system, and reporting dataset errors. To do this, it needs
-# to send mail through an SMTP server, which you may define here (host:port).
-# Galaxy will automatically try STARTTLS but will continue upon failure.
-#smtp_server = None
-
-# If your SMTP server requires a username and password, you can provide them
-# here (password in cleartext here, but if your server supports STARTTLS it
-# will be sent over the network encrypted).
-#smtp_username = None
-#smtp_password = None
-
-# On the user registration form, users may choose to join the mailing list.
-# This is the address of the list they'll be subscribed to.
-#mailing_join_addr = galaxy-announce-join(a)bx.psu.edu
-
-# Datasets in an error state include a link to report the error. Those reports
-# will be sent to this address. Error reports are disabled if no address is set.
-#error_email_to = None
-
-# -- Display sites
-
-# Galaxy can display data at various external browsers. These options specify
-# which browsers should be available. URLs and builds available at these
-# browsers are defined in the specifield files.
-
-# UCSC browsers: tool-data/shared/ucsc/ucsc_build_sites.txt
-#ucsc_display_sites = main,test,archaea,ucla
-
-# GBrowse servers: tool-data/shared/gbrowse/gbrowse_build_sites.txt
-#gbrowse_display_sites = modencode,sgd_yeast,tair,wormbase,wormbase_ws120,wormbase_ws140,wormbase_ws170,wormbase_ws180,wormbase_ws190,wormbase_ws200,wormbase_ws204,wormbase_ws210,wormbase_ws220,wormbase_ws225
-
-# GeneTrack servers: tool-data/shared/genetrack/genetrack_sites.txt
-#genetrack_display_sites = main,test
-
-# If use_remote_user = True, display application servers will be denied access
-# to Galaxy and so displaying datasets in these sites will fail.
-# display_servers contains a list of hostnames which should be allowed to
-# bypass security to display datasets. Please be aware that there are security
-# implications if this is allowed. More details (including required changes to
-# the proxy server config) are available in the Apache proxy documentation on
-# the wiki.
-#
-# The list of servers in this sample config are for the UCSC Main, Test and
-# Archaea browsers, but the default if left commented is to not allow any
-# display sites to bypass security (you must uncomment the line below to allow
-# them).
-#display_servers = hgw1.cse.ucsc.edu,hgw2.cse.ucsc.edu,hgw3.cse.ucsc.edu,hgw4.cse.ucsc.edu,hgw…
-
-# -- Next gen LIMS interface on top of existing Galaxy Sample/Request management code.
-
-use_nglims = False
-nglims_config_file = tool-data/nglims.yaml
-
-# -- UI Localization
-
-# Append "/{brand}" to the "Galaxy" text in the masthead.
-#brand = None
-
-# The URL linked by the "Galaxy/brand" text.
-#logo_url = /
-
-# The URL linked by the "Galaxy Wiki" link in the "Help" menu.
-#wiki_url = http://wiki.g2.bx.psu.edu/
-
-# The URL linked by the "Support" link in the "Help" menu.
-#support_url = http://wiki.g2.bx.psu.edu/Support
-
-# The URL linked by the "How to Cite..." link in the "Help" menu.
-#citation_url = http://wiki.g2.bx.psu.edu/Citing%20Galaxy
-
-# The URL linked by the "Terms and Conditions" link in the "Help" menu, as well
-# as on the user registration and login forms.
-#terms_url = None
-
-# The URL linked by the "Galaxy Q&A" link in the "Help" menu
-qa_url = http://slyfox.bx.psu.edu:8080/
-
-# Serve static content, which must be enabled if you're not serving it via a
-# proxy server. These options should be self explanatory and so are not
-# documented individually. You can use these paths (or ones in the proxy
-# server) to point to your own styles.
-static_enabled = True
-static_cache_time = 360
-static_dir = %(here)s/static/
-static_images_dir = %(here)s/static/images
-static_favicon_dir = %(here)s/static/favicon.ico
-static_scripts_dir = %(here)s/static/scripts/
-static_style_dir = %(here)s/static/june_2007_style/blue
-static_robots_txt = %(here)s/static/robots.txt
-
-# Pack javascript at launch (/static/scripts/*.js)
-# This only happens if the modified timestamp of the source .js is newer
-# than the version (if it exists) in /static/scripts/packed/
-# Note that this requires java > 1.4 for executing yuicompressor.jar
-#pack_scripts = False
-
-# Enable Cloud Launch
-
-#enable_cloud_launch = False
-
-# -- Advanced proxy features
-
-# For help on configuring the Advanced proxy features, see:
-# http://usegalaxy.org/production
-
-# Apache can handle file downloads (Galaxy-to-user) via mod_xsendfile. Set
-# this to True to inform Galaxy that mod_xsendfile is enabled upstream.
-#apache_xsendfile = False
-
-# The same download handling can be done by nginx using X-Accel-Redirect. This
-# should be set to the path defined in the nginx config as an internal redirect
-# with access to Galaxy's data files (see documentation linked above).
-#nginx_x_accel_redirect_base = False
-
-# nginx can make use of mod_zip to create zip files containing multiple library
-# files. If using X-Accel-Redirect, this can be the same value as that option.
-#nginx_x_archive_files_base = False
-
-# If using compression in the upstream proxy server, use this option to disable
-# gzipping of library .tar.gz and .zip archives, since the proxy server will do
-# it faster on the fly.
-#upstream_gzip = False
-
-# nginx can also handle file uploads (user-to-Galaxy) via nginx_upload_module.
-# Configuration for this is complex and explained in detail in the
-# documentation linked above. The upload store is a temporary directory in
-# which files uploaded by the upload module will be placed.
-#nginx_upload_store = False
-
-# This value overrides the action set on the file upload form, e.g. the web
-# path where the nginx_upload_module has been configured to intercept upload
-# requests.
-#nginx_upload_path = False
-
-# -- Logging and Debugging
-
-# Verbosity of console log messages. Acceptable values can be found here:
-# http://docs.python.org/library/logging.html#logging-levels
-#log_level = DEBUG
-
-# Print database operations to the server log (warning, quite verbose!).
-#database_engine_option_echo = False
-
-# Print database pool operations to the server log (warning, quite verbose!).
-#database_engine_option_echo_pool = False
-
-# Turn on logging of application events and some user events to the database.
-#log_events = True
-
-# Turn on logging of user actions to the database. Actions currently logged are
-# grid views, tool searches, and use of "recently" used tools menu. The
-# log_events and log_actions functionality will eventually be merged.
-#log_actions = True
-
-# Sanitize All HTML Tool Output
-# By default, all tool output served as 'text/html' will be sanitized
-# thoroughly. This can be disabled if you have special tools that require
-# unaltered output.
-#sanitize_all_html = True
-
-# Debug enables access to various config options useful for development and
-# debugging: use_lint, use_profile, use_printdebug and use_interactive. It
-# also causes the files used by PBS/SGE (submission script, output, and error)
-# to remain on disk after the job is complete. Debug mode is disabled if
-# commented, but is uncommented by default in the sample config.
-debug = True
-
-# Check for WSGI compliance.
-#use_lint = False
-
-# Run the Python profiler on each request.
-#use_profile = False
-
-# Intercept print statements and show them on the returned page.
-#use_printdebug = True
-
-# Enable live debugging in your browser. This should NEVER be enabled on a
-# public site. Enabled in the sample config for development.
-use_interactive = True
-
-# Write thread status periodically to 'heartbeat.log', (careful, uses disk
-# space rapidly!). Useful to determine why your processes may be consuming a
-# lot of CPU.
-#use_heartbeat = False
-
-# Enable the memory debugging interface (careful, negatively impacts server
-# performance).
-#use_memdump = False
-
-# -- Data Libraries
-
-# These library upload options are described in much more detail in the wiki:
-# http://wiki.g2.bx.psu.edu/Admin/Data%20Libraries/Uploading%20Library%20Files
-
-# Add an option to the library upload form which allows administrators to
-# upload a directory of files.
-#library_import_dir = None
-
-# Add an option to the library upload form which allows authorized
-# non-administrators to upload a directory of files. The configured directory
-# must contain sub-directories named the same as the non-admin user's Galaxy
-# login ( email ). The non-admin user is restricted to uploading files or
-# sub-directories of files contained in their directory.
-#user_library_import_dir = None
-
-# Add an option to the admin library upload tool allowing admins to paste
-# filesystem paths to files and directories in a box, and these paths will be
-# added to a library. Set to True to enable. Please note the security
-# implication that this will give Galaxy Admins access to anything your Galaxy
-# user has access to.
-#allow_library_path_paste = False
-
-# Users may choose to download multiple files from a library in an archive. By
-# default, Galaxy allows users to select from a few different archive formats
-# if testing shows that Galaxy is able to create files using these formats.
-# Specific formats can be disabled with this option, separate more than one
-# format with commas. Available formats are currently 'zip', 'gz', and 'bz2'.
-#disable_library_comptypes =
-
-# Some sequencer integration features in beta allow you to automatically
-# transfer datasets. This is done using a lightweight transfer manager which
-# runs outside of Galaxy (but is spawned by it automatically). Galaxy will
-# communicate with this manager over the port specified here.
-#transfer_manager_port = 8163
-
-# Search data libraries with whoosh
-#enable_whoosh_library_search = True
-# Whoosh indexes are stored in this directory.
-#whoosh_index_dir = database/whoosh_indexes
-
-# Search data libraries with lucene
-#enable_lucene_library_search = False
-# maximum file size to index for searching, in MB
-#fulltext_max_size = 500
-#fulltext_noindex_filetypes=bam,sam,wig,bigwig,fasta,fastq,fastqsolexa,fastqillumina,fastqsanger
-# base URL of server providing search functionality using lucene
-#fulltext_url = http://localhost:8081
-
-# -- Users and Security
-
-# Galaxy encodes various internal values when these values will be output in
-# some format (for example, in a URL or cookie). You should set a key to be
-# used by the algorithm that encodes and decodes these values. It can be any
-# string. If left unchanged, anyone could construct a cookie that would grant
-# them access to others' sessions.
-#id_secret = USING THE DEFAULT IS NOT SECURE!
-
-# User authentication can be delegated to an upstream proxy server (usually
-# Apache). The upstream proxy should set a REMOTE_USER header in the request.
-# Enabling remote user disables regular logins. For more information, see:
-# http://wiki.g2.bx.psu.edu/Admin/Config/Apache%20Proxy
-#use_remote_user = False
-
-# If use_remote_user is enabled and your external authentication
-# method just returns bare usernames, set a default mail domain to be appended
-# to usernames, to become your Galaxy usernames (email addresses).
-#remote_user_maildomain = None
-
-# If use_remote_user is enabled, you can set this to a URL that will log your
-# users out.
-#remote_user_logout_href = None
-
-# Administrative users - set this to a comma-separated list of valid Galaxy
-# users (email addresses). These users will have access to the Admin section
-# of the server, and will have access to create users, groups, roles,
-# libraries, and more. For more information, see:
-# http://wiki.g2.bx.psu.edu/Admin/Interface
-#admin_users = None
-
-# Force everyone to log in (disable anonymous access).
-#require_login = False
-
-# Allow unregistered users to create new accounts (otherwise, they will have to
-# be created by an admin).
-#allow_user_creation = True
-
-# Allow administrators to delete accounts.
-#allow_user_deletion = False
-
-# Allow administrators to log in as other users (useful for debugging)
-#allow_user_impersonation = False
-
-# Allow users to remove their datasets from disk immediately (otherwise,
-# datasets will be removed after a time period specified by an administrator in
-# the cleanup scripts run via cron)
-#allow_user_dataset_purge = False
-
-# By default, users' data will be public, but setting this to True will cause
-# it to be private. Does not affect existing users and data, only ones created
-# after this option is set. Users may still change their default back to
-# public.
-#new_user_dataset_access_role_default_private = False
-
-# -- Beta features
-
-# Object store mode (valid options are: disk, s3, swift, distributed, hierarchical)
-#object_store = disk
-#os_access_key = <your cloud object store access key>
-#os_secret_key = <your cloud object store secret key>
-#os_bucket_name = <name of an existing object store bucket or container>
-# If using 'swift' object store, you must specify the following connection properties
-#os_host = swift.rc.nectar.org.au
-#os_port = 8888
-#os_is_secure = False
-#os_conn_path = /
-# Reduced redundancy can be used only with the 's3' object store
-#os_use_reduced_redundancy = False
-# Size (in GB) that the cache used by object store should be limited to.
-# If the value is not specified, the cache size will be limited only by the
-# file system size. The file system location of the cache is considered the
-# configuration of the ``file_path`` directive defined above.
-#object_store_cache_size = 100
-
-# Configuration file for the distributed object store, if object_store =
-# distributed. See the sample at distributed_object_store_conf.xml.sample
-#distributed_object_store_config_file = None
-
-# Enable Galaxy to communicate directly with a sequencer
-#enable_sequencer_communication = False
-
-# Enable authentication via OpenID. Allows users to log in to their Galaxy
-# account by authenticating with an OpenID provider.
-#enable_openid = False
-#openid_config_file = openid_conf.xml
-
-# Optional list of email addresses of API users who can make calls on behalf of
-# other users
-#api_allow_run_as = None
-
-# Enable tool tags (associating tools with tags). This has its own option
-# since its implementation has a few performance implications on startup for
-# large servers.
-#enable_tool_tags = False
-
-# Enable a feature when running workflows. When enabled, default datasets
-# are selected for "Set at Runtime" inputs from the history such that the
-# same input will not be selected twice, unless there are more inputs than
-# compatible datasets in the history.
-# When False, the most recently added compatible item in the history will
-# be used for each "Set at Runtime" input, independent of others in the Workflow
-#enable_unique_workflow_defaults = False
-
-# The URL to the myExperiment instance being used (omit scheme but include port)
-#myexperiment_url = www.myexperiment.org:80
-
-# Enable Galaxy's "Upload via FTP" interface. You'll need to install and
-# configure an FTP server (we've used ProFTPd since it can use Galaxy's
-# database for authentication) and set the following two options.
-
-# This should point to a directory containing subdirectories matching users'
-# email addresses, where Galaxy will look for files.
-#ftp_upload_dir = None
-
-# This should be the hostname of your FTP server, which will be provided to
-# users in the help text.
-#ftp_upload_site = None
-
-# Enable enforcement of quotas. Quotas can be set from the Admin interface.
-#enable_quotas = False
-
-# Enable a feature when running workflows. When enabled, default datasets
-# are selected for "Set at Runtime" inputs from the history such that the
-# same input will not be selected twice, unless there are more inputs than
-# compatible datasets in the history.
-# When False, the most recently added compatible item in the history will
-# be used for each "Set at Runtime" input, independent of others in the Workflow
-#enable_unique_workflow_defaults = False
-
-# -- Job Execution
-
-# To increase performance of job execution and the web interface, you can
-# separate Galaxy into multiple processes. There are more than one way to do
-# this, and they are explained in detail in the documentation:
-#
-# http://wiki.g2.bx.psu.edu/Admin/Config/Performance/Web%20Application%20Scal…
-#
-# By default, Galaxy manages and executes jobs from within a single process and
-# notifies itself of new jobs via in-memory queues. If you change job_manager
-# and job_handlers from their default values, notification will instead be done
-# using the `state` and `handler` columns of the job table in the database.
-
-# Identify the server_name (the string following server: at the top of this
-# file) which should be designated as the job manager (only one):
-#job_manager = main
-
-# Identify the server_name(s) which should be designated as job handlers
-# (responsible for starting, tracking, finishing, and cleaning up jobs) as a
-# comma-separated list.
-#job_handlers = main
-
-# By default, a handler from job_handlers will be selected at random if the
-# tool to run does not specify a handler below in [galaxy:tool_handlers]. If you
-# want certain handlers to only handle jobs for tools/params explicitly
-# assigned below, use default_job_handlers to specify which handlers should be
-# used for jobs without explicit handlers.
-#default_job_handlers = main
-
-# This enables splitting of jobs into tasks, if specified by the particular tool config.
-# This is a new feature and not recommended for production servers yet.
-#use_tasked_jobs = False
-#local_task_queue_workers = 2
-
-# Enable job recovery (if Galaxy is restarted while cluster jobs are running,
-# it can "recover" them when it starts). This is not safe to use if you are
-# running more than one Galaxy server using the same database.
-#enable_job_recovery = True
-
-# Setting metadata on job outputs in a separate process (or if using a
-# cluster, on the cluster). Thanks to Python's Global Interpreter Lock and the
-# hefty expense that setting metadata incurs, your Galaxy process may become
-# unresponsive when this operation occurs internally.
-#set_metadata_externally = False
-
-# Although it is fairly reliable, setting metadata can occasionally fail. In
-# these instances, you can choose to retry setting it internally or leave it in
-# a failed state (since retrying internally may cause the Galaxy process to be
-# unresponsive). If this option is set to False, the user will be given the
-# option to retry externally, or set metadata manually (when possible).
-#retry_metadata_internally = True
-
-# If (for example) you run on a cluster and your datasets (by default,
-# database/files/) are mounted read-only, this option will override tool output
-# paths to write outputs to the working directory instead, and the job manager
-# will move the outputs to their proper place in the dataset directory on the
-# Galaxy server after the job completes.
-#outputs_to_working_directory = False
-
-# If your network filesystem's caching prevents the Galaxy server from seeing
-# the job's stdout and stderr files when it completes, you can retry reading
-# these files. The job runner will retry the number of times specified below,
-# waiting 1 second between tries. For NFS, you may want to try the -noac mount
-# option (Linux) or -actimeo=0 (Solaris).
-#retry_job_output_collection = 0
-
-# Clean up various bits of jobs left on the filesystem after completion. These
-# bits include the job working directory, external metadata temporary files,
-# and DRM stdout and stderr files (if using a DRM). Possible values are:
-# always, onsuccess, never
-#cleanup_job = always
-
-# Number of concurrent jobs to run (local job runner)
-#local_job_queue_workers = 5
-
-# Jobs can be killed after a certain amount of execution time. Format is in
-# hh:mm:ss. Currently only implemented for PBS.
-#job_walltime = None
-
-# Jobs can be killed if any of their outputs grow over a certain size (in
-# bytes). 0 for no limit.
-#output_size_limit = 0
-
-# Jobs can be held back from submission to a runner if a user already has more
-# jobs queued or running than the number specified below. This prevents a
-# single user from stuffing the queue and preventing other users from being
-# able to run jobs.
-#user_job_limit = None
-
-# Clustering Galaxy is not a straightforward process and requires some
-# pre-configuration. See the wiki before attempting to set any of these
-# options:
-# http://wiki.g2.bx.psu.edu/Admin/Config/Performance/Cluster
-
-# Comma-separated list of job runners to start. local is always started. If
-# left commented, no jobs will be run on the cluster, even if a cluster URL is
-# explicitly defined in the [galaxy:tool_runners] section below. The runners
-# currently available are 'pbs' and 'drmaa'.
-#start_job_runners = None
-
-# For sites where all users in Galaxy match users on the system on which Galaxy
-# runs, the DRMAA job runner can be configured to submit jobs to the DRM as the
-# actual user instead of as the user running the Galaxy server process. For
-# details on these options, see the documentation at:
-#
-# http://galaxyproject.org/wiki/Admin/Config/Performance/Cluster
-#
-#drmaa_external_runjob_script = scripts/drmaa_external_runner.py
-#drmaa_external_killjob_script = scripts/drmaa_external_killer.py
-#external_chown_script = scripts/external_chown_script.py
-
-# File to source to set up the environment when running jobs. By default, the
-# environment in which the Galaxy server starts is used when running jobs
-# locally, and the environment set up per the DRM's submission method and
-# policy is used when running jobs on a cluster (try testing with `qsub` on the
-# command line). environment_setup_file can be set to the path of a file on
-# the cluster that should be sourced by the user to set up the environment
-# prior to running tools. This can be especially useful for running jobs as
-# the actual user, to remove the need to configure each user's environment
-# individually. This only affects cluster jobs, not local jobs.
-#environment_setup_file = None
-
-# The URL for the default runner to use when a tool doesn't explicitly define a
-# runner below.
-#default_cluster_job_runner = local:///
-
-# The cluster runners have their own thread pools used to prepare and finish
-# jobs (so that these sometimes lengthy operations do not block normal queue
-# operation). The value here is the number of worker threads available to each
-# started runner.
-#cluster_job_queue_workers = 3
-
-# These options are only used when using file staging with PBS.
-#pbs_application_server =
-#pbs_stage_path =
-#pbs_dataset_server =
-
-# This option allows users to see the full path of datasets via the "View
-# Details" option in the history. Administrators can always see this.
-#expose_dataset_path = False
-
-# ---- Per-Tool Job Management ----------------------------------------------
-
-# Per-tool job handler and runner overrides. Parameters can be included to define multiple
-# runners per tool. E.g. to run Cufflinks jobs initiated from Trackster
-# differently than standard Cufflinks jobs:
-#
-# cufflinks = local:///
-# cufflinks[source@trackster] = local:///
-
-[galaxy:tool_handlers]
-
-# By default, Galaxy will select a handler at random from the list of
-# job_handlers set above. You can override as in the following examples:
-#
-#upload1 = upload_handler
-#cufflinks[source@trackster] = realtime_handler
-
-[galaxy:tool_runners]
-
-# If not listed here, a tool will run with the runner defined with
-# default_cluster_job_runner. These overrides for local:/// are done because
-# these tools can fetch data from remote sites, which may not be suitable to
-# run on a cluster (if it does not have access to the Internet, for example).
-
-biomart = local:///
-encode_db1 = local:///
-hbvar = local:///
-microbial_import1 = local:///
-ucsc_table_direct1 = local:///
-ucsc_table_direct_archaea1 = local:///
-ucsc_table_direct_test1 = local:///
-upload1 = local:///
-
-# ---- Galaxy Message Queue -------------------------------------------------
-
-# Galaxy uses AMQ protocol to receive messages from external sources like
-# bar code scanners. Galaxy has been tested against RabbitMQ AMQP implementation.
-# For Galaxy to receive messages from a message queue the RabbitMQ server has
-# to be set up with a user account and other parameters listed below. The 'host'
-# and 'port' fields should point to where the RabbitMQ server is running.
-
-[galaxy_amqp]
-
-#host = 127.0.0.1
-#port = 5672
-#userid = galaxy
-#password = galaxy
-#virtual_host = galaxy_messaging_engine
-#queue = galaxy_queue
-#exchange = galaxy_exchange
-#routing_key = bar_code_scanner
-#rabbitmqctl_path = /path/to/rabbitmqctl
-
diff -r 95d98a26518349216a335a5860eb370ab74375fd -r 1a56c4b28c6bbaf1b2f1b0efec5fecce7bd454bb universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample
+++ b/universe_wsgi.ini.sample
@@ -263,6 +263,9 @@
# as on the user registration and login forms.
#terms_url = None
+# The URL linked by the "Galaxy Q&A" link in the "Help" menu
+qa_url = http://slyfox.bx.psu.edu:8080/
+
# Serve static content, which must be enabled if you're not serving it via a
# proxy server. These options should be self explanatory and so are not
# documented individually. You can use these paths (or ones in the proxy
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/95d98a265183/
changeset: 95d98a265183
user: rmarenco
date: 2012-09-07 02:55:35
summary: When asking to create a new post from galaxy tool form, tag is automatically filled with the name of the tool + Added the url as global variable in universe
affected #: 3 files
diff -r 2f684dfa8d474560f241388bbb1fec5c612eca75 -r 95d98a26518349216a335a5860eb370ab74375fd database/compiled_templates/webapps/galaxy/base_panels.mako.py
--- /dev/null
+++ b/database/compiled_templates/webapps/galaxy/base_panels.mako.py
@@ -0,0 +1,415 @@
+# -*- encoding:ascii -*-
+from mako import runtime, filters, cache
+UNDEFINED = runtime.UNDEFINED
+__M_dict_builtin = dict
+__M_locals_builtin = locals
+_magic_number = 6
+_modified_time = 1346972736.51736
+_template_filename=u'templates/webapps/galaxy/base_panels.mako'
+_template_uri=u'/webapps/galaxy/base_panels.mako'
+_template_cache=cache.Cache(__name__, _modified_time)
+_source_encoding='ascii'
+_exports = ['masthead', 'javascripts', 'title']
+
+
+def _mako_get_namespace(context, name):
+ try:
+ return context.namespaces[(__name__, name)]
+ except KeyError:
+ _mako_generate_namespaces(context)
+ return context.namespaces[(__name__, name)]
+def _mako_generate_namespaces(context):
+ pass
+def _mako_inherit(template, context):
+ _mako_generate_namespaces(context)
+ return runtime._inherit_from(context, u'/base_panels.mako', _template_uri)
+def render_body(context,**pageargs):
+ context.caller_stack._push_frame()
+ try:
+ __M_locals = __M_dict_builtin(pageargs=pageargs)
+ __M_writer = context.writer()
+ # SOURCE LINE 1
+ __M_writer(u'\n\n')
+ # SOURCE LINE 4
+ __M_writer(u'\n\n')
+ # SOURCE LINE 8
+ __M_writer(u'\n\n')
+ # SOURCE LINE 232
+ __M_writer(u'\n')
+ return ''
+ finally:
+ context.caller_stack._pop_frame()
+
+
+def render_masthead(context):
+ context.caller_stack._push_frame()
+ try:
+ AssertionError = context.get('AssertionError', UNDEFINED)
+ h = context.get('h', UNDEFINED)
+ app = context.get('app', UNDEFINED)
+ util = context.get('util', UNDEFINED)
+ def tab(id,display,href,target='_parent',visible=True,extra_class='',menu_options=None):
+ context.caller_stack._push_frame()
+ try:
+ self = context.get('self', UNDEFINED)
+ len = context.get('len', UNDEFINED)
+ __M_writer = context.writer()
+ # SOURCE LINE 19
+ __M_writer(u'\n')
+ # SOURCE LINE 22
+ __M_writer(u' \n ')
+ # SOURCE LINE 23
+
+ cls = ""
+ a_cls = ""
+ extra = ""
+ if extra_class:
+ cls += " " + extra_class
+ if self.active_view == id:
+ cls += " active"
+ if menu_options:
+ cls += " dropdown"
+ a_cls += " dropdown-toggle"
+ extra = "<b class='caret'></b>"
+ style = ""
+ if not visible:
+ style = "display: none;"
+
+
+ # SOURCE LINE 38
+ __M_writer(u'\n <li class="')
+ # SOURCE LINE 39
+ __M_writer(unicode(cls))
+ __M_writer(u'" style="')
+ __M_writer(unicode(style))
+ __M_writer(u'">\n')
+ # SOURCE LINE 40
+ if href:
+ # SOURCE LINE 41
+ __M_writer(u' <a class="')
+ __M_writer(unicode(a_cls))
+ __M_writer(u'" data-toggle="dropdown" target="')
+ __M_writer(unicode(target))
+ __M_writer(u'" href="')
+ __M_writer(unicode(href))
+ __M_writer(u'">')
+ __M_writer(unicode(display))
+ __M_writer(unicode(extra))
+ __M_writer(u'</a>\n')
+ # SOURCE LINE 42
+ else:
+ # SOURCE LINE 43
+ __M_writer(u' <a class="')
+ __M_writer(unicode(a_cls))
+ __M_writer(u'" data-toggle="dropdown">')
+ __M_writer(unicode(display))
+ __M_writer(unicode(extra))
+ __M_writer(u'</a>\n')
+ pass
+ # SOURCE LINE 45
+ if menu_options:
+ # SOURCE LINE 46
+ __M_writer(u' <ul class="dropdown-menu">\n')
+ # SOURCE LINE 47
+ for menu_item in menu_options:
+ # SOURCE LINE 48
+ if not menu_item:
+ # SOURCE LINE 49
+ __M_writer(u' <li class="divider"></li>\n')
+ # SOURCE LINE 50
+ else:
+ # SOURCE LINE 51
+ __M_writer(u' <li>\n')
+ # SOURCE LINE 52
+ if len ( menu_item ) == 1:
+ # SOURCE LINE 53
+ __M_writer(u' ')
+ __M_writer(unicode(menu_item[0]))
+ __M_writer(u'\n')
+ # SOURCE LINE 54
+ elif len ( menu_item ) == 2:
+ # SOURCE LINE 55
+ __M_writer(u' ')
+ name, link = menu_item
+
+ __M_writer(u'\n <a href="')
+ # SOURCE LINE 56
+ __M_writer(unicode(link))
+ __M_writer(u'">')
+ __M_writer(unicode(name))
+ __M_writer(u'</a>\n')
+ # SOURCE LINE 57
+ else:
+ # SOURCE LINE 58
+ __M_writer(u' ')
+ name, link, target = menu_item
+
+ __M_writer(u'\n <a target="')
+ # SOURCE LINE 59
+ __M_writer(unicode(target))
+ __M_writer(u'" href="')
+ __M_writer(unicode(link))
+ __M_writer(u'">')
+ __M_writer(unicode(name))
+ __M_writer(u'</a>\n')
+ pass
+ # SOURCE LINE 61
+ __M_writer(u' </li>\n')
+ pass
+ pass
+ # SOURCE LINE 64
+ __M_writer(u' </ul>\n')
+ pass
+ # SOURCE LINE 66
+ __M_writer(u' </li>\n ')
+ return ''
+ finally:
+ context.caller_stack._pop_frame()
+ trans = context.get('trans', UNDEFINED)
+ _ = context.get('_', UNDEFINED)
+ __M_writer = context.writer()
+ # SOURCE LINE 11
+ __M_writer(u'\n\n')
+ # SOURCE LINE 14
+ __M_writer(u' <div style="position: relative; right: -50%; float: left;">\n <div style="display: block; position: relative; right: 50%;">\n\n <ul class="nav" border="0" cellspacing="0">\n \n ')
+ # SOURCE LINE 67
+ __M_writer(u'\n\n')
+ # SOURCE LINE 70
+ __M_writer(u' ')
+ __M_writer(unicode(tab( "analysis", _("Analyze Data"), h.url_for( controller='/root', action='index' ) )))
+ __M_writer(u'\n \n')
+ # SOURCE LINE 73
+ __M_writer(u' ')
+ __M_writer(unicode(tab( "workflow", _("Workflow"), h.url_for( controller='/workflow', action='index' ) )))
+ __M_writer(u'\n \n')
+ # SOURCE LINE 76
+ __M_writer(u' ')
+
+ menu_options = [
+ [ _('Data Libraries'), h.url_for( controller='/library', action='index') ],
+ None,
+ [ _('Published Histories'), h.url_for( controller='/history', action='list_published' ) ],
+ [ _('Published Workflows'), h.url_for( controller='/workflow', action='list_published' ) ],
+ [ _('Published Visualizations'), h.url_for( controller='/visualization', action='list_published' ) ],
+ [ _('Published Pages'), h.url_for( controller='/page', action='list_published' ) ]
+ ]
+ tab( "shared", _("Shared Data"), h.url_for( controller='/library', action='index'), menu_options=menu_options )
+
+
+ # SOURCE LINE 86
+ __M_writer(u'\n \n')
+ # SOURCE LINE 89
+ __M_writer(u' ')
+
+ menu_options = [
+ [ _('Sequencing Requests'), h.url_for( controller='/requests', action='index' ) ],
+ [ _('Find Samples'), h.url_for( controller='/requests', action='find_samples_index' ) ],
+ [ _('Help'), app.config.get( "lims_doc_url", "http://main.g2.bx.psu.edu/u/rkchak/p/sts" ), "galaxy_main" ]
+ ]
+ tab( "lab", "Lab", None, menu_options=menu_options, visible=( trans.user and ( trans.user.requests or trans.app.security_agent.get_accessible_request_types( trans, trans.user ) ) ) )
+
+
+ # SOURCE LINE 96
+ __M_writer(u'\n\n')
+ # SOURCE LINE 99
+ __M_writer(u' ')
+
+ menu_options = [
+ [_('New Visualization'), h.url_for( controller='/tracks', action='index' ) ],
+ [_('Saved Visualizations'), h.url_for( controller='/visualization', action='list' ) ]
+ ]
+ tab( "visualization", _("Visualization"), h.url_for( controller='/visualization', action='list'), menu_options=menu_options )
+
+
+ # SOURCE LINE 105
+ __M_writer(u'\n\n')
+ # SOURCE LINE 108
+ if app.config.get_bool( 'enable_cloud_launch', False ):
+ # SOURCE LINE 109
+ __M_writer(u' ')
+
+ menu_options = [
+ [_('New Cloud Cluster'), h.url_for( controller='/cloudlaunch', action='index' ) ],
+ ]
+ tab( "cloud", _("Cloud"), h.url_for( controller='/cloudlaunch', action='index'), menu_options=menu_options )
+
+
+ # SOURCE LINE 114
+ __M_writer(u'\n')
+ pass
+ # SOURCE LINE 116
+ __M_writer(u'\n')
+ # SOURCE LINE 118
+ __M_writer(u' ')
+ __M_writer(unicode(tab( "admin", "Admin", h.url_for( controller='/admin', action='index' ), extra_class="admin-only", visible=( trans.user and app.config.is_admin_user( trans.user ) ) )))
+ __M_writer(u'\n \n')
+ # SOURCE LINE 121
+ __M_writer(u' ')
+
+ menu_options = [
+ [_('Galaxy Q&A'), app.config.get( "qa_url", "http://slyfox.bx.psu.edu:8080/" ), "galaxy_main" ],
+ [_('Support'), app.config.get( "support_url", "http://wiki.g2.bx.psu.edu/Support" ), "_blank" ],
+ [_('Galaxy Wiki'), app.config.get( "wiki_url", "http://wiki.g2.bx.psu.edu/" ), "_blank" ],
+ [_('Video tutorials (screencasts)'), app.config.get( "screencasts_url", "http://galaxycast.org" ), "_blank" ],
+ [_('How to Cite Galaxy'), app.config.get( "citation_url", "http://wiki.g2.bx.psu.edu/Citing%20Galaxy" ), "_blank" ]
+ ]
+ if app.config.get( 'terms_url', None ) is not None:
+ menu_options.append( [_('Terms and Conditions'), app.config.get( 'terms_url', None ), '_blank'] )
+ tab( "help", _("Help"), None, menu_options=menu_options)
+
+
+ # SOURCE LINE 132
+ __M_writer(u'\n \n')
+ # SOURCE LINE 135
+ __M_writer(u' ')
+
+ # Menu for user who is not logged in.
+ menu_options = [ [ _("Login"), h.url_for( controller='/user', action='login' ), "galaxy_main" ] ]
+ if app.config.allow_user_creation:
+ menu_options.append( [ _("Register"), h.url_for( controller='/user', action='create', cntrller='user', webapp='galaxy' ), "galaxy_main" ] )
+ extra_class = "loggedout-only"
+ visible = ( trans.user == None )
+ tab( "user", _("User"), None, visible=visible, menu_options=menu_options )
+
+ # Menu for user who is logged in.
+ if trans.user:
+ email = trans.user.email
+ else:
+ email = ""
+ menu_options = [ [ '<a>Logged in as <span id="user-email">%s</span></a>' % email ] ]
+ if app.config.use_remote_user:
+ if app.config.remote_user_logout_href:
+ menu_options.append( [ _('Logout'), app.config.remote_user_logout_href, "_top" ] )
+ else:
+ menu_options.append( [ _('Preferences'), h.url_for( controller='/user', action='index', cntrller='user', webapp='galaxy' ), "galaxy_main" ] )
+ menu_options.append( [ 'Custom Builds', h.url_for( controller='/user', action='dbkeys' ), "galaxy_main" ] )
+ if app.config.require_login:
+ logout_url = h.url_for( controller='/root', action='index', m_c='user', m_a='logout', webapp='galaxy' )
+ else:
+ logout_url = h.url_for( controller='/user', action='logout', webapp='galaxy' )
+ menu_options.append( [ 'Logout', logout_url, "_top" ] )
+ menu_options.append( None )
+ menu_options.append( [ _('Saved Histories'), h.url_for( controller='/history', action='list' ), "galaxy_main" ] )
+ menu_options.append( [ _('Saved Datasets'), h.url_for( controller='/dataset', action='list' ), "galaxy_main" ] )
+ menu_options.append( [ _('Saved Pages'), h.url_for( controller='/page', action='list' ), "_top" ] )
+ menu_options.append( [ _('API Keys'), h.url_for( controller='/user', action='api_keys', cntrller='user', webapp='galaxy' ), "galaxy_main" ] )
+ if app.config.use_remote_user:
+ menu_options.append( [ _('Public Name'), h.url_for( controller='/user', action='edit_username', cntrller='user', webapp='galaxy' ), "galaxy_main" ] )
+
+ extra_class = "loggedin-only"
+ visible = ( trans.user != None )
+ tab( "user", "User", None, visible=visible, menu_options=menu_options )
+
+
+ # SOURCE LINE 172
+ __M_writer(u'\n \n')
+ # SOURCE LINE 176
+ __M_writer(u' </ul>\n\n </div>\n </div>\n \n')
+ # SOURCE LINE 182
+ __M_writer(u' <div class="title">\n <a href="')
+ # SOURCE LINE 183
+ __M_writer(unicode(h.url_for( app.config.get( 'logo_url', '/' ) )))
+ __M_writer(u'">\n <img border="0" src="')
+ # SOURCE LINE 184
+ __M_writer(unicode(h.url_for('/static/images/galaxyIcon_noText.png')))
+ __M_writer(u'">\n Galaxy\n')
+ # SOURCE LINE 186
+ if app.config.brand:
+ # SOURCE LINE 187
+ __M_writer(u' <span>/ ')
+ __M_writer(unicode(app.config.brand))
+ __M_writer(u'</span>\n')
+ pass
+ # SOURCE LINE 189
+ __M_writer(u' </a>\n </div>\n\n')
+ # SOURCE LINE 193
+ __M_writer(u' ')
+
+ bar_style = "quota-meter-bar"
+ usage = 0
+ percent = 0
+ quota = None
+ try:
+ usage = trans.app.quota_agent.get_usage( trans=trans )
+ quota = trans.app.quota_agent.get_quota( trans.user )
+ percent = trans.app.quota_agent.get_percent( usage=usage, quota=quota )
+ if percent is not None:
+ if percent >= 100:
+ bar_style += " quota-meter-bar-error"
+ elif percent >= 85:
+ bar_style += " quota-meter-bar-warn"
+ else:
+ percent = 0
+ except AssertionError:
+ pass # Probably no history yet
+ tooltip = None
+ if not trans.user and quota and trans.app.config.allow_user_creation:
+ if trans.app.quota_agent.default_registered_quota is None or trans.app.quota_agent.default_unregistered_quota < trans.app.quota_agent.default_registered_quota:
+ tooltip = "Your disk quota is %s. You can increase your quota by registering a Galaxy account." % util.nice_size( quota )
+
+
+ # SOURCE LINE 215
+ __M_writer(u'\n\n <div class="quota-meter-container">\n')
+ # SOURCE LINE 218
+ if tooltip:
+ # SOURCE LINE 219
+ __M_writer(u' <div id="quota-meter" class="quota-meter tooltip" title="')
+ __M_writer(unicode(tooltip))
+ __M_writer(u'">\n')
+ # SOURCE LINE 220
+ else:
+ # SOURCE LINE 221
+ __M_writer(u' <div id="quota-meter" class="quota-meter">\n')
+ pass
+ # SOURCE LINE 223
+ __M_writer(u' <div id="quota-meter-bar" class="')
+ __M_writer(unicode(bar_style))
+ __M_writer(u'" style="width: ')
+ __M_writer(unicode(percent))
+ __M_writer(u'px;"></div>\n')
+ # SOURCE LINE 224
+ if quota is not None:
+ # SOURCE LINE 225
+ __M_writer(u' <div id="quota-meter-text" class="quota-meter-text">Using ')
+ __M_writer(unicode(percent))
+ __M_writer(u'%</div>\n')
+ # SOURCE LINE 226
+ else:
+ # SOURCE LINE 227
+ __M_writer(u' <div id="quota-meter-text" class="quota-meter-text">Using ')
+ __M_writer(unicode(util.nice_size( usage )))
+ __M_writer(u'</div>\n')
+ pass
+ # SOURCE LINE 229
+ __M_writer(u' </div>\n </div>\n \n')
+ return ''
+ finally:
+ context.caller_stack._pop_frame()
+
+
+def render_javascripts(context):
+ context.caller_stack._push_frame()
+ try:
+ parent = context.get('parent', UNDEFINED)
+ __M_writer = context.writer()
+ # SOURCE LINE 6
+ __M_writer(u'\n ')
+ # SOURCE LINE 7
+ __M_writer(unicode(parent.javascripts()))
+ __M_writer(u'\n')
+ return ''
+ finally:
+ context.caller_stack._pop_frame()
+
+
+def render_title(context):
+ context.caller_stack._push_frame()
+ try:
+ __M_writer = context.writer()
+ # SOURCE LINE 4
+ __M_writer(u'Galaxy')
+ return ''
+ finally:
+ context.caller_stack._pop_frame()
+
+
diff -r 2f684dfa8d474560f241388bbb1fec5c612eca75 -r 95d98a26518349216a335a5860eb370ab74375fd templates/tool_form.mako
--- a/templates/tool_form.mako
+++ b/templates/tool_form.mako
@@ -302,8 +302,8 @@
${tool.name} ${tool_version_select_field.get_html()}
%endif
<!-- BioStar links -->
- <a href="http://slyfox.bx.psu.edu:8081/new/post/" target="galaxy_main" class="icon-button general-question tooltip close-side-panels" data-original-title="Ask a tool related question"></a>
- <a href="http://slyfox.bx.psu.edu:8081/show/tag/${low_tool_name}/" target="galaxy_main" class="icon-button tag-question tooltip close-side-panels" data-original-title="See tool related posts" ></a>
+ <a href="http://slyfox.bx.psu.edu:8080/new/post/tagged/${low_tool_name}" target="galaxy_main" class="icon-button general-question tooltip close-side-panels" data-original-title="Ask a tool related question"></a>
+ <a href="http://slyfox.bx.psu.edu:8080/show/tag/${low_tool_name}/" target="galaxy_main" class="icon-button tag-question tooltip close-side-panels" data-original-title="See tool related posts" ></a><!-- End of BioStar links --></div><div class="toolFormBody">
diff -r 2f684dfa8d474560f241388bbb1fec5c612eca75 -r 95d98a26518349216a335a5860eb370ab74375fd universe_wsgi.ini
--- /dev/null
+++ b/universe_wsgi.ini
@@ -0,0 +1,753 @@
+#
+# Galaxy is configured by default to be usable in a single-user development
+# environment. To tune the application for a multi-user production
+# environment, see the documentation at:
+#
+# http://wiki.g2.bx.psu.edu/Admin/Config/Performance/Production%20Server
+#
+
+# Throughout this sample configuration file, except where stated otherwise,
+# uncommented values override the default if left unset, whereas commented
+# values are set to the default value.
+# Examples of many of these options are explained in more detail in the wiki:
+#
+# http://wiki.g2.bx.psu.edu/Admin/Config
+#
+# Config hackers are encouraged to check there before asking for help.
+
+# ---- HTTP Server ----------------------------------------------------------
+
+# Configuration of the internal HTTP server.
+
+[server:main]
+
+# The internal HTTP server to use. Currently only Paste is provided. This
+# option is required.
+use = egg:Paste#http
+
+# The port on which to listen.
+#port = 8080
+
+# The address on which to listen. By default, only listen to localhost (Galaxy
+# will not be accessible over the network). Use '0.0.0.0' to listen on all
+# available network interfaces.
+#host = 127.0.0.1
+
+# Use a threadpool for the web server instead of creating a thread for each
+# request.
+use_threadpool = True
+
+# Number of threads in the web server thread pool.
+#threadpool_workers = 10
+
+# ---- Filters --------------------------------------------------------------
+
+# Filters sit between Galaxy and the HTTP server.
+
+# These filters are disabled by default. They can be enabled with
+# 'filter-with' in the [app:main] section below.
+
+# Define the gzip filter.
+[filter:gzip]
+use = egg:Paste#gzip
+
+# Define the proxy-prefix filter.
+[filter:proxy-prefix]
+use = egg:PasteDeploy#prefix
+prefix = /galaxy
+
+# ---- Galaxy ---------------------------------------------------------------
+
+# Configuration of the Galaxy application.
+
+[app:main]
+
+# -- Application and filtering
+
+# The factory for the WSGI application. This should not be changed.
+paste.app_factory = galaxy.web.buildapp:app_factory
+
+# If not running behind a proxy server, you may want to enable gzip compression
+# to decrease the size of data transferred over the network. If using a proxy
+# server, please enable gzip compression there instead.
+#filter-with = gzip
+
+# If running behind a proxy server and Galaxy is served from a subdirectory,
+# enable the proxy-prefix filter and set the prefix in the
+# [filter:proxy-prefix] section above.
+#filter-with = proxy-prefix
+
+# If proxy-prefix is enabled and you're running more than one Galaxy instance
+# behind one hostname, you will want to set this to the same path as the prefix
+# in the filter above. This value becomes the "path" attribute set in the
+# cookie so the cookies from each instance will not clobber each other.
+#cookie_path = None
+
+# -- Database
+
+# By default, Galaxy uses a SQLite database at 'database/universe.sqlite'. You
+# may use a SQLAlchemy connection string to specify an external database
+# instead. This string takes many options which are explained in detail in the
+# config file documentation.
+#database_connection = sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE
+
+# If the server logs errors about not having enough database pool connections,
+# you will want to increase these values, or consider running more Galaxy
+# processes.
+#database_engine_option_pool_size = 5
+#database_engine_option_max_overflow = 10
+
+# If using MySQL and the server logs the error "MySQL server has gone away",
+# you will want to set this to some positive value (7200 should work).
+#database_engine_option_pool_recycle = -1
+
+# If large database query results are causing memory or response time issues in
+# the Galaxy process, leave the result on the server instead. This option is
+# only available for PostgreSQL and is highly recommended.
+#database_engine_option_server_side_cursors = False
+
+# Create only one connection to the database per thread, to reduce the
+# connection overhead. Recommended when not using SQLite:
+#database_engine_option_strategy = threadlocal
+
+# Log all database transactions, can be useful for debugging and performance
+# profiling. Logging is done via Python's 'logging' module under the qualname
+# 'galaxy.model.orm.logging_connection_proxy'
+#database_query_profiling_proxy = False
+
+# -- Files and directories
+
+# Path where genome builds are stored. This defaults to tool-data/genome
+#genome_data_path = tool-data/genome
+
+# URL for rsync server to download pre-built indexes.
+#rsync_url = rsync://scofield.bx.psu.edu/indexes
+
+# Dataset files are stored in this directory.
+#file_path = database/files
+
+# Temporary files are stored in this directory.
+#new_file_path = database/tmp
+
+# Tool config files, defines what tools are available in Galaxy.
+# Tools can be locally developed or installed from Galaxy tool sheds.
+#tool_config_file = tool_conf.xml,shed_tool_conf.xml
+
+# Default path to the directory containing the tools defined in tool_conf.xml.
+# Other tool config files must include the tool_path as an attribute in the <toolbox> tag.
+#tool_path = tools
+
+# Path to the directory in which managed tool dependencies are placed. To use
+# the dependency system, see the documentation at:
+# http://wiki.g2.bx.psu.edu/Admin/Config/Tool%20Dependencies
+#tool_dependency_dir = None
+
+# Enable automatic polling of relative tool sheds to see if any updates
+# are available for installed repositories. Ideally only one Galaxy
+# server process should be able to check for repository updates. The
+# setting for hours_between_check should be an integer between 1 and 24.
+#enable_tool_shed_check = False
+#hours_between_check = 12
+
+# Directory where data used by tools is located, see the samples in that
+# directory and the wiki for help:
+# http://wiki.g2.bx.psu.edu/Admin/Data%20Integration
+#tool_data_path = tool-data
+
+# Directory where chrom len files are kept, currently mainly used by trackster
+#len_file_path = tool-data/shared/ucsc/chrom
+
+# Datatypes config file, defines what data (file) types are available in
+# Galaxy.
+#datatypes_config_file = datatypes_conf.xml
+
+# Each job is given a unique empty directory as its current working directory.
+# This option defines in what parent directory those directories will be
+# created.
+#job_working_directory = database/job_working_directory
+
+# If using a cluster, Galaxy will write job scripts and stdout/stderr to this
+# directory.
+#cluster_files_directory = database/pbs
+
+# External service types config file, defines what types of external_services configurations
+# are available in Galaxy.
+#external_service_type_config_file = external_service_types_conf.xml
+
+# Path to the directory containing the external_service_types defined in the config.
+#external_service_type_path = external_service_types
+
+# Tools with a number of outputs not known until runtime can write these
+# outputs to a directory for collection by Galaxy when the job is done.
+# Previously, this directory was new_file_path, but using one global directory
+# can cause performance problems, so using job_working_directory ('.' or cwd
+# when a job is run) is encouraged. By default, both are checked to avoid
+# breaking existing tools.
+#collect_outputs_from = new_file_path,job_working_directory
+
+# -- Mail and notification
+
+# Galaxy sends mail for various things: Subscribing users to the mailing list
+# if they request it, emailing password resets, notification from the Galaxy
+# Sample Tracking system, and reporting dataset errors. To do this, it needs
+# to send mail through an SMTP server, which you may define here (host:port).
+# Galaxy will automatically try STARTTLS but will continue upon failure.
+#smtp_server = None
+
+# If your SMTP server requires a username and password, you can provide them
+# here (password in cleartext here, but if your server supports STARTTLS it
+# will be sent over the network encrypted).
+#smtp_username = None
+#smtp_password = None
+
+# On the user registration form, users may choose to join the mailing list.
+# This is the address of the list they'll be subscribed to.
+#mailing_join_addr = galaxy-announce-join(a)bx.psu.edu
+
+# Datasets in an error state include a link to report the error. Those reports
+# will be sent to this address. Error reports are disabled if no address is set.
+#error_email_to = None
+
+# -- Display sites
+
+# Galaxy can display data at various external browsers. These options specify
+# which browsers should be available. URLs and builds available at these
+# browsers are defined in the specified files.
+
+# UCSC browsers: tool-data/shared/ucsc/ucsc_build_sites.txt
+#ucsc_display_sites = main,test,archaea,ucla
+
+# GBrowse servers: tool-data/shared/gbrowse/gbrowse_build_sites.txt
+#gbrowse_display_sites = modencode,sgd_yeast,tair,wormbase,wormbase_ws120,wormbase_ws140,wormbase_ws170,wormbase_ws180,wormbase_ws190,wormbase_ws200,wormbase_ws204,wormbase_ws210,wormbase_ws220,wormbase_ws225
+
+# GeneTrack servers: tool-data/shared/genetrack/genetrack_sites.txt
+#genetrack_display_sites = main,test
+
+# If use_remote_user = True, display application servers will be denied access
+# to Galaxy and so displaying datasets in these sites will fail.
+# display_servers contains a list of hostnames which should be allowed to
+# bypass security to display datasets. Please be aware that there are security
+# implications if this is allowed. More details (including required changes to
+# the proxy server config) are available in the Apache proxy documentation on
+# the wiki.
+#
+# The list of servers in this sample config are for the UCSC Main, Test and
+# Archaea browsers, but the default if left commented is to not allow any
+# display sites to bypass security (you must uncomment the line below to allow
+# them).
+#display_servers = hgw1.cse.ucsc.edu,hgw2.cse.ucsc.edu,hgw3.cse.ucsc.edu,hgw4.cse.ucsc.edu,hgw…
+
+# -- Next gen LIMS interface on top of existing Galaxy Sample/Request management code.
+
+use_nglims = False
+nglims_config_file = tool-data/nglims.yaml
+
+# -- UI Localization
+
+# Append "/{brand}" to the "Galaxy" text in the masthead.
+#brand = None
+
+# The URL linked by the "Galaxy/brand" text.
+#logo_url = /
+
+# The URL linked by the "Galaxy Wiki" link in the "Help" menu.
+#wiki_url = http://wiki.g2.bx.psu.edu/
+
+# The URL linked by the "Support" link in the "Help" menu.
+#support_url = http://wiki.g2.bx.psu.edu/Support
+
+# The URL linked by the "How to Cite..." link in the "Help" menu.
+#citation_url = http://wiki.g2.bx.psu.edu/Citing%20Galaxy
+
+# The URL linked by the "Terms and Conditions" link in the "Help" menu, as well
+# as on the user registration and login forms.
+#terms_url = None
+
+# The URL linked by the "Galaxy Q&A" link in the "Help" menu
+qa_url = http://slyfox.bx.psu.edu:8080/
+
+# Serve static content, which must be enabled if you're not serving it via a
+# proxy server. These options should be self explanatory and so are not
+# documented individually. You can use these paths (or ones in the proxy
+# server) to point to your own styles.
+static_enabled = True
+static_cache_time = 360
+static_dir = %(here)s/static/
+static_images_dir = %(here)s/static/images
+static_favicon_dir = %(here)s/static/favicon.ico
+static_scripts_dir = %(here)s/static/scripts/
+static_style_dir = %(here)s/static/june_2007_style/blue
+static_robots_txt = %(here)s/static/robots.txt
+
+# Pack javascript at launch (/static/scripts/*.js)
+# This only happens if the modified timestamp of the source .js is newer
+# than the version (if it exists) in /static/scripts/packed/
+# Note that this requires java > 1.4 for executing yuicompressor.jar
+#pack_scripts = False
+
+# Enable Cloud Launch
+
+#enable_cloud_launch = False
+
+# -- Advanced proxy features
+
+# For help on configuring the Advanced proxy features, see:
+# http://usegalaxy.org/production
+
+# Apache can handle file downloads (Galaxy-to-user) via mod_xsendfile. Set
+# this to True to inform Galaxy that mod_xsendfile is enabled upstream.
+#apache_xsendfile = False
+
+# The same download handling can be done by nginx using X-Accel-Redirect. This
+# should be set to the path defined in the nginx config as an internal redirect
+# with access to Galaxy's data files (see documentation linked above).
+#nginx_x_accel_redirect_base = False
+
+# nginx can make use of mod_zip to create zip files containing multiple library
+# files. If using X-Accel-Redirect, this can be the same value as that option.
+#nginx_x_archive_files_base = False
+
+# If using compression in the upstream proxy server, use this option to disable
+# gzipping of library .tar.gz and .zip archives, since the proxy server will do
+# it faster on the fly.
+#upstream_gzip = False
+
+# nginx can also handle file uploads (user-to-Galaxy) via nginx_upload_module.
+# Configuration for this is complex and explained in detail in the
+# documentation linked above. The upload store is a temporary directory in
+# which files uploaded by the upload module will be placed.
+#nginx_upload_store = False
+
+# This value overrides the action set on the file upload form, e.g. the web
+# path where the nginx_upload_module has been configured to intercept upload
+# requests.
+#nginx_upload_path = False
+
+# -- Logging and Debugging
+
+# Verbosity of console log messages. Acceptable values can be found here:
+# http://docs.python.org/library/logging.html#logging-levels
+#log_level = DEBUG
+
+# Print database operations to the server log (warning, quite verbose!).
+#database_engine_option_echo = False
+
+# Print database pool operations to the server log (warning, quite verbose!).
+#database_engine_option_echo_pool = False
+
+# Turn on logging of application events and some user events to the database.
+#log_events = True
+
+# Turn on logging of user actions to the database. Actions currently logged are
+# grid views, tool searches, and use of "recently" used tools menu. The
+# log_events and log_actions functionality will eventually be merged.
+#log_actions = True
+
+# Sanitize All HTML Tool Output
+# By default, all tool output served as 'text/html' will be sanitized
+# thoroughly. This can be disabled if you have special tools that require
+# unaltered output.
+#sanitize_all_html = True
+
+# Debug enables access to various config options useful for development and
+# debugging: use_lint, use_profile, use_printdebug and use_interactive. It
+# also causes the files used by PBS/SGE (submission script, output, and error)
+# to remain on disk after the job is complete. Debug mode is disabled if
+# commented, but is uncommented by default in the sample config.
+debug = True
+
+# Check for WSGI compliance.
+#use_lint = False
+
+# Run the Python profiler on each request.
+#use_profile = False
+
+# Intercept print statements and show them on the returned page.
+#use_printdebug = True
+
+# Enable live debugging in your browser. This should NEVER be enabled on a
+# public site. Enabled in the sample config for development.
+use_interactive = True
+
+# Write thread status periodically to 'heartbeat.log', (careful, uses disk
+# space rapidly!). Useful to determine why your processes may be consuming a
+# lot of CPU.
+#use_heartbeat = False
+
+# Enable the memory debugging interface (careful, negatively impacts server
+# performance).
+#use_memdump = False
+
+# -- Data Libraries
+
+# These library upload options are described in much more detail in the wiki:
+# http://wiki.g2.bx.psu.edu/Admin/Data%20Libraries/Uploading%20Library%20Files
+
+# Add an option to the library upload form which allows administrators to
+# upload a directory of files.
+#library_import_dir = None
+
+# Add an option to the library upload form which allows authorized
+# non-administrators to upload a directory of files. The configured directory
+# must contain sub-directories named the same as the non-admin user's Galaxy
+# login ( email ). The non-admin user is restricted to uploading files or
+# sub-directories of files contained in their directory.
+#user_library_import_dir = None
+
+# Add an option to the admin library upload tool allowing admins to paste
+# filesystem paths to files and directories in a box, and these paths will be
+# added to a library. Set to True to enable. Please note the security
+# implication that this will give Galaxy Admins access to anything your Galaxy
+# user has access to.
+#allow_library_path_paste = False
+
+# Users may choose to download multiple files from a library in an archive. By
+# default, Galaxy allows users to select from a few different archive formats
+# if testing shows that Galaxy is able to create files using these formats.
+# Specific formats can be disabled with this option, separate more than one
+# format with commas. Available formats are currently 'zip', 'gz', and 'bz2'.
+#disable_library_comptypes =
+
+# Some sequencer integration features in beta allow you to automatically
+# transfer datasets. This is done using a lightweight transfer manager which
+# runs outside of Galaxy (but is spawned by it automatically). Galaxy will
+# communicate with this manager over the port specified here.
+#transfer_manager_port = 8163
+
+# Search data libraries with whoosh
+#enable_whoosh_library_search = True
+# Whoosh indexes are stored in this directory.
+#whoosh_index_dir = database/whoosh_indexes
+
+# Search data libraries with lucene
+#enable_lucene_library_search = False
+# maximum file size to index for searching, in MB
+#fulltext_max_size = 500
+#fulltext_noindex_filetypes=bam,sam,wig,bigwig,fasta,fastq,fastqsolexa,fastqillumina,fastqsanger
+# base URL of server providing search functionality using lucene
+#fulltext_url = http://localhost:8081
+
+# -- Users and Security
+
+# Galaxy encodes various internal values when these values will be output in
+# some format (for example, in a URL or cookie). You should set a key to be
+# used by the algorithm that encodes and decodes these values. It can be any
+# string. If left unchanged, anyone could construct a cookie that would grant
+# them access to others' sessions.
+#id_secret = USING THE DEFAULT IS NOT SECURE!
+
+# User authentication can be delegated to an upstream proxy server (usually
+# Apache). The upstream proxy should set a REMOTE_USER header in the request.
+# Enabling remote user disables regular logins. For more information, see:
+# http://wiki.g2.bx.psu.edu/Admin/Config/Apache%20Proxy
+#use_remote_user = False
+
+# If use_remote_user is enabled and your external authentication
+# method just returns bare usernames, set a default mail domain to be appended
+# to usernames, to become your Galaxy usernames (email addresses).
+#remote_user_maildomain = None
+
+# If use_remote_user is enabled, you can set this to a URL that will log your
+# users out.
+#remote_user_logout_href = None
+
+# Administrative users - set this to a comma-separated list of valid Galaxy
+# users (email addresses). These users will have access to the Admin section
+# of the server, and will have access to create users, groups, roles,
+# libraries, and more. For more information, see:
+# http://wiki.g2.bx.psu.edu/Admin/Interface
+#admin_users = None
+
+# Force everyone to log in (disable anonymous access).
+#require_login = False
+
+# Allow unregistered users to create new accounts (otherwise, they will have to
+# be created by an admin).
+#allow_user_creation = True
+
+# Allow administrators to delete accounts.
+#allow_user_deletion = False
+
+# Allow administrators to log in as other users (useful for debugging)
+#allow_user_impersonation = False
+
+# Allow users to remove their datasets from disk immediately (otherwise,
+# datasets will be removed after a time period specified by an administrator in
+# the cleanup scripts run via cron)
+#allow_user_dataset_purge = False
+
+# By default, users' data will be public, but setting this to True will cause
+# it to be private. Does not affect existing users and data, only ones created
+# after this option is set. Users may still change their default back to
+# public.
+#new_user_dataset_access_role_default_private = False
+
+# -- Beta features
+
+# Object store mode (valid options are: disk, s3, swift, distributed, hierarchical)
+#object_store = disk
+#os_access_key = <your cloud object store access key>
+#os_secret_key = <your cloud object store secret key>
+#os_bucket_name = <name of an existing object store bucket or container>
+# If using 'swift' object store, you must specify the following connection properties
+#os_host = swift.rc.nectar.org.au
+#os_port = 8888
+#os_is_secure = False
+#os_conn_path = /
+# Reduced redundancy can be used only with the 's3' object store
+#os_use_reduced_redundancy = False
+# Size (in GB) that the cache used by object store should be limited to.
+# If the value is not specified, the cache size will be limited only by the
+# file system size. The file system location of the cache is considered the
+# configuration of the ``file_path`` directive defined above.
+#object_store_cache_size = 100
+
+# Configuration file for the distributed object store, if object_store =
+# distributed. See the sample at distributed_object_store_conf.xml.sample
+#distributed_object_store_config_file = None
+
+# Enable Galaxy to communicate directly with a sequencer
+#enable_sequencer_communication = False
+
+# Enable authentication via OpenID. Allows users to log in to their Galaxy
+# account by authenticating with an OpenID provider.
+#enable_openid = False
+#openid_config_file = openid_conf.xml
+
+# Optional list of email addresses of API users who can make calls on behalf of
+# other users
+#api_allow_run_as = None
+
+# Enable tool tags (associating tools with tags). This has its own option
+# since its implementation has a few performance implications on startup for
+# large servers.
+#enable_tool_tags = False
+
+# Enable a feature when running workflows. When enabled, default datasets
+# are selected for "Set at Runtime" inputs from the history such that the
+# same input will not be selected twice, unless there are more inputs than
+# compatible datasets in the history.
+# When False, the most recently added compatible item in the history will
+# be used for each "Set at Runtime" input, independent of others in the Workflow
+#enable_unique_workflow_defaults = False
+
+# The URL to the myExperiment instance being used (omit scheme but include port)
+#myexperiment_url = www.myexperiment.org:80
+
+# Enable Galaxy's "Upload via FTP" interface. You'll need to install and
+# configure an FTP server (we've used ProFTPd since it can use Galaxy's
+# database for authentication) and set the following two options.
+
+# This should point to a directory containing subdirectories matching users'
+# email addresses, where Galaxy will look for files.
+#ftp_upload_dir = None
+
+# This should be the hostname of your FTP server, which will be provided to
+# users in the help text.
+#ftp_upload_site = None
+
+# Enable enforcement of quotas. Quotas can be set from the Admin interface.
+#enable_quotas = False
+
+# Enable a feature when running workflows. When enabled, default datasets
+# are selected for "Set at Runtime" inputs from the history such that the
+# same input will not be selected twice, unless there are more inputs than
+# compatible datasets in the history.
+# When False, the most recently added compatible item in the history will
+# be used for each "Set at Runtime" input, independent of others in the Workflow
+#enable_unique_workflow_defaults = False
+
+# -- Job Execution
+
+# To increase performance of job execution and the web interface, you can
+# separate Galaxy into multiple processes. There are more than one way to do
+# this, and they are explained in detail in the documentation:
+#
+# http://wiki.g2.bx.psu.edu/Admin/Config/Performance/Web%20Application%20Scal…
+#
+# By default, Galaxy manages and executes jobs from within a single process and
+# notifies itself of new jobs via in-memory queues. If you change job_manager
+# and job_handlers from their default values, notification will instead be done
+# using the `state` and `handler` columns of the job table in the database.
+
+# Identify the server_name (the string following server: at the top of this
+# file) which should be designated as the job manager (only one):
+#job_manager = main
+
+# Identify the server_name(s) which should be designated as job handlers
+# (responsible for starting, tracking, finishing, and cleaning up jobs) as a
+# comma-separated list.
+#job_handlers = main
+
+# By default, a handler from job_handlers will be selected at random if the
+# tool to run does not specify a handler below in [galaxy:tool_handlers]. If you
+# want certain handlers to only handle jobs for tools/params explicitly
+# assigned below, use default_job_handlers to specify which handlers should be
+# used for jobs without explicit handlers.
+#default_job_handlers = main
+
+# This enables splitting of jobs into tasks, if specified by the particular tool config.
+# This is a new feature and not recommended for production servers yet.
+#use_tasked_jobs = False
+#local_task_queue_workers = 2
+
+# Enable job recovery (if Galaxy is restarted while cluster jobs are running,
+# it can "recover" them when it starts). This is not safe to use if you are
+# running more than one Galaxy server using the same database.
+#enable_job_recovery = True
+
+# Setting metadata on job outputs in a separate process (or if using a
+# cluster, on the cluster). Thanks to Python's Global Interpreter Lock and the
+# hefty expense that setting metadata incurs, your Galaxy process may become
+# unresponsive when this operation occurs internally.
+#set_metadata_externally = False
+
+# Although it is fairly reliable, setting metadata can occasionally fail. In
+# these instances, you can choose to retry setting it internally or leave it in
+# a failed state (since retrying internally may cause the Galaxy process to be
+# unresponsive). If this option is set to False, the user will be given the
+# option to retry externally, or set metadata manually (when possible).
+#retry_metadata_internally = True
+
+# If (for example) you run on a cluster and your datasets (by default,
+# database/files/) are mounted read-only, this option will override tool output
+# paths to write outputs to the working directory instead, and the job manager
+# will move the outputs to their proper place in the dataset directory on the
+# Galaxy server after the job completes.
+#outputs_to_working_directory = False
+
+# If your network filesystem's caching prevents the Galaxy server from seeing
+# the job's stdout and stderr files when it completes, you can retry reading
+# these files. The job runner will retry the number of times specified below,
+# waiting 1 second between tries. For NFS, you may want to try the -noac mount
+# option (Linux) or -actimeo=0 (Solaris).
+#retry_job_output_collection = 0
+
+# Clean up various bits of jobs left on the filesystem after completion. These
+# bits include the job working directory, external metadata temporary files,
+# and DRM stdout and stderr files (if using a DRM). Possible values are:
+# always, onsuccess, never
+#cleanup_job = always
+
+# Number of concurrent jobs to run (local job runner)
+#local_job_queue_workers = 5
+
+# Jobs can be killed after a certain amount of execution time. Format is in
+# hh:mm:ss. Currently only implemented for PBS.
+#job_walltime = None
+
+# Jobs can be killed if any of their outputs grow over a certain size (in
+# bytes). 0 for no limit.
+#output_size_limit = 0
+
+# Jobs can be held back from submission to a runner if a user already has more
+# jobs queued or running than the number specified below. This prevents a
+# single user from stuffing the queue and preventing other users from being
+# able to run jobs.
+#user_job_limit = None
+
+# Clustering Galaxy is not a straightforward process and requires some
+# pre-configuration. See the wiki before attempting to set any of these
+# options:
+# http://wiki.g2.bx.psu.edu/Admin/Config/Performance/Cluster
+
+# Comma-separated list of job runners to start. local is always started. If
+# left commented, no jobs will be run on the cluster, even if a cluster URL is
+# explicitly defined in the [galaxy:tool_runners] section below. The runners
+# currently available are 'pbs' and 'drmaa'.
+#start_job_runners = None
+
+# For sites where all users in Galaxy match users on the system on which Galaxy
+# runs, the DRMAA job runner can be configured to submit jobs to the DRM as the
+# actual user instead of as the user running the Galaxy server process. For
+# details on these options, see the documentation at:
+#
+# http://galaxyproject.org/wiki/Admin/Config/Performance/Cluster
+#
+#drmaa_external_runjob_script = scripts/drmaa_external_runner.py
+#drmaa_external_killjob_script = scripts/drmaa_external_killer.py
+#external_chown_script = scripts/external_chown_script.py
+
+# File to source to set up the environment when running jobs. By default, the
+# environment in which the Galaxy server starts is used when running jobs
+# locally, and the environment set up per the DRM's submission method and
+# policy is used when running jobs on a cluster (try testing with `qsub` on the
+# command line). environment_setup_file can be set to the path of a file on
+# the cluster that should be sourced by the user to set up the environment
+# prior to running tools. This can be especially useful for running jobs as
+# the actual user, to remove the need to configure each user's environment
+# individually. This only affects cluster jobs, not local jobs.
+#environment_setup_file = None
+
+# The URL for the default runner to use when a tool doesn't explicitly define a
+# runner below.
+#default_cluster_job_runner = local:///
+
+# The cluster runners have their own thread pools used to prepare and finish
+# jobs (so that these sometimes lengthy operations do not block normal queue
+# operation). The value here is the number of worker threads available to each
+# started runner.
+#cluster_job_queue_workers = 3
+
+# These options are only used when using file staging with PBS.
+#pbs_application_server =
+#pbs_stage_path =
+#pbs_dataset_server =
+
+# This option allows users to see the full path of datasets via the "View
+# Details" option in the history. Administrators can always see this.
+#expose_dataset_path = False
+
+# ---- Per-Tool Job Management ----------------------------------------------
+
+# Per-tool job handler and runner overrides. Parameters can be included to define multiple
+# runners per tool. E.g. to run Cufflinks jobs initiated from Trackster
+# differently than standard Cufflinks jobs:
+#
+# cufflinks = local:///
+# cufflinks[source@trackster] = local:///
+
+[galaxy:tool_handlers]
+
+# By default, Galaxy will select a handler at random from the list of
+# job_handlers set above. You can override as in the following examples:
+#
+#upload1 = upload_handler
+#cufflinks[source@trackster] = realtime_handler
+
+[galaxy:tool_runners]
+
+# If not listed here, a tool will run with the runner defined with
+# default_cluster_job_runner. These overrides for local:/// are done because
+# these tools can fetch data from remote sites, which may not be suitable to
+# run on a cluster (if it does not have access to the Internet, for example).
+
+biomart = local:///
+encode_db1 = local:///
+hbvar = local:///
+microbial_import1 = local:///
+ucsc_table_direct1 = local:///
+ucsc_table_direct_archaea1 = local:///
+ucsc_table_direct_test1 = local:///
+upload1 = local:///
+
+# ---- Galaxy Message Queue -------------------------------------------------
+
+# Galaxy uses AMQ protocol to receive messages from external sources like
+# bar code scanners. Galaxy has been tested against RabbitMQ AMQP implementation.
+# For Galaxy to receive messages from a message queue the RabbitMQ server has
+# to be set up with a user account and other parameters listed below. The 'host'
+# and 'port' fields should point to where the RabbitMQ server is running.
+
+[galaxy_amqp]
+
+#host = 127.0.0.1
+#port = 5672
+#userid = galaxy
+#password = galaxy
+#virtual_host = galaxy_messaging_engine
+#queue = galaxy_queue
+#exchange = galaxy_exchange
+#routing_key = bar_code_scanner
+#rabbitmqctl_path = /path/to/rabbitmqctl
+
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/5c09dfcc62c2/
changeset: 5c09dfcc62c2
user: greg
date: 2012-09-06 22:27:35
summary: Fix bug introduced 2 commits ago.
affected #: 1 file
diff -r f0fe84f46793708dc5eac02442e2e9a72e1d9c70 -r 5c09dfcc62c27857e0653c3a95df24c6fb316baf lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -1427,10 +1427,7 @@
tool_state = self.__new_state( trans )
is_malicious = changeset_is_malicious( trans, repository_id, repository.tip )
invalid_file_tups = []
- if tool is None:
- if not valid:
- invalid_file_tups = [ ( name, error_message ) ]
- else:
+ if tool:
invalid_file_tups = check_tool_input_params( trans.app,
repository.repo_path,
tool_config,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/f0fe84f46793/
changeset: f0fe84f46793
user: greg
date: 2012-09-06 22:22:57
summary: Fix a mis-named variable.
affected #: 1 file
diff -r 42e544d00e75be44887b17f6985bcb251573e1de -r f0fe84f46793708dc5eac02442e2e9a72e1d9c70 lib/galaxy/webapps/community/controllers/common.py
--- a/lib/galaxy/webapps/community/controllers/common.py
+++ b/lib/galaxy/webapps/community/controllers/common.py
@@ -266,7 +266,7 @@
if name.endswith( '.sample' ):
relative_path = os.path.join( root, name )
copy_sample_file( trans.app, relative_path, dest_path=dest_path )
- sample_files_copied.append( name )
+ sample_files.append( name )
return sample_files
def copy_file_from_disk( filename, repo_dir, dir ):
file_path = None
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/42e544d00e75/
changeset: 42e544d00e75
user: greg
date: 2012-09-06 22:17:15
summary: Fixes for displaying error messages when displaying invalid tools in tool shed repository changeset revisions.
affected #: 2 files
diff -r b43eadc8cdb486b7bfa971ff89552ba04126ef0d -r 42e544d00e75be44887b17f6985bcb251573e1de lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -248,6 +248,34 @@
except:
pass
return converter_path, display_path
+def can_generate_tool_dependency_metadata( root, metadata_dict ):
+ """
+ Make sure the combination of name, version and type (the type will be the value of elem.tag) of each root element tag in the tool_dependencies.xml
+ file is defined in the <requirement> tag for at least one tool in the repository.
+ """
+ can_generate_dependency_metadata = False
+ for elem in root:
+ can_generate_dependency_metadata = False
+ tool_dependency_name = elem.get( 'name', None )
+ tool_dependency_version = elem.get( 'version', None )
+ tool_dependency_type = elem.tag
+ if tool_dependency_name and tool_dependency_version and tool_dependency_type:
+ for tool_dict in metadata_dict[ 'tools' ]:
+ requirements = tool_dict.get( 'requirements', [] )
+ for requirement_dict in requirements:
+ req_name = requirement_dict.get( 'name', None )
+ req_version = requirement_dict.get( 'version', None )
+ req_type = requirement_dict.get( 'type', None )
+ if req_name==tool_dependency_name and req_version==tool_dependency_version and req_type==tool_dependency_type:
+ can_generate_dependency_metadata = True
+ break
+ if requirements and not can_generate_dependency_metadata:
+ # We've discovered at least 1 combination of name, version and type that is not defined in the <requirement>
+ # tag for any tool in the repository.
+ break
+ if not can_generate_dependency_metadata:
+ break
+ return can_generate_dependency_metadata
def check_tool_input_params( app, repo_dir, tool_config_name, tool, sample_files, webapp='galaxy' ):
"""
Check all of the tool's input parameters, looking for any that are dynamically generated using external data files to make
@@ -512,34 +540,6 @@
if tool_dependencies_dict:
metadata_dict[ 'tool_dependencies' ] = tool_dependencies_dict
return metadata_dict
-def can_generate_tool_dependency_metadata( root, metadata_dict ):
- """
- Make sure the combination of name, version and type (the type will be the value of elem.tag) of each root element tag in the tool_dependencies.xml
- file is defined in the <requirement> tag for at least one tool in the repository.
- """
- can_generate_dependency_metadata = False
- for elem in root:
- can_generate_dependency_metadata = False
- tool_dependency_name = elem.get( 'name', None )
- tool_dependency_version = elem.get( 'version', None )
- tool_dependency_type = elem.tag
- if tool_dependency_name and tool_dependency_version and tool_dependency_type:
- for tool_dict in metadata_dict[ 'tools' ]:
- requirements = tool_dict.get( 'requirements', [] )
- for requirement_dict in requirements:
- req_name = requirement_dict.get( 'name', None )
- req_version = requirement_dict.get( 'version', None )
- req_type = requirement_dict.get( 'type', None )
- if req_name==tool_dependency_name and req_version==tool_dependency_version and req_type==tool_dependency_type:
- can_generate_dependency_metadata = True
- break
- if requirements and not can_generate_dependency_metadata:
- # We've discovered at least 1 combination of name, version and type that is not defined in the <requirement>
- # tag for any tool in the repository.
- break
- if not can_generate_dependency_metadata:
- break
- return can_generate_dependency_metadata
def generate_metadata_for_changeset_revision( app, repository_clone_url, relative_install_dir=None, repository_files_dir=None,
resetting_all_metadata_on_repository=False, webapp='galaxy' ):
"""
@@ -597,50 +597,53 @@
# Find all tool configs.
if name not in NOT_TOOL_CONFIGS and name.endswith( '.xml' ):
full_path = os.path.abspath( os.path.join( root, name ) )
- if not ( check_binary( full_path ) or check_image( full_path ) or check_gzip( full_path )[ 0 ]
- or check_bz2( full_path )[ 0 ] or check_zip( full_path ) ):
- try:
- # Make sure we're looking at a tool config and not a display application config or something else.
- element_tree = util.parse_xml( full_path )
- element_tree_root = element_tree.getroot()
- is_tool = element_tree_root.tag == 'tool'
- except Exception, e:
- print "Error parsing %s", full_path, ", exception: ", str( e )
- is_tool = False
- if is_tool:
- tool, valid, error_message = load_tool_from_config( app, full_path )
- if tool is None:
- if not valid:
- invalid_file_tups.append( ( name, error_message ) )
- else:
- invalid_files_and_errors_tups = check_tool_input_params( app, files_dir, name, tool, sample_file_metadata_paths, webapp=webapp )
- can_set_metadata = True
- for tup in invalid_files_and_errors_tups:
- if name in tup:
- can_set_metadata = False
- invalid_tool_configs.append( name )
- break
- if can_set_metadata:
- if resetting_all_metadata_on_repository:
- full_path_to_tool_config = os.path.join( root, name )
- stripped_path_to_tool_config = full_path_to_tool_config.replace( work_dir, '' )
- if stripped_path_to_tool_config.startswith( '/' ):
- stripped_path_to_tool_config = stripped_path_to_tool_config[ 1: ]
- relative_path_to_tool_config = os.path.join( relative_install_dir, stripped_path_to_tool_config )
+ if os.path.getsize( full_path ) > 0:
+ if not ( check_binary( full_path ) or check_image( full_path ) or check_gzip( full_path )[ 0 ]
+ or check_bz2( full_path )[ 0 ] or check_zip( full_path ) ):
+ try:
+ # Make sure we're looking at a tool config and not a display application config or something else.
+ element_tree = util.parse_xml( full_path )
+ element_tree_root = element_tree.getroot()
+ is_tool = element_tree_root.tag == 'tool'
+ except Exception, e:
+ print "Error parsing %s", full_path, ", exception: ", str( e )
+ is_tool = False
+ if is_tool:
+ tool, valid, error_message = load_tool_from_config( app, full_path )
+ if tool is None:
+ if not valid:
+ invalid_file_tups.append( ( name, error_message ) )
+ else:
+ invalid_files_and_errors_tups = check_tool_input_params( app, files_dir, name, tool, sample_file_metadata_paths, webapp=webapp )
+ can_set_metadata = True
+ for tup in invalid_files_and_errors_tups:
+ if name in tup:
+ can_set_metadata = False
+ invalid_tool_configs.append( name )
+ break
+ if can_set_metadata:
+ if resetting_all_metadata_on_repository:
+ full_path_to_tool_config = os.path.join( root, name )
+ stripped_path_to_tool_config = full_path_to_tool_config.replace( work_dir, '' )
+ if stripped_path_to_tool_config.startswith( '/' ):
+ stripped_path_to_tool_config = stripped_path_to_tool_config[ 1: ]
+ relative_path_to_tool_config = os.path.join( relative_install_dir, stripped_path_to_tool_config )
+ else:
+ relative_path_to_tool_config = os.path.join( root, name )
+ metadata_dict = generate_tool_metadata( relative_path_to_tool_config, tool, repository_clone_url, metadata_dict )
else:
- relative_path_to_tool_config = os.path.join( root, name )
- metadata_dict = generate_tool_metadata( relative_path_to_tool_config, tool, repository_clone_url, metadata_dict )
- else:
- invalid_file_tups.extend( invalid_files_and_errors_tups )
+ for tup in invalid_files_and_errors_tups:
+ invalid_file_tups.append( tup )
# Find all exported workflows
elif name.endswith( '.ga' ):
relative_path = os.path.join( root, name )
- fp = open( relative_path, 'rb' )
- workflow_text = fp.read()
- fp.close()
- exported_workflow_dict = from_json_string( workflow_text )
- if 'a_galaxy_workflow' in exported_workflow_dict and exported_workflow_dict[ 'a_galaxy_workflow' ] == 'true':
- metadata_dict = generate_workflow_metadata( relative_path, exported_workflow_dict, metadata_dict )
+ if os.path.getsize( os.path.abspath( relative_path ) ) > 0:
+ fp = open( relative_path, 'rb' )
+ workflow_text = fp.read()
+ fp.close()
+ exported_workflow_dict = from_json_string( workflow_text )
+ if 'a_galaxy_workflow' in exported_workflow_dict and exported_workflow_dict[ 'a_galaxy_workflow' ] == 'true':
+ metadata_dict = generate_workflow_metadata( relative_path, exported_workflow_dict, metadata_dict )
if 'tools' in metadata_dict:
# This step must be done after metadata for tools has been defined.
tool_dependencies_config = get_config_from_disk( 'tool_dependencies.xml', files_dir )
diff -r b43eadc8cdb486b7bfa971ff89552ba04126ef0d -r 42e544d00e75be44887b17f6985bcb251573e1de lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -1426,9 +1426,22 @@
repository, tool, error_message = load_tool_from_changeset_revision( trans, repository_id, changeset_revision, tool_config )
tool_state = self.__new_state( trans )
is_malicious = changeset_is_malicious( trans, repository_id, repository.tip )
+ invalid_file_tups = []
+ if tool is None:
+ if not valid:
+ invalid_file_tups = [ ( name, error_message ) ]
+ else:
+ invalid_file_tups = check_tool_input_params( trans.app,
+ repository.repo_path,
+ tool_config,
+ tool,
+ [],
+ webapp=webapp )
+ if invalid_file_tups:
+ message = generate_message_for_invalid_tools( invalid_file_tups, repository, {}, as_html=True, displaying_invalid_tool=True )
+ elif error_message:
+ message = error_message
try:
- if error_message:
- message = error_message
return trans.fill_template( "/webapps/community/repository/tool_form.mako",
repository=repository,
changeset_revision=changeset_revision,
@@ -1439,7 +1452,7 @@
message=message,
status='error' )
except Exception, e:
- message = "This tool is invalid because: %s." % str( e )
+ message = "Exception thrown attempting to display tool: %s." % str( e )
if webapp == 'galaxy':
return trans.response.send_redirect( web.url_for( controller='repository',
action='preview_tools_in_changeset',
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.