galaxy-commits

commit/galaxy-central: dannon: Introduction of the dataset "Paused" state and basic resume-all functionality for a history. This will need to be reimplemented in backbone, when that's committed and enabled.
by Bitbucket 14 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/cc7df5ca1d47/
changeset: cc7df5ca1d47
user: dannon
date: 2012-11-14 22:18:26
summary: Introduction of the dataset "Paused" state and basic resume-all functionality for a history. This will need to be reimplemented in backbone, when that's committed and enabled.
affected #: 7 files
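Before the diff, a hedged, self-contained toy rendering of the resume flow this changeset introduces. The class, attribute, and method names mirror the diff below; everything else (the simplified state handling, the sample history) is illustrative only.

    class Job( object ):
        class states:
            QUEUED = 'queued'
            PAUSED = 'paused'
        def __init__( self, state ):
            self.state = state
        def set_state( self, state ):
            self.state = state

    class Dataset( object ):
        def __init__( self, creating_job ):
            self.creating_job = creating_job

    class History( object ):
        def __init__( self, datasets ):
            self.datasets = datasets
        def resume_paused_jobs( self ):
            # Mirrors the new model method: re-queue every paused creating job.
            for dataset in self.datasets:
                job = dataset.creating_job
                if job.state == Job.states.PAUSED:
                    job.set_state( Job.states.QUEUED )

    history = History( [ Dataset( Job( Job.states.PAUSED ) ), Dataset( Job( Job.states.QUEUED ) ) ] )
    history.resume_paused_jobs()
    assert all( d.creating_job.state == Job.states.QUEUED for d in history.datasets )

In the real code, the new history controller action (reachable at /history/resume_paused_jobs?current=True) runs this over the current history, and that is what the new "Resume Paused Jobs" entry in the history menu calls.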
diff -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -698,6 +698,11 @@
def unhide_datasets( self ):
for dataset in self.datasets:
dataset.mark_unhidden()
+ def resume_paused_jobs( self ):
+ for dataset in self.datasets:
+ job = dataset.creating_job
+ if job.state == Job.states.PAUSED:
+ job.set_state(Job.states.QUEUED)
def get_disk_size( self, nice_size=False ):
# unique datasets only
db_session = object_session( self )
diff -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 lib/galaxy/webapps/galaxy/controllers/history.py
--- a/lib/galaxy/webapps/galaxy/controllers/history.py
+++ b/lib/galaxy/webapps/galaxy/controllers/history.py
@@ -564,6 +564,20 @@
return trans.show_ok_message( "Your datasets have been unhidden.", refresh_frames=refresh_frames )
@web.expose
+ def resume_paused_jobs( self, trans, current=False, ids=None ):
+ """Resume paused jobs the active history -- this does not require a logged in user."""
+ if not ids and util.string_as_bool( current ):
+ histories = [ trans.get_history() ]
+ refresh_frames = ['history']
+ else:
+ raise NotImplementedError( "You can currently only resume all the datasets of the current history." )
+ for history in histories:
+ history.resume_paused_jobs()
+ trans.sa_session.add( history )
+ trans.sa_session.flush()
+ return trans.show_ok_message( "Your jobs have been resumed.", refresh_frames=refresh_frames )
+
+ @web.expose
@web.require_login( "rate items" )
@web.json
def rate_async( self, trans, id, rating ):
diff -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 static/june_2007_style/base.less
--- a/static/june_2007_style/base.less
+++ b/static/june_2007_style/base.less
@@ -1637,6 +1637,11 @@
opacity: .60;
}
+div.historyItem-paused {
+ // border-color: @history_paused_border;
+ background: @history_paused_bg;
+}
+
// Special case for showing the spinner but not changing the background
div.historyItemTitleBar.spinner .state-icon {
background: url(data_running.gif) 0 1px no-repeat !important;
diff -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 static/june_2007_style/blue/base.css
--- a/static/june_2007_style/blue/base.css
+++ b/static/june_2007_style/blue/base.css
@@ -851,6 +851,7 @@
div.historyItem-upload{background:#ccccff;}div.historyItem-upload .state-icon{background-image:url(data_upload.gif);}
div.historyItem-queued{background:#eeeeee;}
div.historyItem-noPermission{filter:alpha(opacity=60);-moz-opacity:.60;opacity:.60;}
+div.historyItem-paused{background:#d9edf7;}
div.historyItemTitleBar.spinner .state-icon{background:url(data_running.gif) 0 1px no-repeat !important;}
div.historyItemButtons{float:right;}
div.historyItemBody div{padding-top:2px;}
diff -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 static/june_2007_style/blue_colors.ini
--- a/static/june_2007_style/blue_colors.ini
+++ b/static/june_2007_style/blue_colors.ini
@@ -44,6 +44,8 @@
history_deleted_bg=#3399FF
history_error_border=#AA6666
history_error_bg=#FFCCCC
+history_paused_border=#6666AA
+history_paused_bg=#d9edf7
history_running_border=#AAAA66
history_running_bg=#FFFFCC
history_ok_border=#66AA66
diff -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 templates/root/history_common.mako
--- a/templates/root/history_common.mako
+++ b/templates/root/history_common.mako
@@ -127,7 +127,12 @@
%endif
%endif
</div>
- <span class="state-icon"></span>
+ ## Hack, do it in css
+ %if data_state == "paused":
+ <span class="ficon pause"></span>
+ %else:
+ <span class="state-icon"></span>
+ %endif
<span class="historyItemTitle">${hid}: ${data.display_name()}</span></div>
@@ -146,6 +151,15 @@
<a href="${h.url_for( controller='tool_runner', action='rerun', id=data.id )}" target="galaxy_main" title='${_("Run this job again")}' class="icon-button arrow-circle tooltip"></a>
%endif
</div>
+ %elif data_state == "paused":
+ <div>
+ ${_('Job is currently paused. Check your quota and parent jobs for failure, use the history menu to resume.')}</div>
+ <div>
+ <a href="${h.url_for( controller='dataset', action='show_params', dataset_id=dataset_id )}" target="galaxy_main" title='${_("View Details")}' class="icon-button information tooltip"></a>
+ %if for_editing:
+ <a href="${h.url_for( controller='tool_runner', action='rerun', id=data.id )}" target="galaxy_main" title='${_("Run this job again")}' class="icon-button arrow-circle tooltip"></a>
+ %endif
+ </div>
%elif data_state == "running":
<div>${_('Job is currently running')}</div><div>
diff -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 templates/root/index.mako
--- a/templates/root/index.mako
+++ b/templates/root/index.mako
@@ -37,6 +37,9 @@
"${_("Dataset Security")}": function() {
galaxy_main.location = "${h.url_for( controller='root', action='history_set_default_permissions' )}";
},
+ "${_("Resume Paused Jobs")}": function() {
+ galaxy_history.location = "${h.url_for( controller='history', action='resume_paused_jobs', current=True)}";
+ },
"${_("Show Deleted Datasets")}": function() {
galaxy_history.location = "${h.url_for( controller='root', action='history', show_deleted=True)}";
},
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

commit/galaxy-central: natefoo: Add more featureful job limiting and optimize the query for checking whether jobs are ready to run.
by Bitbucket 13 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/73e05bc14cf1/
changeset: 73e05bc14cf1
user: natefoo
date: 2012-11-13 21:00:55
summary: Add more featureful job limiting and optimize the query for checking whether
jobs are ready to run. Input dependency checks are now performed via SQL
rather than walking up the object chain. Limits on the number of jobs a user
can run can now be set across the entire instance and per job runner URL.
Quota checks at job runtime are performed only once, after the limit checks. If a
user is over quota, jobs are moved to a "paused" state. Once the user is under
quota, jobs can be unpaused and continue to run (the UI for this is added in
another commit, shortly). This obviates the need for quota checks on every
job, on every queue cycle.
When a job's input dataset errors, that job (and all jobs dependent upon it)
is no longer put into the error state. It will then be possible to remap such a
job to new inputs so that execution can continue from the point of failure; the
commit for that is also coming shortly.
affected #: 7 files
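Before the diff, a sketch of how the new per-runner limits are read. The [galaxy:job_limits] section format and the colon-reassembly logic mirror universe_wsgi.ini.sample and lib/galaxy/config.py in the changeset below; the runner URLs and limit values are hypothetical examples, and the instance-wide caps (registered_user_job_limit, anonymous_user_job_limit) are ordinary options set elsewhere in the config. Python 2, matching the codebase of the time.

    import ConfigParser
    from StringIO import StringIO

    # Hypothetical section contents; runner URL matching uses SQL LIKE, so % and _ are wildcards.
    sample = """
    [galaxy:job_limits]
    pbs:/// = pbs://pbs.example.org/ 4
    drmaa:/// = drmaa://sge.example.org/% 4
    """

    parser = ConfigParser.RawConfigParser()
    parser.readfp( StringIO( sample ) )
    job_limits = {}
    for k, v in parser.items( 'galaxy:job_limits' ):
        # ConfigParser treats the first colon as the key/value delimiter, so 'pbs:///'
        # arrives as key 'pbs' with the rest folded into the value; reassemble the URL.
        more_k, v = v.split( '=', 1 )
        k = '%s:%s' % ( k, more_k.strip() )
        v = v.strip().rsplit( None, 1 )
        v[1] = int( v[1] )
        job_limits[k] = v

    assert job_limits[ 'pbs:///' ] == [ 'pbs://pbs.example.org/', 4 ]
    assert job_limits[ 'drmaa:///' ] == [ 'drmaa://sge.example.org/%', 4 ]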
diff -r ed0738c6001654d5456dd36579b278cd10fcd00c -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb lib/galaxy/config.py
--- a/lib/galaxy/config.py
+++ b/lib/galaxy/config.py
@@ -124,6 +124,9 @@
self.enable_beta_job_managers = string_as_bool( kwargs.get( 'enable_beta_job_managers', 'False' ) )
# Per-user Job concurrency limitations
self.user_job_limit = int( kwargs.get( 'user_job_limit', 0 ) )
+ # user_job_limit for backwards-compatibility
+ self.registered_user_job_limit = int( kwargs.get( 'registered_user_job_limit', self.user_job_limit ) )
+ self.anonymous_user_job_limit = int( kwargs.get( 'anonymous_user_job_limit', self.user_job_limit ) )
self.default_cluster_job_runner = kwargs.get( 'default_cluster_job_runner', 'local:///' )
self.pbs_application_server = kwargs.get('pbs_application_server', "" )
self.pbs_dataset_server = kwargs.get('pbs_dataset_server', "" )
@@ -216,6 +219,19 @@
self.job_manager = kwargs.get('job_manager', self.server_name).strip()
self.job_handlers = [ x.strip() for x in kwargs.get('job_handlers', self.server_name).split(',') ]
self.default_job_handlers = [ x.strip() for x in kwargs.get('default_job_handlers', ','.join( self.job_handlers ) ).split(',') ]
+ # parse the [galaxy:job_limits] section
+ self.job_limits = {}
+ try:
+ job_limits = global_conf_parser.items( 'galaxy:job_limits' )
+ for k, v in job_limits:
+ # ConfigParser considers the first colon to be the delimiter, undo this behavior
+ more_k, v = v.split('=', 1)
+ k = '%s:%s' % (k, more_k.strip())
+ v = v.strip().rsplit(None, 1)
+ v[1] = int(v[1])
+ self.job_limits[k] = v
+ except ConfigParser.NoSectionError:
+ pass
# Use database for IPC unless this is a standalone server (or multiple servers doing self dispatching in memory)
if self.track_jobs_in_database is None or self.track_jobs_in_database == "None":
self.track_jobs_in_database = True
diff -r ed0738c6001654d5456dd36579b278cd10fcd00c -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -81,7 +81,7 @@
self.tool_provided_job_metadata = None
# Wrapper holding the info required to restore and clean up from files used for setting metadata externally
self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job )
- self.job_runner_mapper = JobRunnerMapper( self )
+ self.job_runner_mapper = JobRunnerMapper( self, job.job_runner_name )
self.params = None
if job.params:
self.params = from_json_string( job.params )
diff -r ed0738c6001654d5456dd36579b278cd10fcd00c -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb lib/galaxy/jobs/handler.py
--- a/lib/galaxy/jobs/handler.py
+++ b/lib/galaxy/jobs/handler.py
@@ -8,7 +8,7 @@
import threading
from Queue import Queue, Empty
-from sqlalchemy.sql.expression import and_, or_
+from sqlalchemy.sql.expression import and_, or_, select, func
from galaxy import util, model
from galaxy.jobs import Sleeper, JobWrapper, TaskWrapper
@@ -16,7 +16,7 @@
log = logging.getLogger( __name__ )
# States for running a job. These are NOT the same as data states
-JOB_WAIT, JOB_ERROR, JOB_INPUT_ERROR, JOB_INPUT_DELETED, JOB_READY, JOB_DELETED, JOB_ADMIN_DELETED = 'wait', 'error', 'input_error', 'input_deleted', 'ready', 'deleted', 'admin_deleted'
+JOB_WAIT, JOB_ERROR, JOB_INPUT_ERROR, JOB_INPUT_DELETED, JOB_READY, JOB_DELETED, JOB_ADMIN_DELETED, JOB_USER_OVER_QUOTA = 'wait', 'error', 'input_error', 'input_deleted', 'ready', 'deleted', 'admin_deleted', 'user_over_quota'
class JobHandler( object ):
"""
@@ -126,9 +126,32 @@
# Clear the session so we get fresh states for job and all datasets
self.sa_session.expunge_all()
# Fetch all new jobs
- jobs_to_check = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
- .filter( ( model.Job.state == model.Job.states.NEW ) \
- & ( model.Job.handler == self.app.config.server_name ) ).all()
+ hda_not_ready = self.sa_session.query(model.Job.id).enable_eagerloads(False) \
+ .join(model.JobToInputDatasetAssociation) \
+ .join(model.HistoryDatasetAssociation) \
+ .join(model.Dataset) \
+ .filter(and_((model.Job.state == model.Job.states.NEW),
+ or_((model.HistoryDatasetAssociation._state != None),
+ (model.HistoryDatasetAssociation.deleted == True ),
+ (model.Dataset.state != model.Dataset.states.OK ),
+ (model.Dataset.deleted == True)))).subquery()
+ ldda_not_ready = self.sa_session.query(model.Job.id).enable_eagerloads(False) \
+ .join(model.JobToInputLibraryDatasetAssociation) \
+ .join(model.LibraryDatasetDatasetAssociation) \
+ .join(model.Dataset) \
+ .filter(and_((model.Job.state == model.Job.states.NEW),
+ or_((model.LibraryDatasetDatasetAssociation._state != None),
+ (model.LibraryDatasetDatasetAssociation.deleted == True),
+ (model.Dataset.state != model.Dataset.states.OK),
+ (model.Dataset.deleted == True)))).subquery()
+ jobs_to_check = self.sa_session.query(model.Job).enable_eagerloads(False) \
+ .filter(and_((model.Job.state == model.Job.states.NEW),
+ (model.Job.handler == self.app.config.server_name),
+ ~model.Job.table.c.id.in_(hda_not_ready),
+ ~model.Job.table.c.id.in_(ldda_not_ready))) \
+ .order_by(model.Job.id).all()
+ # Ensure that we get new job counts on each iteration
+ self.__clear_user_job_count()
else:
# Get job objects and append to watch queue for any which were
# previously waiting
@@ -150,7 +173,8 @@
new_waiting_jobs = []
for job in jobs_to_check:
try:
- # Check the job's dependencies, requeue if they're not done
+ # Check the job's dependencies, requeue if they're not done.
+ # Some of these states will only happen when using the in-memory job queue
job_state = self.__check_if_ready_to_run( job )
if job_state == JOB_WAIT:
if not self.track_jobs_in_database:
@@ -166,6 +190,10 @@
log.info( "(%d) Job deleted by user while still queued" % job.id )
elif job_state == JOB_ADMIN_DELETED:
log.info( "(%d) Job deleted by admin while still queued" % job.id )
+ elif job_state == JOB_USER_OVER_QUOTA:
+ log.info( "(%d) User (%s) is over quota: job paused" % ( job.id, job.user_id ) )
+ job.state = model.Job.states.PAUSED
+ self.sa_session.add( job )
else:
log.error( "(%d) Job in unknown state '%s'" % ( job.id, job_state ) )
if not self.track_jobs_in_database:
@@ -174,6 +202,8 @@
log.exception( "failure running job %d" % job.id )
# Update the waiting list
self.waiting_jobs = new_waiting_jobs
+ # Flush, if we updated the state
+ self.sa_session.flush()
# Done with the session
self.sa_session.remove()
@@ -187,57 +217,88 @@
job can be dispatched. Otherwise, return JOB_WAIT indicating that input
datasets are still being prepared.
"""
- if job.state == model.Job.states.DELETED:
- return JOB_DELETED
- elif job.state == model.Job.states.ERROR:
- return JOB_ADMIN_DELETED
- elif self.app.config.enable_quotas:
+ # If tracking in the database, job.state is guaranteed to be NEW and the inputs are guaranteed to be OK
+ if not self.track_jobs_in_database:
+ if job.state == model.Job.states.DELETED:
+ return JOB_DELETED
+ elif job.state == model.Job.states.ERROR:
+ return JOB_ADMIN_DELETED
+ for dataset_assoc in job.input_datasets + job.input_library_datasets:
+ idata = dataset_assoc.dataset
+ if not idata:
+ continue
+ # don't run jobs for which the input dataset was deleted
+ if idata.deleted:
+ JobWrapper( job, self ).fail( "input data %s (file: %s) was deleted before the job started" % ( idata.hid, idata.file_name ) )
+ return JOB_INPUT_DELETED
+ # an error in the input data causes us to bail immediately
+ elif idata.state == idata.states.ERROR:
+ JobWrapper( job, self ).fail( "input data %s is in error state" % ( idata.hid ) )
+ return JOB_INPUT_ERROR
+ elif idata.state == idata.states.FAILED_METADATA:
+ JobWrapper( job, self ).fail( "input data %s failed to properly set metadata" % ( idata.hid ) )
+ return JOB_INPUT_ERROR
+ elif idata.state != idata.states.OK and not ( idata.state == idata.states.SETTING_METADATA and job.tool_id is not None and job.tool_id == self.app.datatypes_registry.set_external_metadata_tool.id ):
+ # need to requeue
+ return JOB_WAIT
+ state = self.__check_user_jobs( job )
+ if state == JOB_READY and self.app.config.enable_quotas:
quota = self.app.quota_agent.get_quota( job.user )
if quota is not None:
try:
usage = self.app.quota_agent.get_usage( user=job.user, history=job.history )
if usage > quota:
- return JOB_WAIT
+ return JOB_USER_OVER_QUOTA
except AssertionError, e:
pass # No history, should not happen with an anon user
- for dataset_assoc in job.input_datasets + job.input_library_datasets:
- idata = dataset_assoc.dataset
- if not idata:
- continue
- # don't run jobs for which the input dataset was deleted
- if idata.deleted:
- JobWrapper( job, self ).fail( "input data %s (file: %s) was deleted before the job started" % ( idata.hid, idata.file_name ) )
- return JOB_INPUT_DELETED
- # an error in the input data causes us to bail immediately
- elif idata.state == idata.states.ERROR:
- JobWrapper( job, self ).fail( "input data %s is in error state" % ( idata.hid ) )
- return JOB_INPUT_ERROR
- elif idata.state == idata.states.FAILED_METADATA:
- JobWrapper( job, self ).fail( "input data %s failed to properly set metadata" % ( idata.hid ) )
- return JOB_INPUT_ERROR
- elif idata.state != idata.states.OK and not ( idata.state == idata.states.SETTING_METADATA and job.tool_id is not None and job.tool_id == self.app.datatypes_registry.set_external_metadata_tool.id ):
- # need to requeue
- return JOB_WAIT
- return self.__check_user_jobs( job )
+ return state
+
+ def __clear_user_job_count( self ):
+ self.user_job_count = {}
+ self.user_job_count_per_runner = {}
def __check_user_jobs( self, job ):
- if not self.app.config.user_job_limit:
- return JOB_READY
if job.user:
- count = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
- .filter( and_( model.Job.user_id == job.user.id,
- or_( model.Job.state == model.Job.states.RUNNING,
- model.Job.state == model.Job.states.QUEUED ) ) ).count()
+ # Check the hard limit first
+ if self.app.config.registered_user_job_limit:
+ # Cache the job count if necessary
+ if not self.user_job_count:
+ query = self.sa_session.execute(select([model.Job.table.c.user_id, func.count(model.Job.table.c.user_id)]) \
+ .where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING)), (model.Job.table.c.user_id is not None))) \
+ .group_by(model.Job.table.c.user_id))
+ for row in query:
+ self.user_job_count[row[0]] = row[1]
+ if self.user_job_count.get(job.user_id, 0) >= self.app.config.registered_user_job_limit:
+ return JOB_WAIT
+ # If we pass the hard limit, also check the per-runner count
+ if job.job_runner_name in self.app.config.job_limits:
+ # Cache the job count if necessary
+ if job.job_runner_name not in self.user_job_count_per_runner:
+ self.user_job_count_per_runner[job.job_runner_name] = {}
+ query_url, limit = self.app.config.job_limits[job.job_runner_name]
+ base_query = select([model.Job.table.c.user_id, model.Job.table.c.job_runner_name, func.count(model.Job.table.c.user_id).label('job_count')]) \
+ .where(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING))) \
+ .group_by(model.Job.table.c.user_id, model.Job.table.c.job_runner_name)
+ if '%' in query_url or '_' in query_url:
+ subq = base_query.having(model.Job.table.c.job_runner_name.like(query_url)).alias('subq')
+ query = self.sa_session.execute(select([subq.c.user_id, func.sum(subq.c.job_count).label('job_count')]).group_by(subq.c.user_id))
+ else:
+ query = self.sa_session.execute(base_query.having(model.Job.table.c.job_runner_name == query_url))
+ for row in query:
+ self.user_job_count_per_runner[job.job_runner_name][row['user_id']] = row['job_count']
+ if self.user_job_count_per_runner[job.job_runner_name].get(job.user_id, 0) >= self.app.config.job_limits[job.job_runner_name][1]:
+ return JOB_WAIT
elif job.galaxy_session:
- count = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
- .filter( and_( model.Job.session_id == job.galaxy_session.id,
- or_( model.Job.state == model.Job.states.RUNNING,
- model.Job.state == model.Job.states.QUEUED ) ) ).count()
+ # Anonymous users only get the hard limit
+ if self.app.config.anonymous_user_job_limit:
+ count = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
+ .filter( and_( model.Job.session_id == job.galaxy_session.id,
+ or_( model.Job.state == model.Job.states.RUNNING,
+ model.Job.state == model.Job.states.QUEUED ) ) ).count()
+ if count >= self.app.config.anonymous_user_job_limit:
+ return JOB_WAIT
else:
log.warning( 'Job %s is not associated with a user or session so job concurrency limit cannot be checked.' % job.id )
- return JOB_READY
- if count >= self.app.config.user_job_limit:
- return JOB_WAIT
return JOB_READY
def put( self, job_id, tool_id ):
diff -r ed0738c6001654d5456dd36579b278cd10fcd00c -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb lib/galaxy/jobs/manager.py
--- a/lib/galaxy/jobs/manager.py
+++ b/lib/galaxy/jobs/manager.py
@@ -146,6 +146,7 @@
for job in jobs_to_check:
job.handler = self.__get_handler( job )
+ job.job_runner_name = self.__get_runner_url( job )
log.debug( "(%s) Job assigned to handler '%s'" % ( job.id, job.handler ) )
self.sa_session.add( job )
@@ -168,6 +169,14 @@
log.exception( "(%s) Caught exception attempting to get tool-specific job handler for tool '%s', selecting at random from available handlers instead:" % ( job.id, job.tool_id ) )
return random.choice( self.app.config.job_handlers )
+ def __get_runner_url( self, job ):
+ """This fetches the raw runner URL, and does not perform any computation e.g. for the dynamic runner"""
+ try:
+ return self.app.toolbox.tools_by_id.get( job.tool_id, None ).get_job_runner_url( job.params )
+ except Exception, e:
+ log.warning( 'Unable to determine job runner URL for job %s: %s' % (job.id, str(e)) )
+ return None
+
def put( self, job_id, tool ):
"""Add a job to the queue (by job identifier)"""
if not self.app.config.track_jobs_in_database:
diff -r ed0738c6001654d5456dd36579b278cd10fcd00c -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb lib/galaxy/jobs/mapper.py
--- a/lib/galaxy/jobs/mapper.py
+++ b/lib/galaxy/jobs/mapper.py
@@ -14,8 +14,9 @@
(in the form of job_wrappers) to job runner url strings.
"""
- def __init__( self, job_wrapper ):
+ def __init__( self, job_wrapper, job_runner_name=None ):
self.job_wrapper = job_wrapper
+ self.job_runner_name = job_runner_name
self.rule_modules = self.__get_rule_modules( )
def __get_rule_modules( self ):
@@ -114,7 +115,11 @@
raise Exception( "Unhandled dynamic job runner type specified - %s" % expand_type )
def __cache_job_runner_url( self, params ):
- raw_job_runner_url = self.job_wrapper.tool.get_job_runner_url( params )
+ # If there's already a runner set in the Job object, don't overwrite from the tool
+ if self.job_runner_name is not None:
+ raw_job_runner_url = self.job_runner_name
+ else:
+ raw_job_runner_url = self.job_wrapper.tool.get_job_runner_url( params )
if raw_job_runner_url.startswith( DYNAMIC_RUNNER_PREFIX ):
job_runner_url = self.__expand_dynamic_job_runner_url( raw_job_runner_url[ len( DYNAMIC_RUNNER_PREFIX ) : ] )
else:
diff -r ed0738c6001654d5456dd36579b278cd10fcd00c -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -112,6 +112,7 @@
RUNNING = 'running',
OK = 'ok',
ERROR = 'error',
+ PAUSED = 'paused',
DELETED = 'deleted',
DELETED_NEW = 'deleted_new' )
# Please include an accessor (get/set pair) for any new columns/members.
diff -r ed0738c6001654d5456dd36579b278cd10fcd00c -r 73e05bc14cf1478b5ff9d8e8fffdf28d701dd2cb universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample
+++ b/universe_wsgi.ini.sample
@@ -659,12 +659,6 @@
# bytes). 0 for no limit.
#output_size_limit = 0
-# Jobs can be held back from submission to a runner if a user already has more
-# jobs queued or running than the number specified below. This prevents a
-# single user from stuffing the queue and preventing other users from being
-# able to run jobs.
-#user_job_limit = None
-
# Clustering Galaxy is not a straightforward process and requires some
# pre-configuration. See the the wiki before attempting to set any of these
# options:
@@ -717,6 +711,36 @@
# Details" option in the history. Administrators can always see this.
#expose_dataset_path = False
+# -- Job Limiting
+
+# A hard limit on the total number of jobs a user can have running across all
+# configured job destinations can be configured here.
+#registered_user_job_limit = None
+#anonymous_user_job_limit = None
+
+# Additionally, jobs can be limited based on runner URLs (or matching of runner
+# URLs). Matching is via SQL's 'LIKE' operator, so the wildcard characters are
+# '_' and '%' (regex is not supported). Since the job runner code often
+# rewrites the URL once the job has been submitted to the cluster, you will
+# need to define how to match the runner URL stored in the database. When in
+# doubt, you can run a job and then examine the stored value of
+# 'job_runner_name' in the 'job' table of the database to see what you'll need
+# to match.
+#
+# For example, if default_cluster_job_runner is set to pbs:/// and the default
+# Torque cluster happens to be pbs.example.org, the job_runner_name is likely
+# to be stored as 'pbs://pbs.example.org/'. To limit the number of jobs a user
+# can run on this cluster to 4, use the following:
+#
+# pbs:/// = pbs://pbs.example.org/ 4
+#
+# An example that uses matching (if, for example, your runner URL contains
+# native options):
+#
+# drmaa:/// = drmaa://sge.example.org/% 4
+
+[galaxy:job_limits]
+
# ---- Per-Tool Job Management ----------------------------------------------
# Per-tool job handler and runner overrides. Parameters can be included to define multiple

commit/galaxy-central: greg: Relocate the tool shed's ShedCounter.
by Bitbucket 13 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/ed0738c60016/
changeset: ed0738c60016
user: greg
date: 2012-11-13 20:36:40
summary: Relocate the tool shed's ShedCounter.
affected #: 3 files
diff -r 424d407c67f7ea4f830317b7ab033815415a5a3b -r ed0738c6001654d5456dd36579b278cd10fcd00c lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -1,7 +1,6 @@
import sys, os, tempfile, shutil, logging, string, urllib2
import galaxy.tools.data
from datetime import date, datetime, timedelta
-from time import strftime, gmtime
from galaxy import util
from galaxy.web import url_for
from galaxy.web.form_builder import SelectField
@@ -40,85 +39,6 @@
VALID_CHARS = set( string.letters + string.digits + "'\"-=_.()/+*^,:?!#[]%\\$@;{}" )
TOOL_SHED_ADMIN_CONTROLLER = 'TOOL_SHED_ADMIN_CONTROLLER'
-class ShedCounter( object ):
- def __init__( self, model ):
- # TODO: Enhance the ShedCounter to retrieve information from the db instead of displaying what's currently in memory.
- self.model = model
- self.generation_time = strftime( "%b %d, %Y", gmtime() )
- self.repositories = 0
- #self.new_repositories = 0
- self.deleted_repositories = 0
- self.invalid_tools = 0
- self.valid_tools = 0
- self.workflows = 0
- self.proprietary_datatypes = 0
- self.total_clones = 0
- self.generate_statistics()
- @property
- def sa_session( self ):
- """Returns a SQLAlchemy session"""
- return self.model.context
- def generate_statistics( self ):
- self.repositories = 0
- #self.new_repositories = 0
- self.deleted_repositories = 0
- self.invalid_tools = 0
- self.valid_tools = 0
- self.workflows = 0
- self.proprietary_datatypes = 0
- self.total_clones = 0
- for repository in self.sa_session.query( self.model.Repository ):
- self.repositories += 1
- self.total_clones += repository.times_downloaded
- is_deleted = repository.deleted
- #is_new = repository.is_new
- #if is_deleted and is_new:
- if is_deleted:
- self.deleted_repositories += 1
- # self.new_repositories += 1
- #elif is_deleted:
- # self.deleted_repositories += 1
- #elif is_new:
- # self.new_repositories += 1
- else:
- processed_guids = []
- processed_invalid_tool_configs = []
- processed_relative_workflow_paths = []
- processed_datatypes = []
- # A repository's metadata_revisions are those that ignore the value of the repository_metadata.downloadable column.
- for metadata_revision in repository.metadata_revisions:
- metadata = metadata_revision.metadata
- if 'tools' in metadata:
- tool_dicts = metadata[ 'tools' ]
- for tool_dict in tool_dicts:
- if 'guid' in tool_dict:
- guid = tool_dict[ 'guid' ]
- if guid not in processed_guids:
- self.valid_tools += 1
- processed_guids.append( guid )
- if 'invalid_tools' in metadata:
- invalid_tool_configs = metadata[ 'invalid_tools' ]
- for invalid_tool_config in invalid_tool_configs:
- if invalid_tool_config not in processed_invalid_tool_configs:
- self.invalid_tools += 1
- processed_invalid_tool_configs.append( invalid_tool_config )
- if 'datatypes' in metadata:
- datatypes = metadata[ 'datatypes' ]
- for datatypes_dict in datatypes:
- if 'extension' in datatypes_dict:
- extension = datatypes_dict[ 'extension' ]
- if extension not in processed_datatypes:
- self.proprietary_datatypes += 1
- processed_datatypes.append( extension )
- if 'workflows' in metadata:
- workflows = metadata[ 'workflows' ]
- for workflow_tup in workflows:
- relative_path, exported_workflow_dict = workflow_tup
- if relative_path not in processed_relative_workflow_paths:
- self.workflows += 1
- processed_relative_workflow_paths.append( relative_path )
- self.generation_time = strftime( "%b %d, %Y", gmtime() )
-
def add_to_shed_tool_config( app, shed_tool_conf_dict, elem_list ):
# A tool shed repository is being installed so change the shed_tool_conf file. Parse the config file to generate the entire list
# of config_elems instead of using the in-memory list since it will be a subset of the entire list if one or more repositories have
diff -r 424d407c67f7ea4f830317b7ab033815415a5a3b -r ed0738c6001654d5456dd36579b278cd10fcd00c lib/galaxy/webapps/community/model/mapping.py
--- a/lib/galaxy/webapps/community/model/mapping.py
+++ b/lib/galaxy/webapps/community/model/mapping.py
@@ -13,7 +13,7 @@
from galaxy.model.orm.ext.assignmapper import *
from galaxy.model.custom_types import *
from galaxy.util.bunch import Bunch
-from galaxy.util.shed_util import ShedCounter
+from galaxy.webapps.community.util.shed_statistics import *
from galaxy.webapps.community.util.hgweb_config import *
from galaxy.webapps.community.security import CommunityRBACAgent
diff -r 424d407c67f7ea4f830317b7ab033815415a5a3b -r ed0738c6001654d5456dd36579b278cd10fcd00c lib/galaxy/webapps/community/util/shed_statistics.py
--- /dev/null
+++ b/lib/galaxy/webapps/community/util/shed_statistics.py
@@ -0,0 +1,80 @@
+from time import strftime, gmtime
+
+class ShedCounter( object ):
+ def __init__( self, model ):
+ # TODO: Enhance the ShedCounter to retrieve information from the db instead of displaying what's currently in memory.
+ self.model = model
+ self.generation_time = strftime( "%b %d, %Y", gmtime() )
+ self.repositories = 0
+ #self.new_repositories = 0
+ self.deleted_repositories = 0
+ self.invalid_tools = 0
+ self.valid_tools = 0
+ self.workflows = 0
+ self.proprietary_datatypes = 0
+ self.total_clones = 0
+ self.generate_statistics()
+ @property
+ def sa_session( self ):
+ """Returns a SQLAlchemy session"""
+ return self.model.context
+ def generate_statistics( self ):
+ self.repositories = 0
+ #self.new_repositories = 0
+ self.deleted_repositories = 0
+ self.invalid_tools = 0
+ self.valid_tools = 0
+ self.workflows = 0
+ self.proprietary_datatypes = 0
+ self.total_clones = 0
+ for repository in self.sa_session.query( self.model.Repository ):
+ self.repositories += 1
+ self.total_clones += repository.times_downloaded
+ is_deleted = repository.deleted
+ #is_new = repository.is_new
+ #if is_deleted and is_new:
+ if is_deleted:
+ self.deleted_repositories += 1
+ # self.new_repositories += 1
+ #elif is_deleted:
+ # self.deleted_repositories += 1
+ #elif is_new:
+ # self.new_repositories += 1
+ else:
+ processed_guids = []
+ processed_invalid_tool_configs = []
+ processed_relative_workflow_paths = []
+ processed_datatypes = []
+ # A repository's metadata_revisions are those that ignore the value of the repository_metadata.downloadable column.
+ for metadata_revision in repository.metadata_revisions:
+ metadata = metadata_revision.metadata
+ if 'tools' in metadata:
+ tool_dicts = metadata[ 'tools' ]
+ for tool_dict in tool_dicts:
+ if 'guid' in tool_dict:
+ guid = tool_dict[ 'guid' ]
+ if guid not in processed_guids:
+ self.valid_tools += 1
+ processed_guids.append( guid )
+ if 'invalid_tools' in metadata:
+ invalid_tool_configs = metadata[ 'invalid_tools' ]
+ for invalid_tool_config in invalid_tool_configs:
+ if invalid_tool_config not in processed_invalid_tool_configs:
+ self.invalid_tools += 1
+ processed_invalid_tool_configs.append( invalid_tool_config )
+ if 'datatypes' in metadata:
+ datatypes = metadata[ 'datatypes' ]
+ for datatypes_dict in datatypes:
+ if 'extension' in datatypes_dict:
+ extension = datatypes_dict[ 'extension' ]
+ if extension not in processed_datatypes:
+ self.proprietary_datatypes += 1
+ processed_datatypes.append( extension )
+ if 'workflows' in metadata:
+ workflows = metadata[ 'workflows' ]
+ for workflow_tup in workflows:
+ relative_path, exported_workflow_dict = workflow_tup
+ if relative_path not in processed_relative_workflow_paths:
+ self.workflows += 1
+ processed_relative_workflow_paths.append( relative_path )
+ self.generation_time = strftime( "%b %d, %Y", gmtime() )

commit/galaxy-central: inithello: Fix for renaming repository.
by Bitbucket 13 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/424d407c67f7/
changeset: 424d407c67f7
user: inithello
date: 2012-11-13 19:56:59
summary: Fix for renaming repository.
affected #: 1 file
diff -r 447448028a2f7326fc452ee32055de5c58e822f6 -r 424d407c67f7ea4f830317b7ab033815415a5a3b lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -1676,8 +1676,7 @@
# Change the entry in the hgweb.config file for the repository.
old_lhs = "repos/%s/%s" % ( repository.user.username, repository.name )
new_lhs = "repos/%s/%s" % ( repository.user.username, repo_name )
- new_rhs = "%s\n" % repo_dir
- trans.app.hgweb_config_manager.change_entry( old_lhs, new_lhs, new_rhs )
+ trans.app.hgweb_config_manager.change_entry( old_lhs, new_lhs, repo_dir )
# Change the entry in the repository's hgrc file.
hgrc_file = os.path.join( repo_dir, '.hg', 'hgrc' )
self.__change_repository_name_in_hgrc_file( hgrc_file, repo_name )

commit/galaxy-central: greg: Add the framework components to enable creating a functional test framework for the tool shed.
by Bitbucket 13 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/447448028a2f/
changeset: 447448028a2f
user: greg
date: 2012-11-13 19:49:09
summary: Add the framework components to enable creating a functional test framework for the tool shed.
affected #: 9 files
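For orientation, a minimal sketch of the kind of test script this framework is meant to run. ShedTwillTestCase, get_user, and the login/logout helpers come from the diff below; the class name, email, username, and test body here are hypothetical.

    from tool_shed.base.twilltestcase import ShedTwillTestCase
    from tool_shed.base.test_db_util import get_user

    class TestExampleRepository( ShedTwillTestCase ):
        def test_0000_initiate_users( self ):
            """Log in as a (hypothetical) admin user and check that the account was created."""
            self.logout()
            self.login( email='admin@example.org', username='admin-user' )
            assert get_user( 'admin@example.org' ) is not None, 'Problem retrieving user from the database'

Such a script would be dropped into test/tool_shed/functional and picked up by functional_tests.py via the run_tool_shed_functional_tests.sh wrapper added below.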
diff -r 3419a45c1f01790b3108d1726253fcb46feedbf4 -r 447448028a2f7326fc452ee32055de5c58e822f6 run_tool_shed_functional_tests.sh
--- /dev/null
+++ b/run_tool_shed_functional_tests.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+# A good place to look for nose info: http://somethingaboutorange.com/mrl/projects/nose/
+#rm -f ./test/tool_shed/run_functional_tests.log
+
+if [ ! $1 ]; then
+ python ./test/tool_shed/functional_tests.py -v --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html ./test/tool_shed/functional
+elif [ $1 = 'help' ]; then
+ echo "'run_tool_shed_functional_tests.sh' for running all the test scripts in the ./test/tool_shed/functional directory"
+ echo "'run_tool_shed_functional_tests.sh testscriptname' for running one test script named testscriptname in the .test/tool_shed/functional directory"
+else
+ python ./test/tool_shed/functional_tests.py -v --with-nosehtml --html-report-file ./test/tool_shed/run_functional_tests.html $1
+fi
+
+echo "'sh run_tool_shed_functional_tests.sh help' for help"
diff -r 3419a45c1f01790b3108d1726253fcb46feedbf4 -r 447448028a2f7326fc452ee32055de5c58e822f6 test/tool_shed/__init__.py
--- /dev/null
+++ b/test/tool_shed/__init__.py
@@ -0,0 +1,1 @@
+"""Tool shed functional Tests"""
\ No newline at end of file
diff -r 3419a45c1f01790b3108d1726253fcb46feedbf4 -r 447448028a2f7326fc452ee32055de5c58e822f6 test/tool_shed/base/test_db_util.py
--- /dev/null
+++ b/test/tool_shed/base/test_db_util.py
@@ -0,0 +1,39 @@
+import galaxy.webapps.community.model as model
+from galaxy.model.orm import *
+from galaxy.webapps.community.model.mapping import context as sa_session
+from base.twilltestcase import *
+import sys
+
+def delete_obj( obj ):
+ sa_session.delete( obj )
+ sa_session.flush()
+def delete_user_roles( user ):
+ for ura in user.roles:
+ sa_session.delete( ura )
+ sa_session.flush()
+def flush( obj ):
+ sa_session.add( obj )
+ sa_session.flush()
+def get_default_user_permissions_by_role( role ):
+ return sa_session.query( model.DefaultUserPermissions ) \
+ .filter( model.DefaultUserPermissions.table.c.role_id == role.id ) \
+ .all()
+def get_default_user_permissions_by_user( user ):
+ return sa_session.query( model.DefaultUserPermissions ) \
+ .filter( model.DefaultUserPermissions.table.c.user_id==user.id ) \
+ .all()
+def get_private_role( user ):
+ for role in user.all_roles():
+ if role.name == user.email and role.description == 'Private Role for %s' % user.email:
+ return role
+ raise AssertionError( "Private role not found for user '%s'" % user.email )
+def get_user( email ):
+ return sa_session.query( model.User ) \
+ .filter( model.User.table.c.email==email ) \
+ .first()
+def mark_obj_deleted( obj ):
+ obj.deleted = True
+ sa_session.add( obj )
+ sa_session.flush()
+def refresh( obj ):
+ sa_session.refresh( obj )
diff -r 3419a45c1f01790b3108d1726253fcb46feedbf4 -r 447448028a2f7326fc452ee32055de5c58e822f6 test/tool_shed/base/twilltestcase.py
--- /dev/null
+++ b/test/tool_shed/base/twilltestcase.py
@@ -0,0 +1,20 @@
+from base.twilltestcase import *
+
+class ShedTwillTestCase( TwillTestCase ):
+ def setUp( self ):
+ # Security helper
+ self.security = security.SecurityHelper( id_secret='changethisinproductiontoo' )
+ self.history_id = None
+ self.host = os.environ.get( 'TOOL_SHED_TEST_HOST' )
+ self.port = os.environ.get( 'TOOL_SHED_TEST_PORT' )
+ self.url = "http://%s:%s" % ( self.host, self.port )
+ self.file_dir = os.environ.get( 'TOOL_SHED_TEST_FILE_DIR', None )
+ self.tool_shed_test_file = None
+ self.shed_tools_dict = {}
+ self.keepOutdir = os.environ.get( 'TOOL_SHED_TEST_SAVE', '' )
+ if self.keepOutdir > '':
+ try:
+ os.makedirs( self.keepOutdir )
+ except:
+ pass
+ self.home()
diff -r 3419a45c1f01790b3108d1726253fcb46feedbf4 -r 447448028a2f7326fc452ee32055de5c58e822f6 test/tool_shed/functional/test_0000_create_repository.py
--- /dev/null
+++ b/test/tool_shed/functional/test_0000_create_repository.py
@@ -0,0 +1,44 @@
+import tempfile, time, re, tempfile, os, shutil
+import galaxy.webapps.community.model
+from galaxy.util import parse_xml, string_as_bool
+from galaxy.util.shed_util import clean_tool_shed_url
+from galaxy.model.orm import *
+from tool_shed.base.twilltestcase import *
+from tool_shed.base.test_db_util import *
+
+admin_user = None
+admin_user_private_role = None
+admin_email = 'test@bx.psu.edu'
+admin_username = 'admin-user'
+
+class TestCreateRepository( ShedTwillTestCase ):
+
+ def test_0000_initiate_users( self ):
+ """Create necessary users and login as an admin user."""
+ self.logout()
+ self.login( email=admin_email, username=admin_username )
+ admin_user = get_user( admin_email )
+ assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
+ admin_user_private_role = get_private_role( admin_user )
+ def test_0005_create_category( self ):
+ """Create a category"""
+ self.visit_url( '/admin/manage_categories?operation=create' )
+ try:
+ tc.fv( "1", "name", "Text Manipulation" )
+ tc.fv( "1", "description", "Tools for manipulating text" )
+ tc.submit( "create_category_button" )
+ except Exception, e:
+ errmsg = "Problem creating a category: %s" % str( e )
+ raise AssertionError( e )
+ def test_0010_create_filter_repository( self ):
+ """Create a repository"""
+ self.visit_url( '/repository/create_repository' )
+ try:
+ tc.fv( "1", "name", "filter" )
+ tc.fv( "1", "description", "Galaxy's filter tool" )
+ tc.fv( "1", "long_description", "Long description of Galaxy's filter tool" )
+ tc.fv( "1", "category_id", "Text Manipulation" )
+ tc.submit( "create_repository_button" )
+ except Exception, e:
+ errmsg = "Problem creating a repository: %s" % str( e )
+ raise AssertionError( e )
diff -r 3419a45c1f01790b3108d1726253fcb46feedbf4 -r 447448028a2f7326fc452ee32055de5c58e822f6 test/tool_shed/functional_tests.py
--- /dev/null
+++ b/test/tool_shed/functional_tests.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+
+import os, sys, shutil, tempfile, re
+
+# Assume we are run from the galaxy root directory, add lib to the python path
+cwd = os.getcwd()
+tool_shed_home_directory = os.path.join( cwd, 'test', 'tool_shed' )
+default_tool_shed_test_file_dir = os.path.join( tool_shed_home_directory, 'test_data' )
+new_path = [ os.path.join( cwd, "lib" ) ]
+new_path.extend( sys.path[1:] )
+sys.path = new_path
+
+from galaxy import eggs
+
+eggs.require( "nose" )
+eggs.require( "NoseHTML" )
+eggs.require( "NoseTestDiff" )
+eggs.require( "twill==0.9" )
+eggs.require( "Paste" )
+eggs.require( "PasteDeploy" )
+eggs.require( "Cheetah" )
+
+# This should not be required, but it is under certain conditions, thanks to this bug: http://code.google.com/p/python-nose/issues/detail?id=284
+eggs.require( "pysqlite" )
+
+import atexit, logging, os, os.path, sys, tempfile
+import twill, unittest, time
+import sys, threading, random
+import httplib, socket
+from paste import httpserver
+import galaxy.webapps.community.app
+from galaxy.webapps.community.app import UniverseApplication
+from galaxy.webapps.community import buildapp
+
+import nose.core
+import nose.config
+import nose.loader
+import nose.plugins.manager
+
+log = logging.getLogger( "tool_shed_functional_tests.py" )
+
+default_tool_shed_test_host = "localhost"
+default_tool_shed_test_port_min = 8000
+default_tool_shed_test_port_max = 9999
+default_tool_shed_locales = 'en'
+
+def run_tests( test_config ):
+ loader = nose.loader.TestLoader( config=test_config )
+ plug_loader = test_config.plugins.prepareTestLoader( loader )
+ if plug_loader is not None:
+ loader = plug_loader
+ tests = loader.loadTestsFromNames( test_config.testNames )
+ test_runner = nose.core.TextTestRunner( stream=test_config.stream,
+ verbosity=test_config.verbosity,
+ config=test_config )
+ plug_runner = test_config.plugins.prepareTestRunner( test_runner )
+ if plug_runner is not None:
+ test_runner = plug_runner
+ return test_runner.run( tests )
+
+def main():
+ # ---- Configuration ------------------------------------------------------
+ tool_shed_test_host = os.environ.get( 'TOOL_SHED_TEST_HOST', default_tool_shed_test_host )
+ tool_shed_test_port = os.environ.get( 'TOOL_SHED_TEST_PORT', None )
+ tool_shed_test_save = os.environ.get( 'TOOL_SHED_TEST_SAVE', None )
+ tool_path = os.environ.get( 'TOOL_SHED_TEST_TOOL_PATH', 'tools' )
+ start_server = 'TOOL_SHED_TEST_EXTERNAL' not in os.environ
+ if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
+ os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_tool_shed_locales
+ tool_shed_test_file_dir = os.environ.get( 'TOOL_SHED_TEST_FILE_DIR', default_tool_shed_test_file_dir )
+ if not os.path.isabs( tool_shed_test_file_dir ):
+ tool_shed_test_file_dir = tool_shed_test_file_dir
+ ignore_files = ()
+ if os.path.exists( 'tool_data_table_conf.test.xml' ):
+ tool_data_table_config_path = 'tool_data_table_conf.test.xml'
+ else:
+ tool_data_table_config_path = 'tool_data_table_conf.xml'
+ shed_tool_data_table_config = 'shed_tool_data_table_conf.xml'
+ tool_dependency_dir = os.environ.get( 'TOOL_SHED_TOOL_DEPENDENCY_DIR', None )
+ use_distributed_object_store = os.environ.get( 'TOOL_SHED_USE_DISTRIBUTED_OBJECT_STORE', False )
+
+ if start_server:
+ psu_production = False
+ tool_shed_test_proxy_port = None
+ if 'TOOL_SHED_TEST_PSU_PRODUCTION' in os.environ:
+ if not tool_shed_test_port:
+ raise Exception( 'Set TOOL_SHED_TEST_PORT to the port to which the proxy server will proxy' )
+ tool_shed_test_proxy_port = os.environ.get( 'TOOL_SHED_TEST_PROXY_PORT', None )
+ if not tool_shed_test_proxy_port:
+ raise Exception( 'Set TOOL_SHED_TEST_PROXY_PORT to the port on which the proxy server is listening' )
+ base_file_path = os.environ.get( 'TOOL_SHED_TEST_BASE_FILE_PATH', None )
+ if not base_file_path:
+ raise Exception( 'Set TOOL_SHED_TEST_BASE_FILE_PATH to the directory which will contain the dataset files directory' )
+ base_new_file_path = os.environ.get( 'TOOL_SHED_TEST_BASE_NEW_FILE_PATH', None )
+ if not base_new_file_path:
+ raise Exception( 'Set TOOL_SHED_TEST_BASE_NEW_FILE_PATH to the directory which will contain the temporary directory' )
+ database_connection = os.environ.get( 'TOOL_SHED_TEST_DBURI', None )
+ if not database_connection:
+ raise Exception( 'Set TOOL_SHED_TEST_DBURI to the URI of the database to be used for tests' )
+ nginx_upload_store = os.environ.get( 'TOOL_SHED_TEST_NGINX_UPLOAD_STORE', None )
+ if not nginx_upload_store:
+ raise Exception( 'Set TOOL_SHED_TEST_NGINX_UPLOAD_STORE to the path where the nginx upload module places uploaded files' )
+ file_path = tempfile.mkdtemp( dir=base_file_path )
+ new_file_path = tempfile.mkdtemp( dir=base_new_file_path )
+ kwargs = dict( database_engine_option_pool_size = '10',
+ database_engine_option_max_overflow = '20',
+ database_engine_option_strategy = 'threadlocal',
+ static_enabled = 'False',
+ debug = 'False' )
+ psu_production = True
+ else:
+ if 'TOOL_SHED_TEST_DBPATH' in os.environ:
+ db_path = os.environ[ 'TOOL_SHED_TEST_DBPATH' ]
+ else:
+ tempdir = tempfile.mkdtemp()
+ db_path = os.path.join( tempdir, 'database' )
+ file_path = os.path.join( db_path, 'files' )
+ new_file_path = os.path.join( db_path, 'tmp' )
+ if 'TOOL_SHED_TEST_DBURI' in os.environ:
+ database_connection = os.environ[ 'TOOL_SHED_TEST_DBURI' ]
+ else:
+ database_connection = 'sqlite:///' + os.path.join( db_path, 'universe.sqlite' )
+ kwargs = {}
+ for dir in file_path, new_file_path:
+ try:
+ os.makedirs( dir )
+ except OSError:
+ pass
+
+ print "Database connection:", database_connection
+
+ # ---- Build Application --------------------------------------------------
+ app = None
+ if start_server:
+ global_conf = { '__file__' : 'community_wsgi.ini.sample' }
+ if psu_production:
+ global_conf = None
+ if not database_connection.startswith( 'sqlite://' ):
+ kwargs[ 'database_engine_option_max_overflow' ] = '20'
+ if tool_dependency_dir is not None:
+ kwargs[ 'tool_dependency_dir' ] = tool_dependency_dir
+ if use_distributed_object_store:
+ kwargs[ 'object_store' ] = 'distributed'
+ kwargs[ 'distributed_object_store_config_file' ] = 'distributed_object_store_conf.xml.sample'
+
+ app = UniverseApplication( job_queue_workers = 5,
+ id_secret = 'changethisinproductiontoo',
+ template_path = 'templates',
+ database_connection = database_connection,
+ database_engine_option_pool_size = '10',
+ file_path = file_path,
+ new_file_path = new_file_path,
+ tool_path=tool_path,
+ datatype_converters_config_file = 'datatype_converters_conf.xml.sample',
+ tool_parse_help = False,
+ tool_data_table_config_path = tool_data_table_config_path,
+ shed_tool_data_table_config = shed_tool_data_table_config,
+ log_destination = "stdout",
+ use_heartbeat = False,
+ allow_user_creation = True,
+ allow_user_deletion = True,
+ admin_users = 'test@bx.psu.edu',
+ global_conf = global_conf,
+ running_functional_tests = True,
+ hgweb_config_dir = new_file_path,
+ **kwargs )
+ log.info( "Embedded Universe application started" )
+
+ # ---- Run webserver ------------------------------------------------------
+ server = None
+ if start_server:
+ webapp = buildapp.app_factory( dict( database_file=database_connection ),
+ use_translogger=False,
+ static_enabled=False,
+ app=app )
+ if tool_shed_test_port is not None:
+ server = httpserver.serve( webapp, host=tool_shed_test_host, port=tool_shed_test_port, start_loop=False )
+ else:
+ random.seed()
+ for i in range( 0, 9 ):
+ try:
+ tool_shed_test_port = str( random.randint( default_tool_shed_test_port_min, default_tool_shed_test_port_max ) )
+ log.debug( "Attempting to serve app on randomly chosen port: %s" % tool_shed_test_port )
+ server = httpserver.serve( webapp, host=tool_shed_test_host, port=tool_shed_test_port, start_loop=False )
+ break
+ except socket.error, e:
+ if e[0] == 98:
+ continue
+ raise
+ else:
+ raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % ( default_tool_shed_test_port_min, default_tool_shed_test_port_max ) )
+ if tool_shed_test_proxy_port:
+ os.environ[ 'TOOL_SHED_TEST_PORT' ] = tool_shed_test_proxy_port
+ else:
+ os.environ[ 'TOOL_SHED_TEST_PORT' ] = tool_shed_test_port
+ t = threading.Thread( target=server.serve_forever )
+ t.start()
+ # Test if the server is up
+ for i in range( 10 ):
+ # Directly test the app, not the proxy.
+ conn = httplib.HTTPConnection( tool_shed_test_host, tool_shed_test_port )
+ conn.request( "GET", "/" )
+ if conn.getresponse().status == 200:
+ break
+ time.sleep( 0.1 )
+ else:
+ raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" )
+ # Test if the proxy server is up.
+ if psu_production:
+ # Directly test the app, not the proxy.
+ conn = httplib.HTTPConnection( tool_shed_test_host, tool_shed_test_proxy_port )
+ conn.request( "GET", "/" )
+ if not conn.getresponse().status == 200:
+ raise Exception( "Test HTTP proxy server did not return '200 OK'" )
+ log.info( "Embedded web server started" )
+ # We don't add the tests to the path until everything is up and running
+ new_path = [ os.path.join( cwd, 'test' ) ]
+ new_path.extend( sys.path[1:] )
+ sys.path = new_path
+ # ---- Find tests ---------------------------------------------------------
+ if tool_shed_test_proxy_port:
+ log.info( "Functional tests will be run against %s:%s" % ( tool_shed_test_host, tool_shed_test_proxy_port ) )
+ else:
+ log.info( "Functional tests will be run against %s:%s" % ( tool_shed_test_host, tool_shed_test_port ) )
+ success = False
+ try:
+ # What requires these? Handy for (eg) functional tests to save outputs?
+ if tool_shed_test_save:
+ os.environ[ 'TOOL_SHED_TEST_SAVE' ] = tool_shed_test_save
+ # Pass in through script set env, will leave a copy of ALL test validate files.
+ os.environ[ 'TOOL_SHED_TEST_HOST' ] = tool_shed_test_host
+ if tool_shed_test_file_dir:
+ os.environ[ 'TOOL_SHED_TEST_FILE_DIR' ] = tool_shed_test_file_dir
+ test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() )
+ test_config.configure( sys.argv )
+ # Run the tests.
+ result = run_tests( test_config )
+ success = result.wasSuccessful()
+ except:
+ log.exception( "Failure running tests" )
+
+ log.info( "Shutting down" )
+ # ---- Tear down -----------------------------------------------------------
+ if server:
+ log.info( "Shutting down embedded web server" )
+ server.server_close()
+ server = None
+ log.info( "Embedded web server stopped" )
+ if app:
+ log.info( "Shutting down app" )
+ app.shutdown()
+ app = None
+ log.info( "Embedded Universe application stopped" )
+ try:
+ if os.path.exists( tempdir ) and 'TOOL_SHED_TEST_NO_CLEANUP' not in os.environ:
+ log.info( "Cleaning up temporary files in %s" % tempdir )
+ shutil.rmtree( tempdir )
+ except:
+ pass
+ if psu_production and 'TOOL_SHED_TEST_NO_CLEANUP' not in os.environ:
+ for dir in ( file_path, new_file_path ):
+ try:
+ if os.path.exists( dir ):
+ log.info( 'Cleaning up temporary files in %s' % dir )
+ shutil.rmtree( dir )
+ except:
+ pass
+ if success:
+ return 0
+ else:
+ return 1
+
+if __name__ == "__main__":
+ sys.exit( main() )
diff -r 3419a45c1f01790b3108d1726253fcb46feedbf4 -r 447448028a2f7326fc452ee32055de5c58e822f6 test/tool_shed/test_data/filtering_1.1.0.tar
Binary file test/tool_shed/test_data/filtering_1.1.0.tar has changed

commit/galaxy-central: greg: Create a new, empty hgweb.config file if one isn't found in the configured location.
by Bitbucket 13 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/3419a45c1f01/
changeset: 3419a45c1f01
user: greg
date: 2012-11-13 19:45:03
summary: Create a new, empty hgweb.config file if one isn't found in the configured location.
affected #: 2 files
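Before the diff, a hedged, self-contained rendering of the new behavior: instead of raising an exception when hgweb.config is missing, an empty file is written from a [paths]-only template. The template and the path handling mirror HgWebConfigManager.hgweb_config in the diff below; the standalone function name here is hypothetical.

    import os

    new_hgweb_config_template = """
    [paths]

    """

    def ensure_hgweb_config( hgweb_config_dir ):
        hgweb_config = os.path.join( hgweb_config_dir, 'hgweb.config' )
        if not os.path.exists( hgweb_config ):
            # Previously this condition raised an exception; now an empty config is created.
            hgweb_config_file = open( hgweb_config, 'w' )
            hgweb_config_file.write( new_hgweb_config_template )
            hgweb_config_file.close()
        return os.path.abspath( hgweb_config )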
diff -r 4471a94e8fbff8d8c165c58f0f76867500953b82 -r 3419a45c1f01790b3108d1726253fcb46feedbf4 lib/galaxy/webapps/community/app.py
--- a/lib/galaxy/webapps/community/app.py
+++ b/lib/galaxy/webapps/community/app.py
@@ -49,5 +49,6 @@
# Let the HgwebConfigManager know where the hgweb.config file is located.
self.hgweb_config_manager = self.model.hgweb_config_manager
self.hgweb_config_manager.hgweb_config_dir = self.config.hgweb_config_dir
+ print >> sys.stderr, "Tool shed hgweb.config file is: ", self.hgweb_config_manager.hgweb_config
def shutdown( self ):
pass
diff -r 4471a94e8fbff8d8c165c58f0f76867500953b82 -r 3419a45c1f01790b3108d1726253fcb46feedbf4 lib/galaxy/webapps/community/util/hgweb_config.py
--- a/lib/galaxy/webapps/community/util/hgweb_config.py
+++ b/lib/galaxy/webapps/community/util/hgweb_config.py
@@ -4,6 +4,11 @@
log = logging.getLogger( __name__ )
+new_hgweb_config_template = """
+[paths]
+
+"""
+
class HgWebConfigManager( object ):
def __init__( self ):
self.hgweb_config_dir = None
@@ -41,7 +46,13 @@
def hgweb_config( self ):
hgweb_config = os.path.join( self.hgweb_config_dir, 'hgweb.config' )
if not os.path.exists( hgweb_config ):
- raise Exception( "Required file %s does not exist - check config setting for hgweb_config_dir." % hgweb_config )
+ # We used to raise an exception here...
+ # raise Exception( "Required file %s does not exist - check config setting for hgweb_config_dir." % hgweb_config )
+ # ...but now we just log the missing file and create a new empty one.
+ log.debug( "Required file %s does not exist, so creating a new, empty file. Check your config setting for hgweb_config_dir." % hgweb_config )
+ hgweb_config_file = open( hgweb_config, 'wb' )
+ hgweb_config_file.write( new_hgweb_config_template )
+ hgweb_config_file.close()
return os.path.abspath( hgweb_config )
def make_backup( self ):
# Make a backup of the hgweb.config file.
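The change above replaces a hard failure with lazy creation of hgweb.config. A minimal standalone sketch of the same pattern (Python 2, matching the surrounding code; the function name below is illustrative):

import os

NEW_HGWEB_CONFIG_TEMPLATE = "\n[paths]\n\n"

def get_or_create_hgweb_config( hgweb_config_dir ):
    hgweb_config = os.path.join( hgweb_config_dir, 'hgweb.config' )
    if not os.path.exists( hgweb_config ):
        # Write an empty [paths]-only file instead of raising, so the tool shed can start.
        hgweb_config_file = open( hgweb_config, 'wb' )
        hgweb_config_file.write( NEW_HGWEB_CONFIG_TEMPLATE )
        hgweb_config_file.close()
    return os.path.abspath( hgweb_config )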
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

commit/galaxy-central: dannon: Modules.py: Fix sa_session.add() for PJAs, remove unused imports, whitespace cleanup.
by Bitbucket 13 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/4471a94e8fbf/
changeset: 4471a94e8fbf
user: dannon
date: 2012-11-13 19:07:32
summary: Modules.py: Fix sa_session.add() for PJAs, remove unused imports, whitespace cleanup.
affected #: 1 file
diff -r 5013377e0bf7a656ea593098f1d1b38f3d6928c6 -r 4471a94e8fbff8d8c165c58f0f76867500953b82 lib/galaxy/workflow/modules.py
--- a/lib/galaxy/workflow/modules.py
+++ b/lib/galaxy/workflow/modules.py
@@ -3,7 +3,6 @@
from galaxy import web
from galaxy.tools.parameters import DataToolParameter, DummyDataset, RuntimeValue, check_param, visit_input_values
from galaxy.tools import DefaultToolState
-from galaxy.tools.parameters.grouping import Repeat, Conditional
from galaxy.util.bunch import Bunch
from galaxy.util.json import from_json_string, to_json_string
from galaxy.jobs.actions.post import ActionBox
@@ -13,12 +12,12 @@
log = logging.getLogger( __name__ )
class WorkflowModule( object ):
-
+
def __init__( self, trans ):
self.trans = trans
-
+
## ---- Creating modules from various representations ---------------------
-
+
@classmethod
def new( Class, trans, tool_id=None ):
"""
@@ -37,12 +36,12 @@
return Class( trans )
## ---- Saving in various forms ------------------------------------------
-
+
def save_to_step( self, step ):
step.type = self.type
-
+
## ---- General attributes -----------------------------------------------
-
+
def get_type( self ):
return self.type
def get_name( self ):
@@ -51,9 +50,9 @@
return None
def get_tooltip( self, static_path='' ):
return None
-
+
## ---- Configuration time -----------------------------------------------
-
+
def get_state( self ):
return None
def get_errors( self ):
@@ -66,16 +65,16 @@
pass
def get_config_form( self ):
raise TypeError( "Abstract method" )
-
+
def check_and_update_state( self ):
"""
If the state is not in sync with the current implementation of the
module, try to update. Returns a list of messages to be displayed
"""
pass
-
+
## ---- Run time ---------------------------------------------------------
-
+
def get_runtime_inputs( self ):
raise TypeError( "Abstract method" )
def get_runtime_state( self ):
@@ -86,7 +85,7 @@
raise TypeError( "Abstract method" )
def update_runtime_state( self, trans, state, values ):
raise TypeError( "Abstract method" )
-
+
def execute( self, trans, state ):
raise TypeError( "Abstract method" )
@@ -112,6 +111,7 @@
if step.tool_inputs and "name" in step.tool_inputs:
module.state['name'] = step.tool_inputs[ 'name' ]
return module
+
def save_to_step( self, step ):
step.type = self.type
step.tool_id = None
@@ -119,8 +119,10 @@
def get_data_inputs( self ):
return []
+
def get_data_outputs( self ):
return [ dict( name='output', extensions=['input'] ) ]
+
def get_config_form( self ):
form = web.FormBuilder( title=self.name ) \
.add_text( "name", "Name", value=self.state['name'] )
@@ -128,25 +130,29 @@
module=self, form=form )
def get_state( self, secure=True ):
return to_json_string( self.state )
-
+
def update_state( self, incoming ):
self.state['name'] = incoming.get( 'name', 'Input Dataset' )
-
+
def get_runtime_inputs( self, filter_set=['data'] ):
label = self.state.get( "name", "Input Dataset" )
return dict( input=DataToolParameter( None, Element( "param", name="input", label=label, multiple=True, type="data", format=', '.join(filter_set) ), self.trans ) )
+
def get_runtime_state( self ):
state = DefaultToolState()
state.inputs = dict( input=None )
return state
+
def encode_runtime_state( self, trans, state ):
fake_tool = Bunch( inputs = self.get_runtime_inputs() )
return state.encode( fake_tool, trans.app )
+
def decode_runtime_state( self, trans, string ):
fake_tool = Bunch( inputs = self.get_runtime_inputs() )
state = DefaultToolState()
state.decode( string, fake_tool, trans.app )
return state
+
def update_runtime_state( self, trans, state, values ):
errors = {}
for name, param in self.get_runtime_inputs().iteritems():
@@ -155,14 +161,14 @@
if error:
errors[ name ] = error
return errors
-
+
def execute( self, trans, state ):
return None, dict( output=state.inputs['input'])
-
+
class ToolModule( WorkflowModule ):
-
+
type = "tool"
-
+
def __init__( self, trans, tool_id ):
self.trans = trans
self.tool_id = tool_id
@@ -175,11 +181,13 @@
else:
self.errors = {}
self.errors[ tool_id ] = 'Tool unavailable'
+
@classmethod
def new( Class, trans, tool_id=None ):
module = Class( trans, tool_id )
module.state = module.tool.new_state( trans, all_pages=True )
return module
+
@classmethod
def from_dict( Class, trans, d, secure=True ):
tool_id = d[ 'tool_id' ]
@@ -191,6 +199,7 @@
module.post_job_actions = d.get( "post_job_actions", {} )
module.workflow_outputs = d.get( "workflow_outputs", [] )
return module
+
@classmethod
def from_workflow_step( Class, trans, step ):
tool_id = step.tool_id
@@ -215,12 +224,14 @@
module.post_job_actions = pjadict
return module
return None
+
@classmethod
def __get_tool_version( cls, trans, tool_id ):
# Return a ToolVersion if one exists for tool_id.
return trans.sa_session.query( trans.app.model.ToolVersion ) \
.filter( trans.app.model.ToolVersion.table.c.tool_id == tool_id ) \
.first()
+
def save_to_step( self, step ):
step.type = self.type
step.tool_id = self.tool_id
@@ -241,24 +252,31 @@
action_arguments = v['action_arguments']
else:
action_arguments = None
- n_p = PostJobAction(v['action_type'], step, output_name, action_arguments)
+ self.trans.sa_session.add(PostJobAction(v['action_type'], step, output_name, action_arguments))
+
def get_name( self ):
if self.tool:
return self.tool.name
return 'unavailable'
+
def get_tool_id( self ):
return self.tool_id
+
def get_tool_version( self ):
return self.tool.version
+
def get_state( self, secure=True ):
return self.state.encode( self.tool, self.trans.app, secure=secure )
+
def get_errors( self ):
return self.errors
+
def get_tooltip( self, static_path='' ):
if self.tool.help:
return self.tool.help.render( static_path=static_path )
else:
return None
+
def get_data_inputs( self ):
data_inputs = []
def callback( input, value, prefixed_name, prefixed_label ):
@@ -270,6 +288,7 @@
extensions=input.extensions ) )
visit_input_values( self.tool.inputs, self.state.inputs, callback )
return data_inputs
+
def get_data_outputs( self ):
data_outputs = []
data_inputs = None
@@ -293,20 +312,23 @@
formats.append( format )
data_outputs.append( dict( name=name, extensions=formats ) )
return data_outputs
+
def get_post_job_actions( self ):
return self.post_job_actions
+
def get_config_form( self ):
self.add_dummy_datasets()
- return self.trans.fill_template( "workflow/editor_tool_form.mako",
+ return self.trans.fill_template( "workflow/editor_tool_form.mako",
tool=self.tool, values=self.state.inputs, errors=( self.errors or {} ) )
- def update_state( self, incoming ):
+
+ def update_state( self, incoming ):
# Build a callback that handles setting an input to be required at
# runtime. We still process all other parameters the user might have
# set. We also need to make sure all datasets have a dummy value
# for dependencies to see
-
+
self.post_job_actions = ActionBox.handle_incoming(incoming)
-
+
make_runtime_key = incoming.get( 'make_runtime', None )
make_buildtime_key = incoming.get( 'make_buildtime', None )
def item_callback( trans, key, input, value, error, old_value, context ):
@@ -328,8 +350,10 @@
# Update state using incoming values
errors = self.tool.update_state( self.trans, self.tool.inputs, self.state.inputs, incoming, item_callback=item_callback )
self.errors = errors or None
+
def check_and_update_state( self ):
return self.tool.check_and_update_param_values( self.state.inputs, self.trans )
+
def add_dummy_datasets( self, connections=None):
if connections:
# Store connections by input name
@@ -348,11 +372,12 @@
else:
replacement = DummyDataset()
return replacement
- visit_input_values( self.tool.inputs, self.state.inputs, callback )
+ visit_input_values( self.tool.inputs, self.state.inputs, callback )
class WorkflowModuleFactory( object ):
def __init__( self, module_types ):
self.module_types = module_types
+
def new( self, trans, type, tool_id=None ):
"""
Return module for type and (optional) tool_id initialized with
@@ -360,18 +385,20 @@
"""
assert type in self.module_types
return self.module_types[type].new( trans, tool_id )
+
def from_dict( self, trans, d, **kwargs ):
"""
Return module initialized from the data in dictionary `d`.
"""
type = d['type']
assert type in self.module_types
- return self.module_types[type].from_dict( trans, d, **kwargs )
+ return self.module_types[type].from_dict( trans, d, **kwargs )
+
def from_workflow_step( self, trans, step ):
"""
Return module initialized from the WorkflowStep object `step`.
"""
type = step.type
return self.module_types[type].from_workflow_step( trans, step )
-
+
module_factory = WorkflowModuleFactory( dict( data_input=InputDataModule, tool=ToolModule ) )
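The one functional change in this cleanup is the PostJobAction line: constructing the object and binding it to a local name (n_p) never registers it with the SQLAlchemy session, so it was never flushed; passing it to sa_session.add() does. A minimal, self-contained sketch of that distinction, using a stub model rather than Galaxy's mapping:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class PostJobActionStub( Base ):
    __tablename__ = 'post_job_action_stub'
    id = Column( Integer, primary_key=True )
    action_type = Column( String( 255 ) )

engine = create_engine( 'sqlite://' )
Base.metadata.create_all( engine )
sa_session = sessionmaker( bind=engine )()

# Merely constructing an object does not persist it...
orphan = PostJobActionStub( action_type='HideDatasetAction' )
# ...it must be added to the session, which the fixed line now does in one step.
sa_session.add( PostJobActionStub( action_type='RenameDatasetAction' ) )
sa_session.flush()

print( sa_session.query( PostJobActionStub ).count() )  # 1, not 2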
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

commit/galaxy-central: dannon: Fix for rendering workflow tooltips when tool.help is nonexistent.
by Bitbucket 13 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/5013377e0bf7/
changeset: 5013377e0bf7
user: dannon
date: 2012-11-13 18:26:55
summary: Fix for rendering workflow tooltips when tool.help is nonexistent.
affected #: 1 file
diff -r d3ac39a6f8d7a5f95ffabb4ba8f8a846b17ac15a -r 5013377e0bf7a656ea593098f1d1b38f3d6928c6 lib/galaxy/workflow/modules.py
--- a/lib/galaxy/workflow/modules.py
+++ b/lib/galaxy/workflow/modules.py
@@ -255,7 +255,10 @@
def get_errors( self ):
return self.errors
def get_tooltip( self, static_path='' ):
- return self.tool.help.render( static_path=static_path )
+ if self.tool.help:
+ return self.tool.help.render( static_path=static_path )
+ else:
+ return None
def get_data_inputs( self ):
data_inputs = []
def callback( input, value, prefixed_name, prefixed_label ):
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

commit/galaxy-central: greg: Add a new HgWebConfigManager to manage the tool shed's hgweb.config file.
by Bitbucket 13 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/d3ac39a6f8d7/
changeset: d3ac39a6f8d7
user: greg
date: 2012-11-13 18:04:58
summary: Add a new HgWebConfigManager to manage the tool shed's hgweb.config file.
affected #: 8 files
diff -r 3024a86c0d0d21b1a85ff1b91516f34f416bcfcb -r d3ac39a6f8d7a5f95ffabb4ba8f8a846b17ac15a lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -118,6 +118,7 @@
self.workflows += 1
processed_relative_workflow_paths.append( relative_path )
self.generation_time = strftime( "%b %d, %Y", gmtime() )
+
def add_to_shed_tool_config( app, shed_tool_conf_dict, elem_list ):
# A tool shed repository is being installed so change the shed_tool_conf file. Parse the config file to generate the entire list
# of config_elems instead of using the in-memory list since it will be a subset of the entire list if one or more repositories have
@@ -1353,11 +1354,6 @@
ctx = get_changectx_for_changeset( repo, changeset_revision )
named_tmp_file = get_named_tmpfile_from_ctx( ctx, file_name, dir )
return named_tmp_file
-def get_hgweb_config( app ):
- hgweb_config = os.path.join( app.config.hgweb_config_dir, 'hgweb.config' )
- if not os.path.exists( hgweb_config ):
- raise Exception( "Required file %s does not exist - check config setting for hgweb_config_dir." % hgweb_config )
- return hgweb_config
def get_installed_tool_shed_repository( trans, id ):
"""Get a repository on the Galaxy side from the database via id"""
return trans.sa_session.query( trans.model.ToolShedRepository ).get( trans.security.decode_id( id ) )
diff -r 3024a86c0d0d21b1a85ff1b91516f34f416bcfcb -r d3ac39a6f8d7a5f95ffabb4ba8f8a846b17ac15a lib/galaxy/webapps/community/app.py
--- a/lib/galaxy/webapps/community/app.py
+++ b/lib/galaxy/webapps/community/app.py
@@ -46,5 +46,8 @@
# TODO: Add OpenID support
self.openid_providers = OpenIDProviders()
self.shed_counter = self.model.shed_counter
+ # Let the HgwebConfigManager know where the hgweb.config file is located.
+ self.hgweb_config_manager = self.model.hgweb_config_manager
+ self.hgweb_config_manager.hgweb_config_dir = self.config.hgweb_config_dir
def shutdown( self ):
pass
diff -r 3024a86c0d0d21b1a85ff1b91516f34f416bcfcb -r d3ac39a6f8d7a5f95ffabb4ba8f8a846b17ac15a lib/galaxy/webapps/community/controllers/hg.py
--- a/lib/galaxy/webapps/community/controllers/hg.py
+++ b/lib/galaxy/webapps/community/controllers/hg.py
@@ -1,7 +1,6 @@
import os, logging
from galaxy.web.base.controller import *
from galaxy.webapps.community.controllers.common import *
-from galaxy.util.shed_util import get_hgweb_config
from galaxy import eggs
eggs.require('mercurial')
@@ -18,9 +17,7 @@
# hg clone http://test@127.0.0.1:9009/repos/test/convert_characters1
hg_version = mercurial.__version__.version
cmd = kwd.get( 'cmd', None )
- hgweb_config = get_hgweb_config( trans.app )
- if not os.path.exists( hgweb_config ):
- raise Exception( "Required file %s does not exist." % str( hgweb_config ) )
+ hgweb_config = trans.app.hgweb_config_manager.hgweb_config
def make_web_app():
hgwebapp = hgwebdir( hgweb_config )
return hgwebapp
diff -r 3024a86c0d0d21b1a85ff1b91516f34f416bcfcb -r d3ac39a6f8d7a5f95ffabb4ba8f8a846b17ac15a lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -1,4 +1,4 @@
-import os, logging, tempfile, shutil
+import os, logging, tempfile, shutil, ConfigParser
from time import strftime
from datetime import date, datetime
from galaxy import util
@@ -11,7 +11,7 @@
from galaxy.model.orm import *
# TODO: re-factor shed_util to eliminate the following restricted imports
from galaxy.util.shed_util import create_repo_info_dict, generate_clone_url_for_repository_in_tool_shed, generate_message_for_invalid_tools
-from galaxy.util.shed_util import get_changectx_for_changeset, get_configured_ui, get_file_from_changeset_revision, get_hgweb_config
+from galaxy.util.shed_util import get_changectx_for_changeset, get_configured_ui, get_file_from_changeset_revision
from galaxy.util.shed_util import get_repository_file_contents, get_repository_in_tool_shed, get_repository_metadata_by_changeset_revision
from galaxy.util.shed_util import handle_sample_files_and_load_tool_from_disk, handle_sample_files_and_load_tool_from_tmp_config
from galaxy.util.shed_util import INITIAL_CHANGELOG_HASH, load_tool_from_config, NOT_TOOL_CONFIGS, open_repository_files_folder, remove_dir
@@ -530,26 +530,6 @@
repositories_i_own_grid = RepositoriesIOwnGrid()
deprecated_repositories_i_own_grid = DeprecatedRepositoriesIOwnGrid()
- def __add_hgweb_config_entry( self, trans, repository, repository_path ):
- # Add an entry in the hgweb.config file for a new repository. An entry looks something like:
- # repos/test/mira_assembler = database/community_files/000/repo_123.
- hgweb_config = get_hgweb_config( trans.app )
- if repository_path.startswith( './' ):
- repository_path = repository_path.replace( './', '', 1 )
- entry = "repos/%s/%s = %s" % ( repository.user.username, repository.name, repository_path )
- tmp_fd, tmp_fname = tempfile.mkstemp()
- if os.path.exists( hgweb_config ):
- # Make a backup of the hgweb.config file since we're going to be changing it.
- self.__make_hgweb_config_copy( trans, hgweb_config )
- new_hgweb_config = open( tmp_fname, 'wb' )
- for i, line in enumerate( open( hgweb_config ) ):
- new_hgweb_config.write( line )
- else:
- new_hgweb_config = open( tmp_fname, 'wb' )
- new_hgweb_config.write( '[paths]\n' )
- new_hgweb_config.write( "%s\n" % entry )
- new_hgweb_config.flush()
- shutil.move( tmp_fname, os.path.abspath( hgweb_config ) )
@web.expose
def browse_categories( self, trans, **kwd ):
# The request came from the tool shed.
@@ -824,25 +804,14 @@
selected_value=selected_value,
refresh_on_change=False,
multiple=True )
- def __change_hgweb_config_entry( self, trans, repository, old_repository_name, new_repository_name ):
- # Change an entry in the hgweb.config file for a repository. This only happens when
- # the owner changes the name of the repository. An entry looks something like:
- # repos/test/mira_assembler = database/community_files/000/repo_123.
- hgweb_config = get_hgweb_config( trans.app )
- # Make a backup of the hgweb.config file since we're going to be changing it.
- self.__make_hgweb_config_copy( trans, hgweb_config )
- repo_dir = repository.repo_path( trans.app )
- old_lhs = "repos/%s/%s" % ( repository.user.username, old_repository_name )
- new_entry = "repos/%s/%s = %s\n" % ( repository.user.username, new_repository_name, repo_dir )
- tmp_fd, tmp_fname = tempfile.mkstemp()
- new_hgweb_config = open( tmp_fname, 'wb' )
- for i, line in enumerate( open( hgweb_config ) ):
- if line.startswith( old_lhs ):
- new_hgweb_config.write( new_entry )
- else:
- new_hgweb_config.write( line )
- new_hgweb_config.flush()
- shutil.move( tmp_fname, os.path.abspath( hgweb_config ) )
+ def __change_repository_name_in_hgrc_file( self, hgrc_file, new_name ):
+ config = ConfigParser.ConfigParser()
+ config.read( hgrc_file )
+ config.set( 'web', 'name', new_name )
+ new_file = open( hgrc_file, 'wb' )
+ config.write( new_file )
+ new_file.close()
@web.expose
def check_for_updates( self, trans, **kwd ):
"""Handle a request from a local Galaxy instance."""
@@ -1001,8 +970,9 @@
os.makedirs( repository_path )
# Create the local repository
repo = hg.repository( get_configured_ui(), repository_path, create=True )
- # Add an entry in the hgweb.config file for the local repository, enabling calls to repository.repo_path( trans.app )
- self.__add_hgweb_config_entry( trans, repository, repository_path )
+ # Add an entry in the hgweb.config file for the local repository.
+ lhs = "repos/%s/%s" % ( repository.user.username, repository.name )
+ trans.app.hgweb_config_manager.add_entry( lhs, repository_path )
# Create a .hg/hgrc file for the local repository
self.__create_hgrc_file( trans, repository )
flush_needed = False
@@ -1616,13 +1586,6 @@
changeset_revision=changeset_revision,
message=message,
status='error' ) )
- def __make_hgweb_config_copy( self, trans, hgweb_config ):
- # Make a backup of the hgweb.config file
- today = date.today()
- backup_date = today.strftime( "%Y_%m_%d" )
- hgweb_config_backup_filename = 'hgweb.config_%s_backup' % backup_date
- hgweb_config_copy = os.path.join( trans.app.config.hgweb_config_dir, hgweb_config_backup_filename )
- shutil.copy( os.path.abspath( hgweb_config ), os.path.abspath( hgweb_config_copy ) )
def __make_same_length( self, list1, list2 ):
# If either list is 1 item, we'll append to it until its length is the same as the other.
if len( list1 ) == 1:
@@ -1710,7 +1673,14 @@
if message:
error = True
else:
- self.__change_hgweb_config_entry( trans, repository, repository.name, repo_name )
+ # Change the entry in the hgweb.config file for the repository.
+ old_lhs = "repos/%s/%s" % ( repository.user.username, repository.name )
+ new_lhs = "repos/%s/%s" % ( repository.user.username, repo_name )
+ new_rhs = "%s\n" % repo_dir
+ trans.app.hgweb_config_manager.change_entry( old_lhs, new_lhs, new_rhs )
+ # Change the entry in the repository's hgrc file.
+ hgrc_file = os.path.join( repo_dir, '.hg', 'hgrc' )
+ self.__change_repository_name_in_hgrc_file( hgrc_file, repo_name )
repository.name = repo_name
flush_needed = True
elif repository.times_downloaded != 0 and repo_name != repository.name:
diff -r 3024a86c0d0d21b1a85ff1b91516f34f416bcfcb -r d3ac39a6f8d7a5f95ffabb4ba8f8a846b17ac15a lib/galaxy/webapps/community/model/__init__.py
--- a/lib/galaxy/webapps/community/model/__init__.py
+++ b/lib/galaxy/webapps/community/model/__init__.py
@@ -6,7 +6,6 @@
"""
import os.path, os, errno, sys, codecs, operator, logging, tarfile, mimetypes, ConfigParser
from galaxy import util
-from galaxy.util.shed_util import get_hgweb_config
from galaxy.util.bunch import Bunch
from galaxy.util.hash_util import *
from galaxy.web.form_builder import *
@@ -110,12 +109,7 @@
MARKED_FOR_REMOVAL = 'r',
MARKED_FOR_ADDITION = 'a',
NOT_TRACKED = '?' )
- # Handle to the hgweb.config file on disk.
- hgweb_config_file = None
- # This repository's entry in the hgweb.config file on disk.
- hgweb_path = None
- def __init__( self, name=None, description=None, long_description=None, user_id=None, private=False, email_alerts=None, times_downloaded=0,
- deprecated=False ):
+ def __init__( self, name=None, description=None, long_description=None, user_id=None, private=False, email_alerts=None, times_downloaded=0, deprecated=False ):
self.name = name or "Unnamed repository"
self.description = description
self.long_description = long_description
@@ -124,27 +118,8 @@
self.email_alerts = email_alerts
self.times_downloaded = times_downloaded
self.deprecated = deprecated
- def get_hgweb_config_file( self, app ):
- if self.hgweb_config_file is None:
- self.hgweb_config_file = get_hgweb_config( app )
- return self.hgweb_config_file
- def get_hgweb_path( self, app ):
- # TODO: If possible, handle this using the mercurial api.
- if self.hgweb_path is None:
- lhs = os.path.join( "repos", self.user.username, self.name )
- config = ConfigParser.ConfigParser()
- config.read( self.get_hgweb_config_file( app ) )
- for option in config.options( "paths" ):
- if option == lhs:
- self.hgweb_path = config.get( "paths", option )
- break
- if self.hgweb_path is None:
- raise Exception( "Entry for repository %s missing in file %s." % ( lhs, hgweb_config ) )
- return self.hgweb_path
def repo_path( self, app ):
- # Repository locations on disk are stored in the hgweb.config file located in the directory defined by the config setting hgweb_config_dir.
- # An entry looks something like: repos/test/mira_assembler = database/community_files/000/repo_123
- return self.get_hgweb_path( app )
+ return app.hgweb_config_manager.get_entry( os.path.join( "repos", self.user.username, self.name ) )
def revision( self, app ):
repo = hg.repository( ui.ui(), self.repo_path( app ) )
tip_ctx = repo.changectx( repo.changelog.tip() )
diff -r 3024a86c0d0d21b1a85ff1b91516f34f416bcfcb -r d3ac39a6f8d7a5f95ffabb4ba8f8a846b17ac15a lib/galaxy/webapps/community/model/mapping.py
--- a/lib/galaxy/webapps/community/model/mapping.py
+++ b/lib/galaxy/webapps/community/model/mapping.py
@@ -14,6 +14,7 @@
from galaxy.model.custom_types import *
from galaxy.util.bunch import Bunch
from galaxy.util.shed_util import ShedCounter
+from galaxy.webapps.community.util.hgweb_config import *
from galaxy.webapps.community.security import CommunityRBACAgent
metadata = MetaData()
@@ -318,4 +319,5 @@
# Load local tool shed security policy
result.security_agent = CommunityRBACAgent( result )
result.shed_counter = ShedCounter( result )
+ result.hgweb_config_manager = HgWebConfigManager()
return result
diff -r 3024a86c0d0d21b1a85ff1b91516f34f416bcfcb -r d3ac39a6f8d7a5f95ffabb4ba8f8a846b17ac15a lib/galaxy/webapps/community/util/hgweb_config.py
--- /dev/null
+++ b/lib/galaxy/webapps/community/util/hgweb_config.py
@@ -0,0 +1,63 @@
+import sys, os, ConfigParser, logging, shutil
+from time import strftime
+from datetime import date
+
+log = logging.getLogger( __name__ )
+
+class HgWebConfigManager( object ):
+ def __init__( self ):
+ self.hgweb_config_dir = None
+ self.in_memory_config = None
+ def add_entry( self, lhs, rhs ):
+ """Add an entry in the hgweb.config file for a new repository."""
+ # Since we're changing the config, make sure the latest is loaded into memory.
+ self.read_config( force_read=True )
+ # An entry looks something like: repos/test/mira_assembler = database/community_files/000/repo_123.
+ if rhs.startswith( './' ):
+ rhs = rhs.replace( './', '', 1 )
+ self.make_backup()
+ # Add the new entry into memory.
+ self.in_memory_config.set( 'paths', lhs, rhs )
+ # Persist our in-memory configuration.
+ self.write_config()
+ def change_entry( self, old_lhs, new_lhs, new_rhs ):
+ """Change an entry in the hgweb.config file for a repository - this only happens when the owner changes the name of the repository."""
+ self.make_backup()
+ # Remove the old entry.
+ self.in_memory_config.remove_option( 'paths', old_lhs )
+ # Add the new entry.
+ self.in_memory_config.set( 'paths', new_lhs, new_rhs )
+ # Persist our in-memory configuration.
+ self.write_config()
+ def get_entry( self, lhs ):
+ """Return an entry in the hgweb.config file for a repository"""
+ self.read_config()
+ try:
+ entry = self.in_memory_config.get( 'paths', lhs )
+ except ConfigParser.NoOptionError:
+ raise Exception( "Entry for repository %s missing in file %s." % ( lhs, self.hgweb_config ) )
+ return entry
+ @property
+ def hgweb_config( self ):
+ hgweb_config = os.path.join( self.hgweb_config_dir, 'hgweb.config' )
+ if not os.path.exists( hgweb_config ):
+ raise Exception( "Required file %s does not exist - check config setting for hgweb_config_dir." % hgweb_config )
+ return os.path.abspath( hgweb_config )
+ def make_backup( self ):
+ # Make a backup of the hgweb.config file.
+ today = date.today()
+ backup_date = today.strftime( "%Y_%m_%d" )
+ hgweb_config_backup_filename = 'hgweb.config_%s_backup' % backup_date
+ hgweb_config_copy = os.path.join( self.hgweb_config_dir, hgweb_config_backup_filename )
+ shutil.copy( os.path.abspath( self.hgweb_config ), os.path.abspath( hgweb_config_copy ) )
+ def read_config( self, force_read=False ):
+ if force_read or self.in_memory_config is None:
+ config = ConfigParser.ConfigParser()
+ config.read( self.hgweb_config )
+ self.in_memory_config = config
+ def write_config( self ):
+ """Writing the in-memory configuration to the hgweb.config file on disk."""
+ config_file = open( self.hgweb_config, 'wb' )
+ self.in_memory_config.write( config_file )
+ config_file.close()
+
\ No newline at end of file
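The new manager is a thin wrapper around ConfigParser operations on a single [paths] section. A minimal, self-contained sketch of the same read/set/write round-trip (Python 2, matching the surrounding code; the temporary directory is illustrative, and the entry mirrors the example quoted in the removed controller code):

import os, tempfile, ConfigParser

hgweb_config_dir = tempfile.mkdtemp()
hgweb_config = os.path.join( hgweb_config_dir, 'hgweb.config' )

# add_entry(): record where a repository lives on disk.
config = ConfigParser.ConfigParser()
config.add_section( 'paths' )
config.set( 'paths', 'repos/test/mira_assembler', 'database/community_files/000/repo_123' )

# write_config(): persist the in-memory configuration.
config_file = open( hgweb_config, 'wb' )
config.write( config_file )
config_file.close()

# get_entry(): read the entry back from disk.
config = ConfigParser.ConfigParser()
config.read( hgweb_config )
print( config.get( 'paths', 'repos/test/mira_assembler' ) )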
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

commit/galaxy-central: greg: Move the important details to the beginning of the new tool shed repository alert email template, and include the repository name in the email subject.
by Bitbucket 12 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/3024a86c0d0d/
changeset: 3024a86c0d0d
user: greg
date: 2012-11-12 22:39:49
summary: Move the important details to the beginning of the new tool shed repository alert email template, and include the repository name in the email subject.
affected #: 1 file
diff -r 74a7bc65f1ed8bde40425d1823ad8761b6cdcea8 -r 3024a86c0d0d21b1a85ff1b91516f34f416bcfcb lib/galaxy/webapps/community/controllers/common.py
--- a/lib/galaxy/webapps/community/controllers/common.py
+++ b/lib/galaxy/webapps/community/controllers/common.py
@@ -25,25 +25,23 @@
log = logging.getLogger( __name__ )
new_repo_email_alert_template = """
-GALAXY TOOL SHED NEW REPOSITORY ALERT
------------------------------------------------------------------------------
-You received this alert because you registered to receive email when
-new repositories were created in the Galaxy tool shed named "${host}".
------------------------------------------------------------------------------
+Revision: ${revision}
+Change description:
+${description}
Repository name: ${repository_name}
+Uploaded by: ${username}
Date content uploaded: ${display_date}
-Uploaded by: ${username}
-
-Revision: ${revision}
-Change description:
-${description}
${content_alert_str}
-----------------------------------------------------------------------------
This change alert was sent from the Galaxy tool shed hosted on the server
"${host}"
+-----------------------------------------------------------------------------
+You received this alert because you registered to receive email when
+new repositories were created in the Galaxy tool shed named "${host}".
+-----------------------------------------------------------------------------
"""
email_alert_template = """
@@ -497,7 +495,8 @@
admin_users = trans.app.config.get( "admin_users", "" ).split( "," )
frm = email_from
if new_repo_alert:
- subject = "New Galaxy tool shed repository alert"
+ subject = "Galaxy tool shed alert for new repository named %s" % str( repository.name )
+ subject = subject[ :80 ]
email_alerts = []
for user in trans.sa_session.query( trans.model.User ) \
.filter( and_( trans.model.User.table.c.deleted == False,
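Galaxy fills these ${...} alert templates with its own templating utilities; purely as an illustration of the placeholders (not the actual rendering path), Python's string.Template can substitute an abridged copy of the template above, and the handler additionally truncates the subject to 80 characters as in the hunk shown:

from string import Template

abridged_alert_template = """
Revision: ${revision}
Change description:
${description}

Repository name: ${repository_name}
Uploaded by: ${username}
Date content uploaded: ${display_date}
"""

# All substitution values below are made-up examples.
body = Template( abridged_alert_template ).safe_substitute(
    revision='3:abc123def456',
    description='Initial upload of the assembler wrapper.',
    repository_name='mira_assembler',
    username='test',
    display_date='Nov 12, 2012' )

subject = "Galaxy tool shed alert for new repository named %s" % 'mira_assembler'
subject = subject[ :80 ]  # keep the subject at or under 80 characters, as in the commit
print( subject )
print( body )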
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.