1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/6641f66fdafd/
changeset: 6641f66fdafd
user: fubar
date: 2012-10-25 09:45:32
summary: Fix to enable columnlistparameter to correctly restore values on redo if use_header_names is set
affected #: 1 file
diff -r 75f3092d9978faebb6aa41f50df9b7f5dd5e8d53 -r 6641f66fdafd6684deda5c8d132c28491c5d39c7 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1002,7 +1002,7 @@
for col in column_list:
if col != 'None':
if type(col) == type(()) and len(col) == 2: # fiddled
- options.append((col[1],'c' + col[0],False))
+ options.append((col[1],col[0],False))
else:
options.append( ( 'c' + col, col, False ) )
return options
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
it because the commit notification service is enabled for the
recipient of this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/6430c28aaa4e/
changeset: 6430c28aaa4e
user: greg
date: 2012-10-24 20:32:50
summary: Add more information to the community_wsgi.ini.sample file.
affected #: 1 file
diff -r 29532deaf240a8df6ac1c904d009fe5d23696020 -r 6430c28aaa4ea4721baf2e03fd6b924483728f24 community_wsgi.ini.sample
--- a/community_wsgi.ini.sample
+++ b/community_wsgi.ini.sample
@@ -38,9 +38,21 @@
session_key = galaxysessions
session_secret = changethisinproduction
-# Galaxy session security
+# -- Users and Security
+
+# Galaxy encodes various internal values when these values will be output in
+# some format (for example, in a URL or cookie). You should set a key to be
+# used by the algorithm that encodes and decodes these values. It can be any
+# string. If left unchanged, anyone could construct a cookie that would grant
+# them access to others' sessions.
id_secret = changethisinproductiontoo
+# User authentication can be delegated to an upstream proxy server (usually
+# Apache). The upstream proxy should set a REMOTE_USER header in the request.
+# Enabling remote user disables regular logins. For more information, see:
+# http://wiki.g2.bx.psu.edu/Admin/Config/Apache%20Proxy
+#use_remote_user = False
+
# Configuration for debugging middleware
debug = true
use_lint = false
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
it because the commit notification service is enabled for the
recipient of this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/ce8cabd05c3b/
changeset: ce8cabd05c3b
user: dan
date: 2012-10-24 18:33:34
summary: Enhance GenomeSpace import tool to display a better history item name for additional primary datasets.
affected #: 1 file
diff -r 3fe486853787d10c2ba6f0b79f82aa59aaed8790 -r ce8cabd05c3b5cb4d441eac42beaebaeb048aa9a tools/genomespace/genomespace_importer.py
--- a/tools/genomespace/genomespace_importer.py
+++ b/tools/genomespace/genomespace_importer.py
@@ -100,7 +100,7 @@
datasource_params = json_params.get( 'param_dict' )
assert None not in [ username, token ], "Missing GenomeSpace username or token."
output_filename = datasource_params.get( "output_file1", None )
- dataset_id = json_params['output_data'][0]['dataset_id']
+ dataset_id = base_dataset_id = json_params['output_data'][0]['dataset_id']
hda_id = json_params['output_data'][0]['hda_id']
url_opener = get_cookie_opener( username, token )
#load and set genomespace format ids to galaxy exts
@@ -182,12 +182,18 @@
name = "GenomeSpace importer on %s" % ( filename ) ) ) )
#if using tmp file, move the file to the new file path dir to get scooped up later
if using_temp_file:
+ original_filename = filename
filename = ''.join( c in VALID_CHARS and c or '-' for c in filename )
while filename in used_filenames:
filename = "-%s" % filename
used_filenames.append( filename )
- shutil.move( output_filename, os.path.join( datasource_params['__new_file_path__'], 'primary_%i_%s_visible_%s' % ( hda_id, filename, file_type ) ) )
-
+ target_output_filename = os.path.join( datasource_params['__new_file_path__'], 'primary_%i_%s_visible_%s' % ( hda_id, filename, file_type ) )
+ shutil.move( output_filename, target_output_filename )
+ metadata_parameter_file.write( "%s\n" % simplejson.dumps( dict( type = 'new_primary_dataset',
+ base_dataset_id = base_dataset_id,
+ ext = file_type,
+ filename = target_output_filename,
+ name = "GenomeSpace importer on %s" % ( original_filename ) ) ) )
dataset_id = None #only one primary dataset available
output_filename = None #only have one filename available
metadata_parameter_file.close()
https://bitbucket.org/galaxy/galaxy-central/changeset/87e6251af95f/
changeset: 87e6251af95f
user: dan
date: 2012-10-24 18:33:34
summary: Enhance GenomeSpace filebrowser importer to display a better history item name for additional primary datasets.
affected #: 1 file
diff -r ce8cabd05c3b5cb4d441eac42beaebaeb048aa9a -r 87e6251af95f57e4fa24d6e0c9eaefec6016025d tools/genomespace/genomespace_file_browser.py
--- a/tools/genomespace/genomespace_file_browser.py
+++ b/tools/genomespace/genomespace_file_browser.py
@@ -140,11 +140,17 @@
if not filename:
filename = download_url
if output_filename is None:
+ original_filename = filename
filename = ''.join( c in VALID_CHARS and c or '-' for c in filename )
while filename in used_filenames:
filename = "-%s" % filename
used_filenames.append( filename )
output_filename = os.path.join( datasource_params['__new_file_path__'], 'primary_%i_%s_visible_%s' % ( hda_id, filename, galaxy_ext ) )
+ metadata_parameter_file.write( "%s\n" % simplejson.dumps( dict( type = 'new_primary_dataset',
+ base_dataset_id = dataset_id,
+ ext = galaxy_ext,
+ filename = output_filename,
+ name = "GenomeSpace import on %s" % ( original_filename ) ) ) )
else:
if dataset_id is not None:
metadata_parameter_file.write( "%s\n" % simplejson.dumps( dict( type = 'dataset',
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
it because the commit notification service is enabled for the
recipient of this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/0c56502c7fd7/
changeset: 0c56502c7fd7
user: greg
date: 2012-10-24 17:44:32
summary: Apply patch from Peter Cock which issues a warning if loading a loc file with inconsistent numbers of tabs.
affected #: 1 file
diff -r 7c671a79de1ed98fb2b7253e91fae551c0222a29 -r 0c56502c7fd78431001012cb56b3df1224472769 lib/galaxy/tools/parameters/dynamic_options.py
--- a/lib/galaxy/tools/parameters/dynamic_options.py
+++ b/lib/galaxy/tools/parameters/dynamic_options.py
@@ -471,6 +471,7 @@
def parse_file_fields( self, reader ):
rval = []
+ field_count = None
for line in reader:
if line.startswith( '#' ) or ( self.line_startswith and not line.startswith( self.line_startswith ) ):
continue
@@ -478,6 +479,16 @@
if line:
fields = line.split( self.separator )
if self.largest_index < len( fields ):
+ if not field_count:
+ field_count = len( fields )
+ elif field_count != len( fields ):
+ try:
+ name = reader.name
+ except AttributeError:
+ name = "a configuration file"
+ # Perhaps this should be an error, but even a warning is useful.
+ log.warn( "Inconsistent number of fields (%i vs %i) in %s using separator %r, check line: %r" % \
+ ( field_count, len( fields ), name, self.separator, line ) )
rval.append( fields )
return rval
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
it because the commit notification service is enabled for the
recipient of this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/7c671a79de1e/
changeset: 7c671a79de1e
user: greg
date: 2012-10-24 17:35:59
summary: Add the free text search feature (enabling searching on repository name) to the tool shed's RepositoryMetadata grid in the admin controller.
affected #: 1 file
diff -r bb365c782b64c37b90333d964f55cba0acd401da -r 7c671a79de1ed98fb2b7253e91fae551c0222a29 lib/galaxy/webapps/community/controllers/admin.py
--- a/lib/galaxy/webapps/community/controllers/admin.py
+++ b/lib/galaxy/webapps/community/controllers/admin.py
@@ -424,6 +424,11 @@
DeprecatedColumn( "Deprecated", attach_popup=False ),
MaliciousColumn( "Malicious", attach_popup=False )
]
+ columns.append( grids.MulticolFilterColumn( "Search repository name",
+ cols_to_filter=[ columns[1] ],
+ key="free-text-search",
+ visible=False,
+ filterable="standard" ) )
operations = [ grids.GridOperation( "Delete",
allow_multiple=False,
allow_popup=True,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/bb365c782b64/
changeset: bb365c782b64
user: natefoo
date: 2012-10-24 17:34:16
summary: Implement job_walltime for the local runner.
affected #: 3 files
diff -r a5db4601ddbe117b5eb393f5d92982e50a866c7d -r bb365c782b64c37b90333d964f55cba0acd401da lib/galaxy/config.py
--- a/lib/galaxy/config.py
+++ b/lib/galaxy/config.py
@@ -5,6 +5,7 @@
import sys, os, tempfile
import logging, logging.config
import ConfigParser
+from datetime import timedelta
from galaxy.util import string_as_bool, listify, parse_xml
from galaxy import eggs
@@ -99,6 +100,10 @@
self.output_size_limit = int( kwargs.get( 'output_size_limit', 0 ) )
self.retry_job_output_collection = int( kwargs.get( 'retry_job_output_collection', 0 ) )
self.job_walltime = kwargs.get( 'job_walltime', None )
+ self.job_walltime_delta = None
+ if self.job_walltime is not None:
+ h, m, s = [ int( v ) for v in self.job_walltime.split( ':' ) ]
+ self.job_walltime_delta = timedelta( 0, s, 0, 0, m, h )
self.admin_users = kwargs.get( "admin_users", "" )
self.mailing_join_addr = kwargs.get('mailing_join_addr',"galaxy-announce-join@bx.psu.edu")
self.error_email_to = kwargs.get( 'error_email_to', None )
diff -r a5db4601ddbe117b5eb393f5d92982e50a866c7d -r bb365c782b64c37b90333d964f55cba0acd401da lib/galaxy/jobs/runners/local.py
--- a/lib/galaxy/jobs/runners/local.py
+++ b/lib/galaxy/jobs/runners/local.py
@@ -1,6 +1,7 @@
import logging
import subprocess
import tempfile
+import datetime
from Queue import Queue
import threading
@@ -89,28 +90,36 @@
preexec_fn = os.setpgrp )
job_wrapper.set_runner( 'local:///', proc.pid )
job_wrapper.change_state( model.Job.states.RUNNING )
- if self.app.config.output_size_limit > 0:
- sleep_time = 1
- while proc.poll() is None:
+ sleep_time = 1
+ job_start = datetime.datetime.now()
+ while proc.poll() is None:
+ if self.app.config.output_size_limit > 0:
for outfile, size in job_wrapper.check_output_sizes():
if size > self.app.config.output_size_limit:
# Error the job immediately
- job_wrapper.fail( 'Job output grew too large (greater than %s), please try different job parameters or' \
+ job_wrapper.fail( 'Job output grew too large (greater than %s), please try different job parameters' \
% nice_size( self.app.config.output_size_limit ) )
log.warning( 'Terminating job %s due to output %s growing larger than %s limit' \
% ( job_wrapper.job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
# Then kill it
- os.killpg( proc.pid, 15 )
- sleep( 1 )
- if proc.poll() is None:
- os.killpg( proc.pid, 9 )
- proc.wait() # reap
+ self._terminate( proc )
log.debug( 'Job %s (pid %s) terminated' % ( job_wrapper.job_id, proc.pid ) )
return
sleep( sleep_time )
- if sleep_time < 8:
- # So we don't stat every second
- sleep_time *= 2
+ if self.app.config.job_walltime_delta is not None:
+ time_executing = datetime.datetime.now() - job_start
+ if time_executing > self.app.config.job_walltime_delta:
+ # Error the job immediately
+ job_wrapper.fail( 'Job ran longer than maximum allowed execution time (%s), please try different job parameters' \
+ % self.app.config.job_walltime )
+ log.warning( 'Terminating job %s since walltime has been reached' % job_wrapper.job_id )
+ # Then kill it
+ self._terminate( proc )
+ log.debug( 'Job %s (pid %s) terminated' % ( job_wrapper.job_id, proc.pid ) )
+ return
+ if sleep_time < 8:
+ # So we don't stat every second
+ sleep_time *= 2
# Reap the process and get the exit code.
exit_code = proc.wait()
stdout_file.seek( 0 )
@@ -202,3 +211,9 @@
# local jobs can't be recovered
job_wrapper.change_state( model.Job.states.ERROR, info = "This job was killed when Galaxy was restarted. Please retry the job." )
+ def _terminate( self, proc ):
+ os.killpg( proc.pid, 15 )
+ sleep( 1 )
+ if proc.poll() is None:
+ os.killpg( proc.pid, 9 )
+ return proc.wait() # reap
diff -r a5db4601ddbe117b5eb393f5d92982e50a866c7d -r bb365c782b64c37b90333d964f55cba0acd401da lib/galaxy/jobs/runners/pbs.py
--- a/lib/galaxy/jobs/runners/pbs.py
+++ b/lib/galaxy/jobs/runners/pbs.py
@@ -128,10 +128,6 @@
# set the default server during startup
self.default_pbs_server = None
self.determine_pbs_server( 'pbs:///' )
- self.job_walltime = None
- if self.app.config.job_walltime is not None:
- h, m, s = [ int( v ) for v in self.app.config.job_walltime.split( ':' ) ]
- self.job_walltime = timedelta( 0, s, 0, 0, m, h )
self.monitor_thread = threading.Thread( target=self.monitor )
self.monitor_thread.start()
self.work_queue = Queue()
@@ -422,7 +418,7 @@
fail = False
for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
if size > self.app.config.output_size_limit:
- pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters or' \
+ pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters' \
% nice_size( self.app.config.output_size_limit )
log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
% ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
@@ -432,14 +428,14 @@
break
if fail:
continue
- if self.job_walltime is not None:
+ if self.app.config.job_walltime_delta is not None:
# Check the job's execution time
if status.get( 'resources_used', False ):
# resources_used may not be in the status for new jobs
h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
time_executing = timedelta( 0, s, 0, 0, m, h )
- if time_executing > self.job_walltime:
- pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters or' \
+ if time_executing > self.app.config.job_walltime_delta:
+ pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters' \
% self.app.config.job_walltime
log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
% ( galaxy_job_id, job_id ) )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
it because the commit notification service is enabled for the
recipient of this email.