1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/96aabc4e66f1/
Changeset: 96aabc4e66f1
Branch: stable
User: natefoo
Date: 2015-02-02 19:00:23+00:00
Summary: Update tag latest_2015.01.13 for changeset fd75aaee91cf
Affected #: 1 file
diff -r fd75aaee91cf3e8a0916689dfea72e0c752c447c -r 96aabc4e66f193d4bb3b7b37d8ad4dce671f1b04 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -22,4 +22,4 @@
2092948937ac30ef82f71463a235c66d34987088 release_2014.10.06
782fa60fc65488aea0c618d723e9a63d42caf865 latest_2014.10.06
2e8dd2949dd3eee0f56f9a3a5ebf1b2baca24aee release_2015.01.13
-c5e7535b4d229dbbe52d48ea35a27ab601205b7b latest_2015.01.13
+fd75aaee91cf3e8a0916689dfea72e0c752c447c latest_2015.01.13
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d1b39530bd1f/
Changeset: d1b39530bd1f
User: natefoo
Date: 2015-02-02 18:42:29+00:00
Summary: Remove GeneTrack tag from eggs.ini as well.
Affected #: 1 file
diff -r eca1b4b64574cf09b4cbe35c4a8b103c1b455a79 -r d1b39530bd1ffa10c482e7f7c49b2b41c5a847a1 eggs.ini
--- a/eggs.ini
+++ b/eggs.ini
@@ -82,7 +82,6 @@
psycopg2 = _9.2.4_static
pysqlite = _3.6.17_static
MySQL_python = _5.1.41_static
-GeneTrack = _dev_48da9e998f0caf01c5be731e926f4b0481f658f0
pysam = _kanwei_b10f6e722e9a
; dependency source urls, necessary for scrambling. for an explanation, see
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/2911cf250e75/
Changeset: 2911cf250e75
User: jmchilton
Date: 2015-02-02 14:27:19+00:00
Summary: Temporary config option to isolate tool commands in their own shell.
Like done for Docker to isolate metadata commands from the environment modifications required to resolve tool dependencies. Should allow for Python 3 dependencies (originally also allowed samtools - but Nate's other commit resolved that problem also).
Just meant as a config option for now - it will become the default once tested more thoroughly. For now enable it by setting enable_beta_tool_command_isolation to True in galaxy.ini.
Affected #: 6 files
diff -r 0d92ab68b8b3764e98d338c7d594b8e77aaf5b99 -r 2911cf250e75c953700e19f2e02b4f7f6f6c7f02 config/galaxy.ini.sample
--- a/config/galaxy.ini.sample
+++ b/config/galaxy.ini.sample
@@ -790,6 +790,11 @@
# Enable Galaxy to communicate directly with a sequencer
#enable_sequencer_communication = False
+# Separate tool command from rest of job script so tool dependencies
+# don't interfer with metadata generation (this will be the default
+# in a future release).
+#enable_beta_tool_command_isolation = False
+
# Enable beta workflow modules that should not yet be considered part of Galaxy's
# stable API.
diff -r 0d92ab68b8b3764e98d338c7d594b8e77aaf5b99 -r 2911cf250e75c953700e19f2e02b4f7f6f6c7f02 lib/galaxy/config.py
--- a/lib/galaxy/config.py
+++ b/lib/galaxy/config.py
@@ -193,6 +193,7 @@
# Tasked job runner.
self.use_tasked_jobs = string_as_bool( kwargs.get( 'use_tasked_jobs', False ) )
self.local_task_queue_workers = int(kwargs.get("local_task_queue_workers", 2))
+ self.commands_in_new_shell = string_as_bool( kwargs.get( 'enable_beta_tool_command_isolation', "False" ) )
# The transfer manager and deferred job queue
self.enable_beta_job_managers = string_as_bool( kwargs.get( 'enable_beta_job_managers', 'False' ) )
# These workflow modules should not be considered part of Galaxy's
diff -r 0d92ab68b8b3764e98d338c7d594b8e77aaf5b99 -r 2911cf250e75c953700e19f2e02b4f7f6f6c7f02 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -765,6 +765,10 @@
def get_parallelism(self):
return self.tool.parallelism
+ @property
+ def commands_in_new_shell(self):
+ return self.app.config.commands_in_new_shell
+
# legacy naming
get_job_runner = get_job_runner_url
diff -r 0d92ab68b8b3764e98d338c7d594b8e77aaf5b99 -r 2911cf250e75c953700e19f2e02b4f7f6f6c7f02 lib/galaxy/jobs/command_factory.py
--- a/lib/galaxy/jobs/command_factory.py
+++ b/lib/galaxy/jobs/command_factory.py
@@ -5,6 +5,7 @@
CAPTURE_RETURN_CODE = "return_code=$?"
YIELD_CAPTURED_CODE = 'sh -c "exit $return_code"'
+DEFAULT_SHELL = "/bin/sh"
from logging import getLogger
log = getLogger( __name__ )
@@ -45,28 +46,21 @@
if not container:
__handle_dependency_resolution(commands_builder, job_wrapper, remote_command_params)
- if container:
- # Stop now and build command before handling metadata and copying
- # working directory files back. These should always happen outside
- # of docker container - no security implications when generating
- # metadata and means no need for Galaxy to be available to container
- # and not copying workdir outputs back means on can be more restrictive
- # of where container can write to in some circumstances.
-
- local_container_script = join( job_wrapper.working_directory, "container.sh" )
- fh = file( local_container_script, "w" )
- fh.write( "#!/bin/sh\n%s" % commands_builder.build() )
- fh.close()
- chmod( local_container_script, 0755 )
-
- compute_container_script = local_container_script
- if 'working_directory' in remote_command_params:
- compute_container_script = "/bin/sh %s" % join(remote_command_params['working_directory'], "container.sh")
-
- run_in_container_command = container.containerize_command(
- compute_container_script
- )
- commands_builder = CommandsBuilder( run_in_container_command )
+ if container or job_wrapper.commands_in_new_shell:
+ externalized_commands = __externalize_commands(job_wrapper, commands_builder, remote_command_params)
+ if container:
+ # Stop now and build command before handling metadata and copying
+ # working directory files back. These should always happen outside
+ # of docker container - no security implications when generating
+ # metadata and means no need for Galaxy to be available to container
+ # and not copying workdir outputs back means on can be more restrictive
+ # of where container can write to in some circumstances.
+ run_in_container_command = container.containerize_command(
+ externalized_commands
+ )
+ commands_builder = CommandsBuilder( run_in_container_command )
+ else:
+ commands_builder = CommandsBuilder( externalized_commands )
if include_work_dir_outputs:
__handle_work_dir_outputs(commands_builder, job_wrapper, runner, remote_command_params)
@@ -77,6 +71,20 @@
return commands_builder.build()
+def __externalize_commands(job_wrapper, commands_builder, remote_command_params, script_name="tool_script.sh"):
+ local_container_script = join( job_wrapper.working_directory, script_name )
+ fh = file( local_container_script, "w" )
+ fh.write( "#!%s\n%s" % (DEFAULT_SHELL, commands_builder.build()))
+ fh.close()
+ chmod( local_container_script, 0755 )
+
+ commands = local_container_script
+ if 'working_directory' in remote_command_params:
+ commands = "%s %s" % (DEFAULT_SHELL, join(remote_command_params['working_directory'], script_name))
+ log.info("Built script [%s] for tool command[%s]" % (local_container_script, commands))
+ return commands
+
+
def __handle_version_command(commands_builder, job_wrapper):
# Prepend version string
write_version_cmd = job_wrapper.write_version_cmd
diff -r 0d92ab68b8b3764e98d338c7d594b8e77aaf5b99 -r 2911cf250e75c953700e19f2e02b4f7f6f6c7f02 test/unit/jobs/test_command_factory.py
--- a/test/unit/jobs/test_command_factory.py
+++ b/test/unit/jobs/test_command_factory.py
@@ -140,6 +140,7 @@
self.configured_external_metadata_kwds = None
self.working_directory = "job1"
self.prepare_input_files_cmds = None
+ self.commands_in_new_shell = False
def get_command_line(self):
return self.command_line
diff -r 0d92ab68b8b3764e98d338c7d594b8e77aaf5b99 -r 2911cf250e75c953700e19f2e02b4f7f6f6c7f02 test/unit/jobs/test_runner_local.py
--- a/test/unit/jobs/test_runner_local.py
+++ b/test/unit/jobs/test_runner_local.py
@@ -106,6 +106,7 @@
self.tool = tool
self.state = model.Job.states.QUEUED
self.command_line = "echo HelloWorld"
+ self.commands_in_new_shell = False
self.prepare_called = False
self.write_version_cmd = None
self.dependency_shell_commands = None
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/a56fdaab0e25/
Changeset: a56fdaab0e25
User: jmchilton
Date: 2015-02-02 14:14:09+00:00
Summary: Merged in nsoranzo/galaxy-central (pull request #647)
More enhancements to rolling_restart.sh
Affected #: 1 file
diff -r 16efc5713d85f631c8ef9ad11895730f4747ea7e -r a56fdaab0e25011febcd98fe376adf39c7ebfb10 rolling_restart.sh
--- a/rolling_restart.sh
+++ b/rolling_restart.sh
@@ -1,21 +1,7 @@
#!/bin/sh
+
cd `dirname $0`
-check_if_not_started(){
- # Search for all pids in the logs and tail for the last one
- latest_pid=`egrep '^Starting server in PID [0-9]+\.$' $1 -o | sed 's/Starting server in PID //g;s/\.$//g' | tail -n 1`
- # Grab the current pid from the file we were given
- current_pid_in_file=$(cat $2);
- # If they're equivalent, then the current pid file agrees with our logs
- # and we've succesfully started
- if [ $latest_pid -eq $current_pid_in_file ];
- then
- echo 0;
- else
- echo 1;
- fi
-}
-
# If there is a .venv/ directory, assume it contains a virtualenv that we
# should run this instance in.
if [ -d .venv ];
@@ -23,6 +9,15 @@
. .venv/bin/activate
fi
+python ./scripts/check_python.py
+[ $? -ne 0 ] && exit 1
+
+./scripts/common_startup.sh
+
+if [ -n "$GALAXY_UNIVERSE_CONFIG_DIR" ]; then
+ python ./scripts/build_universe_config.py "$GALAXY_UNIVERSE_CONFIG_DIR"
+fi
+
if [ -z "$GALAXY_CONFIG_FILE" ]; then
if [ -f universe_wsgi.ini ]; then
GALAXY_CONFIG_FILE=universe_wsgi.ini
@@ -35,32 +30,32 @@
fi
servers=`sed -n 's/^\[server:\(.*\)\]/\1/ p' $GALAXY_CONFIG_FILE | xargs echo`
-for server in $servers;
-do
- # If there's a pid
- if [ -e $server.pid ]
- then
- # Then kill it
- echo "Killing $server"
- pid=`cat $server.pid`
- kill $pid
- else
- # Otherwise just continue
- echo "$server not running"
- fi
- # Start the server (and background) (should this be nohup'd?)
- python ./scripts/paster.py serve $GALAXY_CONFIG_FILE --server-name=$server --pid-file=$server.pid --log-file=$server.log --daemon $@
- # Wait for the server to start
- sleep 1
- # Grab the new pid
- pid=`cat $server.pid`
- result=1
- # Wait for the latest pid in the file to be the pid we've grabbed
- while [ $result -eq 1 ]
- do
- result=$(check_if_not_started $server.log $server.pid)
- printf "."
- sleep 1
- done
- echo
+for server in $servers; do
+ # If there's a pid
+ if [ -e $server.pid ]; then
+ # Then kill it
+ echo "Killing $server"
+ pid=`cat $server.pid`
+ kill $pid
+ else
+ # Otherwise just continue
+ echo "$server not running"
+ fi
+ # Start the server (and background) (should this be nohup'd?)
+ python ./scripts/paster.py serve $GALAXY_CONFIG_FILE --server-name=$server --pid-file=$server.pid --log-file=$server.log --daemon $@
+ while true; do
+ sleep 1
+ printf "."
+ # Grab the current pid from the pid file
+ if ! current_pid_in_file=$(cat $server.pid); then
+ echo "A Galaxy process died, interrupting" >&2
+ exit 1
+ fi
+ # Search for all pids in the logs and tail for the last one
+ latest_pid=`egrep '^Starting server in PID [0-9]+\.$' $server.log -o | sed 's/Starting server in PID //g;s/\.$//g' | tail -n 1`
+ # If they're equivalent, then the current pid file agrees with our logs
+ # and we've succesfully started
+ [ -n "$latest_pid" ] && [ $latest_pid -eq $current_pid_in_file ] && break
+ done
+ echo
done
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.