1 new commit in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/commits/ae7da2ebe773/

Changeset: ae7da2ebe773
User:      Jeremy Goecks
Date:      2014-05-09 22:15:27
Summary:   Add a CLI-driven Slurm runner so that Galaxy can be run on
           non-submit hosts with a Slurm cluster. Add some flexibility for
           obtaining the external job ID in the CLI runner as well.
Affected #: 2 files

diff -r 3210eaa1db6cb6af4e5c4846c1fec39585405cd9 -r ae7da2ebe7737d6e6ba142d1ac24a24b467285ad lib/galaxy/jobs/runners/cli.py
--- a/lib/galaxy/jobs/runners/cli.py
+++ b/lib/galaxy/jobs/runners/cli.py
@@ -99,7 +99,9 @@
             log.error('(%s) submission failed (stderr): %s' % (galaxy_id_tag, cmd_out.stderr))
             job_wrapper.fail("failure submitting job")
             return
-        external_job_id = cmd_out.stdout.strip()
+        # Some job runners return something like 'Submitted batch job XXXX'.
+        # Strip and split to get job ID.
+        external_job_id = cmd_out.stdout.strip().split()[-1]
         if not external_job_id:
             log.error('(%s) submission did not return a job identifier, failing job' % galaxy_id_tag)
             job_wrapper.fail("failure submitting job")
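For context on the cli.py change: sbatch reports submissions as a sentence
('Submitted batch job <id>') rather than a bare job ID, so the runner now
takes the last whitespace-separated token of stdout. A minimal standalone
sketch of that behavior; the helper name and sample strings are this
editor's assumptions, not part of the commit:

    def extract_job_id(stdout):
        # The last token works both for 'Submitted batch job 1234' and for
        # schedulers that print a bare identifier such as '1234'.
        return stdout.strip().split()[-1]

    assert extract_job_id('Submitted batch job 1234\n') == '1234'
    assert extract_job_id('1234\n') == '1234'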
diff -r 3210eaa1db6cb6af4e5c4846c1fec39585405cd9 -r ae7da2ebe7737d6e6ba142d1ac24a24b467285ad lib/galaxy/jobs/runners/util/cli/job/slurm.py
--- /dev/null
+++ b/lib/galaxy/jobs/runners/util/cli/job/slurm.py
@@ -0,0 +1,94 @@
+# A simple CLI runner for Slurm that can be used when running Galaxy from a
+# non-submit host and using a Slurm cluster.
+
+try:
+    from galaxy.model import Job
+    job_states = Job.states
+except ImportError:
+    # Not in Galaxy; map Galaxy job states to LWR ones.
+    from galaxy.util import enum
+    job_states = enum(RUNNING='running', OK='complete', QUEUED='queued')
+
+from ..job import BaseJobExec
+
+__all__ = ('Slurm',)
+
+from logging import getLogger
+log = getLogger(__name__)
+
+argmap = {
+    'time': '-t',
+    'ncpus': '-c',
+    'partition': '-p'
+}
+
+
+class Slurm(BaseJobExec):
+
+    def __init__(self, **params):
+        self.params = {}
+        for k, v in params.items():
+            self.params[k] = v
+
+    def job_script_kwargs(self, ofile, efile, job_name):
+        scriptargs = {'-o': ofile,
+                      '-e': efile,
+                      '-J': job_name}
+
+        # Map long-form arguments to sbatch flags using argmap.
+        for k, v in self.params.items():
+            if k == 'plugin':
+                continue
+            try:
+                if not k.startswith('-'):
+                    k = argmap[k]
+                scriptargs[k] = v
+            except KeyError:
+                log.warning('Unrecognized long argument passed to Slurm CLI plugin: %s' % k)
+
+        # Generate the job script header lines.
+        template_scriptargs = ''
+        for k, v in scriptargs.items():
+            template_scriptargs += '#SBATCH %s %s\n' % (k, v)
+        return dict(headers=template_scriptargs)
+
+    def submit(self, script_file):
+        return 'sbatch %s' % script_file
+
+    def delete(self, job_id):
+        return 'scancel %s' % job_id
+
+    def get_status(self, job_ids=None):
+        return 'squeue -a -o \\"%A %t\\"'
+
+    def get_single_status(self, job_id):
+        return 'squeue -a -o \\"%A %t\\" -j ' + job_id
+
+    def parse_status(self, status, job_ids):
+        # Get status for each job, skipping the header line.
+        rval = {}
+        for line in status.splitlines()[1:]:
+            id, state = line.split()
+            if id in job_ids:
+                # Map Slurm job states to Galaxy job states.
+                rval[id] = self._get_job_state(state)
+        return rval
+
+    def parse_single_status(self, status, job_id):
+        status = status.splitlines()
+        if len(status) > 1:
+            # Job still on cluster and has a state.
+            id, state = status[1].split()
+            return self._get_job_state(state)
+        return job_states.OK
+
+    def _get_job_state(self, state):
+        try:
+            return {
+                'F': job_states.ERROR,
+                'R': job_states.RUNNING,
+                'CG': job_states.RUNNING,
+                'PD': job_states.QUEUED,
+                'CD': job_states.OK
+            }[state]
+        except KeyError:
+            raise KeyError("Failed to map Slurm status code [%s] to job state." % state)

Repository URL: https://bitbucket.org/galaxy/galaxy-central/
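For a sense of how the plugin's job_script_kwargs turns destination
parameters into job script headers, here is a minimal standalone sketch;
the parameter values are hypothetical, and the helper simplifies the
plugin's argmap handling (unknown keys pass through instead of logging a
warning):

    # Standalone sketch, not Galaxy code; mirrors job_script_kwargs above.
    argmap = {'time': '-t', 'ncpus': '-c', 'partition': '-p'}

    def sbatch_headers(ofile, efile, job_name, params):
        scriptargs = {'-o': ofile, '-e': efile, '-J': job_name}
        for k, v in params.items():
            # Long-form names ('time', 'ncpus', ...) map to short sbatch flags.
            scriptargs[argmap.get(k, k)] = v
        return ''.join('#SBATCH %s %s\n' % (k, v) for k, v in scriptargs.items())

    print(sbatch_headers('job.o', 'job.e', 'g123_example',
                         {'time': '01:00:00', 'partition': 'normal'}))
    # Emits lines such as '#SBATCH -o job.o' and '#SBATCH -t 01:00:00'.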
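Similarly, a standalone sketch of the status parsing: the sample below is
an assumption of what 'squeue -a -o "%A %t"' prints (a header line, then
one 'JOBID ST' pair per job), and the state table is a simplified stand-in
for the Galaxy job states used in the plugin:

    # Standalone sketch, not Galaxy code; mirrors parse_status above.
    SAMPLE_SQUEUE = 'JOBID ST\n1234 R\n1235 PD\n1236 CD'
    STATE_MAP = {'F': 'error', 'R': 'running', 'CG': 'running',
                 'PD': 'queued', 'CD': 'complete'}

    def parse_status(status, job_ids):
        rval = {}
        for line in status.splitlines()[1:]:  # skip the header line
            job_id, state = line.split()
            if job_id in job_ids:
                rval[job_id] = STATE_MAP[state]
        return rval

    print(parse_status(SAMPLE_SQUEUE, ['1234', '1236']))
    # -> {'1234': 'running', '1236': 'complete'}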