1 new commit in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/changeset/d2aba0918cf0/
changeset: d2aba0918cf0
user:      inithello
date:      2012-06-25 15:39:41
summary:   Added Ensembl build parser. Improved genome downloader interface. Added post-download indexing feature.
affected #: 17 files

diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -50,7 +50,9 @@
 tool-data/shared/igv/igv_build_sites.txt
 tool-data/shared/rviewer/rviewer_build_sites.txt
 tool-data/shared/ucsc/builds.txt
+tool-data/shared/ensembl/builds.txt
 tool-data/*.loc
+tool-data/genome/*
 
 # Test output
 run_functional_tests.html
@@ -72,4 +74,5 @@
 *.orig
 .DS_Store
 *.rej
-*~
\ No newline at end of file
+*~
+

diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd cron/get_ensembl.py
--- /dev/null
+++ b/cron/get_ensembl.py
@@ -0,0 +1,22 @@
+from galaxy import eggs
+import pkg_resources
+pkg_resources.require("SQLAlchemy >= 0.4")
+pkg_resources.require("MySQL_python")
+from sqlalchemy import *
+
+
+engine = create_engine( 'mysql://anonymous@ensembldb.ensembl.org:5306', pool_recycle=3600 )
+conn = engine.connect()
+dbs = conn.execute( "SHOW DATABASES LIKE 'ensembl_website_%%'" )
+builds = {}
+lines = []
+for res in dbs:
+    dbname = res[0]
+    release = dbname.split('_')[-1]
+    genomes = conn.execute( "SELECT RS.assembly_code, S.name, S.common_name, %s FROM ensembl_website_%s.release_species RS LEFT JOIN ensembl_website_%s.species S on RS.species_id = S.species_id" % ( release, release, release ) )
+    for genome in genomes:
+        builds[genome[0]] = dict( release=genome[3], species='%s (%s/%s)' % ( genome[1], genome[2], genome[0] ) )
+for build in builds.items():
+    lines.append( '\t'.join( [ build[0], '%d' % build[1]['release'], build[1]['species'] ] ) )
+
+print '\n'.join( lines )
\ No newline at end of file

diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd cron/parse_publicbuilds.py
--- /dev/null
+++ b/cron/parse_publicbuilds.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+"""
+Connects to the URL specified and outputs builds available at that
+DSN in tabular format. UCSC Test gateway is used as default.
+
+build description
+"""
+
+import sys
+import urllib
+if sys.version_info[:2] >= ( 2, 5 ):
+    import xml.etree.ElementTree as ElementTree
+else:
+    from galaxy import eggs
+    import pkg_resources; pkg_resources.require( "elementtree" )
+    from elementtree import ElementTree
+
+URL = "http://genome.cse.ucsc.edu/cgi-bin/das/dsn"
+
+def getbuilds(url):
+    try:
+        page = urllib.urlopen(URL)
+    except:
+        print "#Unable to open " + URL
+        print "?\tunspecified (?)"
+        sys.exit(1)
+
+    text = page.read()
+    try:
+        tree = ElementTree.fromstring(text)
+    except:
+        print "#Invalid xml passed back from " + URL
+        print "?\tunspecified (?)"
+        sys.exit(1)
+
+    print "#Harvested from http://genome.cse.ucsc.edu/cgi-bin/das/dsn"
+    print "?\tunspecified (?)"
+    for dsn in tree:
+        build = dsn.find("SOURCE").attrib['id']
+        description = dsn.find("DESCRIPTION").text.replace(" - Genome at UCSC","").replace(" Genome at UCSC","")
+
+        fields = description.split(" ")
+        temp = fields[0]
+        for i in range(len(fields)-1):
+            if temp == fields[i+1]:
+                fields.pop(i+1)
+            else:
+                temp = fields[i+1]
+        description = " ".join(fields)
+        yield [build,description]
+
+if __name__ == "__main__":
+    if len(sys.argv) > 1:
+        URL = sys.argv[1]
+    for build in getbuilds(URL):
+        print build[0]+"\t"+build[1]+" ("+build[0]+")"
+
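A note on the input format, since parse_publicbuilds.py walks the DAS response without any schema checks: the /das/dsn document has a root element wrapping one DSN element per build, each carrying a SOURCE and a DESCRIPTION child. The offline sketch below reproduces the same walk; it is not part of the commit, and the inline XML is a hand-written approximation of UCSC's response (the element names match what the script reads, the data is purely illustrative):

    import xml.etree.ElementTree as ElementTree

    # Hand-written approximation of a DAS "dsn" document: the root holds one
    # <DSN> per build, each with <SOURCE id="..."> and <DESCRIPTION> children.
    sample = """<DASDSN>
      <DSN>
        <SOURCE id="hg19" version="hg19">Human</SOURCE>
        <DESCRIPTION>Human Feb. 2009 (GRCh37/hg19) Genome at UCSC</DESCRIPTION>
      </DSN>
    </DASDSN>"""

    tree = ElementTree.fromstring(sample)
    for dsn in tree:
        build = dsn.find("SOURCE").attrib['id']
        description = dsn.find("DESCRIPTION").text.replace(" - Genome at UCSC", "").replace(" Genome at UCSC", "")
        # Emits "hg19<TAB>Human Feb. 2009 (GRCh37/hg19) (hg19)", the same
        # "build<TAB>description (build)" shape written to publicbuilds.txt.
        print build + "\t" + description + " (" + build + ")"

Against the live endpoint, the real script additionally prints the "#Harvested from ..." header and the "?\tunspecified (?)" fallback row, and squashes immediately repeated words in each description before printing.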
diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd cron/updateensembl.sh.sample
--- /dev/null
+++ b/cron/updateensembl.sh.sample
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Script to update Ensembl shared data tables. The idea is to update, but if
+# the update fails, not replace current data/tables with error
+# messages.
+
+# Edit this line to refer to galaxy's path:
+GALAXY=/path/to/galaxy
+PYTHONPATH=${GALAXY}/lib
+export PYTHONPATH
+
+# setup directories
+echo "Creating required directories."
+DIRS="
+${GALAXY}/tool-data/shared/ensembl
+${GALAXY}/tool-data/shared/ensembl/new
+"
+for dir in $DIRS; do
+    if [ ! -d $dir ]; then
+        echo "Creating $dir"
+        mkdir $dir
+    else
+        echo "$dir already exists, continuing."
+    fi
+done
+
+date
+echo "Updating Ensembl shared data tables."
+
+# Try to build "builds.txt"
+echo "Updating builds.txt"
+python ${GALAXY}/cron/get_ensembl.py > ${GALAXY}/tool-data/shared/ensembl/new/builds.txt
+if [ $? -eq 0 ]
+then
+    diff ${GALAXY}/tool-data/shared/ensembl/new/builds.txt ${GALAXY}/tool-data/shared/ensembl/builds.txt > /dev/null 2>&1
+    if [ $? -ne 0 ]
+    then
+        cp -f ${GALAXY}/tool-data/shared/ensembl/new/builds.txt ${GALAXY}/tool-data/shared/ensembl/builds.txt
+    fi
+else
+    echo "Failed to update builds.txt" >&2
+fi

diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd cron/updateucsc.sh.sample
--- a/cron/updateucsc.sh.sample
+++ b/cron/updateucsc.sh.sample
@@ -28,6 +28,20 @@
 date
 echo "Updating UCSC shared data tables."
 
+# Try to build "publicbuilds.txt"
+echo "Updating publicbuilds.txt"
+python ${GALAXY}/cron/parse_publicbuilds.py > ${GALAXY}/tool-data/shared/ucsc/new/publicbuilds.txt
+if [ $? -eq 0 ]
+then
+    diff ${GALAXY}/tool-data/shared/ucsc/new/publicbuilds.txt ${GALAXY}/tool-data/shared/ucsc/publicbuilds.txt > /dev/null 2>&1
+    if [ $?
-ne 0 ] + then + cp -f ${GALAXY}/tool-data/shared/ucsc/new/publicbuilds.txt ${GALAXY}/tool-data/shared/ucsc/publicbuilds.txt + fi +else + echo "Failed to update publicbuilds.txt" >&2 +fi + # Try to build "builds.txt" echo "Updating builds.txt" python ${GALAXY}/cron/parse_builds.py > ${GALAXY}/tool-data/shared/ucsc/new/builds.txt diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd lib/galaxy/jobs/deferred/genome_index.py --- /dev/null +++ b/lib/galaxy/jobs/deferred/genome_index.py @@ -0,0 +1,43 @@ +""" +Module for managing genome transfer jobs. +""" +from __future__ import with_statement + +import logging, shutil, gzip, bz2, zipfile, tempfile, tarfile, sys, os + +from galaxy import eggs +from sqlalchemy import and_ +from data_transfer import * + +log = logging.getLogger( __name__ ) + +__all__ = [ 'GenomeIndexPlugin' ] + +class GenomeIndexPlugin( DataTransfer ): + + def __init__( self, app ): + super( GenomeIndexPlugin, self ).__init__( app ) + self.app = app + self.tool = app.toolbox.tools_by_id['__GENOME_INDEX__'] + self.sa_session = app.model.context.current + + def create_job( self, trans, path, indexes, dbkey, intname ): + params = dict( user=trans.user.id, path=path, indexes=indexes, dbkey=dbkey, intname=intname ) + deferred = trans.app.model.DeferredJob( state = self.app.model.DeferredJob.states.NEW, plugin = 'GenomeIndexPlugin', params = params ) + self.sa_session.add( deferred ) + self.sa_session.flush() + log.debug( 'Job created, id %d' % deferred.id ) + return deferred.id + + def check_job( self, job ): + log.debug( 'Job check' ) + return 'ready' + + def run_job( self, job ): + incoming = dict( path=os.path.abspath( job.params[ 'path' ] ), indexer=job.params[ 'indexes' ][0], user=job.params[ 'user' ] ) + indexjob = self.tool.execute( self, set_output_hid=False, history=None, incoming=incoming, transfer=None, deferred=job ) + job.params[ 'indexjob' ] = indexjob[0].id + job.state = self.app.model.DeferredJob.states.RUNNING + self.sa_session.add( job ) + self.sa_session.flush() + return self.app.model.DeferredJob.states.RUNNING diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd lib/galaxy/jobs/deferred/genome_transfer.py --- a/lib/galaxy/jobs/deferred/genome_transfer.py +++ b/lib/galaxy/jobs/deferred/genome_transfer.py @@ -78,10 +78,11 @@ def get_job_status( self, jobid ): job = self.sa_session.query( self.app.model.DeferredJob ).get( int( jobid ) ) - if not hasattr( job, 'transfer_job' ): - job.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( job.params[ 'transfer_job_id' ] ) ) - else: - self.sa_session.refresh( job.transfer_job ) + if 'transfer_job_id' in job.params: + if not hasattr( job, 'transfer_job' ): + job.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( job.params[ 'transfer_job_id' ] ) ) + else: + self.sa_session.refresh( job.transfer_job ) return job def run_job( self, job ): @@ -139,7 +140,6 @@ if not chunk: break os.write( fd, chunk ) - os.write( fd, '\n' ) os.close( fd ) compressed.close() elif data_type == 'bzip': @@ -154,7 +154,6 @@ if not chunk: break os.write( fd, chunk ) - os.write( fd, '\n' ) os.close( fd ) compressed.close() elif data_type == 'zip': @@ -177,7 +176,6 @@ if not chunk: break os.write( fd, chunk ) - os.write( fd, '\n' ) zipped_file.close() else: try: @@ -223,8 +221,8 @@ else: job.state = self.app.model.DeferredJob.states.OK self.sa_session.add( job ) + self.sa_session.flush() return 
self.app.model.DeferredJob.states.OK - self.sa_session.flush() def _check_compress( self, filepath ): retval = '' diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd lib/galaxy/jobs/deferred/liftover_transfer.py --- a/lib/galaxy/jobs/deferred/liftover_transfer.py +++ b/lib/galaxy/jobs/deferred/liftover_transfer.py @@ -40,7 +40,7 @@ deferred = trans.app.model.DeferredJob( state = self.app.model.DeferredJob.states.NEW, plugin = 'LiftOverTransferPlugin', params = params ) self.sa_session.add( deferred ) self.sa_session.flush() - return deferred.id + return job.id def check_job( self, job ): if job.params['type'] == 'init_transfer': @@ -98,7 +98,9 @@ transfer = job.transfer_job if params[ 'type' ] == 'extract_transfer': CHUNK_SIZE = 2**20 - destpath = os.path.join( self.app.config.get( 'genome_data_path', 'tool-data/genome' ), job.params[ 'dbkey' ], 'liftOver' ) + destpath = os.path.join( self.app.config.get( 'genome_data_path', 'tool-data/genome' ), source, 'liftOver' ) + if not os.path.exists( destpath ): + os.makedirs( destpath ) destfile = job.params[ 'destfile' ] destfilepath = os.path.join( destpath, destfile ) tmpprefix = '%s_%s_download_unzip_' % ( job.params['dbkey'], job.params[ 'transfer_job_id' ] ) diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd lib/galaxy/tools/actions/index_genome.py --- a/lib/galaxy/tools/actions/index_genome.py +++ b/lib/galaxy/tools/actions/index_genome.py @@ -21,7 +21,9 @@ job.tool_id = tool.id job.user_id = incoming['user'] start_job_state = job.state # should be job.states.NEW - job.state = job.states.WAITING # we need to set job state to something other than NEW, or else when tracking jobs in db it will be picked up before we have added input / output parameters + job.state = job.states.WAITING # we need to set job state to something other than NEW, + # or else when tracking jobs in db it will be picked up + # before we have added input / output parameters trans.sa_session.add( job ) # Create dataset that will serve as archive. diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd lib/galaxy/tools/genome_index/__init__.py --- a/lib/galaxy/tools/genome_index/__init__.py +++ b/lib/galaxy/tools/genome_index/__init__.py @@ -13,13 +13,12 @@ def load_genome_index_tools( toolbox ): """ Adds tools for indexing genomes via the main job runner. """ - # Use same process as that used in load_external_metadata_tool; see that - # method for why create tool description files on the fly. + # Create XML for loading the tool. tool_xml_text = """ <tool id="__GENOME_INDEX__" name="Index Genome" version="0.1" tool_type="genome_index"><type class="GenomeIndexTool" module="galaxy.tools"/><action module="galaxy.tools.actions.index_genome" class="GenomeIndexToolAction"/> - <command>$__GENOME_INDEX_COMMAND__ $output_file $output_file.files_path $__app__.config.rsync_url</command> + <command>$__GENOME_INDEX_COMMAND__ $output_file $output_file.files_path $__app__.config.rsync_url "$__app__.config.tool_data_path"</command><inputs><param name="__GENOME_INDEX_COMMAND__" type="hidden"/></inputs> @@ -29,7 +28,7 @@ </tool> """ - # Load export tool. + # Load index tool. 
tmp_name = tempfile.NamedTemporaryFile() tmp_name.write( tool_xml_text ) tmp_name.flush() @@ -166,6 +165,10 @@ self._check_link( fasta, target ) for line in location: self._add_line( line[ 'file' ], line[ 'line' ] ) + deferred.state = app.model.DeferredJob.states.OK + sa_session.add( deferred ) + sa_session.flush() + def _check_link( self, targetfile, symlink ): target = os.path.relpath( targetfile, os.path.dirname( symlink ) ) diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd lib/galaxy/tools/genome_index/index_genome.py --- a/lib/galaxy/tools/genome_index/index_genome.py +++ b/lib/galaxy/tools/genome_index/index_genome.py @@ -10,7 +10,8 @@ import optparse, sys, os, tempfile, time, subprocess, shlex, json, tarfile, shutil class ManagedIndexer(): - def __init__( self, output_file, infile, workingdir, rsync_url ): + def __init__( self, output_file, infile, workingdir, rsync_url, tooldata ): + self.tooldatapath = os.path.abspath( tooldata ) self.workingdir = os.path.abspath( workingdir ) self.outfile = open( os.path.abspath( output_file ), 'w' ) self.basedir = os.path.split( self.workingdir )[0] @@ -44,11 +45,12 @@ with WithChDir( self.workingdir ): self._log( 'Running indexer %s.' % indexer ) result = getattr( self, self.indexers[ indexer ] )() - if result is None: - self._log( 'Error running indexer %s.' % indexer ) + if result in [ None, False ]: + self._log( 'Error running indexer %s, %s' % ( indexer, result ) ) self._flush_files() return True else: + self._log( self.locations ) self._log( 'Indexer %s completed successfully.' % indexer ) self._flush_files() @@ -93,6 +95,7 @@ os.remove( self.fafile ) return self._bwa_cs() else: + self._log( 'BWA (base) exited with code %s' % result ) return False def _bwa_cs( self ): @@ -109,6 +112,7 @@ self.locations[ 'cs' ].append( self.fafile ) os.remove( self.fafile ) else: + self._log( 'BWA (color) exited with code %s' % result ) return False else: self.locations[ 'cs' ].append( self.fafile ) @@ -136,6 +140,7 @@ os.remove( self.fafile ) return self._bowtie_cs() else: + self._log( 'Bowtie (base) exited with code %s' % result ) return False def _bowtie_cs( self ): @@ -149,6 +154,7 @@ if result == 0: self.locations[ 'cs' ].append( self.genome ) else: + self._log( 'Bowtie (color) exited with code %s' % result ) return False os.remove( os.path.join( indexdir, self.fafile ) ) else: @@ -174,6 +180,7 @@ os.remove( self.fafile ) return True else: + self._log( 'Bowtie2 exited with code %s' % result ) return False def _twobit( self ): @@ -193,6 +200,7 @@ os.remove( self.fafile ) return True else: + self._log( 'faToTwoBit exited with code %s' % result ) return False def _perm( self ): @@ -208,12 +216,15 @@ command = shlex.split("PerM %s %s --readFormat fastq --seed %s -m -s %s" % (self.fafile, read_length, seed, index)) result = subprocess.call( command ) if result != 0: + self._log( 'PerM (base) exited with code %s' % result ) return False self.locations[ 'nt' ].append( [ key, desc, index ] ) os.remove( self.fafile ) return self._perm_cs() def _perm_cs( self ): + genome = self.genome + read_length = 50 if not os.path.exists( 'cs' ): os.makedirs( 'cs' ) with WithChDir( 'cs' ): @@ -223,12 +234,13 @@ desc = '%s: seed=%s, read length=%s' % (genome, seed, read_length) index = "%s_color_%s_%s.index" % (genome, seed, read_length) if not os.path.exists( index ): - command = shlex.split("PerM %s %s --readFormat csfastq --seed %s -m -s %s" % (local_ref, read_length, seed, index)) + command = shlex.split("PerM %s %s --readFormat 
csfastq --seed %s -m -s %s" % (self.fafile, read_length, seed, index)) result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) if result != 0: + self._log( 'PerM (color) exited with code %s' % result ) return False self.locations[ 'cs' ].append( [ key, desc, index ] ) - os.remove( local_ref ) + os.remove( self.fafile ) temptar = tarfile.open( 'cs.tar', 'w' ) temptar.add( 'cs' ) temptar.close() @@ -241,17 +253,19 @@ self.locations[ 'nt' ].append( self.fafile ) return True local_ref = self.fafile - srma = 'tool-data/shared/jars/srma.jar' + srma = os.path.abspath( os.path.join( self.tooldatapath, 'shared/jars/picard/CreateSequenceDictionary.jar' ) ) genome = os.path.splitext( self.fafile )[0] self._check_link() if not os.path.exists( '%s.fai' % self.fafile ) and not os.path.exists( '%s.fai' % self.genome ): command = shlex.split( 'samtools faidx %s' % self.fafile ) subprocess.call( command, stderr=self.logfile ) - command = shlex.split( "java -cp %s net.sf.picard.sam.CreateSequenceDictionary R=%s O=%s/%s.dict URI=%s" \ - % ( srma, local_ref, os.curdir, genome, local_ref ) ) + command = shlex.split( "java -jar %s R=%s O=%s.dict URI=%s" \ + % ( srma, local_ref, genome, local_ref ) ) if not os.path.exists( '%s.dict' % self.genome ): result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) + self._log( ' '.join( command ) ) if result != 0: + self._log( 'Picard exited with code %s' % result ) return False self.locations[ 'nt' ].append( self.fafile ) os.remove( self.fafile ) @@ -260,17 +274,20 @@ def _sam( self ): local_ref = self.fafile local_file = os.path.splitext( self.fafile )[ 0 ] + print 'Trying rsync' result = self._do_rsync( '/sam_index/' ) if result == 0 and ( os.path.exists( '%s.fai' % self.fafile ) or os.path.exists( '%s.fai' % self.genome ) ): - self.locations[ 'nt' ].append( local_ref ) + self.locations[ 'nt' ].append( '%s.fai' % local_ref ) return True self._check_link() + print 'Trying indexer' command = shlex.split("samtools faidx %s" % local_ref) - result = subprocess.call( command, stderr=self.logfile ) + result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) if result != 0: + self._log( 'SAM exited with code %s' % result ) return False else: - self.locations[ 'nt' ].append( local_ref ) + self.locations[ 'nt' ].append( '%s.fai' % local_ref ) os.remove( local_ref ) return True @@ -288,9 +305,9 @@ # Parse command line. parser = optparse.OptionParser() (options, args) = parser.parse_args() - indexer, infile, outfile, working_dir, rsync_url = args + indexer, infile, outfile, working_dir, rsync_url, tooldata = args # Create archive. 
- idxobj = ManagedIndexer( outfile, infile, working_dir, rsync_url ) + idxobj = ManagedIndexer( outfile, infile, working_dir, rsync_url, tooldata ) idxobj.run_indexer( indexer ) \ No newline at end of file diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd lib/galaxy/util/__init__.py --- a/lib/galaxy/util/__init__.py +++ b/lib/galaxy/util/__init__.py @@ -407,6 +407,22 @@ db_names = DBNames( [( db_names.default_value, db_names.default_name )] ) return db_names +def read_ensembl( filename, ucsc ): + """ Read Ensembl build names from file """ + ucsc_builds = [] + for build in ucsc: + ucsc_builds.append( build[0] ) + ensembl_builds = list() + try: + for line in open( filename ): + if line[0:1] in [ '#', '\t' ]: continue + fields = line.replace("\r","").replace("\n","").split("\t") + if fields[0] in ucsc_builds: continue + ensembl_builds.append( dict( dbkey=fields[0], release=fields[1], name=fields[2].replace( '_', ' ' ) ) ) + except Exception, e: + print "ERROR: Unable to read builds file:", e + return ensembl_builds + def read_build_sites( filename, check_builds=True ): """ read db names to ucsc mappings from file, this file should probably be merged with the one above """ build_sites = [] @@ -634,11 +650,15 @@ s.quit() galaxy_root_path = os.path.join(__path__[0], "..","..","..") + # The dbnames list is used in edit attributes and the upload tool dbnames = read_dbnames( os.path.join( galaxy_root_path, "tool-data", "shared", "ucsc", "builds.txt" ) ) +ucsc_names = read_dbnames( os.path.join( galaxy_root_path, "tool-data", "shared", "ucsc", "publicbuilds.txt" ) ) +ensembl_names = read_ensembl( os.path.join( galaxy_root_path, "tool-data", "shared", "ensembl", "builds.txt" ), ucsc_names ) ucsc_build_sites = read_build_sites( os.path.join( galaxy_root_path, "tool-data", "shared", "ucsc", "ucsc_build_sites.txt" ) ) gbrowse_build_sites = read_build_sites( os.path.join( galaxy_root_path, "tool-data", "shared", "gbrowse", "gbrowse_build_sites.txt" ) ) genetrack_sites = read_build_sites( os.path.join( galaxy_root_path, "tool-data", "shared", "genetrack", "genetrack_sites.txt" ), check_builds=False ) +dlnames = dict(ucsc=ucsc_names, ensembl=ensembl_names) def galaxy_directory(): return os.path.abspath(galaxy_root_path) diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd lib/galaxy/web/controllers/data_admin.py --- a/lib/galaxy/web/controllers/data_admin.py +++ b/lib/galaxy/web/controllers/data_admin.py @@ -26,17 +26,69 @@ error='panel-error-message', queued='state-color-waiting' ) - + @web.expose @web.require_admin def manage_data( self, trans, **kwd ): + genomes = dict() if trans.app.config.get_bool( 'enable_beta_job_managers', False ) == False: return trans.fill_template( '/admin/data_admin/betajob.mako' ) - dbkeys = trans.db_builds - return trans.fill_template( '/admin/data_admin/data_form.mako', dbkeys=dbkeys ) + for line in trans.app.tool_data_tables.data_tables[ 'all_fasta' ].data: + indexers = dict( bowtie_indexes='Generate', bowtie2_indexes='Generate', bwa_indexes='Generate', perm_base_indexes='Generate', srma_indexes='Generate', sam_fa_indexes='Generate' ) + dbkey = line[0] + name = line[2] + indexers[ 'name' ] = name + indexers[ 'fapath' ] = line[3] + genomes[ dbkey ] = indexers + for table in [ 'bowtie_indexes', 'bowtie2_indexes', 'bwa_indexes', 'srma_indexes' ]: + for line in trans.app.tool_data_tables.data_tables[ table ].data: + dbkey = line[0] + genomes[ dbkey ][ table ] = 'Generated' + for line in 
trans.app.tool_data_tables.data_tables[ 'sam_fa_indexes' ].data: + genomes[ line[1] ][ 'sam_fa_indexes' ] = 'Generated' + for line in trans.app.tool_data_tables.data_tables[ 'perm_base_indexes' ].data: + genomes[ line[1].split(':')[0] ][ 'perm_base_indexes' ] = 'Generated' + jobgrid = [] + sa_session = trans.app.model.context.current + jobs = sa_session.query( model.GenomeIndexToolData ).order_by( model.GenomeIndexToolData.created_time.desc() ).filter_by( user_id=trans.get_user().id ).group_by( model.GenomeIndexToolData.deferred ).limit( 20 ).all() + prevjobid = 0 + for job in jobs: + if prevjobid == job.deferred.id: + continue + prevjobid = job.deferred.id + state = job.deferred.state + params = job.deferred.params + if job.transfer is not None: + jobtype = 'download' + else: + jobtype = 'index' + indexers = ', '.join( params['indexes'] ) + jobgrid.append( dict( jobtype=jobtype, indexers=indexers, rowclass=state, deferred=job.deferred.id, state=state, intname=job.deferred.params[ 'intname' ], dbkey=job.deferred.params[ 'dbkey' ] ) ) + return trans.fill_template( '/admin/data_admin/local_data.mako', jobgrid=jobgrid, genomes=genomes ) + + @web.expose + @web.require_admin + def add_genome( self, trans, **kwd ): + if trans.app.config.get_bool( 'enable_beta_job_managers', False ) == False: + return trans.fill_template( '/admin/data_admin/betajob.mako' ) + dbkeys = trans.ucsc_builds + ensemblkeys = trans.ensembl_builds + return trans.fill_template( '/admin/data_admin/data_form.mako', dbkeys=dbkeys, ensembls=ensemblkeys ) @web.expose @web.require_admin + def index_build( self, trans, **kwd ): + """Index a previously downloaded genome.""" + params = util.Params( kwd ) + path = os.path.abspath( params.get( 'path', None ) ) + indexes = [ params.get( 'indexes', None ) ] + dbkey = params.get( 'dbkey', None ) + intname = params.get( 'longname', None ) + indexjob = trans.app.job_manager.deferred_job_queue.plugins['GenomeIndexPlugin'].create_job( trans, path, indexes, dbkey, intname ) + return indexjob + + @web.expose + @web.require_admin def download_build( self, trans, **kwd ): """Download a genome from a remote source and add it to the library.""" params = util.Params( kwd ) @@ -57,21 +109,21 @@ protocol = 'http' if source == 'NCBI': - dbkey = params.get('dbkey', '')[0] + dbkey = params.get('ncbi_dbkey', '')[0] url = 'http://togows.dbcls.jp/entry/ncbi-nucleotide/%s.fasta' % dbkey elif source == 'Broad': - dbkey = params.get('dbkey', '')[0] + dbkey = params.get('broad_dbkey', '')[0] url = 'ftp://ftp.broadinstitute.org/pub/seq/references/%s.fasta' % dbkey elif source == 'UCSC': longname = None - for build in trans.db_builds: - if dbkey[1] == build[0]: + for build in trans.ucsc_builds: + if dbkey == build[0]: dbkey = build[0] longname = build[1] break assert dbkey is not '?', 'That build was not found' ftp = ftplib.FTP('hgdownload.cse.ucsc.edu') - ftp.login('anonymous', 'user@example.com') + ftp.login('anonymous', trans.get_user().email) checker = [] liftover = [] newlift = [] @@ -81,10 +133,12 @@ fname = chain.split( '/' )[-1] target = fname.replace( '.over.chain.gz', '' ).split( 'To' )[1] target = target[0].lower() + target[1:] - newlift.append( [ chain, dbkey, target ] ) + if not os.path.exists( os.path.join( trans.app.config.get( 'genome_data_path', 'tool-data/genome' ), dbkey, 'liftOver', fname ) ): + newlift.append( [ chain, dbkey, target ] ) current = dbkey[0].upper() + dbkey[1:] targetfile = '%sTo%s.over.chain.gz' % ( target, current ) - newlift.append( [ '/goldenPath/%s/liftOver/%s' % ( target, 
targetfile ), target, dbkey ] ) + if not os.path.exists( os.path.join( trans.app.config.get( 'genome_data_path', 'tool-data/genome' ), target, 'liftOver', targetfile ) ): + newlift.append( [ '/goldenPath/%s/liftOver/%s' % ( target, targetfile ), target, dbkey ] ) except: newlift = None pass @@ -103,36 +157,35 @@ status = u'error' return trans.fill_template( '/admin/data_admin/data_form.mako', message=message, - status=status ) + status=status, + ensembls=trans.ensembl_builds, + dbkeys=trans.ucsc_builds ) elif source == 'Ensembl': - section = params.get('ensembl_section', '') - release1 = params.get('release_number', '') - organism = params.get('organism', '') - name = params.get('name', '') - longname = organism - dbkey = name - release2 = params.get('release2', '') - release2 = ".%s" % release2 if release2 else "" - if section == 'standard': - url = 'ftp://ftp.ensembl.org/pub/release-%s/fasta/%s/dna/%s.%s%s.dna.toplevel.fa.gz' % \ - (release1, organism.lower(), organism, name, release2) - else: - url = 'ftp://ftp.ensemblgenomes.org/pub/%s/release-%s/fasta/%s/dna/%s.%s%s.dna.toplevel.fa.gz' % \ - (section, release1, organism.lower(), organism, name, release2) - elif source == 'local': - url = 'http://127.0.0.1/%s.tar.gz' % dbkey + dbkey = params.get( 'ensembl_dbkey', None ) + assert dbkey is not '?', 'That build was not found' + for build in trans.ensembl_builds: + if build[ 'dbkey' ] == dbkey: + dbkey = build[ 'dbkey' ] + release = build[ 'release' ] + pathname = '_'.join( build[ 'name' ].split(' ')[0:2] ) + longname = build[ 'name' ].replace('_', ' ') + break + url = 'ftp://ftp.ensembl.org/pub/release-%s/fasta/%s/dna/%s.%s.%s.dna.toplevel.fa.gz' % ( release, pathname.lower(), pathname, dbkey, release ) + log.debug( build ) + log.debug( url ) else: - raise ValueError + raise ValueError, 'Somehow an invalid data source was specified.' 
params = dict( protocol='http', name=dbkey, datatype='fasta', url=url, user=trans.user.id ) jobid = trans.app.job_manager.deferred_job_queue.plugins['GenomeTransferPlugin'].create_job( trans, url, dbkey, longname, indexers ) chainjob = [] if newlift is not None: for chain in newlift: - liftover_url = u'ftp://hgdownload.cse.ucsc.edu%s' % chain[0] + liftover_url = u'ftp://hgdownload.cse.ucsc.edu%s' % chain[0] from_genome = chain[1] to_genome = chain[2] destfile = liftover_url.split('/')[-1].replace('.gz', '') - chainjob.append( trans.app.job_manager.deferred_job_queue.plugins['LiftOverTransferPlugin'].create_job( trans, liftover_url, dbkey, from_genome, to_genome, destfile, jobid ) ) + lochain = trans.app.job_manager.deferred_job_queue.plugins['LiftOverTransferPlugin'].create_job( trans, liftover_url, dbkey, from_genome, to_genome, destfile, jobid ) + chainjob.append( lochain ) job = trans.app.job_manager.deferred_job_queue.plugins['GenomeTransferPlugin'].get_job_status( jobid ) job.params['liftover'] = chainjob trans.app.model.context.current.add( job ) @@ -146,9 +199,13 @@ def monitor_status( self, trans, **kwd ): params = util.Params( kwd ) jobid = params.get( 'job', '' ) + gname = params.get( 'intname', '' ) + deferred = trans.app.model.context.current.query( model.DeferredJob ).filter_by( id=jobid ).first() + gname = deferred.params[ 'intname' ] + indexers = ', '.join( deferred.params[ 'indexes' ] ) jobs = self._get_jobs( jobid, trans ) jsonjobs = json.dumps( jobs ) - return trans.fill_template( '/admin/data_admin/download_status.mako', mainjob=jobid, jobs=jobs, jsonjobs=jsonjobs ) + return trans.fill_template( '/admin/data_admin/download_status.mako', name=gname, indexers=indexers, mainjob=jobid, jobs=jobs, jsonjobs=jsonjobs ) @web.expose @web.require_admin @@ -160,16 +217,6 @@ jobs = self._get_jobs( jobid, trans ) return trans.fill_template( '/admin/data_admin/ajax_status.mako', json=json.dumps( jobs ) ) - @web.expose - @web.require_admin - def job_status( self, trans, **kwd ): - params = util.Params( kwd ) - jobid = params.get( 'jobid', None ) - jobtype = params.get( 'jobtype', None ) - fillvals = None - fillvals = self._get_job( jobid, jobtype, trans ) - return trans.fill_template( '/admin/data_admin/ajax_status.mako', json=json.dumps( fillvals ) ) - def _get_job( self, jobid, jobtype, trans ): sa = trans.app.model.context.current if jobtype == 'liftover': @@ -191,12 +238,12 @@ job = trans.app.job_manager.deferred_job_queue.plugins['GenomeTransferPlugin'].get_job_status( jobid ) sa_session = trans.app.model.context.current jobs.append( self._get_job( job.id, 'deferred', trans ) ) - jobs.append( self._get_job( job.transfer_job.id, 'transfer', trans ) ) - idxjobs = sa_session.query( model.GenomeIndexToolData ).filter_by( deferred_job_id=job.id, transfer_job_id=job.transfer_job.id ).all() - if job.params.has_key( 'liftover' ): - for jobid in job.params[ 'liftover' ]: - jobs.append( self._get_job( jobid, 'liftover', trans ) ) - for idxjob in idxjobs: - #print idxjob - jobs.append( self._get_job( idxjob.job_id, 'index', trans ) ) + if hasattr( job, 'transfer_job' ): # This is a transfer job, check for indexers + jobs.append( self._get_job( job.transfer_job.id, 'transfer', trans ) ) + idxjobs = sa_session.query( model.GenomeIndexToolData ).filter_by( deferred_job_id=job.id, transfer_job_id=job.transfer_job.id ).all() + if job.params.has_key( 'liftover' ) and job.params[ 'liftover' ] is not None: + for jobid in job.params[ 'liftover' ]: + jobs.append( self._get_job( jobid, 'liftover', 
trans ) ) + for idxjob in idxjobs: + jobs.append( self._get_job( idxjob.job_id, 'index', trans ) ) return jobs diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd lib/galaxy/web/framework/__init__.py --- a/lib/galaxy/web/framework/__init__.py +++ b/lib/galaxy/web/framework/__init__.py @@ -799,6 +799,14 @@ dbnames.extend( util.dbnames ) return dbnames + @property + def ucsc_builds( self ): + return util.dlnames['ucsc'] + + @property + def ensembl_builds( self ): + return util.dlnames['ensembl'] + def db_dataset_for( self, dbkey ): """ Returns the db_file dataset associated/needed by `dataset`, or `None`. @@ -957,6 +965,14 @@ dbnames.append((key, "%s (%s) [Custom]" % (chrom_dict['name'], key) )) dbnames.extend( util.dbnames ) return dbnames + + @property + def ucsc_builds( self ): + return util.dlnames['ucsc'] + + @property + def ensembl_builds( self ): + return util.dlnames['ensembl'] class GalaxyWebUITransaction( GalaxyWebTransaction ): def __init__( self, environ, app, webapp, session_cookie ): diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd templates/admin/data_admin/data_form.mako --- a/templates/admin/data_admin/data_form.mako +++ b/templates/admin/data_admin/data_form.mako @@ -62,7 +62,7 @@ <div class="form-row"><label for="indexers">Indexers</label><select name="indexers" multiple style="width: 200px; height: 125px;"> - <option value="2bit">TwoBit</option> + <option value="2bit" selected>TwoBit</option><option value="bowtie">Bowtie</option><option value="bowtie2">Bowtie 2</option><option value="bwa">BWA</option> @@ -75,7 +75,7 @@ </div></div><h2>Parameters</h2> - <div id="params_generic" class="params-block" style="display: block;"> + <div id="params_Broad" class="params-block" style="display: block;"><div class="form-row"><label for="longname">Internal Name</label><input name="longname" type="text" label="Internal Name" /> @@ -88,55 +88,47 @@ </div><div id="dlparams"><div class="form-row"> - <label for="dbkey">External Name</label> - <input name="dbkey" type="text" label="Genome Unique Name" /> + <label for="broad_dbkey">External Name</label> + <input name="broad_dbkey" type="text" label="Genome Unique Name" /><div style="clear: both;"> </div></div></div></div> - <div id="params_ensembl" class="params-block"> + <div id="params_NCBI" class="params-block" style="display: block;"><div class="form-row"> - <label for="ensembl_section">Section</label> - <input name="ensembl_section" type="text" label="Section" /> + <label for="longname">Internal Name</label> + <input name="longname" type="text" label="Internal Name" /><div style="clear: both;"> </div> - <div class="toolParamHelp" style="clear: both;"> - Ensembl section, either standard or one of plants, protists, metazoa, fungi, bacteria. - </div></div><div class="form-row"> - <label for="release_number">Release Number</label> - <input name="release_number" type="text" label="Release" /> + <label for="uniqid">Internal Unique Identifier</label> + <input name="uniqid" type="text" label="Internal Identifier" /><div style="clear: both;"> </div> - <div class="toolParamHelp" style="clear: both;"> - Release number, e.g. 
ftp://ftp.ensembl.org/pub/release-<strong style="color: red;">56</strong>/fasta/callithrix_jacchus/dna/Callithrix_jacchus.calJac3.56.dna.toplevel.fa.gz - </div></div> - <div class="form-row"> - <label for="organism">Organism</label> - <input name="organism" type="text" label="Organism" /> - <div style="clear: both;"> </div> - <div class="toolParamHelp" style="clear: both;"> - Organism long name, e.g. ftp://ftp.ensembl.org/pub/release-56/fasta/callithrix_jacchus/dna/<strong style="color: red;">Callithrix_jacchus</strong>.calJac3.56.dna.toplevel.fa.gz - </div> - </div> - <div class="form-row"> - <label for="name">Name</label> - <input name="name" type="text" label="name" /> - <div style="clear: both;"> </div> - <div class="toolParamHelp" style="clear: both;"> - Organism short name, e.g. ftp://ftp.ensembl.org/pub/release-56/fasta/callithrix_jacchus/dna/Callithrix_jacchus.<strong style="color: red;">calJac3</strong>.56.dna.toplevel.fa.gz - </div> - </div> - <div class="form-row"> - <label for="release2">Release ID</label> - <input name="release2" type="text" label="Release ID" /> - <div style="clear: both;"> </div> - <div class="toolParamHelp" style="clear: both;"> - Release ID, e.g. ftp://ftp.ensembl.org/pub/release-56/fasta/callithrix_jacchus/dna/Callithrix_jacchus.calJac3.<strong style="color: red;">56</strong>.dna.toplevel.fa.gz + <div id="dlparams"> + <div class="form-row"> + <label for="ncbi_dbkey">External Name</label> + <input name="ncbi_dbkey" type="text" label="Genome Unique Name" /> + <div style="clear: both;"> </div></div></div></div> - <div id="params_ucsc" class="params-block"> + <div id="params_Ensembl" class="params-block"> + <div class="form-row"> + <label>Genome:</label> + <div class="form-row-input"> + <select name="ensembl_dbkey" last_selected_value="?"> + %for dbkey in ensembls: + <option value="${dbkey['dbkey']}">${dbkey['dbkey']} - ${dbkey['name']}</option> + %endfor + </select> + </div> + <div class="toolParamHelp" style="clear: both;"> + If you can't find the build you want in this list, <insert link to instructions here> + </div> + </div> + </div> + <div id="params_UCSC" class="params-block"><div class="form-row"><label>Genome:</label><div class="form-row-input"> @@ -166,23 +158,11 @@ checkDataSource(); }); function checkDataSource() { - var ds = $('#datasource').val() + var ds = $('#datasource').val(); $('.params-block').each(function() { $(this).hide(); }); - switch (ds) { - case 'UCSC': - $('#params_ucsc').show(); - break; - case 'Ensembl': - $('#params_ensembl').show(); - break; - case 'NCBI': - case 'Broad': - default: - $('#params_generic').show(); - break; - } + $('#params_' + ds).show(); }; </script></form> diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd templates/admin/data_admin/download_status.mako --- a/templates/admin/data_admin/download_status.mako +++ b/templates/admin/data_admin/download_status.mako @@ -33,10 +33,15 @@ </div></div></%def> -<p>The genome build and any selected indexers have been added to the job queue. 
Below you will see the status of each job.</p>
+<p>${name} has been added to the job queue
+    %if indexers:
+        to be indexed with ${indexers}
+    %endif
+    </p>
 <table id="jobStatus"></table>
-<a href="${h.url_for( controller='data_admin', action='manage_data' )}">Return to the download form</a>
+<p><a href="${h.url_for( controller='data_admin', action='manage_data' )}">Overview</a>.</p>
+<p><a href="${h.url_for( controller='data_admin', action='add_genome' )}">Download form</a>.</p>
 <script type="text/javascript">
     jobs = ${jsonjobs}
     finalstates = new Array('done', 'error', 'ok');

diff -r c4f325ba7caa1c86b0f97a0a486c3bbdea15c754 -r d2aba0918cf01b1c5be95f7b0b59cc52cd889dbd templates/admin/data_admin/local_data.mako
--- /dev/null
+++ b/templates/admin/data_admin/local_data.mako
@@ -0,0 +1,161 @@
+<%inherit file="/base.mako"/>
+<%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/library/common/common.mako" import="common_javascripts" />
+
+<%!
+    def inherit(context):
+        if context.get('use_panels'):
+            return '/webapps/galaxy/base_panels.mako'
+        else:
+            return '/base.mako'
+%>
+<%inherit file="${inherit(context)}"/>
+
+<%def name="init()">
+<%
+    self.has_left_panel=False
+    self.has_right_panel=False
+    self.message_box_visible=False
+    self.active_view="user"
+    self.overlay_visible=False
+    self.has_accessible_datasets = False
+%>
+</%def>
+<%def name="stylesheets()">
+    ${parent.stylesheets()}
+    ${h.css( "autocomplete_tagging" )}
+</%def>
+<%def name="javascripts()">
+    ${parent.javascripts()}
+    ${h.js("jquery.autocomplete", "autocomplete_tagging" )}
+</%def>
+##
+## Override methods from base.mako and base_panels.mako
+##
+<%def name="center_panel()">
+    <div style="overflow: auto; height: 100%;">
+        <div class="page-container" style="padding: 10px;">
+            ${render_content()}
+        </div>
+    </div>
+</%def>
+<style type="text/css">
+    .params-block { display: none; }
+    td, th { padding-left: 10px; padding-right: 10px; }
+    td.Generate { text-decoration: underline; background-color: #EEEEEE; }
+    td.Generating { text-decoration: none; background-color: #FFFFCC; }
+    td.Generated { background-color: #CCFFCC; }
+</style>
+<div class="toolForm">
+    %if message:
+        <div class="${status}">${message}</div>
+    %endif
+    <div class="toolFormTitle">Currently tracked builds <a class="action-button" href="/data_admin/add_genome">Add new</a></div>
+    <div class="toolFormBody">
+        <h2>Locally cached data:</h2>
+        <h3>NOTE: Indexers queued here will not be reflected in the table until Galaxy is restarted.</h3>
+        <table id="locfiles">
+            <tr><th>Database ID</th><th>Name</th><th>Bowtie</th><th>Bowtie 2</th><th>BWA</th><th>Sam</th><th>Picard</th><th>PerM</th></tr>
+            %for dbkey in sorted(genomes.keys()):
+                <tr>
+                    <td>${dbkey}</td>
+                    <td>${genomes[dbkey]['name']}</td>
+                    <td id="${dbkey}-bowtie" class="indexcell ${genomes[dbkey]['bowtie_indexes']}" data-fapath="${genomes[dbkey]['fapath']}" data-longname="${genomes[dbkey]['name']}" data-index="bowtie" data-dbkey="${dbkey}">${genomes[dbkey]['bowtie_indexes']}</td>
+                    <td id="${dbkey}-bowtie2" class="indexcell ${genomes[dbkey]['bowtie2_indexes']}" data-fapath="${genomes[dbkey]['fapath']}" data-longname="${genomes[dbkey]['name']}" data-index="bowtie2" data-dbkey="${dbkey}">${genomes[dbkey]['bowtie2_indexes']}</td>
+                    <td id="${dbkey}-bwa" class="indexcell ${genomes[dbkey]['bwa_indexes']}" data-fapath="${genomes[dbkey]['fapath']}" data-longname="${genomes[dbkey]['name']}" data-index="bwa" data-dbkey="${dbkey}">${genomes[dbkey]['bwa_indexes']}</td>
+                    <td id="${dbkey}-sam"
class="indexcell ${genomes[dbkey]['sam_fa_indexes']}" data-fapath="${genomes[dbkey]['fapath']}" data-longname="${genomes[dbkey]['name']}" data-index="sam" data-dbkey="${dbkey}">${genomes[dbkey]['sam_fa_indexes']}</td> + <td id="${dbkey}-picard" class="indexcell ${genomes[dbkey]['srma_indexes']}" data-fapath="${genomes[dbkey]['fapath']}" data-longname="${genomes[dbkey]['name']}" data-index="picard" data-dbkey="${dbkey}">${genomes[dbkey]['srma_indexes']}</td> + <td id="${dbkey}-perm" class="indexcell ${genomes[dbkey]['perm_base_indexes']}" data-fapath="${genomes[dbkey]['fapath']}" data-longname="${genomes[dbkey]['name']}" data-index="perm" data-dbkey="${dbkey}">${genomes[dbkey]['perm_base_indexes']}</td> + </tr> + %endfor + </table> + <h2>Recent jobs:</h2> + <p>Click the job ID to see job details. Note that this list only shows jobs initiated by your account.</p> + <div id="recentJobs"> + %for job in jobgrid: + <div id="job-${job['deferred']}" data-dbkey="${job['dbkey']}" data-name="${job['intname']}" data-indexes="${job['indexers']}" data-jobid="${job['deferred']}" data-state="${job['state']}" class="historyItem-${job['state']} historyItemWrapper historyItem"> + <p>Job ID <a href="${h.url_for( controller='data_admin', action='monitor_status', job=job['deferred'] )}">${job['deferred']}</a>: + %if job['jobtype'] == 'download': + Download <em>${job['intname']}</em> + %if job['indexers']: + and index with ${job['indexers']} + %endif + %else: + Index <em>${job['intname']}</em> with ${job['indexers']} + %endif + </p> + </div> + %endfor + </div> +</div> +<script type="text/javascript"> + finalstates = new Array('done', 'error', 'ok'); + $('.indexcell').click(function() { + status = $(this).html(); + elem = $(this); + if (status != 'Generate') { + return; + } + longname = $(this).attr('data-longname'); + dbkey = $(this).attr('data-dbkey'); + indexes = $(this).attr('data-index'); + path = $(this).attr('data-fapath'); + $.post('${h.url_for( controller='data_admin', action='index_build' )}', { longname: longname, dbkey: dbkey, indexes: indexes, path: path }, function(data) { + if (data == 'ERROR') { + alert('There was an error.'); + } + else { + elem.html('Generating'); + elem.attr('class', 'indexcell Generating'); + } + newhtml = '<div data-dbkey="' + dbkey + '" data-name="' + longname + '" data-indexes="' + indexes + '" id="job-' + data + '" class="historyItem-new historyItemWrapper historyItem">' + + '<p>Job ID <a href="${h.url_for( controller='data_admin', action='monitor_status')}?job=' + data + '">' + data + '</a>: ' + + 'Index <em>' + longname + '</em> with ' + indexes + '</p></div>'; + $('#recentJobs').prepend(newhtml); + $('#job-' + data).delay(3000).queue(function(n) { + checkJob(data); + n(); + }); + }); + }); + + function checkJob(jobid) { + $.get('${h.url_for( controller='data_admin', action='get_jobs' )}', { jobid: jobid }, function(data) { + jsondata = JSON.parse(data)[0]; + jsondata["name"] = $('#job-' + jobid).attr('data-name'); + jsondata["dbkey"] = $('#job-' + jobid).attr('data-dbkey'); + jsondata["indexes"] = $('#job-' + jobid).attr('data-indexes'); + newhtml = makeNewHTML(jsondata); + $('#job-' + jobid).replaceWith(newhtml); + if ($.inArray(jsondata["status"], finalstates) == -1) { + $('#job-' + jobid).delay(3000).queue(function(n) { + checkJob(jobid); + n(); + }); + } + if (jsondata["status"] == 'done' || jsondata["status"] == 'ok') { + elem = $('#' + jsondata["dbkey"] + '-' + jsondata["indexes"]); + elem.html('Generated'); + elem.attr('class', 'indexcell Generated'); + } + }); + 
}
+
+    function makeNewHTML(jsondata) {
+        newhtml = '<div data-dbkey="' + jsondata["dbkey"] + '" data-name="' + jsondata["name"] + '" data-indexes="' + jsondata["indexes"] + '" id="job-' + jsondata["jobid"] + '" class="historyItem-' + jsondata["status"] + ' historyItemWrapper historyItem">' +
+            '<p>Job ID <a href="${h.url_for( controller='data_admin', action='monitor_status')}?job=' + jsondata["jobid"] + '">' + jsondata["jobid"] + '</a>: ' +
+            'Index <em>' + jsondata["name"] + '</em> with ' + jsondata["indexes"] + '</p></div>';
+        return newhtml;
+    }
+
+    $(document).ready(function() {
+        $('.historyItem').each(function() {
+            state = $(this).attr('data-state');
+            jobid = $(this).attr('data-jobid');
+            if ($.inArray(state, finalstates) == -1) {
+                checkJob(jobid);
+            }
+        });
+    });
+
+</script>
\ No newline at end of file

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.
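To tie the new pieces together: cron/get_ensembl.py writes one tab-separated dbkey/release/species line per build into tool-data/shared/ensembl/builds.txt, and the read_ensembl() helper added to lib/galaxy/util/__init__.py filters those lines (dropping comments and builds UCSC already serves) into the dicts that back the Ensembl section of the new download form. Below is a standalone sketch of that per-line parse with the Galaxy plumbing removed; the helper name read_ensembl_line and the sample line are ours for illustration, not part of the commit:

    def read_ensembl_line(line, ucsc_builds=()):
        """Parse one builds.txt line the way util.read_ensembl() does."""
        # Skip comments and lines with an empty dbkey.
        if line[0:1] in ['#', '\t']:
            return None
        fields = line.replace("\r", "").replace("\n", "").split("\t")
        # Builds UCSC already serves are skipped in favor of publicbuilds.txt.
        if fields[0] in ucsc_builds:
            return None
        return dict(dbkey=fields[0], release=fields[1], name=fields[2].replace('_', ' '))

    # Invented sample line in the "dbkey<TAB>release<TAB>species" format
    # emitted by cron/get_ensembl.py:
    build = read_ensembl_line("calJac3\t56\tCallithrix jacchus (Marmoset/calJac3)")
    print build['dbkey'], build['release'], build['name']
    # calJac3 56 Callithrix jacchus (Marmoset/calJac3)

The resulting dbkey and release then fill the Ensembl FTP URL template used by download_build(): ftp://ftp.ensembl.org/pub/release-%s/fasta/%s/dna/%s.%s.%s.dna.toplevel.fa.gz.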