galaxy-commits
June 2012: 1 participant, 98 discussions
commit/galaxy-central: inithello: Made genome tool backwards compatible with python 2.5.
by Bitbucket 04 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/29b8e39db109/
changeset: 29b8e39db109
user: inithello
date: 2012-06-04 19:18:58
summary: Made genome tool backwards compatible with python 2.5.
affected #: 9 files
diff -r e276278db3b01601b0432294fa0eef297a0e7937 -r 29b8e39db1094cb5a5586c4f7b404428641d518c lib/galaxy/jobs/deferred/genome_transfer.py
--- a/lib/galaxy/jobs/deferred/genome_transfer.py
+++ b/lib/galaxy/jobs/deferred/genome_transfer.py
@@ -1,6 +1,8 @@
"""
Module for managing genome transfer jobs.
"""
+from __future__ import with_statement
+
import logging, shutil, gzip, bz2, zipfile, tempfile, tarfile, sys
from galaxy import eggs
@@ -212,13 +214,17 @@
transfer.state = 'done'
self.sa_session.add( job )
self.sa_session.add( transfer )
+ if transfer.state == 'done':
+ if params[ 'indexes' ] is not None:
+ for indexer in params[ 'indexes' ]:
+ incoming = dict(indexer=indexer, dbkey=params[ 'dbkey' ], intname=params[ 'intname' ], path=transfer.path, user=params['user'] )
+ deferred = self.tool.execute( self, set_output_hid=False, history=None, incoming=incoming, transfer=transfer, deferred=job )
+ job.params[ 'indexjobs' ].append( deferred[0].id )
+ else:
+ job.state = self.app.model.DeferredJob.states.OK
+ self.sa_session.add( job )
+ return self.app.model.DeferredJob.states.OK
self.sa_session.flush()
- if transfer.state == 'done' and params[ 'indexes' ] is not None:
- for indexer in params[ 'indexes' ]:
- incoming = dict(indexer=indexer, dbkey=params[ 'dbkey' ], intname=params[ 'intname' ], path=transfer.path, user=params['user'] )
- deferred = self.tool.execute( self, set_output_hid=False, history=None, incoming=incoming, transfer=transfer, deferred=job )
- job.params[ 'indexjobs' ].append( deferred[0].id )
- return self.app.model.DeferredJob.states.OK
def _check_compress( self, filepath ):
retval = ''
diff -r e276278db3b01601b0432294fa0eef297a0e7937 -r 29b8e39db1094cb5a5586c4f7b404428641d518c lib/galaxy/jobs/deferred/liftover_transfer.py
--- a/lib/galaxy/jobs/deferred/liftover_transfer.py
+++ b/lib/galaxy/jobs/deferred/liftover_transfer.py
@@ -1,6 +1,8 @@
"""
Module for managing genome transfer jobs.
"""
+from __future__ import with_statement
+
import logging, shutil, gzip, tempfile, sys
from galaxy import eggs
@@ -31,11 +33,11 @@
self.app = app
self.sa_session = app.model.context.current
- def create_job( self, trans, url, dbkey, from_genome, to_genome, destfile ):
+ def create_job( self, trans, url, dbkey, from_genome, to_genome, destfile, parentjob ):
job = trans.app.transfer_manager.new( protocol='http', url=url )
params = dict( user=trans.user.id, transfer_job_id=job.id, protocol='http',
type='init_transfer', dbkey=dbkey, from_genome=from_genome,
- to_genome=to_genome, destfile=destfile )
+ to_genome=to_genome, destfile=destfile, parentjob=parentjob )
deferred = trans.app.model.DeferredJob( state = self.app.model.DeferredJob.states.NEW, plugin = 'LiftOverTransferPlugin', params = params )
self.sa_session.add( deferred )
self.sa_session.flush()
@@ -59,7 +61,19 @@
return self.job_states.WAIT
elif job.transfer_job.state == 'new':
assert job.params[ 'protocol' ] in [ 'http', 'ftp', 'https' ], 'Unknown protocol %s' % job.params[ 'protocol' ]
- self.app.transfer_manager.run( job.transfer_job )
+ ready = True
+ parent = self.sa_session.query( self.app.model.DeferredJob ).get( int( job.params[ 'parentjob' ] ) )
+ if not hasattr( parent, 'transfer_job' ):
+ parent.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( parent.params[ 'transfer_job_id' ] ) )
+ if parent.transfer_job.state not in [ 'ok', 'error', 'done' ]:
+ ready = False
+ for lo_job in parent.params[ 'liftover' ]:
+ liftoverjob = self.sa_session.query( self.app.model.TransferJob ).get( int( lo_job ) )
+ if liftoverjob:
+ if liftoverjob.state not in [ 'ok', 'error', 'new', 'done' ]:
+ ready = False
+ if ready:
+ self.app.transfer_manager.run( job.transfer_job )
self.sa_session.add( job.transfer_job )
self.sa_session.flush()
return self.job_states.WAIT
@@ -116,6 +130,15 @@
job.params[ 'type' ] = 'finish_transfer'
transfer.path = os.path.abspath(destfilepath)
transfer.state = 'done'
+ parentjob = self.sa_session.query( self.app.model.DeferredJob ).get( int( job.params[ 'parentjob' ] ) )
+ finished = True
+ for i in parentjob.params[ 'liftover' ]:
+ sibling = self.sa_session.query( self.app.model.DeferredJob ).get( int( i ) )
+ if sibling.state not in [ 'done', 'ok', 'error' ]:
+ finished = False
+ if finished:
+ parentjob.state = self.app.model.DeferredJob.states.OK
+ self.sa_session.add( parentjob )
self.sa_session.add( job )
self.sa_session.add( transfer )
self.sa_session.flush()
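The liftover hunks above gate a transfer on the state of its parent genome-transfer job and of its sibling liftover jobs. A simplified sketch of that readiness check, with the SQLAlchemy session and model classes as hypothetical stand-ins for Galaxy's own:

    FINAL_STATES = ('ok', 'error', 'done')

    def liftover_ready(sa_session, job, DeferredJob, TransferJob):
        # The parent genome transfer must have settled before any liftover runs.
        parent = sa_session.query(DeferredJob).get(int(job.params['parentjob']))
        if parent.transfer_job.state not in FINAL_STATES:
            return False
        # Every sibling liftover transfer must be settled or still queued ('new').
        for lo_id in parent.params['liftover']:
            sibling = sa_session.query(TransferJob).get(int(lo_id))
            if sibling and sibling.state not in FINAL_STATES + ('new',):
                return False
        return True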
diff -r e276278db3b01601b0432294fa0eef297a0e7937 -r 29b8e39db1094cb5a5586c4f7b404428641d518c lib/galaxy/tools/genome_index/__init__.py
--- a/lib/galaxy/tools/genome_index/__init__.py
+++ b/lib/galaxy/tools/genome_index/__init__.py
@@ -1,4 +1,7 @@
+from __future__ import with_statement
+
import os, shutil, logging, tempfile, json, tarfile
+
from galaxy import model, util
from galaxy.web.framework.helpers import to_unicode
from galaxy.model.item_attrs import UsesAnnotations
diff -r e276278db3b01601b0432294fa0eef297a0e7937 -r 29b8e39db1094cb5a5586c4f7b404428641d518c lib/galaxy/tools/genome_index/index_genome.py
--- a/lib/galaxy/tools/genome_index/index_genome.py
+++ b/lib/galaxy/tools/genome_index/index_genome.py
@@ -5,6 +5,8 @@
usage: %prog history_attrs dataset_attrs job_attrs out_file
-G, --gzip: gzip archive file
"""
+from __future__ import with_statement
+
import optparse, sys, os, tempfile, time, subprocess, shlex, json, tarfile, shutil
class ManagedIndexer():
diff -r e276278db3b01601b0432294fa0eef297a0e7937 -r 29b8e39db1094cb5a5586c4f7b404428641d518c lib/galaxy/web/controllers/data_admin.py
--- a/lib/galaxy/web/controllers/data_admin.py
+++ b/lib/galaxy/web/controllers/data_admin.py
@@ -30,6 +30,8 @@
@web.expose
@web.require_admin
def manage_data( self, trans, **kwd ):
+ if trans.app.config.get_bool( 'enable_beta_job_managers', False ) == False:
+ return trans.fill_template( '/admin/data_admin/betajob.mako' )
dbkeys = trans.db_builds
return trans.fill_template( '/admin/data_admin/data_form.mako', dbkeys=dbkeys )
@@ -87,6 +89,7 @@
newlift = None
pass
ftp.retrlines('NLST /goldenPath/%s/bigZips/' % dbkey, checker.append)
+ ftp.quit()
for filename in [ dbkey, 'chromFa' ]:
for extension in [ '.tar.gz', '.tar.bz2', '.zip', '.fa.gz', '.fa.bz2' ]:
testfile = '/goldenPath/%s/bigZips/%s%s' % ( dbkey, filename, extension )
@@ -129,7 +132,7 @@
from_genome = chain[1]
to_genome = chain[2]
destfile = liftover_url.split('/')[-1].replace('.gz', '')
- chainjob.append( trans.app.job_manager.deferred_job_queue.plugins['LiftOverTransferPlugin'].create_job( trans, liftover_url, dbkey, from_genome, to_genome, destfile ) )
+ chainjob.append( trans.app.job_manager.deferred_job_queue.plugins['LiftOverTransferPlugin'].create_job( trans, liftover_url, dbkey, from_genome, to_genome, destfile, jobid ) )
job = trans.app.job_manager.deferred_job_queue.plugins['GenomeTransferPlugin'].get_job_status( jobid )
job.params['liftover'] = chainjob
trans.app.model.context.current.add( job )
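A small but easy-to-miss fix in the data_admin.py hunk above is the added ftp.quit(), which closes the FTP control connection once the NLST listing has been collected. A sketch of the pattern with Python's standard ftplib (the host mirrors the UCSC goldenPath layout referenced above, but treat it as a placeholder):

    from ftplib import FTP

    def list_bigzips(dbkey):
        listing = []
        ftp = FTP('hgdownload.cse.ucsc.edu')
        ftp.login()  # anonymous login
        try:
            # NLST emits one file name per line; collect them via the callback.
            ftp.retrlines('NLST /goldenPath/%s/bigZips/' % dbkey, listing.append)
        finally:
            ftp.quit()  # politely close the control connection
        return listing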
diff -r e276278db3b01601b0432294fa0eef297a0e7937 -r 29b8e39db1094cb5a5586c4f7b404428641d518c templates/admin/data_admin/betajob.mako
--- /dev/null
+++ b/templates/admin/data_admin/betajob.mako
@@ -0,0 +1,35 @@
+<%inherit file="/base.mako"/>
+<%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/library/common/common.mako" import="common_javascripts" />
+
+<%!
+ def inherit(context):
+ if context.get('use_panels'):
+ return '/webapps/galaxy/base_panels.mako'
+ else:
+ return '/base.mako'
+%>
+<%inherit file="${inherit(context)}"/>
+
+<%def name="init()">
+<%
+ self.has_left_panel=False
+ self.has_right_panel=False
+ self.message_box_visible=False
+ self.active_view="user"
+ self.overlay_visible=False
+ self.has_accessible_datasets = False
+%>
+</%def>
+<%def name="stylesheets()">
+ ${parent.stylesheets()}
+ ${h.css( "autocomplete_tagging" )}
+</%def>
+<%def name="javascripts()">
+ ${parent.javascripts()}
+ ${h.js("jquery.autocomplete", "autocomplete_tagging" )}
+</%def>
+##
+## Override methods from base.mako and base_panels.mako
+##
+<p class="panel-error-message">This feature requires that enable_beta_job_managers be set to True in your Galaxy configuration.</p>
\ No newline at end of file
diff -r e276278db3b01601b0432294fa0eef297a0e7937 -r 29b8e39db1094cb5a5586c4f7b404428641d518c templates/admin/data_admin/data_form.mako
--- a/templates/admin/data_admin/data_form.mako
+++ b/templates/admin/data_admin/data_form.mako
@@ -52,7 +52,6 @@
<div class="form-row"><label for="source">Data Source</label><select id="datasource" name="source" label="Data Source">
- <option value="local">localhost</option><option value="UCSC">UCSC</option><option value="Broad">Broad Institute</option><option value="NCBI">NCBI</option>
diff -r e276278db3b01601b0432294fa0eef297a0e7937 -r 29b8e39db1094cb5a5586c4f7b404428641d518c templates/admin/data_admin/download_status.mako
--- a/templates/admin/data_admin/download_status.mako
+++ b/templates/admin/data_admin/download_status.mako
@@ -39,6 +39,7 @@
<a href="${h.url_for( controller='data_admin', action='manage_data' )}">Return to the download form</a><script type="text/javascript">
jobs = ${jsonjobs}
+ finalstates = new Array('done', 'error', 'ok');
function makeHTML(jobrow) {
jc = 'jobrow ' + jobrow['style'];
@@ -50,18 +51,6 @@
'<td style="padding: 0px 5px;">' + jobrow['status'] + '</td></tr>';
}
- function getNewHtml(jobid, jobtype, elm) {
- $.get('${h.url_for( controller='data_admin', action='job_status' )}', { jobid: jobid, jobtype: jobtype }, function(data) {
- jsondata = JSON.parse(data);
- status = jsondata['status'];
- htmldata = makeHTML(jsondata);
- idval = '#' + jobtype + '-job-' + jobid;
- if (htmldata != undefined) {
- $(elm).replaceWith(htmldata);
- }
- });
- }
-
function checkJobs() {
var alldone = true;
var mainjob;
@@ -70,9 +59,8 @@
if ($(this).attr('data-jobtype') == 'deferred') {
mainjob = $(this).attr('data-jobid');
}
- if (status != 'done' && status != 'error' && status != 'ok') {
+ if ($.inArray(status, finalstates) == -1) {
alldone = false;
- getNewHtml($(this).attr('data-jobid'), $(this).attr('data-jobtype'), $(this));
}
});
if (!alldone) {
@@ -88,10 +76,12 @@
$.get('${h.url_for( controller='data_admin', action='get_jobs' )}', { jobid: mainjob }, function(data) {
jsondata = JSON.parse(data);
for (i in jsondata) {
+ currentjob = jsondata[i]
if (jobs[i] == undefined) {
$('#jobStatus').append(makeHTML(jsondata[i]));
jobs.push(jsondata[i]);
}
+ $('#' + currentjob['type'] + '-job-' + currentjob['jobid']).replaceWith(makeHTML(currentjob));
}
});
}
diff -r e276278db3b01601b0432294fa0eef297a0e7937 -r 29b8e39db1094cb5a5586c4f7b404428641d518c templates/webapps/galaxy/admin/index.mako
--- a/templates/webapps/galaxy/admin/index.mako
+++ b/templates/webapps/galaxy/admin/index.mako
@@ -57,7 +57,9 @@
<div class="toolSectionBg"><div class="toolTitle"><a href="${h.url_for( controller='admin', action='quotas', webapp=webapp )}" target="galaxy_main">Manage quotas</a></div><div class="toolTitle"><a href="${h.url_for( controller='library_admin', action='browse_libraries' )}" target="galaxy_main">Manage data libraries</a></div>
- <div class="toolTitle"><a href="${h.url_for( controller='data_admin', action='manage_data' )}" target="galaxy_main">Manage local data</a></div>
+ %if trans.app.config.enable_beta_job_managers:
+ <div class="toolTitle"><a href="${h.url_for( controller='data_admin', action='manage_data' )}" target="galaxy_main">Manage local data</a></div>
+ %endif
                </div>
            </div>
            <div class="toolSectionPad"></div>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
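A note on the backwards-compatibility fix itself: on Python 2.5 the with statement is only available behind a __future__ import (it became a plain keyword in 2.6), which is why each touched module gains that import at the top. A minimal sketch of the pattern:

    from __future__ import with_statement  # required on Python 2.5, a no-op on 2.6+

    def read_first_line(path):
        # The context manager closes the handle even if an exception is raised.
        with open(path) as handle:
            return handle.readline().rstrip('\n')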
commit/galaxy-central: jgoecks: Place all JavaScript visualization code under static/scripts/viz directory.
by Bitbucket 04 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/e276278db3b0/
changeset: e276278db3b0
user: jgoecks
date: 2012-06-04 18:26:52
summary: Place all JavaScript visualization code under static/scripts/viz directory.
affected #: 14 files
Diff too large to display.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Clarify inputs for forward, reverse reads in Tophat2 wrapper.
by Bitbucket 04 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/4b799028749a/
changeset: 4b799028749a
user: jgoecks
date: 2012-06-04 15:29:51
summary: Clarify inputs for forward, reverse reads in Tophat2 wrapper.
affected #: 1 file
diff -r 82f11c6b5da6339115dd4499ad317eb341835fa8 -r 4b799028749a3e8c0138a8a507c79f78e6edd349 tools/ngs_rna/tophat2_wrapper.xml
--- a/tools/ngs_rna/tophat2_wrapper.xml
+++ b/tools/ngs_rna/tophat2_wrapper.xml
@@ -124,8 +124,8 @@
<param format="fastqsanger" name="input1" type="data" label="RNA-Seq FASTQ file" help="Nucleotide-space: Must have Sanger-scaled quality values with ASCII offset 33"/></when><when value="paired">
- <param format="fastqsanger" name="input1" type="data" label="RNA-Seq FASTQ file" help="Nucleotide-space: Must have Sanger-scaled quality values with ASCII offset 33" />
- <param format="fastqsanger" name="input2" type="data" label="RNA-Seq FASTQ file" help="Nucleotide-space: Must have Sanger-scaled quality values with ASCII offset 33" />
+ <param format="fastqsanger" name="input1" type="data" label="RNA-Seq FASTQ file, forward reads" help="Nucleotide-space: Must have Sanger-scaled quality values with ASCII offset 33" />
+ <param format="fastqsanger" name="input2" type="data" label="RNA-Seq FASTQ file, reverse reads" help="Nucleotide-space: Must have Sanger-scaled quality values with ASCII offset 33" /><param name="mate_inner_distance" type="integer" value="300" label="Mean Inner Distance between Mate Pairs" /><param name="mate_std_dev" type="integer" value="20" label="Std. Dev for Distance between Mate Pairs" help="The standard deviation for the distribution on inner distances between mate pairs."/><!-- Discordant pairs. -->
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Add entry for Tophat2 indices to tool data table.
by Bitbucket 04 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/82f11c6b5da6/
changeset: 82f11c6b5da6
user: jgoecks
date: 2012-06-04 15:07:19
summary: Add entry for Tophat2 indices to tool data table.
affected #: 1 file
diff -r 44cc39cf2bd41154aa1573e253b87f77ec97ae99 -r 82f11c6b5da6339115dd4499ad317eb341835fa8 tool_data_table_conf.xml.sample
--- a/tool_data_table_conf.xml.sample
+++ b/tool_data_table_conf.xml.sample
@@ -105,6 +105,11 @@
        <columns>value, dbkey, name, path</columns>
        <file path="tool-data/bowtie_indices.loc" color="tool-data/bowtie_indices_color.loc" />
    </table>
+ <!-- Locations of indexes in the Bowtie2 mapper format for TopHat2 to use -->
+ <table name="tophat2_indexes" comment_char="#">
+ <columns>value, dbkey, name, path</columns>
+ <file path="tool-data/bowtie2_indices.loc" />
+    </table>
    <!-- Locations of configurations in the CCAT peak/region caller format -->
    <table name="ccat_configurations" comment_char="#">
        <columns>value, name, path</columns>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
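The new tophat2_indexes table points at the existing bowtie2_indices.loc file rather than a new one, since TopHat2 maps with Bowtie2 indexes directly. A .loc file is a tab-separated table with '#' comments, one row per build, matching the declared columns; a minimal reader sketch (the path in the usage note is illustrative):

    def read_loc(path, columns=('value', 'dbkey', 'name', 'path')):
        rows = []
        for line in open(path):
            line = line.rstrip('\n')
            # comment_char="#" in the table definition; skip comments and blanks.
            if not line or line.startswith('#'):
                continue
            rows.append(dict(zip(columns, line.split('\t'))))
        return rows

    # e.g. read_loc('tool-data/bowtie2_indices.loc')[0]['dbkey']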
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/97338d705f3a/
changeset: 97338d705f3a
user: jgoecks
date: 2012-06-04 05:07:54
summary: Fix import to be compatible with previous changeset.
affected #: 1 file
diff -r 4051e35d4cfd4f0f692b392553d9d99c9f388724 -r 97338d705f3acb4baa4e2f4d460c74fd8dde5761 lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py
+++ b/lib/galaxy/web/controllers/tracks.py
@@ -15,7 +15,7 @@
from galaxy.datatypes.interval import Gff, Bed
from galaxy.model import NoConverterException, ConverterDependencyException
from galaxy.visualization.tracks.data_providers import *
-from galaxy.visualization.tracks.genomes import decode_dbkey, Genomes
+from galaxy.visualization.genomes import decode_dbkey, Genomes
from galaxy.visualization.tracks.visual_analytics import get_tool_def, get_dataset_job
# Message strings returned to browser
https://bitbucket.org/galaxy/galaxy-central/changeset/44cc39cf2bd4/
changeset: 44cc39cf2bd4
user: jgoecks
date: 2012-06-04 05:09:08
summary: Merge
affected #: 1 file
diff -r 97338d705f3acb4baa4e2f4d460c74fd8dde5761 -r 44cc39cf2bd41154aa1573e253b87f77ec97ae99 lib/galaxy/visualization/genomes.py
--- a/lib/galaxy/visualization/genomes.py
+++ b/lib/galaxy/visualization/genomes.py
@@ -32,47 +32,12 @@
self.key = key
self.len_file = len_file
self.twobit_file = twobit_file
-
-
-class Genomes( object ):
- """
- Provides information about available genome data and methods for manipulating that data.
- """
-
- def __init__( self, app ):
- # Create list of known genomes from len files.
- self.genomes = {}
- len_files = glob.glob( os.path.join( app.config.len_file_path, "*.len" ) )
- for f in len_files:
- key = os.path.split( f )[1].split( ".len" )[0]
- self.genomes[ key ] = Genome( key, len_file=f )
-
- # Add genome data (twobit files) to genomes.
- for line in open( os.path.join( app.config.tool_data_path, "twobit.loc" ) ):
- if line.startswith("#"): continue
- val = line.split()
- if len( val ) == 2:
- key, path = val
- if key in self.genomes:
- self.genomes[ key ].twobit_file = path
-
- def get_dbkeys_with_chrom_info( self, trans ):
- """ Returns all valid dbkeys that have chromosome information. """
-
- # All user keys have a len file.
- user_keys = {}
- user = trans.get_user()
- if 'dbkeys' in user.preferences:
- user_keys = from_json_string( user.preferences['dbkeys'] )
-
- dbkeys = [ (v, k) for k, v in trans.db_builds if ( ( k in self.genomes and self.genomes[ k ].len_file ) or k in user_keys ) ]
- return dbkeys
-
- def chroms( self, trans, dbkey=None, num=None, chrom=None, low=None ):
+
+ def to_dict( self, num=None, chrom=None, low=None ):
"""
- Returns a naturally sorted list of chroms/contigs for a given dbkey.
- Use either chrom or low to specify the starting chrom in the return list.
+ Returns representation of self as a dictionary.
"""
+
def check_int(s):
if s.isdigit():
return int(s)
@@ -97,47 +62,13 @@
else:
low = 0
- # If there is no dbkey owner, default to current user.
- dbkey_owner, dbkey = decode_dbkey( dbkey )
- if dbkey_owner:
- dbkey_user = trans.sa_session.query( trans.app.model.User ).filter_by( username=dbkey_owner ).first()
- else:
- dbkey_user = trans.user
-
- #
- # Get len file.
- #
-
- # Look first in user's custom builds.
- len_file = None
- len_ds = None
- user_keys = {}
- if dbkey_user and 'dbkeys' in dbkey_user.preferences:
- user_keys = from_json_string( dbkey_user.preferences['dbkeys'] )
- if dbkey in user_keys:
- dbkey_attributes = user_keys[ dbkey ]
- if 'fasta' in dbkey_attributes:
- build_fasta = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dbkey_attributes[ 'fasta' ] )
- len_file = build_fasta.get_converted_dataset( trans, 'len' ).file_name
- # Backwards compatibility: look for len file directly.
- elif 'len' in dbkey_attributes:
- len_file = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( user_keys[ dbkey ][ 'len' ] ).file_name
-
- # Look in system builds.
- if not len_file:
- len_ds = trans.db_dataset_for( dbkey )
- if not len_ds:
- len_file = self.genomes[ dbkey ].len_file
- else:
- len_file = len_ds.file_name
-
#
# Get chroms data:
# (a) chrom name, len;
# (b) whether there are previous, next chroms;
# (c) index of start chrom.
#
- len_file_enumerate = enumerate( open( len_file ) )
+ len_file_enumerate = enumerate( open( self.len_file ) )
chroms = {}
prev_chroms = False
start_index = 0
@@ -169,11 +100,6 @@
start_index = low
# Read chrom data from len file.
- # TODO: this may be too slow for very large numbers of chroms/contigs,
- # but try it out for now.
- if not os.path.exists( len_file ):
- return None
-
for line_num, line in len_file_enumerate:
if line_num < low:
continue
@@ -197,9 +123,99 @@
to_sort = [{ 'chrom': chrom, 'len': length } for chrom, length in chroms.iteritems()]
to_sort.sort(lambda a,b: cmp( split_by_number(a['chrom']), split_by_number(b['chrom']) ))
- return { 'reference': self.has_reference_data( trans, dbkey, dbkey_user ), 'chrom_info': to_sort,
- 'prev_chroms' : prev_chroms, 'next_chroms' : next_chroms, 'start_index' : start_index }
+ return {
+ 'id': self.key,
+ 'reference': self.twobit_file is not None,
+ 'chrom_info': to_sort,
+ 'prev_chroms' : prev_chroms,
+ 'next_chroms' : next_chroms,
+ 'start_index' : start_index
+ }
+
+class Genomes( object ):
+ """
+ Provides information about available genome data and methods for manipulating that data.
+ """
+
+ def __init__( self, app ):
+ # Create list of known genomes from len files.
+ self.genomes = {}
+ len_files = glob.glob( os.path.join( app.config.len_file_path, "*.len" ) )
+ for f in len_files:
+ key = os.path.split( f )[1].split( ".len" )[0]
+ self.genomes[ key ] = Genome( key, len_file=f )
+
+ # Add genome data (twobit files) to genomes.
+ for line in open( os.path.join( app.config.tool_data_path, "twobit.loc" ) ):
+ if line.startswith("#"): continue
+ val = line.split()
+ if len( val ) == 2:
+ key, path = val
+ if key in self.genomes:
+ self.genomes[ key ].twobit_file = path
+
+ def get_build( self, dbkey ):
+ """ Returns build for the given key. """
+ rval = None
+ if dbkey in self.genomes:
+ rval = self.genomes[ dbkey ]
+ return rval
+
+ def get_dbkeys_with_chrom_info( self, trans ):
+ """ Returns all valid dbkeys that have chromosome information. """
+ # All user keys have a len file.
+ user_keys = {}
+ user = trans.get_user()
+ if 'dbkeys' in user.preferences:
+ user_keys = from_json_string( user.preferences['dbkeys'] )
+
+ dbkeys = [ (v, k) for k, v in trans.db_builds if ( ( k in self.genomes and self.genomes[ k ].len_file ) or k in user_keys ) ]
+ return dbkeys
+
+ def chroms( self, trans, dbkey=None, num=None, chrom=None, low=None ):
+ """
+ Returns a naturally sorted list of chroms/contigs for a given dbkey.
+ Use either chrom or low to specify the starting chrom in the return list.
+ """
+
+ # If there is no dbkey owner, default to current user.
+ dbkey_owner, dbkey = decode_dbkey( dbkey )
+ if dbkey_owner:
+ dbkey_user = trans.sa_session.query( trans.app.model.User ).filter_by( username=dbkey_owner ).first()
+ else:
+ dbkey_user = trans.user
+
+ #
+ # Get/create genome object.
+ #
+ genome = None
+
+ # Look first in user's custom builds.
+ if dbkey_user and 'dbkeys' in dbkey_user.preferences:
+ user_keys = from_json_string( dbkey_user.preferences['dbkeys'] )
+ if dbkey in user_keys:
+ dbkey_attributes = user_keys[ dbkey ]
+ if 'fasta' in dbkey_attributes:
+ build_fasta = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dbkey_attributes[ 'fasta' ] )
+ len_file = build_fasta.get_converted_dataset( trans, 'len' ).file_name
+ # Backwards compatibility: look for len file directly.
+ elif 'len' in dbkey_attributes:
+ len_file = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( user_keys[ dbkey ][ 'len' ] ).file_name
+ if len_file:
+ genome = Genome( dbkey, len_file=len_file )
+
+
+ # Look in system builds.
+ if not genome:
+ len_ds = trans.db_dataset_for( dbkey )
+ if not len_ds:
+ genome = self.genomes[ dbkey ]
+ else:
+ genome = Genome( dbkey, len_file=len_ds.file_name )
+
+ return genome.to_dict( num=num, chrom=chrom, low=low )
+
def has_reference_data( self, trans, dbkey, dbkey_owner=None ):
"""
Returns true if there is reference data for the specified dbkey. If dbkey is custom,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
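A note on the check_int/split_by_number helpers that move into Genome.to_dict above: together they implement a natural sort, so chromosome names order as chr1, chr2, chr10 rather than lexicographically. The original relies on Python 2's cmp(); the same idea expressed as a sort key, which also runs on Python 3:

    import re

    def split_by_number(s):
        # 'chr10' -> ['chr', 10, ''], so numeric runs compare as integers.
        return [int(c) if c.isdigit() else c for c in re.split('([0-9]+)', s)]

    names = ['chr10', 'chr2', 'chrX', 'chr1']
    print(sorted(names, key=split_by_number))  # ['chr1', 'chr2', 'chr10', 'chrX']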
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/a3ca6efab775/
changeset: a3ca6efab775
user: jgoecks
date: 2012-06-03 17:08:45
summary: Enhance genome support: (a) make genome information available in Galaxy app and (b) refactor genome code out of visualization mixin and into Genome objects.
affected #: 6 files
diff -r 1890cb0d1cfbb3ef5a09affcdd18d2b8acf7d811 -r a3ca6efab775b82435770daea85c842617912727 lib/galaxy/app.py
--- a/lib/galaxy/app.py
+++ b/lib/galaxy/app.py
@@ -12,6 +12,7 @@
from galaxy.objectstore import build_object_store_from_config
import galaxy.quota
from galaxy.tags.tag_handler import GalaxyTagHandler
+from galaxy.visualization.genomes import Genomes
from galaxy.tools.imp_exp import load_history_imp_exp_tools
from galaxy.tools.genome_index import load_genome_index_tools
from galaxy.sample_tracking import external_service_types
@@ -70,6 +71,8 @@
self.security = security.SecurityHelper( id_secret=self.config.id_secret )
# Tag handler
self.tag_handler = GalaxyTagHandler()
+ # Genomes
+ self.genomes = Genomes( self )
# Tool data tables
self.tool_data_tables = galaxy.tools.data.ToolDataTableManager( self.config.tool_data_table_config_path )
# Initialize the tools, making sure the list of tool configs includes the reserved migrated_tools_conf.xml file.
diff -r 1890cb0d1cfbb3ef5a09affcdd18d2b8acf7d811 -r a3ca6efab775b82435770daea85c842617912727 lib/galaxy/visualization/genomes.py
--- /dev/null
+++ b/lib/galaxy/visualization/genomes.py
@@ -0,0 +1,289 @@
+import os, re, sys, glob
+from bx.seq.twobit import TwoBitFile
+from galaxy.util.json import from_json_string
+from galaxy import model
+from galaxy.util.bunch import Bunch
+
+# FIXME: copied from tracks.py
+# Message strings returned to browser
+messages = Bunch(
+ PENDING = "pending",
+ NO_DATA = "no data",
+ NO_CHROMOSOME = "no chromosome",
+ NO_CONVERTER = "no converter",
+ NO_TOOL = "no tool",
+ DATA = "data",
+ ERROR = "error",
+ OK = "ok"
+)
+
+def decode_dbkey( dbkey ):
+ """ Decodes dbkey and returns tuple ( username, dbkey )"""
+ if ':' in dbkey:
+ return dbkey.split( ':' )
+ else:
+ return None, dbkey
+
+class Genome( object ):
+ """
+ Encapsulates information about a known genome/dbkey.
+ """
+ def __init__( self, key, len_file=None, twobit_file=None ):
+ self.key = key
+ self.len_file = len_file
+ self.twobit_file = twobit_file
+
+
+class Genomes( object ):
+ """
+ Provides information about available genome data and methods for manipulating that data.
+ """
+
+ def __init__( self, app ):
+ # Create list of known genomes from len files.
+ self.genomes = {}
+ len_files = glob.glob( os.path.join( app.config.len_file_path, "*.len" ) )
+ for f in len_files:
+ key = os.path.split( f )[1].split( ".len" )[0]
+ self.genomes[ key ] = Genome( key, len_file=f )
+
+ # Add genome data (twobit files) to genomes.
+ for line in open( os.path.join( app.config.tool_data_path, "twobit.loc" ) ):
+ if line.startswith("#"): continue
+ val = line.split()
+ if len( val ) == 2:
+ key, path = val
+ if key in self.genomes:
+ self.genomes[ key ].twobit_file = path
+
+ def get_dbkeys_with_chrom_info( self, trans ):
+ """ Returns all valid dbkeys that have chromosome information. """
+
+ # All user keys have a len file.
+ user_keys = {}
+ user = trans.get_user()
+ if 'dbkeys' in user.preferences:
+ user_keys = from_json_string( user.preferences['dbkeys'] )
+
+ dbkeys = [ (v, k) for k, v in trans.db_builds if ( ( k in self.genomes and self.genomes[ k ].len_file ) or k in user_keys ) ]
+ return dbkeys
+
+ def chroms( self, trans, dbkey=None, num=None, chrom=None, low=None ):
+ """
+ Returns a naturally sorted list of chroms/contigs for a given dbkey.
+ Use either chrom or low to specify the starting chrom in the return list.
+ """
+ def check_int(s):
+ if s.isdigit():
+ return int(s)
+ else:
+ return s
+
+ def split_by_number(s):
+ return [ check_int(c) for c in re.split('([0-9]+)', s) ]
+
+ #
+ # Parameter check, setting.
+ #
+ if num:
+ num = int( num )
+ else:
+ num = sys.maxint
+
+ if low:
+ low = int( low )
+ if low < 0:
+ low = 0
+ else:
+ low = 0
+
+ # If there is no dbkey owner, default to current user.
+ dbkey_owner, dbkey = decode_dbkey( dbkey )
+ if dbkey_owner:
+ dbkey_user = trans.sa_session.query( trans.app.model.User ).filter_by( username=dbkey_owner ).first()
+ else:
+ dbkey_user = trans.user
+
+ #
+ # Get len file.
+ #
+ len_file = None
+ len_ds = None
+ user_keys = {}
+ if dbkey_user and 'dbkeys' in dbkey_user.preferences:
+ user_keys = from_json_string( dbkey_user.preferences['dbkeys'] )
+ if dbkey in user_keys:
+ dbkey_attributes = user_keys[ dbkey ]
+ if 'fasta' in dbkey_attributes:
+ build_fasta = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dbkey_attributes[ 'fasta' ] )
+ len_file = build_fasta.get_converted_dataset( trans, 'len' ).file_name
+ # Backwards compatibility: look for len file directly.
+ elif 'len' in dbkey_attributes:
+ len_file = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( user_keys[ dbkey ][ 'len' ] ).file_name
+
+ if not len_file:
+ len_ds = trans.db_dataset_for( dbkey )
+ if not len_ds:
+ len_file = os.path.join( trans.app.config.len_file_path, "%s.len" % dbkey )
+ else:
+ len_file = len_ds.file_name
+
+ #
+ # Get chroms data:
+ # (a) chrom name, len;
+ # (b) whether there are previous, next chroms;
+ # (c) index of start chrom.
+ #
+ len_file_enumerate = enumerate( open( len_file ) )
+ chroms = {}
+ prev_chroms = False
+ start_index = 0
+ if chrom:
+ # Use starting chrom to start list.
+ found = False
+ count = 0
+ for line_num, line in len_file_enumerate:
+ if line.startswith("#"):
+ continue
+ name, len = line.split("\t")
+ if found:
+ chroms[ name ] = int( len )
+ count += 1
+ elif name == chrom:
+ # Found starting chrom.
+ chroms[ name ] = int ( len )
+ count += 1
+ found = True
+ start_index = line_num
+ if line_num != 0:
+ prev_chroms = True
+ if count >= num:
+ break
+ else:
+ # Use low to start list.
+ high = low + int( num )
+ prev_chroms = ( low != 0 )
+ start_index = low
+
+ # Read chrom data from len file.
+ # TODO: this may be too slow for very large numbers of chroms/contigs,
+ # but try it out for now.
+ if not os.path.exists( len_file ):
+ return None
+
+ for line_num, line in len_file_enumerate:
+ if line_num < low:
+ continue
+ if line_num >= high:
+ break
+ if line.startswith("#"):
+ continue
+ # LEN files have format:
+ # <chrom_name><tab><chrom_length>
+ fields = line.split("\t")
+ chroms[ fields[0] ] = int( fields[1] )
+
+ # Set flag to indicate whether there are more chroms after list.
+ next_chroms = False
+ try:
+ len_file_enumerate.next()
+ next_chroms = True
+ except:
+ # No more chroms to read.
+ pass
+
+ to_sort = [{ 'chrom': chrom, 'len': length } for chrom, length in chroms.iteritems()]
+ to_sort.sort(lambda a,b: cmp( split_by_number(a['chrom']), split_by_number(b['chrom']) ))
+ return { 'reference': self.has_reference_data( trans, dbkey, dbkey_user ), 'chrom_info': to_sort,
+ 'prev_chroms' : prev_chroms, 'next_chroms' : next_chroms, 'start_index' : start_index }
+
+ def has_reference_data( self, trans, dbkey, dbkey_owner=None ):
+ """
+ Returns true if there is reference data for the specified dbkey. If dbkey is custom,
+ dbkey_owner is needed to determine if there is reference data.
+ """
+ # Look for key in built-in builds.
+ if dbkey in self.available_genomes:
+ # There is built-in reference data.
+ return True
+
+ # Look for key in owner's custom builds.
+ if dbkey_owner and 'dbkeys' in dbkey_owner.preferences:
+ user_keys = from_json_string( dbkey_owner.preferences[ 'dbkeys' ] )
+ if dbkey in user_keys:
+ dbkey_attributes = user_keys[ dbkey ]
+ if 'fasta' in dbkey_attributes:
+ # Fasta + converted datasets can provide reference data.
+ return True
+
+ return False
+
+ def reference( self, trans, dbkey, chrom, low, high, **kwargs ):
+ """
+ Return reference data for a build.
+ """
+
+ # If there is no dbkey owner, default to current user.
+ dbkey_owner, dbkey = decode_dbkey( dbkey )
+ if dbkey_owner:
+ dbkey_user = trans.sa_session.query( trans.app.model.User ).filter_by( username=dbkey_owner ).first()
+ else:
+ dbkey_user = trans.user
+
+ if not self.has_reference_data( trans, dbkey, dbkey_user ):
+ return None
+
+ #
+ # Get twobit file with reference data.
+ #
+ twobit_file_name = None
+ if dbkey in self.available_genomes:
+ # Built-in twobit.
+ twobit_file_name = self.available_genomes[dbkey]
+ else:
+ user_keys = from_json_string( dbkey_user.preferences['dbkeys'] )
+ dbkey_attributes = user_keys[ dbkey ]
+ fasta_dataset = trans.app.model.HistoryDatasetAssociation.get( dbkey_attributes[ 'fasta' ] )
+ error = self._convert_dataset( trans, fasta_dataset, 'twobit' )
+ if error:
+ return error
+ else:
+ twobit_dataset = fasta_dataset.get_converted_dataset( trans, 'twobit' )
+ twobit_file_name = twobit_dataset.file_name
+
+ # Read and return reference data.
+ try:
+ twobit = TwoBitFile( open( twobit_file_name ) )
+ if chrom in twobit:
+ seq_data = twobit[chrom].get( int(low), int(high) )
+ return { 'dataset_type': 'refseq', 'data': seq_data }
+ except IOError:
+ return None
+
+ ## FIXME: copied from tracks.py (tracks controller) - this should be consolidated when possible.
+ def _convert_dataset( self, trans, dataset, target_type ):
+ """
+ Converts a dataset to the target_type and returns a message indicating
+ status of the conversion. None is returned to indicate that dataset
+ was converted successfully.
+ """
+
+ # Get converted dataset; this will start the conversion if necessary.
+ try:
+ converted_dataset = dataset.get_converted_dataset( trans, target_type )
+ except NoConverterException:
+ return messages.NO_CONVERTER
+ except ConverterDependencyException, dep_error:
+ return { 'kind': messages.ERROR, 'message': dep_error.value }
+
+ # Check dataset state and return any messages.
+ msg = None
+ if converted_dataset and converted_dataset.state == model.Dataset.states.ERROR:
+ job_id = trans.sa_session.query( trans.app.model.JobToOutputDatasetAssociation ) \
+ .filter_by( dataset_id=converted_dataset.id ).first().job_id
+ job = trans.sa_session.query( trans.app.model.Job ).get( job_id )
+ msg = { 'kind': messages.ERROR, 'message': job.stderr }
+ elif not converted_dataset or converted_dataset.state != model.Dataset.states.OK:
+ msg = messages.PENDING
+
+ return msg
\ No newline at end of file
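The reference() method above reads sequence out of .2bit files through bx-python. A short usage sketch of that API as the new module exercises it (the file path is a placeholder):

    from bx.seq.twobit import TwoBitFile

    twobit = TwoBitFile(open('/data/hg19.2bit', 'rb'))
    if 'chr1' in twobit:
        # Fetch the bases between positions 1000 and 1050 on chr1.
        print(twobit['chr1'].get(1000, 1050))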
diff -r 1890cb0d1cfbb3ef5a09affcdd18d2b8acf7d811 -r a3ca6efab775b82435770daea85c842617912727 lib/galaxy/visualization/tracks/genomes.py
--- a/lib/galaxy/visualization/tracks/genomes.py
+++ /dev/null
@@ -1,264 +0,0 @@
-import os, re, sys
-from bx.seq.twobit import TwoBitFile
-from galaxy.util.json import from_json_string
-from galaxy import model
-from galaxy.util.bunch import Bunch
-
-# FIXME: copied from tracks.py
-# Message strings returned to browser
-messages = Bunch(
- PENDING = "pending",
- NO_DATA = "no data",
- NO_CHROMOSOME = "no chromosome",
- NO_CONVERTER = "no converter",
- NO_TOOL = "no tool",
- DATA = "data",
- ERROR = "error",
- OK = "ok"
-)
-
-def decode_dbkey( dbkey ):
- """ Decodes dbkey and returns tuple ( username, dbkey )"""
- if ':' in dbkey:
- return dbkey.split( ':' )
- else:
- return None, dbkey
-
-
-class Genomes( object ):
- """
- Provides information about available genome data and methods for manipulating that data.
- """
-
- def __init__( self, app ):
- # Create list of available genomes.
- self.available_genomes = None
- avail_genomes = {}
- for line in open( os.path.join( app.config.tool_data_path, "twobit.loc" ) ):
- if line.startswith("#"): continue
- val = line.split()
- if len( val ) == 2:
- key, path = val
- avail_genomes[ key ] = path
- self.available_genomes = avail_genomes
-
- def chroms( self, trans, dbkey=None, num=None, chrom=None, low=None ):
- """
- Returns a naturally sorted list of chroms/contigs for a given dbkey.
- Use either chrom or low to specify the starting chrom in the return list.
- """
- def check_int(s):
- if s.isdigit():
- return int(s)
- else:
- return s
-
- def split_by_number(s):
- return [ check_int(c) for c in re.split('([0-9]+)', s) ]
-
- #
- # Parameter check, setting.
- #
- if num:
- num = int( num )
- else:
- num = sys.maxint
-
- if low:
- low = int( low )
- if low < 0:
- low = 0
- else:
- low = 0
-
- # If there is no dbkey owner, default to current user.
- dbkey_owner, dbkey = decode_dbkey( dbkey )
- if dbkey_owner:
- dbkey_user = trans.sa_session.query( trans.app.model.User ).filter_by( username=dbkey_owner ).first()
- else:
- dbkey_user = trans.user
-
- #
- # Get len file.
- #
- len_file = None
- len_ds = None
- user_keys = {}
- if dbkey_user and 'dbkeys' in dbkey_user.preferences:
- user_keys = from_json_string( dbkey_user.preferences['dbkeys'] )
- if dbkey in user_keys:
- dbkey_attributes = user_keys[ dbkey ]
- if 'fasta' in dbkey_attributes:
- build_fasta = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dbkey_attributes[ 'fasta' ] )
- len_file = build_fasta.get_converted_dataset( trans, 'len' ).file_name
- # Backwards compatibility: look for len file directly.
- elif 'len' in dbkey_attributes:
- len_file = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( user_keys[ dbkey ][ 'len' ] ).file_name
-
- if not len_file:
- len_ds = trans.db_dataset_for( dbkey )
- if not len_ds:
- len_file = os.path.join( trans.app.config.len_file_path, "%s.len" % dbkey )
- else:
- len_file = len_ds.file_name
-
- #
- # Get chroms data:
- # (a) chrom name, len;
- # (b) whether there are previous, next chroms;
- # (c) index of start chrom.
- #
- len_file_enumerate = enumerate( open( len_file ) )
- chroms = {}
- prev_chroms = False
- start_index = 0
- if chrom:
- # Use starting chrom to start list.
- found = False
- count = 0
- for line_num, line in len_file_enumerate:
- if line.startswith("#"):
- continue
- name, len = line.split("\t")
- if found:
- chroms[ name ] = int( len )
- count += 1
- elif name == chrom:
- # Found starting chrom.
- chroms[ name ] = int ( len )
- count += 1
- found = True
- start_index = line_num
- if line_num != 0:
- prev_chroms = True
- if count >= num:
- break
- else:
- # Use low to start list.
- high = low + int( num )
- prev_chroms = ( low != 0 )
- start_index = low
-
- # Read chrom data from len file.
- # TODO: this may be too slow for very large numbers of chroms/contigs,
- # but try it out for now.
- if not os.path.exists( len_file ):
- return None
-
- for line_num, line in len_file_enumerate:
- if line_num < low:
- continue
- if line_num >= high:
- break
- if line.startswith("#"):
- continue
- # LEN files have format:
- # <chrom_name><tab><chrom_length>
- fields = line.split("\t")
- chroms[ fields[0] ] = int( fields[1] )
-
- # Set flag to indicate whether there are more chroms after list.
- next_chroms = False
- try:
- len_file_enumerate.next()
- next_chroms = True
- except:
- # No more chroms to read.
- pass
-
- to_sort = [{ 'chrom': chrom, 'len': length } for chrom, length in chroms.iteritems()]
- to_sort.sort(lambda a,b: cmp( split_by_number(a['chrom']), split_by_number(b['chrom']) ))
- return { 'reference': self.has_reference_data( trans, dbkey, dbkey_user ), 'chrom_info': to_sort,
- 'prev_chroms' : prev_chroms, 'next_chroms' : next_chroms, 'start_index' : start_index }
-
- def has_reference_data( self, trans, dbkey, dbkey_owner=None ):
- """
- Returns true if there is reference data for the specified dbkey. If dbkey is custom,
- dbkey_owner is needed to determine if there is reference data.
- """
- # Look for key in built-in builds.
- if dbkey in self.available_genomes:
- # There is built-in reference data.
- return True
-
- # Look for key in owner's custom builds.
- if dbkey_owner and 'dbkeys' in dbkey_owner.preferences:
- user_keys = from_json_string( dbkey_owner.preferences[ 'dbkeys' ] )
- if dbkey in user_keys:
- dbkey_attributes = user_keys[ dbkey ]
- if 'fasta' in dbkey_attributes:
- # Fasta + converted datasets can provide reference data.
- return True
-
- return False
-
- def reference( self, trans, dbkey, chrom, low, high, **kwargs ):
- """
- Return reference data for a build.
- """
-
- # If there is no dbkey owner, default to current user.
- dbkey_owner, dbkey = decode_dbkey( dbkey )
- if dbkey_owner:
- dbkey_user = trans.sa_session.query( trans.app.model.User ).filter_by( username=dbkey_owner ).first()
- else:
- dbkey_user = trans.user
-
- if not self.has_reference_data( trans, dbkey, dbkey_user ):
- return None
-
- #
- # Get twobit file with reference data.
- #
- twobit_file_name = None
- if dbkey in self.available_genomes:
- # Built-in twobit.
- twobit_file_name = self.available_genomes[dbkey]
- else:
- user_keys = from_json_string( dbkey_user.preferences['dbkeys'] )
- dbkey_attributes = user_keys[ dbkey ]
- fasta_dataset = trans.app.model.HistoryDatasetAssociation.get( dbkey_attributes[ 'fasta' ] )
- error = self._convert_dataset( trans, fasta_dataset, 'twobit' )
- if error:
- return error
- else:
- twobit_dataset = fasta_dataset.get_converted_dataset( trans, 'twobit' )
- twobit_file_name = twobit_dataset.file_name
-
- # Read and return reference data.
- try:
- twobit = TwoBitFile( open( twobit_file_name ) )
- if chrom in twobit:
- seq_data = twobit[chrom].get( int(low), int(high) )
- return { 'dataset_type': 'refseq', 'data': seq_data }
- except IOError:
- return None
-
- ## FIXME: copied from tracks.py (tracks controller) - this should be consolidated when possible.
- def _convert_dataset( self, trans, dataset, target_type ):
- """
- Converts a dataset to the target_type and returns a message indicating
- status of the conversion. None is returned to indicate that dataset
- was converted successfully.
- """
-
- # Get converted dataset; this will start the conversion if necessary.
- try:
- converted_dataset = dataset.get_converted_dataset( trans, target_type )
- except NoConverterException:
- return messages.NO_CONVERTER
- except ConverterDependencyException, dep_error:
- return { 'kind': messages.ERROR, 'message': dep_error.value }
-
- # Check dataset state and return any messages.
- msg = None
- if converted_dataset and converted_dataset.state == model.Dataset.states.ERROR:
- job_id = trans.sa_session.query( trans.app.model.JobToOutputDatasetAssociation ) \
- .filter_by( dataset_id=converted_dataset.id ).first().job_id
- job = trans.sa_session.query( trans.app.model.Job ).get( job_id )
- msg = { 'kind': messages.ERROR, 'message': job.stderr }
- elif not converted_dataset or converted_dataset.state != model.Dataset.states.OK:
- msg = messages.PENDING
-
- return msg
-
\ No newline at end of file
diff -r 1890cb0d1cfbb3ef5a09affcdd18d2b8acf7d811 -r a3ca6efab775b82435770daea85c842617912727 lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -370,22 +370,6 @@
encoded_id = trans.security.encode_id( vis.id )
return { "vis_id": encoded_id, "url": url_for( action='browser', id=encoded_id ) }
- def _get_dbkeys( self, trans ):
- """ Returns all valid dbkeys that a user can use in a visualization. """
-
- # Read len files.
- if not self.len_files:
- len_files = glob.glob( os.path.join(trans.app.config.len_file_path, "*.len") )
- self.len_files = [ os.path.split(f)[1].split(".len")[0] for f in len_files ] # get xxx.len
-
- user_keys = {}
- user = trans.get_user()
- if 'dbkeys' in user.preferences:
- user_keys = from_json_string( user.preferences['dbkeys'] )
-
- dbkeys = [ (v, k) for k, v in trans.db_builds if k in self.len_files or k in user_keys ]
- return dbkeys
-
def get_visualization( self, trans, id, check_ownership=True, check_accessible=False ):
""" Get a Visualization from the database by id, verifying ownership. """
# Load workflow from database
diff -r 1890cb0d1cfbb3ef5a09affcdd18d2b8acf7d811 -r a3ca6efab775b82435770daea85c842617912727 lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py
+++ b/lib/galaxy/web/controllers/tracks.py
@@ -173,10 +173,6 @@
histories_grid = HistorySelectionGrid()
history_datasets_grid = HistoryDatasetsSelectionGrid()
tracks_grid = TracksterSelectionGrid()
-
- def __init__(self, app ):
- BaseUIController.__init__( self, app )
- self.genomes = Genomes( self.app )
@web.expose
@web.require_login()
@@ -188,7 +184,7 @@
@web.expose
@web.require_login()
def new_browser( self, trans, **kwargs ):
- return trans.fill_template( "tracks/new_browser.mako", dbkeys=self._get_dbkeys( trans ), default_dbkey=kwargs.get("default_dbkey", None) )
+ return trans.fill_template( "tracks/new_browser.mako", dbkeys=trans.app.genomes.get_dbkeys_with_chrom_info( trans ), default_dbkey=kwargs.get("default_dbkey", None) )
@web.json
def add_track_async(self, trans, hda_id=None, ldda_id=None):
diff -r 1890cb0d1cfbb3ef5a09affcdd18d2b8acf7d811 -r a3ca6efab775b82435770daea85c842617912727 lib/galaxy/web/controllers/visualization.py
--- a/lib/galaxy/web/controllers/visualization.py
+++ b/lib/galaxy/web/controllers/visualization.py
@@ -377,7 +377,7 @@
from the visualization title, but can be edited. This field
must contain only lowercase letters, numbers, and
the '-' character.""" )
- .add_select( "visualization_dbkey", "Visualization DbKey/Build", value=visualization_dbkey, options=self._get_dbkeys( trans ), error=None)
+ .add_select( "visualization_dbkey", "Visualization DbKey/Build", value=visualization_dbkey, options=trans.app.genomes.get_dbkeys_with_chrom_info( trans ), error=None)
.add_text( "visualization_annotation", "Visualization annotation", value=visualization_annotation, error=visualization_annotation_err,
help="A description of the visualization; annotation is shown alongside published visualizations."),
template="visualization/create.mako" )
https://bitbucket.org/galaxy/galaxy-central/changeset/4051e35d4cfd/
changeset: 4051e35d4cfd
user: jgoecks
date: 2012-06-03 18:19:30
summary: More support for genomes as first-class objects: (a) add genomes API; (b) fix bugs in providing genome data.
affected #: 4 files
diff -r a3ca6efab775b82435770daea85c842617912727 -r 4051e35d4cfd4f0f692b392553d9d99c9f388724 lib/galaxy/visualization/genomes.py
--- a/lib/galaxy/visualization/genomes.py
+++ b/lib/galaxy/visualization/genomes.py
@@ -107,6 +107,8 @@
#
# Get len file.
#
+
+ # Look first in user's custom builds.
len_file = None
len_ds = None
user_keys = {}
@@ -121,10 +123,11 @@
elif 'len' in dbkey_attributes:
len_file = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( user_keys[ dbkey ][ 'len' ] ).file_name
+ # Look in system builds.
if not len_file:
len_ds = trans.db_dataset_for( dbkey )
if not len_ds:
- len_file = os.path.join( trans.app.config.len_file_path, "%s.len" % dbkey )
+ len_file = self.genomes[ dbkey ].len_file
else:
len_file = len_ds.file_name
@@ -203,7 +206,7 @@
dbkey_owner is needed to determine if there is reference data.
"""
# Look for key in built-in builds.
- if dbkey in self.available_genomes:
+ if dbkey in self.genomes and self.genomes[ dbkey ].twobit_file:
# There is built-in reference data.
return True
diff -r a3ca6efab775b82435770daea85c842617912727 -r 4051e35d4cfd4f0f692b392553d9d99c9f388724 lib/galaxy/web/api/genomes.py
--- /dev/null
+++ b/lib/galaxy/web/api/genomes.py
@@ -0,0 +1,25 @@
+from galaxy import config, tools, web, util
+from galaxy.web.base.controller import BaseController, BaseAPIController
+from galaxy.util.bunch import Bunch
+
+class GenomesController( BaseAPIController ):
+ """
+ RESTful controller for interactions with genome data.
+ """
+
+ @web.expose_api
+ def index( self, trans, **kwds ):
+ """
+ GET /api/genomes: returns a list of installed genomes
+ """
+
+ return []
+
+ @web.json
+ def show( self, trans, id, num=None, chrom=None, low=None ):
+ """
+ GET /api/genomes/{id}
+
+ Returns information about build <id>
+ """
+ return self.app.genomes.chroms( trans, dbkey=id, num=num, chrom=chrom, low=low )
\ No newline at end of file
diff -r a3ca6efab775b82435770daea85c842617912727 -r 4051e35d4cfd4f0f692b392553d9d99c9f388724 lib/galaxy/web/buildapp.py
--- a/lib/galaxy/web/buildapp.py
+++ b/lib/galaxy/web/buildapp.py
@@ -132,6 +132,7 @@
webapp.api_mapper.resource_with_deleted( 'quota', 'quotas', path_prefix='/api' )
webapp.api_mapper.resource( 'tool', 'tools', path_prefix='/api' )
webapp.api_mapper.resource_with_deleted( 'user', 'users', path_prefix='/api' )
+ webapp.api_mapper.resource( 'genome', 'genomes', path_prefix='/api' )
webapp.api_mapper.resource( 'visualization', 'visualizations', path_prefix='/api' )
webapp.api_mapper.resource( 'workflow', 'workflows', path_prefix='/api' )
webapp.api_mapper.resource_with_deleted( 'history', 'histories', path_prefix='/api' )
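The one-line buildapp.py change above wires the new controller into Galaxy's URL map. api_mapper is built on the Routes library, whose resource() call registers the standard RESTful verb-to-action routes; a sketch with standalone routes showing what the mapping implies:

    from routes import Mapper

    mapper = Mapper()
    mapper.resource('genome', 'genomes', path_prefix='/api')

    # resource() registers the usual REST routes, including:
    #   GET /api/genomes        -> action='index'
    #   GET /api/genomes/{id}   -> action='show'
    # which is why GenomesController above only implements index() and show().
    print(mapper.match('/api/genomes/hg19', environ={'REQUEST_METHOD': 'GET'}))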
diff -r a3ca6efab775b82435770daea85c842617912727 -r 4051e35d4cfd4f0f692b392553d9d99c9f388724 lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py
+++ b/lib/galaxy/web/controllers/tracks.py
@@ -268,11 +268,11 @@
@web.json
def chroms( self, trans, dbkey=None, num=None, chrom=None, low=None ):
- return self.genomes.chroms( trans, dbkey=dbkey, num=num, chrom=chrom, low=low )
+ return self.app.genomes.chroms( trans, dbkey=dbkey, num=num, chrom=chrom, low=low )
@web.json
def reference( self, trans, dbkey, chrom, low, high, **kwargs ):
- return self.genomes.reference( trans, dbkey, chrom, low, high, **kwargs )
+ return self.app.genomes.reference( trans, dbkey, chrom, low, high, **kwargs )
@web.json
def raw_data( self, trans, dataset_id, chrom, low, high, **kwargs ):
@@ -771,7 +771,7 @@
# Get genome info.
dbkey = dataset.dbkey
- chroms_info = self.genomes.chroms( trans, dbkey=dbkey )
+ chroms_info = self.app.genomes.chroms( trans, dbkey=dbkey )
genome = { 'dbkey': dbkey, 'chroms_info': chroms_info }
# Get summary tree data for dataset.
https://bitbucket.org/galaxy/galaxy-central/changeset/ae4a2de89589/
changeset: ae4a2de89589
user: jgoecks
date: 2012-06-03 19:52:12
summary: Enable Genome objects and chromosome information to be dictified.
affected #: 1 file
diff -r 4051e35d4cfd4f0f692b392553d9d99c9f388724 -r ae4a2de89589270d6d872eef6c99848168a97240 lib/galaxy/visualization/genomes.py
--- a/lib/galaxy/visualization/genomes.py
+++ b/lib/galaxy/visualization/genomes.py
@@ -32,47 +32,12 @@
self.key = key
self.len_file = len_file
self.twobit_file = twobit_file
-
-
-class Genomes( object ):
- """
- Provides information about available genome data and methods for manipulating that data.
- """
-
- def __init__( self, app ):
- # Create list of known genomes from len files.
- self.genomes = {}
- len_files = glob.glob( os.path.join( app.config.len_file_path, "*.len" ) )
- for f in len_files:
- key = os.path.split( f )[1].split( ".len" )[0]
- self.genomes[ key ] = Genome( key, len_file=f )
-
- # Add genome data (twobit files) to genomes.
- for line in open( os.path.join( app.config.tool_data_path, "twobit.loc" ) ):
- if line.startswith("#"): continue
- val = line.split()
- if len( val ) == 2:
- key, path = val
- if key in self.genomes:
- self.genomes[ key ].twobit_file = path
-
- def get_dbkeys_with_chrom_info( self, trans ):
- """ Returns all valid dbkeys that have chromosome information. """
-
- # All user keys have a len file.
- user_keys = {}
- user = trans.get_user()
- if 'dbkeys' in user.preferences:
- user_keys = from_json_string( user.preferences['dbkeys'] )
-
- dbkeys = [ (v, k) for k, v in trans.db_builds if ( ( k in self.genomes and self.genomes[ k ].len_file ) or k in user_keys ) ]
- return dbkeys
-
- def chroms( self, trans, dbkey=None, num=None, chrom=None, low=None ):
+
+ def to_dict( self, num=None, chrom=None, low=None ):
"""
- Returns a naturally sorted list of chroms/contigs for a given dbkey.
- Use either chrom or low to specify the starting chrom in the return list.
+ Returns representation of self as a dictionary.
"""
+
def check_int(s):
if s.isdigit():
return int(s)
@@ -97,47 +62,13 @@
else:
low = 0
- # If there is no dbkey owner, default to current user.
- dbkey_owner, dbkey = decode_dbkey( dbkey )
- if dbkey_owner:
- dbkey_user = trans.sa_session.query( trans.app.model.User ).filter_by( username=dbkey_owner ).first()
- else:
- dbkey_user = trans.user
-
- #
- # Get len file.
- #
-
- # Look first in user's custom builds.
- len_file = None
- len_ds = None
- user_keys = {}
- if dbkey_user and 'dbkeys' in dbkey_user.preferences:
- user_keys = from_json_string( dbkey_user.preferences['dbkeys'] )
- if dbkey in user_keys:
- dbkey_attributes = user_keys[ dbkey ]
- if 'fasta' in dbkey_attributes:
- build_fasta = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dbkey_attributes[ 'fasta' ] )
- len_file = build_fasta.get_converted_dataset( trans, 'len' ).file_name
- # Backwards compatibility: look for len file directly.
- elif 'len' in dbkey_attributes:
- len_file = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( user_keys[ dbkey ][ 'len' ] ).file_name
-
- # Look in system builds.
- if not len_file:
- len_ds = trans.db_dataset_for( dbkey )
- if not len_ds:
- len_file = self.genomes[ dbkey ].len_file
- else:
- len_file = len_ds.file_name
-
#
# Get chroms data:
# (a) chrom name, len;
# (b) whether there are previous, next chroms;
# (c) index of start chrom.
#
- len_file_enumerate = enumerate( open( len_file ) )
+ len_file_enumerate = enumerate( open( self.len_file ) )
chroms = {}
prev_chroms = False
start_index = 0
@@ -169,11 +100,6 @@
start_index = low
# Read chrom data from len file.
- # TODO: this may be too slow for very large numbers of chroms/contigs,
- # but try it out for now.
- if not os.path.exists( len_file ):
- return None
-
for line_num, line in len_file_enumerate:
if line_num < low:
continue
@@ -197,9 +123,99 @@
to_sort = [{ 'chrom': chrom, 'len': length } for chrom, length in chroms.iteritems()]
to_sort.sort(lambda a,b: cmp( split_by_number(a['chrom']), split_by_number(b['chrom']) ))
- return { 'reference': self.has_reference_data( trans, dbkey, dbkey_user ), 'chrom_info': to_sort,
- 'prev_chroms' : prev_chroms, 'next_chroms' : next_chroms, 'start_index' : start_index }
+ return {
+ 'id': self.key,
+ 'reference': self.twobit_file is not None,
+ 'chrom_info': to_sort,
+ 'prev_chroms' : prev_chroms,
+ 'next_chroms' : next_chroms,
+ 'start_index' : start_index
+ }
+
+class Genomes( object ):
+ """
+ Provides information about available genome data and methods for manipulating that data.
+ """
+
+ def __init__( self, app ):
+ # Create list of known genomes from len files.
+ self.genomes = {}
+ len_files = glob.glob( os.path.join( app.config.len_file_path, "*.len" ) )
+ for f in len_files:
+ key = os.path.split( f )[1].split( ".len" )[0]
+ self.genomes[ key ] = Genome( key, len_file=f )
+
+ # Add genome data (twobit files) to genomes.
+ for line in open( os.path.join( app.config.tool_data_path, "twobit.loc" ) ):
+ if line.startswith("#"): continue
+ val = line.split()
+ if len( val ) == 2:
+ key, path = val
+ if key in self.genomes:
+ self.genomes[ key ].twobit_file = path
+
+ def get_build( self, dbkey ):
+ """ Returns build for the given key. """
+ rval = None
+ if dbkey in self.genomes:
+ rval = self.genomes[ dbkey ]
+ return rval
+
+ def get_dbkeys_with_chrom_info( self, trans ):
+ """ Returns all valid dbkeys that have chromosome information. """
+ # All user keys have a len file.
+ user_keys = {}
+ user = trans.get_user()
+ if 'dbkeys' in user.preferences:
+ user_keys = from_json_string( user.preferences['dbkeys'] )
+
+ dbkeys = [ (v, k) for k, v in trans.db_builds if ( ( k in self.genomes and self.genomes[ k ].len_file ) or k in user_keys ) ]
+ return dbkeys
+
+ def chroms( self, trans, dbkey=None, num=None, chrom=None, low=None ):
+ """
+ Returns a naturally sorted list of chroms/contigs for a given dbkey.
+ Use either chrom or low to specify the starting chrom in the return list.
+ """
+
+ # If there is no dbkey owner, default to current user.
+ dbkey_owner, dbkey = decode_dbkey( dbkey )
+ if dbkey_owner:
+ dbkey_user = trans.sa_session.query( trans.app.model.User ).filter_by( username=dbkey_owner ).first()
+ else:
+ dbkey_user = trans.user
+
+ #
+ # Get/create genome object.
+ #
+ genome = None
+
+ # Look first in user's custom builds.
+ if dbkey_user and 'dbkeys' in dbkey_user.preferences:
+ user_keys = from_json_string( dbkey_user.preferences['dbkeys'] )
+        if dbkey in user_keys:
+            len_file = None
+            dbkey_attributes = user_keys[ dbkey ]
+ if 'fasta' in dbkey_attributes:
+ build_fasta = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dbkey_attributes[ 'fasta' ] )
+ len_file = build_fasta.get_converted_dataset( trans, 'len' ).file_name
+ # Backwards compatibility: look for len file directly.
+ elif 'len' in dbkey_attributes:
+ len_file = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( user_keys[ dbkey ][ 'len' ] ).file_name
+ if len_file:
+ genome = Genome( dbkey, len_file=len_file )
+
+ # Look in system builds.
+ if not genome:
+ len_ds = trans.db_dataset_for( dbkey )
+ if not len_ds:
+ genome = self.genomes[ dbkey ]
+ else:
+                genome = Genome( dbkey, len_file=len_ds.file_name )
+
+ return genome.to_dict( num=num, chrom=chrom, low=low )
+
def has_reference_data( self, trans, dbkey, dbkey_owner=None ):
"""
Returns true if there is reference data for the specified dbkey. If dbkey is custom,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
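For context on the natural sorting used in to_dict above, here is a minimal standalone sketch of the check_int/split_by_number pair. The body of split_by_number is an assumption inferred from its call site (its definition is not shown in this diff), and the contig names are illustrative.

import re

def check_int( s ):
    # Digit runs become ints so that 'chr10' sorts after 'chr2'.
    if s.isdigit():
        return int( s )
    return s

def split_by_number( s ):
    # Assumed helper (definition not shown in the diff): split a name into
    # alternating text/number pieces, e.g. 'chr10' -> [ 'chr', 10, '' ].
    return [ check_int( c ) for c in re.split( '([0-9]+)', s ) ]

# Illustrative contig names, sorted the same way to_dict sorts chrom_info.
names = [ 'chr10', 'chr2', 'chr1', 'chrX' ]
names.sort( lambda a, b: cmp( split_by_number( a ), split_by_number( b ) ) )
print names  # ['chr1', 'chr2', 'chr10', 'chrX']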
commit/galaxy-central: greg: Fix for finding the desired file within a specified change set of a tool shed repository manifest.
by Bitbucket 01 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/1890cb0d1cfb/
changeset: 1890cb0d1cfb
user: greg
date: 2012-06-01 21:37:38
summary: Fix for finding the desired file within a specified change set of a tool shed repository manifest.
affected #: 1 file
diff -r 018179ad4c9bcec30baea5aee0918f45d254deb2 -r 1890cb0d1cfbb3ef5a09affcdd18d2b8acf7d811 lib/galaxy/webapps/community/controllers/common.py
--- a/lib/galaxy/webapps/community/controllers/common.py
+++ b/lib/galaxy/webapps/community/controllers/common.py
@@ -360,20 +360,32 @@
"""Copy a file named filename from somewhere in the repository manifest to the directory to which dir refers."""
filename = strip_path( filename )
fctx = None
- # First see if the file is in ctx.
+ found = False
+ # First see if the file is in ctx. We have to be careful in determining if we found the correct file because multiple files
+ # with the same name may be in different directories within ctx if the repository owner moved the files as part of the change set.
+ # For example, in the following ctx.files() list, the former may have been moved to the latter:
+ # ['tmap_wrapper_0.0.19/tool_data_table_conf.xml.sample', 'tmap_wrapper_0.3.3/tool_data_table_conf.xml.sample']
for ctx_file in ctx.files():
ctx_file_name = strip_path( ctx_file )
if filename == ctx_file_name:
- fctx = ctx[ ctx_file ]
- else:
+ try:
+ fctx = ctx[ ctx_file ]
+ found = True
+ break
+ except:
+ continue
+ if not found:
# Find the file in the repository manifest.
for changeset in repo.changelog:
prev_ctx = repo.changectx( changeset )
for ctx_file in prev_ctx.files():
ctx_file_name = strip_path( ctx_file )
if filename == ctx_file_name:
- fctx = prev_ctx[ ctx_file ]
- break
+ try:
+ fctx = prev_ctx[ ctx_file ]
+ break
+ except:
+ continue
if fctx:
file_path = os.path.join( dir, filename )
fh = open( file_path, 'wb' )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
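As a standalone illustration of the matching problem this commit fixes: the find_file helper and ctx_files list below are hypothetical stand-ins for the Mercurial context objects, showing only the basename-match-and-break idea.

import os

def strip_path( fname ):
    # Keep only the basename; manifest entries carry directory paths.
    return os.path.split( fname )[1]

# Two manifest entries share a basename because the repository owner moved
# the file between tool versions (example taken from the comment above).
ctx_files = [ 'tmap_wrapper_0.0.19/tool_data_table_conf.xml.sample',
              'tmap_wrapper_0.3.3/tool_data_table_conf.xml.sample' ]

def find_file( filename, ctx_files ):
    # Stop at the first basename match, mirroring the 'found'/'break' logic
    # added above; scanning past a match risks picking up a second
    # same-named file from another directory in the change set.
    target = strip_path( filename )
    for ctx_file in ctx_files:
        if strip_path( ctx_file ) == target:
            return ctx_file
    return None

print find_file( 'tool_data_table_conf.xml.sample', ctx_files )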
commit/galaxy-central: greg: 1) Enhance the InstallManager to give the installer the ability to choose whether to install tool dependencies rather than installing them automatically.
by Bitbucket 01 Jun '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/018179ad4c9b/
changeset: 018179ad4c9b
user: greg
date: 2012-06-01 20:51:41
summary: 1) Enhance the InstallManager to give the installer the ability to choose whether to install tool dependencies rather than installing them automatically.
2) Persist changes to the tool_data_table_conf.xml file only when running within the Galaxy webapp.
3) Apply the 1-liner fix from change set 9ffef0de07f5 in Peter van Heusden's pull request, which fixes a problem with the way __init__ tests config.database_connection.
affected #: 8 files
diff -r 9ae91fb49fdd5762dc9a0374dfc471733da47143 -r 018179ad4c9bcec30baea5aee0918f45d254deb2 lib/galaxy/app.py
--- a/lib/galaxy/app.py
+++ b/lib/galaxy/app.py
@@ -31,6 +31,11 @@
db_url = self.config.database_connection
else:
db_url = "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database
+ # Set up the tool sheds registry
+ if os.path.isfile( self.config.tool_sheds_config ):
+ self.tool_shed_registry = galaxy.tool_shed.tool_shed_registry.Registry( self.config.root, self.config.tool_sheds_config )
+ else:
+ self.tool_shed_registry = None
# Initialize database / check for appropriate schema version. # If this
# is a new installation, we'll restrict the tool migration messaging.
from galaxy.model.migrate.check import create_or_verify_database
@@ -47,11 +52,6 @@
self.config.database_engine_options,
database_query_profiling_proxy = self.config.database_query_profiling_proxy,
object_store = self.object_store )
- # Set up the tool sheds registry
- if os.path.isfile( self.config.tool_sheds_config ):
- self.tool_shed_registry = galaxy.tool_shed.tool_shed_registry.Registry( self.config.root, self.config.tool_sheds_config )
- else:
- self.tool_shed_registry = None
# Manage installed tool shed repositories.
self.installed_repository_manager = galaxy.tool_shed.InstalledRepositoryManager( self )
# Create an empty datatypes registry.
diff -r 9ae91fb49fdd5762dc9a0374dfc471733da47143 -r 018179ad4c9bcec30baea5aee0918f45d254deb2 lib/galaxy/tool_shed/install_manager.py
--- a/lib/galaxy/tool_shed/install_manager.py
+++ b/lib/galaxy/tool_shed/install_manager.py
@@ -7,9 +7,12 @@
from galaxy.util.json import from_json_string, to_json_string
from galaxy.util.shed_util import *
from galaxy.util.odict import odict
+from galaxy.tool_shed.migrate.common import *
+
+REPOSITORY_OWNER = 'devteam'
class InstallManager( object ):
- def __init__( self, app, latest_migration_script_number, tool_shed_install_config, migrated_tools_config ):
+ def __init__( self, app, latest_migration_script_number, tool_shed_install_config, migrated_tools_config, install_dependencies ):
"""
Check tool settings in tool_shed_install_config and install all repositories that are not already installed. The tool
panel configuration file is the received migrated_tools_config, which is the reserved file named migrated_tools_conf.xml.
@@ -30,9 +33,9 @@
tree = util.parse_xml( tool_shed_install_config )
root = tree.getroot()
self.tool_shed = clean_tool_shed_url( root.get( 'name' ) )
- self.repository_owner = 'devteam'
+ self.repository_owner = REPOSITORY_OWNER
for repository_elem in root:
- self.install_repository( repository_elem )
+ self.install_repository( repository_elem, install_dependencies )
def get_guid( self, repository_clone_url, relative_install_dir, tool_config ):
found = False
for root, dirs, files in os.walk( relative_install_dir ):
@@ -117,7 +120,8 @@
if not is_displayed:
is_displayed = True
return is_displayed, tool_sections
- def handle_repository_contents( self, repository_clone_url, relative_install_dir, repository_elem, repository_name, description, changeset_revision, ctx_rev ):
+ def handle_repository_contents( self, repository_clone_url, relative_install_dir, repository_elem, repository_name, description, changeset_revision,
+ ctx_rev, install_dependencies ):
# Generate the metadata for the installed tool shed repository, among other things. It is critical that the installed repository is
# updated to the desired changeset_revision before metadata is set because the process for setting metadata uses the repository files on disk.
# The values for the keys in each of the following dictionaries will be a list to allow for the same tool to be displayed in multiple places
@@ -162,7 +166,7 @@
repository_tools_tups, sample_files_copied = handle_missing_index_file( self.app, self.tool_path, sample_files, repository_tools_tups )
# Copy remaining sample files included in the repository to the ~/tool-data directory of the local Galaxy instance.
copy_sample_files( self.app, sample_files, sample_files_copied=sample_files_copied )
- if 'tool_dependencies' in metadata_dict:
+ if install_dependencies and 'tool_dependencies' in metadata_dict:
# Get the tool_dependencies.xml file from the repository.
tool_dependencies_config = get_config_from_repository( self.app,
'tool_dependencies.xml',
@@ -220,7 +224,7 @@
except:
pass
return tool_shed_repository, metadata_dict
- def install_repository( self, repository_elem ):
+ def install_repository( self, repository_elem, install_dependencies ):
# Install a single repository, loading contained tools into the tool panel.
name = repository_elem.get( 'name' )
description = repository_elem.get( 'description' )
@@ -241,7 +245,8 @@
name,
description,
changeset_revision,
- ctx_rev )
+ ctx_rev,
+ install_dependencies )
if 'tools' in metadata_dict:
# Get the tool_versions from the tool shed for each tool in the installed change set.
url = '%s/repository/get_tool_versions?name=%s&owner=%s&changeset_revision=%s&webapp=galaxy&no_reset=true' % \
diff -r 9ae91fb49fdd5762dc9a0374dfc471733da47143 -r 018179ad4c9bcec30baea5aee0918f45d254deb2 lib/galaxy/tool_shed/migrate/check.py
--- a/lib/galaxy/tool_shed/migrate/check.py
+++ b/lib/galaxy/tool_shed/migrate/check.py
@@ -6,6 +6,7 @@
from migrate.versioning import repository, schema
from sqlalchemy import *
from common import *
+from galaxy.util.odict import odict
log = logging.getLogger( __name__ )
@@ -44,13 +45,20 @@
if latest_tool_migration_script_number != db_schema.version:
if app.new_installation:
# New installations will not be missing tools, so we don't need to worry about them.
- missing_tool_configs = []
+ missing_tool_configs_dict = odict()
else:
tool_panel_configs = get_non_shed_tool_panel_configs( app )
if tool_panel_configs:
- missing_tool_configs = check_for_missing_tools( tool_panel_configs, latest_tool_migration_script_number )
+ # The missing_tool_configs_dict contents are something like:
+ # {'emboss_antigenic.xml': [('emboss', '5.0.0', 'package', '\nreadme blah blah blah\n')]}
+ missing_tool_configs_dict = check_for_missing_tools( app, tool_panel_configs, latest_tool_migration_script_number )
else:
- missing_tool_configs = []
+ missing_tool_configs_dict = odict()
+ have_tool_dependencies = False
+ for k, v in missing_tool_configs_dict.items():
+ if v:
+ have_tool_dependencies = True
+ break
config_arg = ''
if os.path.abspath( os.path.join( os.getcwd(), 'universe_wsgi.ini' ) ) != galaxy_config_file:
config_arg = ' -c %s' % galaxy_config_file.replace( os.path.abspath( os.getcwd() ), '.' )
@@ -62,7 +70,7 @@
output = proc.stdout.read( 32768 )
if return_code != 0:
raise Exception( "Error attempting to update the value of migrate_tools.version: %s" % output )
- elif missing_tool_configs:
+ elif missing_tool_configs_dict:
if len( tool_panel_configs ) == 1:
plural = ''
tool_panel_config_file_names = tool_panel_configs[ 0 ]
@@ -71,8 +79,8 @@
tool_panel_config_file_names = ', '.join( tool_panel_configs )
msg = "\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
msg += "\n\nThe list of files at the end of this message refers to tools that are configured to load into the tool panel for\n"
- msg += "this Galaxy instance, but have been removed from the Galaxy distribution. These tools can be automatically installed\n"
- msg += "from the Galaxy tool shed at http://toolshed.g2.bx.psu.edu.\n\n"
+ msg += "this Galaxy instance, but have been removed from the Galaxy distribution. These tools and their dependencies can be\n"
+ msg += "automatically installed from the Galaxy tool shed at http://toolshed.g2.bx.psu.edu.\n\n"
msg += "To skip this process, attempt to start your Galaxy server again (e.g., sh run.sh or whatever you use). If you do this,\n"
msg += "be aware that these tools will no longer be available in your Galaxy tool panel, and entries for each of them should\n"
msg += "be removed from your file%s named %s.\n\n" % ( plural, tool_panel_config_file_names )
@@ -87,17 +95,45 @@
msg += "configured could result in undesired behavior when modifying or updating your local Galaxy instance or the tool shed\n"
msg += "repositories if they are in directories that pose conflicts. See mercurial's .hgignore documentation at the following\n"
msg += "URL for details.\n\nhttp://mercurial.selenic.com/wiki/.hgignore\n\n"
- msg += output
+ if have_tool_dependencies:
+ msg += "The following tool dependencies can also optionally be installed (see the option flag in the command below). If you\n"
+ msg += "choose to install them (recommended), they will be installed within the location specified by the 'tool_dependency_dir'\n"
+            msg += "setting in your main Galaxy configuration file (e.g., universe_wsgi.ini).\n"
+ processed_tool_dependencies = []
+ for missing_tool_config, tool_dependencies in missing_tool_configs_dict.items():
+ for tool_dependencies_tup in tool_dependencies:
+ if tool_dependencies_tup not in processed_tool_dependencies:
+ msg += "------------------------------------\n"
+ msg += "Tool Dependency\n"
+ msg += "------------------------------------\n"
+ msg += "Name: %s, Version: %s, Type: %s\n" % ( tool_dependencies_tup[ 0 ],
+ tool_dependencies_tup[ 1 ],
+ tool_dependencies_tup[ 2 ] )
+ if tool_dependencies_tup[ 3 ]:
+ msg += "Requirements and installation information:\n"
+ msg += "%s\n" % tool_dependencies_tup[ 3 ]
+ else:
+ msg += "\n"
+ msg += "------------------------------------\n"
+ processed_tool_dependencies.append( tool_dependencies_tup )
+ msg += "\n"
+ msg += "%s" % output.replace( 'done', '' )
+ msg += "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n"
+ msg += "sh ./scripts/migrate_tools/%04d_tools.sh\n" % latest_tool_migration_script_number
+ msg += "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n"
+ if have_tool_dependencies:
+ msg += "The tool dependencies listed above will be installed along with the repositories if you add the 'install_dependencies'\n"
+ msg += "option to the above command like this:\n\n"
+ msg += "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n"
+ msg += "sh ./scripts/migrate_tools/%04d_tools.sh install_dependencies\n" % latest_tool_migration_script_number
+ msg += "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n"
+ msg += "Tool dependencies can be installed after the repositories have been installed, but installing them now is better.\n\n"
msg += "After the installation process finishes, you can start your Galaxy server. As part of this installation process,\n"
msg += "entries for each of the following tool config files will be added to the file named ./migrated_tool_conf.xml, so these\n"
msg += "tools will continue to be loaded into your tool panel. Because of this, existing entries for these files should be\n"
msg += "removed from your file%s named %s, but only after the installation process finishes.\n\n" % ( plural, tool_panel_config_file_names )
- for i, missing_tool_config in enumerate( missing_tool_configs ):
+ for missing_tool_config, tool_dependencies in missing_tool_configs_dict.items():
msg += "%s\n" % missing_tool_config
- # Should we do the following?
- #if i > 10:
- # msg += "\n...and %d more tools...\n" % ( len( missing_tool_configs ) - ( i + 1 ) )
- # break
msg += "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
raise Exception( msg )
else:
diff -r 9ae91fb49fdd5762dc9a0374dfc471733da47143 -r 018179ad4c9bcec30baea5aee0918f45d254deb2 lib/galaxy/tool_shed/migrate/common.py
--- a/lib/galaxy/tool_shed/migrate/common.py
+++ b/lib/galaxy/tool_shed/migrate/common.py
@@ -1,4 +1,4 @@
-import sys, os, ConfigParser
+import sys, os, ConfigParser, urllib2
import galaxy.config
import galaxy.datatypes.registry
from galaxy import util, tools
@@ -7,39 +7,63 @@
from galaxy.objectstore import build_object_store_from_config
import galaxy.tool_shed.tool_shed_registry
from galaxy.tool_shed import install_manager
-from galaxy.tool_shed.migrate.common import *
+from galaxy.tool_shed.encoding_util import *
+from galaxy.util.odict import odict
-def check_for_missing_tools( tool_panel_configs, latest_tool_migration_script_number ):
+REPOSITORY_OWNER = 'devteam'
+
+def check_for_missing_tools( app, tool_panel_configs, latest_tool_migration_script_number ):
# Get the 000x_tools.xml file associated with the current migrate_tools version number.
tools_xml_file_path = os.path.abspath( os.path.join( 'scripts', 'migrate_tools', '%04d_tools.xml' % latest_tool_migration_script_number ) )
# Parse the XML and load the file attributes for later checking against the proprietary tool_panel_config.
- migrated_tool_configs = []
+ migrated_tool_configs_dict = odict()
tree = util.parse_xml( tools_xml_file_path )
root = tree.getroot()
+ tool_shed = root.get( 'name' )
+ tool_shed_url = get_tool_shed_url_from_tools_xml_file_path( app, tool_shed )
for elem in root:
if elem.tag == 'repository':
+ tool_dependencies = []
+ tool_dependencies_dict = {}
+ repository_name = elem.get( 'name' )
+ changeset_revision = elem.get( 'changeset_revision' )
+ url = '%s/repository/get_tool_dependencies?name=%s&owner=%s&changeset_revision=%s&webapp=install_manager&no_reset=true' % \
+ ( tool_shed_url, repository_name, REPOSITORY_OWNER, changeset_revision )
+ response = urllib2.urlopen( url )
+ text = response.read()
+ response.close()
+ if text:
+ tool_dependencies_dict = tool_shed_decode( text )
+ for dependency_key, requirements_dict in tool_dependencies_dict.items():
+ tool_dependency_name = requirements_dict[ 'name' ]
+ tool_dependency_version = requirements_dict[ 'version' ]
+ tool_dependency_type = requirements_dict[ 'type' ]
+ tool_dependency_readme = requirements_dict.get( 'readme', '' )
+ tool_dependencies.append( ( tool_dependency_name, tool_dependency_version, tool_dependency_type, tool_dependency_readme ) )
for tool_elem in elem.findall( 'tool' ):
- migrated_tool_configs.append( tool_elem.get( 'file' ) )
+ migrated_tool_configs_dict[ tool_elem.get( 'file' ) ] = tool_dependencies
# Parse the proprietary tool_panel_configs (the default is tool_conf.xml) and generate the list of missing tool config file names.
- missing_tool_configs = []
+ missing_tool_configs_dict = odict()
for tool_panel_config in tool_panel_configs:
tree = util.parse_xml( tool_panel_config )
root = tree.getroot()
for elem in root:
+ missing_tool_dependencies = []
if elem.tag == 'tool':
- missing_tool_configs = check_tool_tag_set( elem, migrated_tool_configs, missing_tool_configs )
+ missing_tool_configs_dict = check_tool_tag_set( elem, migrated_tool_configs_dict, missing_tool_configs_dict )
elif elem.tag == 'section':
for section_elem in elem:
if section_elem.tag == 'tool':
- missing_tool_configs = check_tool_tag_set( section_elem, migrated_tool_configs, missing_tool_configs )
- return missing_tool_configs
-def check_tool_tag_set( elem, migrated_tool_configs, missing_tool_configs ):
+ missing_tool_configs_dict = check_tool_tag_set( section_elem, migrated_tool_configs_dict, missing_tool_configs_dict )
+ return missing_tool_configs_dict
+def check_tool_tag_set( elem, migrated_tool_configs_dict, missing_tool_configs_dict ):
file_path = elem.get( 'file', None )
if file_path:
path, name = os.path.split( file_path )
- if name in migrated_tool_configs:
- missing_tool_configs.append( name )
- return missing_tool_configs
+ if name in migrated_tool_configs_dict:
+ tool_dependencies = migrated_tool_configs_dict[ name ]
+ missing_tool_configs_dict[ name ] = tool_dependencies
+ return missing_tool_configs_dict
def get_non_shed_tool_panel_configs( app ):
# Get the non-shed related tool panel configs - there can be more than one, and the default is tool_conf.xml.
config_filenames = []
@@ -52,9 +76,18 @@
if tool_path is None:
config_filenames.append( config_filename )
return config_filenames
+def get_tool_shed_url_from_tools_xml_file_path( app, tool_shed ):
+ for shed_name, shed_url in app.tool_shed_registry.tool_sheds.items():
+ if shed_url.find( tool_shed ) >= 0:
+ if shed_url.endswith( '/' ):
+ shed_url = shed_url.rstrip( '/' )
+ return shed_url
+ return None
+
class MigrateToolsApplication( object ):
"""Encapsulates the state of a basic Galaxy Universe application in order to initiate the Install Manager"""
def __init__( self, tools_migration_config ):
+ install_dependencies = 'install_dependencies' in sys.argv
galaxy_config_file = 'universe_wsgi.ini'
if '-c' in sys.argv:
pos = sys.argv.index( '-c' )
@@ -69,7 +102,7 @@
for key, value in config_parser.items( "app:main" ):
galaxy_config_dict[ key ] = value
self.config = galaxy.config.Configuration( **galaxy_config_dict )
- if self.config.database_connection is None:
+ if not self.config.database_connection:
self.config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database
self.config.update_integrated_tool_panel = True
self.object_store = build_object_store_from_config( self.config )
@@ -106,7 +139,8 @@
'scripts',
'migrate_tools',
tools_migration_config ),
- migrated_tools_config=self.config.migrated_tools_config )
+ migrated_tools_config=self.config.migrated_tools_config,
+ install_dependencies=install_dependencies )
@property
def sa_session( self ):
return self.model.context.current
diff -r 9ae91fb49fdd5762dc9a0374dfc471733da47143 -r 018179ad4c9bcec30baea5aee0918f45d254deb2 lib/galaxy/tool_shed/migrate/versions/0002_tools.py
--- a/lib/galaxy/tool_shed/migrate/versions/0002_tools.py
+++ b/lib/galaxy/tool_shed/migrate/versions/0002_tools.py
@@ -3,10 +3,6 @@
datatypes_conf.xml.sample. You should remove the Emboss datatypes from your version of datatypes_conf.xml. The
repositories named emboss_5 and emboss_datatypes from the main Galaxy tool shed at http://toolshed.g2.bx.psu.edu
will be installed into your local Galaxy instance at the location discussed above by running the following command.
-
-vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
-sh ./scripts/migrate_tools/0002_tools.sh
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import sys
diff -r 9ae91fb49fdd5762dc9a0374dfc471733da47143 -r 018179ad4c9bcec30baea5aee0918f45d254deb2 lib/galaxy/tools/data/__init__.py
--- a/lib/galaxy/tools/data/__init__.py
+++ b/lib/galaxy/tools/data/__init__.py
@@ -41,7 +41,7 @@
self.data_tables[ table.name ] = table
log.debug( "Loaded tool data table '%s'", table.name )
return table_elems
- def add_new_entries_from_config_file( self, config_filename, tool_data_table_config_path ):
+ def add_new_entries_from_config_file( self, config_filename, tool_data_table_config_path, persist=False ):
"""
This method is called when a tool shed repository that includes a tool_data_table_conf.xml.sample file is being
installed into a local galaxy instance. We have 2 cases to handle, files whose root tag is <tables>, for example:
@@ -78,7 +78,7 @@
if table.name not in self.data_tables:
self.data_tables[ table.name ] = table
log.debug( "Added new tool data table '%s'", table.name )
- if self.data_table_elem_names != original_data_table_elem_names:
+ if persist and self.data_table_elem_names != original_data_table_elem_names:
# Persist Galaxy's version of the changed tool_data_table_conf.xml file.
self.to_xml_file( tool_data_table_config_path )
return table_elems
diff -r 9ae91fb49fdd5762dc9a0374dfc471733da47143 -r 018179ad4c9bcec30baea5aee0918f45d254deb2 lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -993,8 +993,8 @@
.first()
def get_url_from_repository_tool_shed( app, repository ):
"""
- The stored value of repository.tool_shed is something like: toolshed.g2.bx.psu.edu
- We need the URL to this tool shed, which is something like: http://toolshed.g2.bx.psu.edu/
+ The stored value of repository.tool_shed is something like: toolshed.g2.bx.psu.edu. We need the URL to this tool shed, which is
+ something like: http://toolshed.g2.bx.psu.edu/.
"""
for shed_name, shed_url in app.tool_shed_registry.tool_sheds.items():
if shed_url.find( repository.tool_shed ) >= 0:
@@ -1019,7 +1019,7 @@
sample_tool_data_table_conf = get_config_from_repository( app, 'tool_data_table_conf.xml.sample', repository, changeset_revision, dir )
# Add entries to the ToolDataTableManager's in-memory data_tables dictionary as well as the list of data_table_elems and the list of
# data_table_elem_names.
- error, correction_msg = handle_sample_tool_data_table_conf_file( app, sample_tool_data_table_conf )
+ error, correction_msg = handle_sample_tool_data_table_conf_file( app, sample_tool_data_table_conf, persist=True )
if error:
# TODO: Do more here than logging an exception.
log.debug( correction_msg )
@@ -1055,15 +1055,15 @@
repository_tool = app.toolbox.load_tool( os.path.join( tool_path, tup_path ), guid=guid )
repository_tools_tups[ index ] = ( tup_path, guid, repository_tool )
return repository_tools_tups, sample_files_copied
-def handle_sample_tool_data_table_conf_file( app, filename ):
+def handle_sample_tool_data_table_conf_file( app, filename, persist=False ):
"""
- Parse the incoming filename and add new entries to the in-memory app.tool_data_tables dictionary as well as appending them to
- Galaxy's tool_data_table_conf.xml file on disk.
+    Parse the incoming filename and add new entries to the in-memory app.tool_data_tables dictionary. If persist is True (which should only
+    occur if the call is from the Galaxy side, not the tool shed), the new entries will be appended to Galaxy's tool_data_table_conf.xml file on disk.
"""
error = False
message = ''
try:
- new_table_elems = app.tool_data_tables.add_new_entries_from_config_file( filename, app.config.tool_data_table_config_path )
+ new_table_elems = app.tool_data_tables.add_new_entries_from_config_file( filename, app.config.tool_data_table_config_path, persist=persist )
except Exception, e:
message = str( e )
error = True
diff -r 9ae91fb49fdd5762dc9a0374dfc471733da47143 -r 018179ad4c9bcec30baea5aee0918f45d254deb2 lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -11,6 +11,7 @@
from galaxy.util.json import from_json_string, to_json_string
from galaxy.model.orm import *
from galaxy.util.shed_util import get_changectx_for_changeset, get_configured_ui, make_tmp_directory, NOT_TOOL_CONFIGS, strip_path
+from galaxy.tool_shed.encoding_util import *
from common import *
from galaxy import eggs
@@ -995,6 +996,30 @@
url += '&latest_ctx_rev=%s' % str( latest_ctx.rev() )
return trans.response.send_redirect( url )
@web.expose
+ def get_tool_dependencies( self, trans, **kwd ):
+        # Handle a request from a local Galaxy instance. If the request originated with the Galaxy instance's InstallManager, the value of 'webapp'
+ # will be 'install_manager'.
+ params = util.Params( kwd )
+ message = util.restore_text( params.get( 'message', '' ) )
+ status = params.get( 'status', 'done' )
+ # If the request originated with the UpdateManager, it will not include a galaxy_url.
+ galaxy_url = kwd.get( 'galaxy_url', '' )
+ name = params.get( 'name', None )
+ owner = params.get( 'owner', None )
+ changeset_revision = params.get( 'changeset_revision', None )
+ webapp = params.get( 'webapp', 'community' )
+ repository = get_repository_by_name_and_owner( trans, name, owner )
+ for downloadable_revision in repository.downloadable_revisions:
+ if downloadable_revision.changeset_revision == changeset_revision:
+ break
+ metadata = downloadable_revision.metadata
+ tool_dependencies = metadata.get( 'tool_dependencies', '' )
+ if webapp == 'install_manager':
+ if tool_dependencies:
+ return tool_shed_encode( tool_dependencies )
+ return ''
+ # TODO: future handler where request comes from some Galaxy admin feature.
+ @web.expose
def browse_repositories( self, trans, **kwd ):
# We add params to the keyword dict in this method in order to rename the param
# with an "f-" prefix, simulating filtering by clicking a search link. We have
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
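For reference, the new opt-in flag handling in MigrateToolsApplication.__init__ condenses to the following sketch (the config parsing that follows it in the real code is omitted):

import sys

# 'install_dependencies' anywhere on the command line opts in to installing
# tool dependencies; '-c <path>' still selects an alternate config file.
install_dependencies = 'install_dependencies' in sys.argv
galaxy_config_file = 'universe_wsgi.ini'
if '-c' in sys.argv:
    pos = sys.argv.index( '-c' )
    galaxy_config_file = sys.argv[ pos + 1 ]
print install_dependencies, galaxy_config_file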